From e99966bfc4aba83b0c0009a3dff608da1255d783 Mon Sep 17 00:00:00 2001
From: Vladislav Sovrasov
Date: Wed, 25 Oct 2023 17:35:01 +0200
Subject: [PATCH 01/39] POC: xpu support (#2553)

* Upgrade torch==2.0.1 torchvision==0.15.2 mmcv-full=1.7.1

Signed-off-by: Songki Choi

* Rollback to mmcv-full==1.7.0 due to mmaction conflict

Signed-off-by: Songki Choi

* Fix unit test

Signed-off-by: Songki Choi

* Update cls to run on xpu
* Enable XPU for detection
* Disable FP16 for detection models
* Add xpu data parallel to core utils
* Del copys of XPUDP
* Update sseg XPU support
* Update XPUDP
* added fix for OD and IS. Not debugged

* Upgrade torch==2.0.1 torchvision==0.15.2 mmcv-full=1.7.1

Signed-off-by: Songki Choi

* Rollback to mmcv-full==1.7.0 due to mmaction conflict

Signed-off-by: Songki Choi

* Fix unit test

Signed-off-by: Songki Choi

* Update cls to run on xpu
* Enable XPU for detection
* Disable FP16 for detection models
* Add xpu data parallel to core utils
* Del copys of XPUDP
* Update sseg XPU support
* Update XPUDP
* Fix import of assigner
* Add mmdet ops patching to inference
* Add xpu seed
* Fix linters
* Cleanup in cls train runner
* Cleanup in segm train function
* Fix linters
* Fix linters
* Cleanup
* Update NMS patch
* Use GPU id to move model on target XPU
* Disable fp16 for swinT
* Add XPU device info to OTX env report
* Fix linters
* Update new inits
* Update autocast
* Update fallback to CPU
* Update target device in XPU data parallel

* Revert "Update autocast"

This reverts commit 0ff53ebccd2007fc38e387919e306d476a695635.

* Revert "Patch to enable FP16 on XPU"

This reverts commit fd91f6744f238546c92ae46a7d193369aebc17b4.

* FP16 support for XPU
* Enable FP 16
* Patch to enable FP16 on XPU
* Fix linters

---------

Signed-off-by: Songki Choi
Co-authored-by: Songki Choi
Co-authored-by: kprokofi
---
 .../adapters/mmcls/apis/__init__.py           |   8 +
 .../adapters/mmcls/apis/train.py              | 151 +++++++++++
 .../classification/adapters/mmcls/task.py     |   2 +-
 .../common/adapters/mmcv/configurer.py        |  20 +-
 .../common/adapters/mmcv/utils/__init__.py    |   3 +-
 .../utils/_builder_build_data_parallel.py     |  55 +++-
 src/otx/algorithms/common/utils/__init__.py   |   2 +
 src/otx/algorithms/common/utils/utils.py      |   8 +
 .../detection/adapters/mmdet/apis/__init__.py |   8 +
 .../detection/adapters/mmdet/apis/train.py    | 240 ++++++++++++++++++
 .../mmdet/models/assigners/__init__.py        |   3 +-
 .../models/assigners/xpu_atss_assigner.py     | 212 ++++++++++++++++
 .../detection/adapters/mmdet/task.py          |  13 +-
 .../detection/mobilenetv2_atss/model.py       |   2 +-
 .../mobilenetv2_atss/semisl/model.py          |   2 +-
 .../detection/resnext101_atss/model.py        |   2 +-
 .../detection/resnext101_atss/semisl/model.py |   2 +-
 .../adapters/mmseg/apis/__init__.py           |   8 +
 .../segmentation/adapters/mmseg/apis/train.py | 155 +++++++++++
 .../segmentation/adapters/mmseg/task.py       |   2 +-
 src/otx/cli/utils/report.py                   |   9 +
 21 files changed, 892 insertions(+), 15 deletions(-)
 create mode 100644 src/otx/algorithms/classification/adapters/mmcls/apis/__init__.py
 create mode 100644 src/otx/algorithms/classification/adapters/mmcls/apis/train.py
 create mode 100644 src/otx/algorithms/detection/adapters/mmdet/apis/__init__.py
 create mode 100644 src/otx/algorithms/detection/adapters/mmdet/apis/train.py
 create mode 100644 src/otx/algorithms/detection/adapters/mmdet/models/assigners/xpu_atss_assigner.py
 create mode 100644 src/otx/algorithms/segmentation/adapters/mmseg/apis/__init__.py
 create mode 100644 src/otx/algorithms/segmentation/adapters/mmseg/apis/train.py
diff --git
a/src/otx/algorithms/classification/adapters/mmcls/apis/__init__.py b/src/otx/algorithms/classification/adapters/mmcls/apis/__init__.py new file mode 100644 index 00000000000..d294852f865 --- /dev/null +++ b/src/otx/algorithms/classification/adapters/mmcls/apis/__init__.py @@ -0,0 +1,8 @@ +"""Adapters of classification - mmcls.""" + +# Copyright (C) 2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +from .train import train_model + +__all__ = ["train_model"] diff --git a/src/otx/algorithms/classification/adapters/mmcls/apis/train.py b/src/otx/algorithms/classification/adapters/mmcls/apis/train.py new file mode 100644 index 00000000000..3134c90c0d1 --- /dev/null +++ b/src/otx/algorithms/classification/adapters/mmcls/apis/train.py @@ -0,0 +1,151 @@ +"""Train function for classification task.""" + +# Copyright (C) 2022 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +# Copyright (c) OpenMMLab. All rights reserved. +import warnings + +import torch +from mmcls.core import DistEvalHook, DistOptimizerHook, EvalHook +from mmcls.datasets import build_dataloader, build_dataset +from mmcls.utils import get_root_logger, wrap_distributed_model, wrap_non_distributed_model +from mmcv.runner import DistSamplerSeedHook, build_optimizer, build_runner + +from otx.algorithms.common.adapters.mmcv.utils import XPUDataParallel + + +def train_model(model, dataset, cfg, distributed=False, validate=False, timestamp=None, device=None, meta=None): + """Train a model. + + This method will build dataloaders, wrap the model and build a runner + according to the provided config. + + Args: + model (:obj:`torch.nn.Module`): The model to be run. + dataset (:obj:`mmcls.datasets.BaseDataset` | List[BaseDataset]): + The dataset used to train the model. It can be a single dataset, + or a list of dataset with the same length as workflow. + cfg (:obj:`mmcv.utils.Config`): The configs of the experiment. + distributed (bool): Whether to train the model in a distributed + environment. Defaults to False. + validate (bool): Whether to do validation with + :obj:`mmcv.runner.EvalHook`. Defaults to False. + timestamp (str, optional): The timestamp string to auto generate the + name of log files. Defaults to None. + device (str, optional): TODO + meta (dict, optional): A dict records some import information such as + environment info and seed, which will be logged in logger hook. + Defaults to None. 
+ """ + logger = get_root_logger() + + # prepare data loaders + dataset = dataset if isinstance(dataset, (list, tuple)) else [dataset] + + # The default loader config + loader_cfg = dict( + # cfg.gpus will be ignored if distributed + num_gpus=cfg.ipu_replicas if device == "ipu" else len(cfg.gpu_ids), + dist=distributed, + round_up=True, + seed=cfg.get("seed"), + sampler_cfg=cfg.get("sampler", None), + ) + # The overall dataloader settings + loader_cfg.update( + { + k: v + for k, v in cfg.data.items() + if k not in ["train", "val", "test", "train_dataloader", "val_dataloader", "test_dataloader"] + } + ) + # The specific dataloader settings + train_loader_cfg = {**loader_cfg, **cfg.data.get("train_dataloader", {})} + + data_loaders = [build_dataloader(ds, **train_loader_cfg) for ds in dataset] + + fp16_cfg = cfg.get("fp16_", None) + # put model on gpus + if distributed: + find_unused_parameters = cfg.get("find_unused_parameters", False) + # Sets the `find_unused_parameters` parameter in + # torch.nn.parallel.DistributedDataParallel + model = wrap_distributed_model( + model, cfg.device, broadcast_buffers=False, find_unused_parameters=find_unused_parameters + ) + elif cfg.device == "xpu": + assert len(cfg.gpu_ids) == 1 + model.to(f"xpu:{cfg.gpu_ids[0]}") + model = XPUDataParallel(model, dim=0, device_ids=cfg.gpu_ids, enable_autocast=bool(fp16_cfg)) + else: + model = wrap_non_distributed_model(model, cfg.device, device_ids=cfg.gpu_ids) + + # build runner + optimizer = build_optimizer(model, cfg.optimizer) + if cfg.device == "xpu": + if fp16_cfg is not None: + dtype = torch.bfloat16 + else: + dtype = torch.float32 + model.train() + model, optimizer = torch.xpu.optimize(model, optimizer=optimizer, dtype=dtype) + + if cfg.get("runner") is None: + cfg.runner = {"type": "EpochBasedRunner", "max_epochs": cfg.total_epochs} + warnings.warn( + "config is now expected to have a `runner` section, " "please set `runner` in your config.", UserWarning + ) + + runner = build_runner( + cfg.runner, + default_args=dict( + model=model, batch_processor=None, optimizer=optimizer, work_dir=cfg.work_dir, logger=logger, meta=meta + ), + ) + + # an ugly walkaround to make the .log and .log.json filenames the same + runner.timestamp = timestamp + + if fp16_cfg is None and distributed and "type" not in cfg.optimizer_config: + optimizer_config = DistOptimizerHook(**cfg.optimizer_config) + else: + optimizer_config = cfg.optimizer_config + + # register hooks + runner.register_training_hooks( + cfg.lr_config, + optimizer_config, + cfg.checkpoint_config, + cfg.log_config, + cfg.get("momentum_config", None), + custom_hooks_config=cfg.get("custom_hooks", None), + ) + if distributed and cfg.runner["type"] == "EpochBasedRunner": + runner.register_hook(DistSamplerSeedHook()) + + # register eval hooks + if validate: + val_dataset = build_dataset(cfg.data.val, dict(test_mode=True)) + # The specific dataloader settings + val_loader_cfg = { + **loader_cfg, + "shuffle": False, # Not shuffle by default + "sampler_cfg": None, # Not use sampler by default + "drop_last": False, # Not drop last by default + **cfg.data.get("val_dataloader", {}), + } + val_dataloader = build_dataloader(val_dataset, **val_loader_cfg) + eval_cfg = cfg.get("evaluation", {}) + eval_cfg["by_epoch"] = cfg.runner["type"] != "IterBasedRunner" + eval_hook = DistEvalHook if distributed else EvalHook + # `EvalHook` needs to be executed after `IterTimerHook`. + # Otherwise, it will cause a bug if use `IterBasedRunner`. 
+ # Refers to https://github.com/open-mmlab/mmcv/issues/1261 + runner.register_hook(eval_hook(val_dataloader, **eval_cfg), priority="LOW") + + if cfg.resume_from: + runner.resume(cfg.resume_from) + elif cfg.load_from: + runner.load_checkpoint(cfg.load_from) + runner.run(data_loaders, cfg.workflow) diff --git a/src/otx/algorithms/classification/adapters/mmcls/task.py b/src/otx/algorithms/classification/adapters/mmcls/task.py index 425ddc6153d..be1eb6bc940 100644 --- a/src/otx/algorithms/classification/adapters/mmcls/task.py +++ b/src/otx/algorithms/classification/adapters/mmcls/task.py @@ -12,7 +12,6 @@ from typing import Any, Dict, Optional, Type, Union import torch -from mmcls.apis import train_model from mmcls.datasets import build_dataloader, build_dataset from mmcls.models.backbones.vision_transformer import VisionTransformer from mmcls.utils import collect_env @@ -20,6 +19,7 @@ from mmcv.utils import Config, ConfigDict from otx.algorithms import TRANSFORMER_BACKBONES +from otx.algorithms.classification.adapters.mmcls.apis.train import train_model from otx.algorithms.classification.adapters.mmcls.utils.exporter import ( ClassificationExporter, ) diff --git a/src/otx/algorithms/common/adapters/mmcv/configurer.py b/src/otx/algorithms/common/adapters/mmcv/configurer.py index 68e2ea6c35c..83aa919c98e 100644 --- a/src/otx/algorithms/common/adapters/mmcv/configurer.py +++ b/src/otx/algorithms/common/adapters/mmcv/configurer.py @@ -27,7 +27,7 @@ update_or_add_custom_hook, ) from otx.algorithms.common.tasks.base_task import OnHookInitialized -from otx.algorithms.common.utils import UncopiableDefaultDict, append_dist_rank_suffix +from otx.algorithms.common.utils import UncopiableDefaultDict, append_dist_rank_suffix, is_xpu_available from otx.algorithms.common.utils.data import compute_robust_dataset_statistics from otx.algorithms.common.utils.logger import get_logger from otx.api.usecases.reporting.time_monitor_callback import TimeMonitorCallback @@ -171,10 +171,18 @@ def configure_device(self, cfg): elif "gpu_ids" not in cfg: cfg.gpu_ids = range(1) - # consider "cuda" and "cpu" device only + # consider "cuda", "xpu", and "cpu" devices only if not torch.cuda.is_available(): - cfg.device = "cpu" - cfg.gpu_ids = range(-1, 0) + try: + import intel_extension_for_pytorch as ipex # noqa: F401 + + if is_xpu_available(): + cfg.device = "xpu" + else: + cfg.device = "cpu" + except ModuleNotFoundError: + cfg.device = "cpu" + cfg.gpu_ids = range(-1, 0) else: cfg.device = "cuda" @@ -241,9 +249,11 @@ def configure_fp16(cfg: Config): """Configure Fp16OptimizerHook and Fp16SAMOptimizerHook.""" fp16_config = cfg.pop("fp16", None) + # workaround to forward FP16 config to mmapi.train funcitons + cfg.fp16_ = fp16_config if fp16_config is not None: - if torch.cuda.is_available(): + if torch.cuda.is_available() or is_xpu_available(): optim_type = cfg.optimizer_config.get("type", "OptimizerHook") opts: Dict[str, Any] = dict( distributed=getattr(cfg, "distributed", False), diff --git a/src/otx/algorithms/common/adapters/mmcv/utils/__init__.py b/src/otx/algorithms/common/adapters/mmcv/utils/__init__.py index d6c1ce5a3db..0fac8a67e31 100644 --- a/src/otx/algorithms/common/adapters/mmcv/utils/__init__.py +++ b/src/otx/algorithms/common/adapters/mmcv/utils/__init__.py @@ -3,7 +3,7 @@ # Copyright (C) 2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 -from ._builder_build_data_parallel import build_data_parallel +from ._builder_build_data_parallel import XPUDataParallel, build_data_parallel from 
._config_utils_get_configs_by_keys import get_configs_by_keys from ._config_utils_get_configs_by_pairs import get_configs_by_pairs from .automatic_bs import adapt_batch_size @@ -47,5 +47,6 @@ "OTXConfig", "adapt_batch_size", "InputSizeManager", + "XPUDataParallel", "patch_from_hyperparams", ] diff --git a/src/otx/algorithms/common/adapters/mmcv/utils/_builder_build_data_parallel.py b/src/otx/algorithms/common/adapters/mmcv/utils/_builder_build_data_parallel.py index 7cb296b5e4c..1b084f3a815 100644 --- a/src/otx/algorithms/common/adapters/mmcv/utils/_builder_build_data_parallel.py +++ b/src/otx/algorithms/common/adapters/mmcv/utils/_builder_build_data_parallel.py @@ -12,6 +12,8 @@ from mmcv import Config from mmcv.parallel import MMDataParallel, MMDistributedDataParallel +from otx.algorithms.common.utils import is_xpu_available + @overload def build_data_parallel( @@ -58,7 +60,10 @@ def build_data_parallel( :param distributed: Enable distributed training mode. :return: """ - if torch.cuda.is_available() and config.get("gpu_ids", []): + if is_xpu_available() and config.get("gpu_ids", []): + model = model.xpu() + model = XPUDataParallel(model, device_ids=config.gpu_ids) + elif torch.cuda.is_available() and config.get("gpu_ids", []): if distributed: model = model.cuda() # put model on gpus @@ -81,3 +86,51 @@ def build_data_parallel( model = MMDataParallel(model, device_ids=[]) torch.cuda.is_available = bak return model + + +class XPUDataParallel(MMDataParallel): + def __init__(self, *args, enable_autocast: bool = False, **kwargs): + super().__init__(*args, **kwargs) + self.enable_autocast = enable_autocast + + def scatter(self, inputs, kwargs, device_ids): + inputs, kwargs = super().scatter(inputs, kwargs, [-1]) + target_device = torch.device(f"xpu:{device_ids[0]}") + + for x in inputs: + if isinstance(x, tuple): + for val in x: + if isinstance(val, dict): + for k in val: + if isinstance(val[k], torch.Tensor): + val[k] = val[k].to(target_device) + elif isinstance(val[k], list): + for i, item in enumerate(val[k]): + if isinstance(item, torch.Tensor): + val[k][i] = item.to(target_device) + + for x in kwargs: + if isinstance(x, dict): + for k in x: + if isinstance(x[k], torch.Tensor): + x[k] = x[k].to(target_device) + elif isinstance(x[k], list): + for i, item in enumerate(x[k]): + if isinstance(item, torch.Tensor): + x[k][i] = item.to(target_device) + + return inputs, kwargs + + def forward(self, *inputs, **kwargs): + # we have to apply autocast here, because the original mmcv's fp16 decorator is hard to override. 
+        # Perhaps, one global autocast is not as accurate as the original mmcv approach
+        with torch.autocast(device_type="xpu", dtype=torch.bfloat16, enabled=self.enable_autocast):
+            return super().forward(*inputs, **kwargs)
+
+    def train_step(self, *inputs, **kwargs):
+        with torch.autocast(device_type="xpu", dtype=torch.bfloat16, enabled=self.enable_autocast):
+            return super().train_step(*inputs, **kwargs)
+
+    def val_step(self, *inputs, **kwargs):
+        with torch.autocast(device_type="xpu", dtype=torch.bfloat16, enabled=self.enable_autocast):
+            return super().val_step(*inputs, **kwargs)
diff --git a/src/otx/algorithms/common/utils/__init__.py b/src/otx/algorithms/common/utils/__init__.py
index 21b772d026a..cd63629bb28 100644
--- a/src/otx/algorithms/common/utils/__init__.py
+++ b/src/otx/algorithms/common/utils/__init__.py
@@ -27,6 +27,7 @@
     get_arg_spec,
     get_default_async_reqs_num,
     get_task_class,
+    is_xpu_available,
     load_template,
     read_py_config,
     set_random_seed,
@@ -49,4 +50,5 @@
     "OTXOpenVinoDataLoader",
     "read_py_config",
     "get_default_async_reqs_num",
+    "is_xpu_available",
 ]
diff --git a/src/otx/algorithms/common/utils/utils.py b/src/otx/algorithms/common/utils/utils.py
index 3eede66c23c..88495a8397b 100644
--- a/src/otx/algorithms/common/utils/utils.py
+++ b/src/otx/algorithms/common/utils/utils.py
@@ -14,6 +14,7 @@
 import numpy as np
 import onnx
+import torch
 import yaml
 from addict import Dict as adict
@@ -104,6 +105,8 @@ def set_random_seed(seed, logger=None, deterministic=False):
     np.random.seed(seed)
     torch.manual_seed(seed)
     torch.cuda.manual_seed_all(seed)
+    if is_xpu_available():
+        torch.xpu.manual_seed_all(seed)
     os.environ["PYTHONHASHSEED"] = str(seed)
     if logger:
         logger.info(f"Training seed was set to {seed} w/ deterministic={deterministic}.")
@@ -157,3 +160,8 @@ def embed_onnx_model_data(onnx_file: str, extra_model_data: Dict[Tuple[str, str]
             meta.value = str(extra_model_data[item])
 
     onnx.save(model, onnx_file)
+
+
+def is_xpu_available():
+    """Checks if XPU device is available."""
+    return hasattr(torch, "xpu") and torch.xpu.is_available()
diff --git a/src/otx/algorithms/detection/adapters/mmdet/apis/__init__.py b/src/otx/algorithms/detection/adapters/mmdet/apis/__init__.py
new file mode 100644
index 00000000000..dd3628c1d0b
--- /dev/null
+++ b/src/otx/algorithms/detection/adapters/mmdet/apis/__init__.py
@@ -0,0 +1,8 @@
+"""Adapters of detection - mmdet."""
+
+# Copyright (C) 2023 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+from .train import train_detector
+
+__all__ = ["train_detector"]
diff --git a/src/otx/algorithms/detection/adapters/mmdet/apis/train.py b/src/otx/algorithms/detection/adapters/mmdet/apis/train.py
new file mode 100644
index 00000000000..bde2e65cdc6
--- /dev/null
+++ b/src/otx/algorithms/detection/adapters/mmdet/apis/train.py
@@ -0,0 +1,240 @@
+"""Train function for detection task."""
+
+# Copyright (C) 2022 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+# Copyright (c) OpenMMLab. All rights reserved.
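Before the detection counterpart of this training flow (listed next), a brief aside: the XPU path added above boils down to moving the model, preparing it with `torch.xpu.optimize`, and running each step under one bf16 autocast region. A minimal, self-contained sketch of that pattern follows; the toy model and tensor shapes are illustrative only, and it assumes a PyTorch build with Intel Extension for PyTorch (IPEX) that provides `torch.xpu`:

```python
import torch


def is_xpu_available():
    """Same check the patch adds to otx.algorithms.common.utils."""
    return hasattr(torch, "xpu") and torch.xpu.is_available()


if is_xpu_available():
    device = "xpu:0"
    model = torch.nn.Linear(8, 2).to(device)  # toy stand-in for a real model
    optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
    model.train()
    # prepare model/optimizer for XPU; bf16 mirrors the fp16_cfg branch above
    model, optimizer = torch.xpu.optimize(model, optimizer=optimizer, dtype=torch.bfloat16)
    x = torch.randn(4, 8, device=device)
    # one global autocast region around the step, as XPUDataParallel.train_step does
    with torch.autocast(device_type="xpu", dtype=torch.bfloat16, enabled=True):
        loss = model(x).sum()  # placeholder loss
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
```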
+import os + +import torch +from mmcv.ops.nms import NMSop +from mmcv.ops.roi_align import RoIAlign +from mmcv.runner import ( + DistSamplerSeedHook, + EpochBasedRunner, + OptimizerHook, + build_runner, + get_dist_info, +) +from mmcv.utils import ext_loader +from mmdet.core import DistEvalHook, EvalHook, build_optimizer +from mmdet.datasets import build_dataloader, build_dataset, replace_ImageToTensor +from mmdet.utils import build_ddp, compat_cfg, find_latest_checkpoint, get_root_logger +from mmdet.utils.util_distribution import build_dp, dp_factory +from torchvision.ops import nms as tv_nms +from torchvision.ops import roi_align as tv_roi_align + +from otx.algorithms.common.adapters.mmcv.utils import XPUDataParallel + +ext_module = ext_loader.load_ext("_ext", ["nms", "softnms", "nms_match", "nms_rotated", "nms_quadri"]) +dp_factory["xpu"] = XPUDataParallel + + +def auto_scale_lr(cfg, distributed, logger): + """Automatically scaling LR according to GPU number and sample per GPU. + + Args: + cfg (config): Training config. + distributed (bool): Using distributed or not. + logger (logging.Logger): Logger. + """ + # Get flag from config + if ("auto_scale_lr" not in cfg) or (not cfg.auto_scale_lr.get("enable", False)): + logger.info("Automatic scaling of learning rate (LR)" " has been disabled.") + return + + # Get base batch size from config + base_batch_size = cfg.auto_scale_lr.get("base_batch_size", None) + if base_batch_size is None: + return + + # Get gpu number + if distributed: + _, world_size = get_dist_info() + num_gpus = len(range(world_size)) + else: + num_gpus = len(cfg.gpu_ids) + + # calculate the batch size + samples_per_gpu = cfg.data.train_dataloader.samples_per_gpu + batch_size = num_gpus * samples_per_gpu + logger.info( + f"Training with {num_gpus} GPU(s) with {samples_per_gpu} " + f"samples per GPU. The total batch size is {batch_size}." + ) + + if batch_size != base_batch_size: + # scale LR with + # [linear scaling rule](https://arxiv.org/abs/1706.02677) + scaled_lr = (batch_size / base_batch_size) * cfg.optimizer.lr + logger.info("LR has been automatically scaled " f"from {cfg.optimizer.lr} to {scaled_lr}") + cfg.optimizer.lr = scaled_lr + else: + logger.info( + "The batch size match the " + f"base batch size: {base_batch_size}, " + f"will not scaling the LR ({cfg.optimizer.lr})." 
+ ) + + +def train_detector(model, dataset, cfg, distributed=False, validate=False, timestamp=None, meta=None): + """Trains a detector via mmdet.""" + + cfg = compat_cfg(cfg) + logger = get_root_logger(log_level=cfg.log_level) + + # prepare data loaders + dataset = dataset if isinstance(dataset, (list, tuple)) else [dataset] + + runner_type = "EpochBasedRunner" if "runner" not in cfg else cfg.runner["type"] + + train_dataloader_default_args = dict( + samples_per_gpu=2, + workers_per_gpu=2, + # `num_gpus` will be ignored if distributed + num_gpus=len(cfg.gpu_ids), + dist=distributed, + seed=cfg.seed, + runner_type=runner_type, + persistent_workers=False, + ) + + train_loader_cfg = {**train_dataloader_default_args, **cfg.data.get("train_dataloader", {})} + + data_loaders = [build_dataloader(ds, **train_loader_cfg) for ds in dataset] + + fp16_cfg = cfg.get("fp16_", None) + # put model on gpus + if distributed: + find_unused_parameters = cfg.get("find_unused_parameters", False) + # Sets the `find_unused_parameters` parameter in + # torch.nn.parallel.DistributedDataParallel + model = build_ddp( + model, + cfg.device, + device_ids=[int(os.environ["LOCAL_RANK"])], + broadcast_buffers=False, + find_unused_parameters=find_unused_parameters, + ) + elif cfg.device == "xpu": + model = build_dp(model, cfg.device, device_ids=cfg.gpu_ids, enable_autocast=bool(fp16_cfg)) + model.to(f"xpu:{cfg.gpu_ids[0]}") + else: + model = build_dp(model, cfg.device, device_ids=cfg.gpu_ids) + + # build optimizer + auto_scale_lr(cfg, distributed, logger) + optimizer = build_optimizer(model, cfg.optimizer) + + if cfg.device == "xpu": + # dinamic patch for nms and roi_align + NMSop.forward = monkey_patched_xpu_nms + RoIAlign.forward = monkey_patched_xpu_roi_align + if fp16_cfg is not None: + dtype = torch.bfloat16 + else: + dtype = torch.float32 + model.train() + model, optimizer = torch.xpu.optimize(model, optimizer=optimizer, dtype=dtype) + + runner = build_runner( + cfg.runner, default_args=dict(model=model, optimizer=optimizer, work_dir=cfg.work_dir, logger=logger, meta=meta) + ) + + # an ugly workaround to make .log and .log.json filenames the same + runner.timestamp = timestamp + + if fp16_cfg is None and distributed and "type" not in cfg.optimizer_config: + optimizer_config = OptimizerHook(**cfg.optimizer_config) + else: + optimizer_config = cfg.optimizer_config + + # register hooks + runner.register_training_hooks( + cfg.lr_config, + optimizer_config, + cfg.checkpoint_config, + cfg.log_config, + cfg.get("momentum_config", None), + custom_hooks_config=cfg.get("custom_hooks", None), + ) + + if distributed: + if isinstance(runner, EpochBasedRunner): + runner.register_hook(DistSamplerSeedHook()) + + # register eval hooks + if validate: + val_dataloader_default_args = dict( + samples_per_gpu=1, workers_per_gpu=2, dist=distributed, shuffle=False, persistent_workers=False + ) + + val_dataloader_args = {**val_dataloader_default_args, **cfg.data.get("val_dataloader", {})} + # Support batch_size > 1 in validation + + if val_dataloader_args["samples_per_gpu"] > 1: + # Replace 'ImageToTensor' to 'DefaultFormatBundle' + cfg.data.val.pipeline = replace_ImageToTensor(cfg.data.val.pipeline) + val_dataset = build_dataset(cfg.data.val, dict(test_mode=True)) + + val_dataloader = build_dataloader(val_dataset, **val_dataloader_args) + eval_cfg = cfg.get("evaluation", {}) + eval_cfg["by_epoch"] = cfg.runner["type"] != "IterBasedRunner" + eval_hook = DistEvalHook if distributed else EvalHook + # In this PR 
(https://github.com/open-mmlab/mmcv/pull/1193), the + # priority of IterTimerHook has been modified from 'NORMAL' to 'LOW'. + runner.register_hook(eval_hook(val_dataloader, **eval_cfg), priority="LOW") + + resume_from = None + if cfg.resume_from is None and cfg.get("auto_resume"): + resume_from = find_latest_checkpoint(cfg.work_dir) + if resume_from is not None: + cfg.resume_from = resume_from + + if cfg.resume_from: + runner.resume(cfg.resume_from) + elif cfg.load_from: + runner.load_checkpoint(cfg.load_from) + runner.run(data_loaders, cfg.workflow) + + +def monkey_patched_xpu_nms(ctx, bboxes, scores, iou_threshold, offset, score_threshold, max_num): + """Runs MMCVs NMS with torchvision.nms, or forces NMS from MMCV to run on CPU.""" + is_filtering_by_score = score_threshold > 0 + if is_filtering_by_score: + valid_mask = scores > score_threshold + bboxes, scores = bboxes[valid_mask], scores[valid_mask] + valid_inds = torch.nonzero(valid_mask, as_tuple=False).squeeze(dim=1) + + if offset == 0: + inds = tv_nms(bboxes, scores, float(iou_threshold)) + else: + device = bboxes.device + bboxes = bboxes.to("cpu") + scores = scores.to("cpu") + inds = ext_module.nms(bboxes, scores, iou_threshold=float(iou_threshold), offset=offset) + bboxes = bboxes.to(device) + scores = scores.to(device) + + if max_num > 0: + inds = inds[:max_num] + if is_filtering_by_score: + inds = valid_inds[inds] + return inds + + +def monkey_patched_xpu_roi_align(self, input, rois): + """Replaces MMCVs roi align with the one from torchvision. + + Args: + self: patched instance + input: NCHW images + rois: Bx5 boxes. First column is the index into N. The other 4 columns are xyxy. + """ + + if "aligned" in tv_roi_align.__code__.co_varnames: + return tv_roi_align(input, rois, self.output_size, self.spatial_scale, self.sampling_ratio, self.aligned) + else: + if self.aligned: + rois -= rois.new_tensor([0.0] + [0.5 / self.spatial_scale] * 4) + return tv_roi_align(input, rois, self.output_size, self.spatial_scale, self.sampling_ratio) diff --git a/src/otx/algorithms/detection/adapters/mmdet/models/assigners/__init__.py b/src/otx/algorithms/detection/adapters/mmdet/models/assigners/__init__.py index 71418724251..8384d1c61ab 100644 --- a/src/otx/algorithms/detection/adapters/mmdet/models/assigners/__init__.py +++ b/src/otx/algorithms/detection/adapters/mmdet/models/assigners/__init__.py @@ -4,5 +4,6 @@ # from .custom_max_iou_assigner import CustomMaxIoUAssigner +from .xpu_atss_assigner import XPUATSSAssigner -__all__ = ["CustomMaxIoUAssigner"] +__all__ = ["CustomMaxIoUAssigner", "XPUATSSAssigner"] diff --git a/src/otx/algorithms/detection/adapters/mmdet/models/assigners/xpu_atss_assigner.py b/src/otx/algorithms/detection/adapters/mmdet/models/assigners/xpu_atss_assigner.py new file mode 100644 index 00000000000..4f699828137 --- /dev/null +++ b/src/otx/algorithms/detection/adapters/mmdet/models/assigners/xpu_atss_assigner.py @@ -0,0 +1,212 @@ +"""Custom assigner to workaround a bug in IPEX.""" +# Copyright (C) 2022 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +# Copyright (c) OpenMMLab. All rights reserved. + +import warnings + +import torch +from mmdet.core.bbox import AssignResult +from mmdet.core.bbox.assigners import ATSSAssigner +from mmdet.core.bbox.builder import BBOX_ASSIGNERS + + +@BBOX_ASSIGNERS.register_module() +class XPUATSSAssigner(ATSSAssigner): + """Assign a corresponding gt bbox or background to each bbox. 
+ + Each proposals will be assigned with `0` or a positive integer + indicating the ground truth index. + + - 0: negative sample, no assigned gt + - positive integer: positive sample, index (1-based) of assigned gt + + If ``alpha`` is not None, it means that the dynamic cost + ATSSAssigner is adopted, which is currently only used in the DDOD. + + Args: + topk (float): number of bbox selected in each level + """ + + def assign( + self, + bboxes, + num_level_bboxes, + gt_bboxes, + gt_bboxes_ignore=None, + gt_labels=None, + cls_scores=None, + bbox_preds=None, + ): + """Assign gt to bboxes. + + The assignment is done in following steps + + 1. compute iou between all bbox (bbox of all pyramid levels) and gt + 2. compute center distance between all bbox and gt + 3. on each pyramid level, for each gt, select k bbox whose center + are closest to the gt center, so we total select k*l bbox as + candidates for each gt + 4. get corresponding iou for the these candidates, and compute the + mean and std, set mean + std as the iou threshold + 5. select these candidates whose iou are greater than or equal to + the threshold as positive + 6. limit the positive sample's center in gt + + If ``alpha`` is not None, and ``cls_scores`` and `bbox_preds` + are not None, the overlaps calculation in the first step + will also include dynamic cost, which is currently only used in + the DDOD. + + Args: + bboxes (Tensor): Bounding boxes to be assigned, shape(n, 4). + num_level_bboxes (List): num of bboxes in each level + gt_bboxes (Tensor): Groundtruth boxes, shape (k, 4). + gt_bboxes_ignore (Tensor, optional): Ground truth bboxes that are + labelled as `ignored`, e.g., crowd boxes in COCO. Default None. + gt_labels (Tensor, optional): Label of gt_bboxes, shape (k, ). + cls_scores (list[Tensor]): Classification scores for all scale + levels, each is a 4D-tensor, the channels number is + num_base_priors * num_classes. Default None. + bbox_preds (list[Tensor]): Box energies / deltas for all scale + levels, each is a 4D-tensor, the channels number is + num_base_priors * 4. Default None. + + Returns: + :obj:`AssignResult`: The assign result. + """ + INF = 100000000 + bboxes = bboxes[:, :4] + num_gt, num_bboxes = gt_bboxes.size(0), bboxes.size(0) + + message = ( + "Invalid alpha parameter because cls_scores or " + "bbox_preds are None. If you want to use the " + "cost-based ATSSAssigner, please set cls_scores, " + "bbox_preds and self.alpha at the same time. 
" + ) + + if self.alpha is None: + # ATSSAssigner + overlaps = self.iou_calculator(bboxes, gt_bboxes) + if cls_scores is not None or bbox_preds is not None: + warnings.warn(message) + else: + # Dynamic cost ATSSAssigner in DDOD + assert cls_scores is not None and bbox_preds is not None, message + + # compute cls cost for bbox and GT + cls_cost = torch.sigmoid(cls_scores[:, gt_labels]) + + # compute iou between all bbox and gt + overlaps = self.iou_calculator(bbox_preds, gt_bboxes) + + # make sure that we are in element-wise multiplication + assert cls_cost.shape == overlaps.shape + + # overlaps is actually a cost matrix + overlaps = cls_cost ** (1 - self.alpha) * overlaps**self.alpha + + # assign 0 by default + assigned_gt_inds = overlaps.new_full((num_bboxes,), 0, dtype=torch.long) + + if num_gt == 0 or num_bboxes == 0: + # No ground truth or boxes, return empty assignment + max_overlaps = overlaps.new_zeros((num_bboxes,)) + if num_gt == 0: + # No truth, assign everything to background + assigned_gt_inds[:] = 0 + if gt_labels is None: + assigned_labels = None + else: + assigned_labels = overlaps.new_full((num_bboxes,), -1, dtype=torch.long) + return AssignResult(num_gt, assigned_gt_inds, max_overlaps, labels=assigned_labels) + + # compute center distance between all bbox and gt + gt_cx = (gt_bboxes[:, 0] + gt_bboxes[:, 2]) / 2.0 + gt_cy = (gt_bboxes[:, 1] + gt_bboxes[:, 3]) / 2.0 + gt_points = torch.stack((gt_cx, gt_cy), dim=1) + + bboxes_cx = (bboxes[:, 0] + bboxes[:, 2]) / 2.0 + bboxes_cy = (bboxes[:, 1] + bboxes[:, 3]) / 2.0 + bboxes_points = torch.stack((bboxes_cx, bboxes_cy), dim=1) + + distances = (bboxes_points[:, None, :] - gt_points[None, :, :]).pow(2).sum(-1).sqrt() + + if ( + self.ignore_iof_thr > 0 + and gt_bboxes_ignore is not None + and gt_bboxes_ignore.numel() > 0 + and bboxes.numel() > 0 + ): + ignore_overlaps = self.iou_calculator(bboxes, gt_bboxes_ignore, mode="iof") + ignore_max_overlaps, _ = ignore_overlaps.max(dim=1) + ignore_idxs = ignore_max_overlaps > self.ignore_iof_thr + distances[ignore_idxs, :] = INF + assigned_gt_inds[ignore_idxs] = -1 + + # Selecting candidates based on the center distance + candidate_idxs = [] + start_idx = 0 + for level, bboxes_per_level in enumerate(num_level_bboxes): + # on each pyramid level, for each gt, + # select k bbox whose center are closest to the gt center + end_idx = start_idx + bboxes_per_level + distances_per_level = distances[start_idx:end_idx, :] + selectable_k = min(self.topk, bboxes_per_level) + + dim_1 = distances_per_level.shape[1] + if dim_1 == 1: + distances_per_level = distances_per_level.reshape(-1) + _, topk_idxs_per_level = distances_per_level.topk(selectable_k, dim=0, largest=False) + topk_idxs_per_level = topk_idxs_per_level.reshape(selectable_k, dim_1) + + candidate_idxs.append(topk_idxs_per_level + start_idx) + start_idx = end_idx + candidate_idxs = torch.cat(candidate_idxs, dim=0) + + # get corresponding iou for the these candidates, and compute the + # mean and std, set mean + std as the iou threshold + candidate_overlaps = overlaps[candidate_idxs, torch.arange(num_gt)] + overlaps_mean_per_gt = candidate_overlaps.mean(0) + overlaps_std_per_gt = candidate_overlaps.std(0) + overlaps_thr_per_gt = overlaps_mean_per_gt + overlaps_std_per_gt + + is_pos = candidate_overlaps >= overlaps_thr_per_gt[None, :] + + # limit the positive sample's center in gt + for gt_idx in range(num_gt): + candidate_idxs[:, gt_idx] += gt_idx * num_bboxes + ep_bboxes_cx = bboxes_cx.view(1, -1).expand(num_gt, 
num_bboxes).contiguous().view(-1) + ep_bboxes_cy = bboxes_cy.view(1, -1).expand(num_gt, num_bboxes).contiguous().view(-1) + candidate_idxs = candidate_idxs.view(-1) + + # calculate the left, top, right, bottom distance between positive + # bbox center and gt side + l_ = ep_bboxes_cx[candidate_idxs].view(-1, num_gt) - gt_bboxes[:, 0] + t_ = ep_bboxes_cy[candidate_idxs].view(-1, num_gt) - gt_bboxes[:, 1] + r_ = gt_bboxes[:, 2] - ep_bboxes_cx[candidate_idxs].view(-1, num_gt) + b_ = gt_bboxes[:, 3] - ep_bboxes_cy[candidate_idxs].view(-1, num_gt) + is_in_gts = torch.stack([l_, t_, r_, b_], dim=1).min(dim=1)[0] > 0.01 + + is_pos = is_pos & is_in_gts + + # if an anchor box is assigned to multiple gts, + # the one with the highest IoU will be selected. + overlaps_inf = torch.full_like(overlaps, -INF).t().contiguous().view(-1) + index = candidate_idxs.view(-1)[is_pos.view(-1)] + overlaps_inf[index] = overlaps.t().contiguous().view(-1)[index] + overlaps_inf = overlaps_inf.view(num_gt, -1).t() + + max_overlaps, argmax_overlaps = overlaps_inf.max(dim=1) + assigned_gt_inds[max_overlaps != -INF] = argmax_overlaps[max_overlaps != -INF] + 1 + + if gt_labels is not None: + assigned_labels = assigned_gt_inds.new_full((num_bboxes,), -1) + pos_inds = torch.nonzero(assigned_gt_inds > 0, as_tuple=False).squeeze() + if pos_inds.numel() > 0: + assigned_labels[pos_inds] = gt_labels[assigned_gt_inds[pos_inds] - 1] + else: + assigned_labels = None + return AssignResult(num_gt, assigned_gt_inds, max_overlaps, labels=assigned_labels) diff --git a/src/otx/algorithms/detection/adapters/mmdet/task.py b/src/otx/algorithms/detection/adapters/mmdet/task.py index bf8079b15e1..ed24c3bebac 100644 --- a/src/otx/algorithms/detection/adapters/mmdet/task.py +++ b/src/otx/algorithms/detection/adapters/mmdet/task.py @@ -13,10 +13,12 @@ from typing import Any, Dict, Optional, Union import torch +from mmcv.ops.nms import NMSop +from mmcv.ops.roi_align import RoIAlign from mmcv.runner import wrap_fp16_model from mmcv.utils import Config, ConfigDict, get_git_hash from mmdet import __version__ -from mmdet.apis import single_gpu_test, train_detector +from mmdet.apis import single_gpu_test from mmdet.datasets import build_dataloader, build_dataset, replace_ImageToTensor from mmdet.models.detectors import DETR, TwoStageDetector from mmdet.utils import collect_env @@ -41,6 +43,11 @@ from otx.algorithms.common.tasks.nncf_task import NNCFBaseTask from otx.algorithms.common.utils.data import get_dataset from otx.algorithms.common.utils.logger import get_logger +from otx.algorithms.detection.adapters.mmdet.apis.train import ( + monkey_patched_xpu_nms, + monkey_patched_xpu_roi_align, + train_detector, +) from otx.algorithms.detection.adapters.mmdet.configurer import ( DetectionConfigurer, IncrDetectionConfigurer, @@ -341,6 +348,10 @@ def _infer_model( else: target_classes = mm_dataset.CLASSES + if cfg.device == "xpu": + NMSop.forward = monkey_patched_xpu_nms + RoIAlign.forward = monkey_patched_xpu_roi_align + # Model model = self.build_model(cfg, fp16=cfg.get("fp16", False)) model.CLASSES = target_classes diff --git a/src/otx/algorithms/detection/configs/detection/mobilenetv2_atss/model.py b/src/otx/algorithms/detection/configs/detection/mobilenetv2_atss/model.py index 6c9655789fd..2b0e1d9d71c 100644 --- a/src/otx/algorithms/detection/configs/detection/mobilenetv2_atss/model.py +++ b/src/otx/algorithms/detection/configs/detection/mobilenetv2_atss/model.py @@ -63,7 +63,7 @@ ), ), train_cfg=dict( - assigner=dict(type="ATSSAssigner", topk=9), + 
assigner=dict(type="XPUATSSAssigner", topk=9), allowed_border=-1, pos_weight=-1, debug=False, diff --git a/src/otx/algorithms/detection/configs/detection/mobilenetv2_atss/semisl/model.py b/src/otx/algorithms/detection/configs/detection/mobilenetv2_atss/semisl/model.py index 7fe599b1fa8..8cce2a2b365 100644 --- a/src/otx/algorithms/detection/configs/detection/mobilenetv2_atss/semisl/model.py +++ b/src/otx/algorithms/detection/configs/detection/mobilenetv2_atss/semisl/model.py @@ -66,7 +66,7 @@ ), ), train_cfg=dict( - assigner=dict(type="ATSSAssigner", topk=9), + assigner=dict(type="XPUATSSAssigner", topk=9), allowed_border=-1, pos_weight=-1, debug=False, diff --git a/src/otx/algorithms/detection/configs/detection/resnext101_atss/model.py b/src/otx/algorithms/detection/configs/detection/resnext101_atss/model.py index a7043f71c1f..579e382adb7 100644 --- a/src/otx/algorithms/detection/configs/detection/resnext101_atss/model.py +++ b/src/otx/algorithms/detection/configs/detection/resnext101_atss/model.py @@ -63,7 +63,7 @@ ), ), train_cfg=dict( - assigner=dict(type="ATSSAssigner", topk=9), + assigner=dict(type="XPUATSSAssigner", topk=9), allowed_border=-1, pos_weight=-1, debug=False, diff --git a/src/otx/algorithms/detection/configs/detection/resnext101_atss/semisl/model.py b/src/otx/algorithms/detection/configs/detection/resnext101_atss/semisl/model.py index 735f0e12d09..0264f220c79 100644 --- a/src/otx/algorithms/detection/configs/detection/resnext101_atss/semisl/model.py +++ b/src/otx/algorithms/detection/configs/detection/resnext101_atss/semisl/model.py @@ -66,7 +66,7 @@ ), ), train_cfg=dict( - assigner=dict(type="ATSSAssigner", topk=9), + assigner=dict(type="XPUATSSAssigner", topk=9), allowed_border=-1, pos_weight=-1, debug=False, diff --git a/src/otx/algorithms/segmentation/adapters/mmseg/apis/__init__.py b/src/otx/algorithms/segmentation/adapters/mmseg/apis/__init__.py new file mode 100644 index 00000000000..9a003db651d --- /dev/null +++ b/src/otx/algorithms/segmentation/adapters/mmseg/apis/__init__.py @@ -0,0 +1,8 @@ +"""Adapters of classification - mmseg.""" + +# Copyright (C) 2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +from .train import train_segmentor + +__all__ = ["train_segmentor"] diff --git a/src/otx/algorithms/segmentation/adapters/mmseg/apis/train.py b/src/otx/algorithms/segmentation/adapters/mmseg/apis/train.py new file mode 100644 index 00000000000..247eb527032 --- /dev/null +++ b/src/otx/algorithms/segmentation/adapters/mmseg/apis/train.py @@ -0,0 +1,155 @@ +"""Train function for segmentation task.""" + +# Copyright (C) 2022 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +# Copyright (c) OpenMMLab. All rights reserved. 
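An aside on the assigner swap in the four detection configs above: `XPUATSSAssigner` differs from the upstream `ATSSAssigner` only in the reshape it applies around `topk` on the per-level distance tensor, and the configs select it purely by registry name. A rough usage sketch follows, assuming `otx` and mmdet 2.x are installed so the `@BBOX_ASSIGNERS.register_module()` decorator has run:

```python
from mmdet.core.bbox import build_assigner

# importing the module runs @BBOX_ASSIGNERS.register_module() on XPUATSSAssigner
import otx.algorithms.detection.adapters.mmdet.models.assigners  # noqa: F401

# the train_cfg edits above select the IPEX-safe assigner by type string alone
assigner = build_assigner(dict(type="XPUATSSAssigner", topk=9))
```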
+import os +import warnings + +import mmcv +import torch +from mmcv.runner import HOOKS, DistSamplerSeedHook, EpochBasedRunner, build_runner +from mmcv.utils import build_from_cfg +from mmseg import digit_version +from mmseg.core import DistEvalHook, EvalHook, build_optimizer +from mmseg.datasets import build_dataloader, build_dataset +from mmseg.utils import build_ddp, find_latest_checkpoint, get_root_logger +from mmseg.utils.util_distribution import build_dp, dp_factory + +from otx.algorithms.common.adapters.mmcv.utils import XPUDataParallel + +dp_factory["xpu"] = XPUDataParallel + + +def train_segmentor(model, dataset, cfg, distributed=False, validate=False, timestamp=None, meta=None): + """Launch segmentor training.""" + logger = get_root_logger(cfg.log_level) + + # prepare data loaders + dataset = dataset if isinstance(dataset, (list, tuple)) else [dataset] + # The default loader config + loader_cfg = dict( + # cfg.gpus will be ignored if distributed + num_gpus=len(cfg.gpu_ids), + dist=distributed, + seed=cfg.seed, + drop_last=True, + ) + # The overall dataloader settings + loader_cfg.update( + { + k: v + for k, v in cfg.data.items() + if k not in ["train", "val", "test", "train_dataloader", "val_dataloader", "test_dataloader"] + } + ) + + # The specific dataloader settings + train_loader_cfg = {**loader_cfg, **cfg.data.get("train_dataloader", {})} + data_loaders = [build_dataloader(ds, **train_loader_cfg) for ds in dataset] + + # put model on devices + if distributed: + find_unused_parameters = cfg.get("find_unused_parameters", False) + # Sets the `find_unused_parameters` parameter in + # DDP wrapper + model = build_ddp( + model, + cfg.device, + device_ids=[int(os.environ["LOCAL_RANK"])], + broadcast_buffers=False, + find_unused_parameters=find_unused_parameters, + ) + else: + if not torch.cuda.is_available(): + assert digit_version(mmcv.__version__) >= digit_version( + "1.4.4" + ), "Please use MMCV >= 1.4.4 for CPU training!" + + if cfg.device == "xpu": + use_autocast = bool(cfg.get("fp16_", False)) + model = build_dp(model, cfg.device, device_ids=cfg.gpu_ids, enable_autocast=use_autocast) + model.to(f"xpu:{cfg.gpu_ids[0]}") + else: + model = build_dp(model, cfg.device, device_ids=cfg.gpu_ids) + + # build runner + optimizer = build_optimizer(model, cfg.optimizer) + + if cfg.device == "xpu": + fp16_cfg = cfg.get("fp16_", None) + if fp16_cfg is not None: + dtype = torch.bfloat16 + else: + dtype = torch.float32 + model.train() + model, optimizer = torch.xpu.optimize(model, optimizer=optimizer, dtype=dtype) + + if cfg.get("runner") is None: + cfg.runner = {"type": "IterBasedRunner", "max_iters": cfg.total_iters} + warnings.warn( + "config is now expected to have a `runner` section, " "please set `runner` in your config.", UserWarning + ) + + runner = build_runner( + cfg.runner, + default_args=dict( + model=model, batch_processor=None, optimizer=optimizer, work_dir=cfg.work_dir, logger=logger, meta=meta + ), + ) + + # register hooks + runner.register_training_hooks( + cfg.lr_config, cfg.optimizer_config, cfg.checkpoint_config, cfg.log_config, cfg.get("momentum_config", None) + ) + if distributed: + # when distributed training by epoch, using`DistSamplerSeedHook` to set + # the different seed to distributed sampler for each epoch, it will + # shuffle dataset at each epoch and avoid overfitting. 
+        if isinstance(runner, EpochBasedRunner):
+            runner.register_hook(DistSamplerSeedHook())
+
+    # an ugly workaround to make the .log and .log.json filenames the same
+    runner.timestamp = timestamp
+
+    # register eval hooks
+    if validate:
+        val_dataset = build_dataset(cfg.data.val, dict(test_mode=True))
+        # The specific dataloader settings
+        val_loader_cfg = {
+            **loader_cfg,
+            "samples_per_gpu": 1,
+            "shuffle": False,  # Not shuffle by default
+            **cfg.data.get("val_dataloader", {}),
+        }
+        val_dataloader = build_dataloader(val_dataset, **val_loader_cfg)
+        eval_cfg = cfg.get("evaluation", {})
+        eval_cfg["by_epoch"] = cfg.runner["type"] != "IterBasedRunner"
+        eval_hook = DistEvalHook if distributed else EvalHook
+        # In this PR (https://github.com/open-mmlab/mmcv/pull/1193), the
+        # priority of IterTimerHook has been modified from 'NORMAL' to 'LOW'.
+        runner.register_hook(eval_hook(val_dataloader, **eval_cfg), priority="LOW")
+
+    # user-defined hooks
+    if cfg.get("custom_hooks", None):
+        custom_hooks = cfg.custom_hooks
+        assert isinstance(custom_hooks, list), f"custom_hooks expect list type, but got {type(custom_hooks)}"
+        for hook_cfg in cfg.custom_hooks:
+            assert isinstance(hook_cfg, dict), (
+                "Each item in custom_hooks expects dict type, but got " f"{type(hook_cfg)}"
+            )
+            hook_cfg = hook_cfg.copy()
+            priority = hook_cfg.pop("priority", "NORMAL")
+            hook = build_from_cfg(hook_cfg, HOOKS)
+            runner.register_hook(hook, priority=priority)
+
+    if cfg.resume_from is None and cfg.get("auto_resume"):
+        resume_from = find_latest_checkpoint(cfg.work_dir)
+        if resume_from is not None:
+            cfg.resume_from = resume_from
+    if cfg.resume_from:
+        runner.resume(cfg.resume_from)
+    elif cfg.load_from:
+        runner.load_checkpoint(cfg.load_from)
+    runner.run(data_loaders, cfg.workflow)
diff --git a/src/otx/algorithms/segmentation/adapters/mmseg/task.py b/src/otx/algorithms/segmentation/adapters/mmseg/task.py
index 0c671a06820..4e9c3544d2e 100644
--- a/src/otx/algorithms/segmentation/adapters/mmseg/task.py
+++ b/src/otx/algorithms/segmentation/adapters/mmseg/task.py
@@ -17,7 +17,6 @@
 from mmcv.runner import wrap_fp16_model
 from mmcv.utils import Config, ConfigDict, get_git_hash
 from mmseg import __version__
-from mmseg.apis import train_segmentor
 from mmseg.datasets import build_dataloader, build_dataset
 from mmseg.utils import collect_env
@@ -40,6 +39,7 @@
 from otx.algorithms.common.tasks.nncf_task import NNCFBaseTask
 from otx.algorithms.common.utils.data import get_dataset
 from otx.algorithms.common.utils.logger import get_logger
+from otx.algorithms.segmentation.adapters.mmseg.apis.train import train_segmentor
 from otx.algorithms.segmentation.adapters.mmseg.configurer import (
     IncrSegmentationConfigurer,
     SegmentationConfigurer,
diff --git a/src/otx/cli/utils/report.py b/src/otx/cli/utils/report.py
index 99559cda113..6baf796a9c3 100644
--- a/src/otx/cli/utils/report.py
+++ b/src/otx/cli/utils/report.py
@@ -11,6 +11,7 @@
 import torch
 
 import otx
+from otx.algorithms.common.utils import is_xpu_available
 from otx.api.entities.model_template import ModelTemplate
@@ -88,6 +89,14 @@ def env_info_to_str():
         for name, device_ids in devices.items():
             env_info["GPU " + ",".join(device_ids)] = name
     env_info["PyTorch"] = torch.__version__
+
+    if is_xpu_available():
+        devices = defaultdict(list)
+        for k in range(torch.xpu.device_count()):
+            devices[torch.xpu.get_device_name(k)].append(str(k))
+        for name, device_ids in devices.items():
+            env_info["GPU " + ",".join(device_ids)] = name
+
     for key, value in env_info.items():
         report_str +=
f"\t{key}: {value}\n" return report_str From 83e7faa8eb5b1dc9bbf6bf30f92454da23d61651 Mon Sep 17 00:00:00 2001 From: Vladislav Sovrasov Date: Mon, 30 Oct 2023 14:17:27 +0100 Subject: [PATCH 02/39] Fix tv nms check (#2582) --- src/otx/algorithms/detection/adapters/mmdet/apis/train.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/src/otx/algorithms/detection/adapters/mmdet/apis/train.py b/src/otx/algorithms/detection/adapters/mmdet/apis/train.py index bde2e65cdc6..caf8720b59a 100644 --- a/src/otx/algorithms/detection/adapters/mmdet/apis/train.py +++ b/src/otx/algorithms/detection/adapters/mmdet/apis/train.py @@ -127,7 +127,7 @@ def train_detector(model, dataset, cfg, distributed=False, validate=False, times optimizer = build_optimizer(model, cfg.optimizer) if cfg.device == "xpu": - # dinamic patch for nms and roi_align + # dynamic patch for nms and roi_align NMSop.forward = monkey_patched_xpu_nms RoIAlign.forward = monkey_patched_xpu_roi_align if fp16_cfg is not None: @@ -206,6 +206,11 @@ def monkey_patched_xpu_nms(ctx, bboxes, scores, iou_threshold, offset, score_thr bboxes, scores = bboxes[valid_mask], scores[valid_mask] valid_inds = torch.nonzero(valid_mask, as_tuple=False).squeeze(dim=1) + if bboxes.dtype == torch.bfloat16: + bboxes = bboxes.to(torch.float32) + if scores.dtype == torch.bfloat16: + scores = scores.to(torch.float32) + if offset == 0: inds = tv_nms(bboxes, scores, float(iou_threshold)) else: From 575b4ec0740d1512af8826eb075085a4dbef0640 Mon Sep 17 00:00:00 2001 From: "Kim, Sungchul" Date: Wed, 8 Nov 2023 17:06:06 +0900 Subject: [PATCH 03/39] POC: HPU support (#2574) * Add hpu.is_available * Add hpu in `configure_device` * Add hpu optimizers * Update `train_model` * Add `HPUDataParallel` * Fix * Enable HPUOptimizerHooks * Enable autocast * Update * (WIP) Enable dataloader * (tmp) Disenable habana loader * Fix to convert bfloat16 tensor to float32 for numpy * Update `htcore.mark_step` location * Move setting optimizer hooks into configurer * Fix to enable bf16 * (WIP) Enable gpu migration * enable ocr_lite_hrnet_18_mod2 training * register hpu optimizer once * detch seg logit for calculating acc only when hpu is used * change hpu_optimizer path * cache whether hpu_available * align with other args * move hpu_migration into otx.algo.common.util init file * Refactoring * move the code changing adam to adamw into mmseg.apis.train.py * move the code model.to in mmseg.api.train.py * use use_autocast properly * remove HPUDistOptimizerHook * precommit * Fix unit test * Use `self.src_device_obj` * Add function for repeated pattern * precommit --------- Co-authored-by: Shin, Eunwoo --- .../adapters/mmcls/apis/train.py | 11 +++- .../mmcls/models/heads/custom_cls_head.py | 3 ++ .../heads/custom_vision_transformer_head.py | 7 +++ .../mmcls/models/heads/non_linear_cls_head.py | 3 ++ .../common/adapters/mmcv/configurer.py | 50 +++++++++++++----- .../common/adapters/mmcv/hooks/__init__.py | 7 +++ .../adapters/mmcv/hooks/hpu_optimizer_hook.py | 30 +++++++++++ .../common/adapters/mmcv/utils/__init__.py | 3 +- .../utils/_builder_build_data_parallel.py | 51 ++++++++++++++++++- .../adapters/mmcv/utils/hpu_optimizers.py | 31 +++++++++++ src/otx/algorithms/common/utils/__init__.py | 8 +++ src/otx/algorithms/common/utils/utils.py | 25 +++++++++ .../segmentation/adapters/mmseg/apis/train.py | 16 +++++- .../mmseg/models/heads/custom_otx_head.py | 3 ++ 14 files changed, 230 insertions(+), 18 deletions(-) create mode 100644 
src/otx/algorithms/common/adapters/mmcv/hooks/hpu_optimizer_hook.py create mode 100644 src/otx/algorithms/common/adapters/mmcv/utils/hpu_optimizers.py diff --git a/src/otx/algorithms/classification/adapters/mmcls/apis/train.py b/src/otx/algorithms/classification/adapters/mmcls/apis/train.py index 3134c90c0d1..de22e38087f 100644 --- a/src/otx/algorithms/classification/adapters/mmcls/apis/train.py +++ b/src/otx/algorithms/classification/adapters/mmcls/apis/train.py @@ -12,7 +12,8 @@ from mmcls.utils import get_root_logger, wrap_distributed_model, wrap_non_distributed_model from mmcv.runner import DistSamplerSeedHook, build_optimizer, build_runner -from otx.algorithms.common.adapters.mmcv.utils import XPUDataParallel +from otx.algorithms.common.adapters.mmcv.utils import HPUDataParallel, XPUDataParallel +from otx.algorithms.common.adapters.mmcv.utils.hpu_optimizers import HABANA_OPTIMIZERS def train_model(model, dataset, cfg, distributed=False, validate=False, timestamp=None, device=None, meta=None): @@ -78,11 +79,19 @@ def train_model(model, dataset, cfg, distributed=False, validate=False, timestam assert len(cfg.gpu_ids) == 1 model.to(f"xpu:{cfg.gpu_ids[0]}") model = XPUDataParallel(model, dim=0, device_ids=cfg.gpu_ids, enable_autocast=bool(fp16_cfg)) + elif cfg.device == "hpu": + assert len(cfg.gpu_ids) == 1 + model = HPUDataParallel(model.cuda(), dim=0, device_ids=cfg.gpu_ids, enable_autocast=bool(fp16_cfg)) else: model = wrap_non_distributed_model(model, cfg.device, device_ids=cfg.gpu_ids) # build runner + if cfg.device == "hpu": + if (new_type := "Fused" + cfg.optimizer.get("type", "SGD")) in HABANA_OPTIMIZERS: + cfg.optimizer["type"] = new_type + optimizer = build_optimizer(model, cfg.optimizer) + if cfg.device == "xpu": if fp16_cfg is not None: dtype = torch.bfloat16 diff --git a/src/otx/algorithms/classification/adapters/mmcls/models/heads/custom_cls_head.py b/src/otx/algorithms/classification/adapters/mmcls/models/heads/custom_cls_head.py index fcf2008e795..bc466c303dc 100644 --- a/src/otx/algorithms/classification/adapters/mmcls/models/heads/custom_cls_head.py +++ b/src/otx/algorithms/classification/adapters/mmcls/models/heads/custom_cls_head.py @@ -8,6 +8,8 @@ from mmcls.models.builder import HEADS from mmcls.models.heads import LinearClsHead +from otx.algorithms.common.utils import cast_bf16_to_fp32 + from .non_linear_cls_head import NonLinearClsHead @@ -89,6 +91,7 @@ def simple_test(self, img): if torch.onnx.is_in_onnx_export(): return cls_score pred = F.softmax(cls_score, dim=1) if cls_score is not None else None + pred = cast_bf16_to_fp32(pred) return self.post_process(pred) diff --git a/src/otx/algorithms/classification/adapters/mmcls/models/heads/custom_vision_transformer_head.py b/src/otx/algorithms/classification/adapters/mmcls/models/heads/custom_vision_transformer_head.py index b9ce9ef6c8f..405203de559 100644 --- a/src/otx/algorithms/classification/adapters/mmcls/models/heads/custom_vision_transformer_head.py +++ b/src/otx/algorithms/classification/adapters/mmcls/models/heads/custom_vision_transformer_head.py @@ -6,6 +6,8 @@ from mmcls.models.builder import HEADS from mmcls.models.heads import VisionTransformerClsHead +from otx.algorithms.common.utils import cast_bf16_to_fp32 + @HEADS.register_module() class CustomVisionTransformerClsHead(VisionTransformerClsHead): @@ -31,3 +33,8 @@ def loss(self, cls_score, gt_label, feature=None): losses["accuracy"] = {f"top-{k}": a for k, a in zip(self.topk, acc)} losses["loss"] = loss return losses + + def post_process(self, pred): + 
"""Post processing.""" + pred = cast_bf16_to_fp32(pred) + return super().post_process(pred) diff --git a/src/otx/algorithms/classification/adapters/mmcls/models/heads/non_linear_cls_head.py b/src/otx/algorithms/classification/adapters/mmcls/models/heads/non_linear_cls_head.py index fa37c5c9c72..4dcb387023d 100644 --- a/src/otx/algorithms/classification/adapters/mmcls/models/heads/non_linear_cls_head.py +++ b/src/otx/algorithms/classification/adapters/mmcls/models/heads/non_linear_cls_head.py @@ -10,6 +10,8 @@ from mmcv.cnn import build_activation_layer, constant_init, normal_init from torch import nn +from otx.algorithms.common.utils import cast_bf16_to_fp32 + @HEADS.register_module() class NonLinearClsHead(ClsHead): @@ -84,6 +86,7 @@ def simple_test(self, img): if torch.onnx.is_in_onnx_export(): return cls_score pred = F.softmax(cls_score, dim=1) if cls_score is not None else None + pred = cast_bf16_to_fp32(pred) pred = list(pred.detach().cpu().numpy()) return pred diff --git a/src/otx/algorithms/common/adapters/mmcv/configurer.py b/src/otx/algorithms/common/adapters/mmcv/configurer.py index 83aa919c98e..54ee9326b80 100644 --- a/src/otx/algorithms/common/adapters/mmcv/configurer.py +++ b/src/otx/algorithms/common/adapters/mmcv/configurer.py @@ -27,7 +27,12 @@ update_or_add_custom_hook, ) from otx.algorithms.common.tasks.base_task import OnHookInitialized -from otx.algorithms.common.utils import UncopiableDefaultDict, append_dist_rank_suffix, is_xpu_available +from otx.algorithms.common.utils import ( + UncopiableDefaultDict, + append_dist_rank_suffix, + is_hpu_available, + is_xpu_available, +) from otx.algorithms.common.utils.data import compute_robust_dataset_statistics from otx.algorithms.common.utils.logger import get_logger from otx.api.usecases.reporting.time_monitor_callback import TimeMonitorCallback @@ -171,20 +176,22 @@ def configure_device(self, cfg): elif "gpu_ids" not in cfg: cfg.gpu_ids = range(1) - # consider "cuda", "xpu", and "cpu" devices only - if not torch.cuda.is_available(): + # consider "cuda", "hpu" and "cpu" device only + if is_hpu_available(): + cfg.device = "hpu" + elif torch.cuda.is_available(): + cfg.device = "cuda" + elif is_xpu_available(): try: import intel_extension_for_pytorch as ipex # noqa: F401 - if is_xpu_available(): - cfg.device = "xpu" - else: - cfg.device = "cpu" + cfg.device = "xpu" except ModuleNotFoundError: cfg.device = "cpu" cfg.gpu_ids = range(-1, 0) else: - cfg.device = "cuda" + cfg.device = "cpu" + cfg.gpu_ids = range(-1, 0) @staticmethod def configure_distributed(cfg: Config) -> None: @@ -252,13 +259,18 @@ def configure_fp16(cfg: Config): # workaround to forward FP16 config to mmapi.train funcitons cfg.fp16_ = fp16_config + optim_type = cfg.optimizer_config.get("type", "OptimizerHook") + distributed = getattr(cfg, "distributed", False) + opts: Dict[str, Any] = {} if fp16_config is not None: - if torch.cuda.is_available() or is_xpu_available(): - optim_type = cfg.optimizer_config.get("type", "OptimizerHook") - opts: Dict[str, Any] = dict( - distributed=getattr(cfg, "distributed", False), - **fp16_config, - ) + if is_hpu_available(): + if optim_type == "SAMOptimizerHook": + # TODO (sungchul): consider SAM optimizer + logger.warning("SAMOptimizerHook is not supported on HPU. 
Changed to OptimizerHook.") + opts["type"] = "HPUOptimizerHook" + cfg.optimizer_config.update(opts) + elif torch.cuda.is_available() or is_xpu_available(): + opts.update({"distributed": distributed, **fp16_config}) if optim_type == "SAMOptimizerHook": opts["type"] = "Fp16SAMOptimizerHook" elif optim_type == "OptimizerHook": @@ -272,6 +284,16 @@ def configure_fp16(cfg: Config): else: logger.info("Revert FP16 to FP32 on CPU device") + elif is_hpu_available(): + if distributed: + opts["type"] = "HPUDistOptimizerHook" + else: + opts["type"] = "HPUOptimizerHook" + cfg.optimizer_config.update(opts) + + else: + logger.info("Revert FP16 to FP32 on CPU device") + def configure_model(self, cfg, data_classes, model_classes, ir_options, **kwargs): """Configuration model config settings.""" diff --git a/src/otx/algorithms/common/adapters/mmcv/hooks/__init__.py b/src/otx/algorithms/common/adapters/mmcv/hooks/__init__.py index ed724ff53ab..a7c41d80fee 100644 --- a/src/otx/algorithms/common/adapters/mmcv/hooks/__init__.py +++ b/src/otx/algorithms/common/adapters/mmcv/hooks/__init__.py @@ -91,3 +91,10 @@ "MemCacheHook", "LossDynamicsTrackingHook", ] + +try: + from .hpu_optimizer_hook import HPUOptimizerHook + + __all__ += ["HPUOptimizerHook"] +except: # noqa: E722 + pass diff --git a/src/otx/algorithms/common/adapters/mmcv/hooks/hpu_optimizer_hook.py b/src/otx/algorithms/common/adapters/mmcv/hooks/hpu_optimizer_hook.py new file mode 100644 index 00000000000..f5e26c49083 --- /dev/null +++ b/src/otx/algorithms/common/adapters/mmcv/hooks/hpu_optimizer_hook.py @@ -0,0 +1,30 @@ +"""Optimizer hook for HPU.""" + +# Copyright (C) 2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 +# + +import habana_frameworks.torch.core as htcore +from mmcv.runner import HOOKS, OptimizerHook + + +@HOOKS.register_module() +class HPUOptimizerHook(OptimizerHook): + """A hook contains custom operations for the optimizer on HPU.""" + + def after_train_iter(self, runner): + """After train iter.""" + runner.optimizer.zero_grad() + if self.detect_anomalous_params: + self.detect_anomalous_parameters(runner.outputs["loss"], runner) + runner.outputs["loss"].backward() + htcore.mark_step() + + if self.grad_clip is not None: + grad_norm = self.clip_grads(runner.model.parameters()) + if grad_norm is not None: + # Add grad norm to the logger + runner.log_buffer.update({"grad_norm": float(grad_norm)}, runner.outputs["num_samples"]) + + runner.optimizer.step() + htcore.mark_step() diff --git a/src/otx/algorithms/common/adapters/mmcv/utils/__init__.py b/src/otx/algorithms/common/adapters/mmcv/utils/__init__.py index 0fac8a67e31..4c5a77e1d87 100644 --- a/src/otx/algorithms/common/adapters/mmcv/utils/__init__.py +++ b/src/otx/algorithms/common/adapters/mmcv/utils/__init__.py @@ -3,7 +3,7 @@ # Copyright (C) 2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 -from ._builder_build_data_parallel import XPUDataParallel, build_data_parallel +from ._builder_build_data_parallel import HPUDataParallel, XPUDataParallel, build_data_parallel from ._config_utils_get_configs_by_keys import get_configs_by_keys from ._config_utils_get_configs_by_pairs import get_configs_by_pairs from .automatic_bs import adapt_batch_size @@ -48,5 +48,6 @@ "adapt_batch_size", "InputSizeManager", "XPUDataParallel", + "HPUDataParallel", "patch_from_hyperparams", ] diff --git a/src/otx/algorithms/common/adapters/mmcv/utils/_builder_build_data_parallel.py b/src/otx/algorithms/common/adapters/mmcv/utils/_builder_build_data_parallel.py index 1b084f3a815..39c9bf5f7b3 
100644 --- a/src/otx/algorithms/common/adapters/mmcv/utils/_builder_build_data_parallel.py +++ b/src/otx/algorithms/common/adapters/mmcv/utils/_builder_build_data_parallel.py @@ -12,7 +12,7 @@ from mmcv import Config from mmcv.parallel import MMDataParallel, MMDistributedDataParallel -from otx.algorithms.common.utils import is_xpu_available +from otx.algorithms.common.utils import is_hpu_available, is_xpu_available @overload @@ -63,6 +63,9 @@ def build_data_parallel( if is_xpu_available() and config.get("gpu_ids", []): model = model.xpu() model = XPUDataParallel(model, device_ids=config.gpu_ids) + elif is_hpu_available() and config.get("gpu_ids", []): + model = model.hpu() + model = HPUDataParallel(model, device_ids=config.gpu_ids) elif torch.cuda.is_available() and config.get("gpu_ids", []): if distributed: model = model.cuda() @@ -134,3 +137,49 @@ def train_step(self, *inputs, **kwargs): def val_step(self, *inputs, **kwargs): with torch.autocast(device_type="xpu", dtype=torch.bfloat16, enabled=self.enable_autocast): return super().val_step(*inputs, **kwargs) + + +class HPUDataParallel(MMDataParallel): + def __init__(self, *args, enable_autocast: bool = False, **kwargs): + super().__init__(*args, **kwargs) + self.enable_autocast = enable_autocast + self.src_device_obj = torch.device("hpu", self.device_ids[0]) + + def scatter(self, inputs, kwargs, device_ids): + inputs, kwargs = super().scatter(inputs, kwargs, [-1]) + + for x in inputs: + if isinstance(x, tuple): + for val in x: + if isinstance(val, dict): + for k in val: + if isinstance(val[k], torch.Tensor): + val[k] = val[k].to(self.src_device_obj) + elif isinstance(val[k], list): + for i, item in enumerate(val[k]): + if isinstance(item, torch.Tensor): + val[k][i] = item.to(self.src_device_obj) + + for x in kwargs: + if isinstance(x, dict): + for k in x: + if isinstance(x[k], torch.Tensor): + x[k] = x[k].to(f"hpu:{device_ids[0]}") + elif isinstance(x[k], list): + for i, item in enumerate(x[k]): + if isinstance(item, torch.Tensor): + x[k][i] = item.to(self.src_device_obj) + + return inputs, kwargs + + def forward(self, *inputs, **kwargs): + with torch.cuda.amp.autocast(dtype=torch.bfloat16, enabled=self.enable_autocast): + return super().forward(*inputs, **kwargs) + + def train_step(self, *inputs, **kwargs): + with torch.cuda.amp.autocast(dtype=torch.bfloat16, enabled=self.enable_autocast): + return super().train_step(*inputs, **kwargs) + + def val_step(self, *inputs, **kwargs): + with torch.cuda.amp.autocast(dtype=torch.bfloat16, enabled=self.enable_autocast): + return super().val_step(*inputs, **kwargs) diff --git a/src/otx/algorithms/common/adapters/mmcv/utils/hpu_optimizers.py b/src/otx/algorithms/common/adapters/mmcv/utils/hpu_optimizers.py new file mode 100644 index 00000000000..eabb26fbcf1 --- /dev/null +++ b/src/otx/algorithms/common/adapters/mmcv/utils/hpu_optimizers.py @@ -0,0 +1,31 @@ +"""Optimizers for HPU.""" + +import inspect +from typing import List + +import torch +from mmcv.runner import OPTIMIZERS + +try: + import habana_frameworks.torch.hpex.optimizers as hoptimizers +except ImportError: + hoptimizers = None + + +def register_habana_optimizers() -> List: + """Register habana optimizers.""" + if hoptimizers is None: + return [] + + habana_optimizers = [] + for module_name in dir(hoptimizers): + if module_name.startswith("__"): + continue + _optim = getattr(hoptimizers, module_name) + if inspect.isclass(_optim) and issubclass(_optim, torch.optim.Optimizer): + OPTIMIZERS.register_module()(_optim) + 
habana_optimizers.append(module_name) + return habana_optimizers + + +HABANA_OPTIMIZERS = register_habana_optimizers() diff --git a/src/otx/algorithms/common/utils/__init__.py b/src/otx/algorithms/common/utils/__init__.py index cd63629bb28..6395bd6e60d 100644 --- a/src/otx/algorithms/common/utils/__init__.py +++ b/src/otx/algorithms/common/utils/__init__.py @@ -24,9 +24,11 @@ from .ir import embed_ir_model_data from .utils import ( UncopiableDefaultDict, + cast_bf16_to_fp32, get_arg_spec, get_default_async_reqs_num, get_task_class, + is_hpu_available, is_xpu_available, load_template, read_py_config, @@ -51,4 +53,10 @@ "read_py_config", "get_default_async_reqs_num", "is_xpu_available", + "is_hpu_available", + "cast_bf16_to_fp32", ] + + +if is_hpu_available(): + import habana_frameworks.torch.gpu_migration # noqa: F401 diff --git a/src/otx/algorithms/common/utils/utils.py b/src/otx/algorithms/common/utils/utils.py index 88495a8397b..92e1b2f0853 100644 --- a/src/otx/algorithms/common/utils/utils.py +++ b/src/otx/algorithms/common/utils/utils.py @@ -18,6 +18,13 @@ import yaml from addict import Dict as adict +HPU_AVAILABLE = None +try: + import habana_frameworks.torch as htorch +except ImportError: + HPU_AVAILABLE = False + htorch = None + class UncopiableDefaultDict(defaultdict): """Defauldict type object to avoid deepcopy.""" @@ -165,3 +172,21 @@ def embed_onnx_model_data(onnx_file: str, extra_model_data: Dict[Tuple[str, str] def is_xpu_available(): """Checks if XPU device is available.""" return hasattr(torch, "xpu") and torch.xpu.is_available() + + +def is_hpu_available() -> bool: + """Check if HPU device is available.""" + global HPU_AVAILABLE # noqa: PLW0603 + if HPU_AVAILABLE is None: + HPU_AVAILABLE = htorch.hpu.is_available() + return HPU_AVAILABLE + + +def cast_bf16_to_fp32(tensor: torch.Tensor) -> torch.Tensor: + """Cast a bf16 tensor to fp32 before it is processed by numpy. + + numpy doesn't support bfloat16, so a bfloat16 tensor must be converted to float32 first.
+ """ + if tensor.dtype == torch.bfloat16: + tensor = tensor.to(torch.float32) + return tensor diff --git a/src/otx/algorithms/segmentation/adapters/mmseg/apis/train.py b/src/otx/algorithms/segmentation/adapters/mmseg/apis/train.py index 247eb527032..6ed2ec50dc8 100644 --- a/src/otx/algorithms/segmentation/adapters/mmseg/apis/train.py +++ b/src/otx/algorithms/segmentation/adapters/mmseg/apis/train.py @@ -17,9 +17,11 @@ from mmseg.utils import build_ddp, find_latest_checkpoint, get_root_logger from mmseg.utils.util_distribution import build_dp, dp_factory -from otx.algorithms.common.adapters.mmcv.utils import XPUDataParallel +from otx.algorithms.common.adapters.mmcv.utils import HPUDataParallel, XPUDataParallel +from otx.algorithms.common.adapters.mmcv.utils.hpu_optimizers import HABANA_OPTIMIZERS dp_factory["xpu"] = XPUDataParallel +dp_factory["hpu"] = HPUDataParallel def train_segmentor(model, dataset, cfg, distributed=False, validate=False, timestamp=None, meta=None): @@ -71,10 +73,22 @@ def train_segmentor(model, dataset, cfg, distributed=False, validate=False, time use_autocast = bool(cfg.get("fp16_", False)) model = build_dp(model, cfg.device, device_ids=cfg.gpu_ids, enable_autocast=use_autocast) model.to(f"xpu:{cfg.gpu_ids[0]}") + elif cfg.device == "hpu": + use_autocast = bool(cfg.get("fp16_", False)) + model = build_dp(model, cfg.device, device_ids=cfg.gpu_ids, enable_autocast=use_autocast) + model.to(model.src_device_obj) else: model = build_dp(model, cfg.device, device_ids=cfg.gpu_ids) # build runner + if cfg.device == "hpu": + optim_type = cfg.optimizer.get("type", "SGD") + if optim_type == "Adam": # to avoid segmentation fault + optim_type = "AdamW" + cfg.optimizer.type = optim_type + if (new_type := "Fused" + optim_type) in HABANA_OPTIMIZERS: + cfg.optimizer["type"] = new_type + optimizer = build_optimizer(model, cfg.optimizer) if cfg.device == "xpu": diff --git a/src/otx/algorithms/segmentation/adapters/mmseg/models/heads/custom_otx_head.py b/src/otx/algorithms/segmentation/adapters/mmseg/models/heads/custom_otx_head.py index 4c2ac39ea29..4f62c26fa6e 100644 --- a/src/otx/algorithms/segmentation/adapters/mmseg/models/heads/custom_otx_head.py +++ b/src/otx/algorithms/segmentation/adapters/mmseg/models/heads/custom_otx_head.py @@ -188,6 +188,9 @@ def losses( seg_logit, seg_label, weight=seg_weight, ignore_index=self.ignore_index, **valid_label_mask_cfg ) + if seg_logit.device.type == "hpu": + seg_logit = seg_logit.detach().clone().to("cpu") + loss["acc_seg"] = accuracy(seg_logit, seg_label, ignore_index=self.ignore_index) return loss From e203e581524d96a1136e101e31d3072067e0cb7f Mon Sep 17 00:00:00 2001 From: Vladislav Sovrasov Date: Wed, 8 Nov 2023 09:21:55 +0100 Subject: [PATCH 04/39] Merge develop & fix YOLOX inference in BFP16 on XPU (#2602) Fix yolox with bfp16 dtype on xpu --- .../models/detectors/custom_yolox_detector.py | 10 +++ .../mmdet/models/heads/custom_yolox_head.py | 82 +++++++++++++++++++ 2 files changed, 92 insertions(+) diff --git a/src/otx/algorithms/detection/adapters/mmdet/models/detectors/custom_yolox_detector.py b/src/otx/algorithms/detection/adapters/mmdet/models/detectors/custom_yolox_detector.py index d86e700ec8b..114e6b3c065 100644 --- a/src/otx/algorithms/detection/adapters/mmdet/models/detectors/custom_yolox_detector.py +++ b/src/otx/algorithms/detection/adapters/mmdet/models/detectors/custom_yolox_detector.py @@ -56,6 +56,16 @@ def forward_train(self, img, img_metas, gt_bboxes, gt_labels, gt_bboxes_ignore=N """Forward function for CustomYOLOX.""" 
return super().forward_train(img, img_metas, gt_bboxes, gt_labels, gt_bboxes_ignore=gt_bboxes_ignore) + def extract_feat(self, img): + """Directly extract features from the backbone+neck.""" + # workaround for xpu device, since the input converted to fp16 by mmcv + if "xpu" in str(img.device) and img.dtype == torch.float16: + img = img.to(torch.bfloat16) + x = self.backbone(img) + if self.with_neck: + x = self.neck(x) + return x + @staticmethod def load_state_dict_pre_hook(model, model_classes, chkpt_classes, chkpt_dict, prefix, *args, **kwargs): """Modify input state_dict according to class name matching before weight loading.""" diff --git a/src/otx/algorithms/detection/adapters/mmdet/models/heads/custom_yolox_head.py b/src/otx/algorithms/detection/adapters/mmdet/models/heads/custom_yolox_head.py index 0d9036a20f6..5de9fc272ff 100644 --- a/src/otx/algorithms/detection/adapters/mmdet/models/heads/custom_yolox_head.py +++ b/src/otx/algorithms/detection/adapters/mmdet/models/heads/custom_yolox_head.py @@ -21,6 +21,88 @@ class CustomYOLOXHead(YOLOXHead): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) + @force_fp32(apply_to=("cls_scores", "bbox_preds", "objectnesses")) + def loss(self, cls_scores, bbox_preds, objectnesses, gt_bboxes, gt_labels, img_metas, gt_bboxes_ignore=None): + """Compute loss of the head. + + Args: + cls_scores (list[Tensor]): Box scores for each scale level, + each is a 4D-tensor, the channel number is + num_priors * num_classes. + bbox_preds (list[Tensor]): Box energies / deltas for each scale + level, each is a 4D-tensor, the channel number is + num_priors * 4. + objectnesses (list[Tensor], Optional): Score factor for + all scale level, each is a 4D-tensor, has shape + (batch_size, 1, H, W). + gt_bboxes (list[Tensor]): Ground truth bboxes for each image with + shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. + gt_labels (list[Tensor]): class indices corresponding to each box + img_metas (list[dict]): Meta information of each image, e.g., + image size, scaling factor, etc. + gt_bboxes_ignore (None | list[Tensor]): specify which bounding + boxes can be ignored when computing the loss. + """ + num_imgs = len(img_metas) + featmap_sizes = [cls_score.shape[2:] for cls_score in cls_scores] + mlvl_priors = self.prior_generator.grid_priors( + featmap_sizes, dtype=cls_scores[0].dtype, device=cls_scores[0].device, with_stride=True + ) + + flatten_cls_preds = [ + cls_pred.permute(0, 2, 3, 1).reshape(num_imgs, -1, self.cls_out_channels) for cls_pred in cls_scores + ] + flatten_bbox_preds = [bbox_pred.permute(0, 2, 3, 1).reshape(num_imgs, -1, 4) for bbox_pred in bbox_preds] + flatten_objectness = [objectness.permute(0, 2, 3, 1).reshape(num_imgs, -1) for objectness in objectnesses] + + flatten_cls_preds = torch.cat(flatten_cls_preds, dim=1) + flatten_bbox_preds = torch.cat(flatten_bbox_preds, dim=1) + flatten_objectness = torch.cat(flatten_objectness, dim=1) + flatten_priors = torch.cat(mlvl_priors) + flatten_bboxes = self._bbox_decode(flatten_priors, flatten_bbox_preds) + + (pos_masks, cls_targets, obj_targets, bbox_targets, l1_targets, num_fg_imgs) = multi_apply( + self._get_target_single, + flatten_cls_preds.detach(), + flatten_objectness.detach(), + flatten_priors.unsqueeze(0).repeat(num_imgs, 1, 1), + flatten_bboxes.detach(), + gt_bboxes, + gt_labels, + ) + + # The experimental results show that ‘reduce_mean’ can improve + # performance on the COCO dataset. 
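# (Editorial aside.) `reduce_mean(num_pos)` just below averages the
# positive-sample count over all workers, so every rank normalizes its loss
# terms by the same value. A sketch of what the imported mmdet helper does,
# assuming a standard torch.distributed setup; `reduce_mean_sketch` is an
# illustrative name, not the imported function:
import torch.distributed as dist

def reduce_mean_sketch(tensor):
    """Average a tensor across all workers; a no-op in single-process runs."""
    if not (dist.is_available() and dist.is_initialized()):
        return tensor
    tensor = tensor.clone()
    dist.all_reduce(tensor.div_(dist.get_world_size()), op=dist.ReduceOp.SUM)
    return tensor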
+ num_pos = torch.tensor(sum(num_fg_imgs), dtype=torch.float, device=flatten_cls_preds.device) + num_total_samples = max(reduce_mean(num_pos), 1.0) + + pos_masks = torch.cat(pos_masks, 0) + cls_targets = torch.cat(cls_targets, 0) + obj_targets = torch.cat(obj_targets, 0) + bbox_targets = torch.cat(bbox_targets, 0) + if self.use_l1: + l1_targets = torch.cat(l1_targets, 0) + + loss_bbox = self.loss_bbox(flatten_bboxes.view(-1, 4)[pos_masks], bbox_targets) / num_total_samples + loss_obj = self.loss_obj(flatten_objectness.view(-1, 1), obj_targets) / num_total_samples + if "xpu" in str(flatten_cls_preds.device): + loss_cls = ( + self.loss_cls(flatten_cls_preds.reshape(-1, self.num_classes)[pos_masks], cls_targets) + / num_total_samples + ) + else: + loss_cls = ( + self.loss_cls(flatten_cls_preds.view(-1, self.num_classes)[pos_masks], cls_targets) / num_total_samples + ) + + loss_dict = dict(loss_cls=loss_cls, loss_bbox=loss_bbox, loss_obj=loss_obj) + + if self.use_l1: + loss_l1 = self.loss_l1(flatten_bbox_preds.view(-1, 4)[pos_masks], l1_targets) / num_total_samples + loss_dict.update(loss_l1=loss_l1) + + return loss_dict + @HEADS.register_module() class CustomYOLOXHeadTrackingLossDynamics(TrackingLossDynamicsMixIn, CustomYOLOXHead): From 2cd9eda7e767d4bf5c7d9de9270d88d59e09e825 Mon Sep 17 00:00:00 2001 From: Prokofiev Kirill Date: Fri, 10 Nov 2023 03:08:23 +0100 Subject: [PATCH 05/39] Object Detection with Gaudi2 (#2608) * added support for OD on habana * added hpu_opt * added OD support. * optimize a bit YOLOX. Now, inference is fast. Training still freezes * SSD, ATSS e2e training * stabilize mask rcnn a bit * don't put gt on hpu for OD * minor fix * Enable e2e training for Instance Segmentation. * clean the code stage 1 * clean code 2 * fix pre-commit * minor * change cast of bf16 * reply comments * align with pre-commit --------- Co-authored-by: eunwoosh --- .../common/adapters/mmcv/configurer.py | 2 +- .../mmcv/hooks/recording_forward_hook.py | 3 +- .../utils/_builder_build_data_parallel.py | 9 ++++-- src/otx/algorithms/common/utils/__init__.py | 3 ++ .../detection/adapters/mmdet/apis/train.py | 24 +++++++++++---- .../adapters/mmdet/evaluation/evaluator.py | 24 ++++++++++++--- .../detectors/custom_single_stage_detector.py | 1 - .../adapters/mmdet/models/heads/__init__.py | 2 ++ .../mmdet/models/heads/custom_atss_head.py | 26 +++++++++++++++++ .../mmdet/models/heads/custom_roi_head.py | 18 ++++++++++++ .../mmdet/models/heads/custom_rpn_head.py | 25 ++++++++++++++++ .../mmdet/models/heads/custom_ssd_head.py | 29 +++++++++++++++++++ .../mmdet/models/heads/custom_yolox_head.py | 11 +++++++ .../detection/adapters/mmdet/task.py | 10 +++---- .../efficientnetb2b_maskrcnn/model.py | 2 +- .../maskrcnn_swin_t/model.py | 2 +- .../resnet50_maskrcnn/model.py | 2 +- 17 files changed, 170 insertions(+), 23 deletions(-) create mode 100644 src/otx/algorithms/detection/adapters/mmdet/models/heads/custom_rpn_head.py diff --git a/src/otx/algorithms/common/adapters/mmcv/configurer.py b/src/otx/algorithms/common/adapters/mmcv/configurer.py index 54ee9326b80..83d43192ed7 100644 --- a/src/otx/algorithms/common/adapters/mmcv/configurer.py +++ b/src/otx/algorithms/common/adapters/mmcv/configurer.py @@ -176,7 +176,7 @@ def configure_device(self, cfg): elif "gpu_ids" not in cfg: cfg.gpu_ids = range(1) - # consider "cuda", "hpu" and "cpu" device only + # consider "cuda", "xpu", "hpu" and "cpu" device only if is_hpu_available(): cfg.device = "hpu" elif torch.cuda.is_available(): diff --git 
a/src/otx/algorithms/common/adapters/mmcv/hooks/recording_forward_hook.py b/src/otx/algorithms/common/adapters/mmcv/hooks/recording_forward_hook.py index 062cc230367..a3b2698babb 100644 --- a/src/otx/algorithms/common/adapters/mmcv/hooks/recording_forward_hook.py +++ b/src/otx/algorithms/common/adapters/mmcv/hooks/recording_forward_hook.py @@ -23,6 +23,7 @@ from torch.nn import LayerNorm from otx.algorithms.classification import MMCLS_AVAILABLE +from otx.algorithms.common.utils.utils import cast_bf16_to_fp32 if MMCLS_AVAILABLE: from mmcls.models.necks.gap import GlobalAveragePooling @@ -74,7 +75,7 @@ def _recording_forward( ): # pylint: disable=unused-argument tensors = self.func(output) if isinstance(tensors, torch.Tensor): - tensors_np = tensors.detach().cpu().numpy() + tensors_np = cast_bf16_to_fp32(tensors).detach().cpu().numpy() elif isinstance(tensors, np.ndarray): tensors_np = tensors else: diff --git a/src/otx/algorithms/common/adapters/mmcv/utils/_builder_build_data_parallel.py b/src/otx/algorithms/common/adapters/mmcv/utils/_builder_build_data_parallel.py index 39c9bf5f7b3..226e5e8cc25 100644 --- a/src/otx/algorithms/common/adapters/mmcv/utils/_builder_build_data_parallel.py +++ b/src/otx/algorithms/common/adapters/mmcv/utils/_builder_build_data_parallel.py @@ -64,7 +64,7 @@ def build_data_parallel( model = model.xpu() model = XPUDataParallel(model, device_ids=config.gpu_ids) elif is_hpu_available() and config.get("gpu_ids", []): - model = model.hpu() + model = model.to("hpu") model = HPUDataParallel(model, device_ids=config.gpu_ids) elif torch.cuda.is_available() and config.get("gpu_ids", []): if distributed: @@ -140,9 +140,10 @@ def val_step(self, *inputs, **kwargs): class HPUDataParallel(MMDataParallel): - def __init__(self, *args, enable_autocast: bool = False, **kwargs): + def __init__(self, *args, enable_autocast: bool = False, put_gt_on_device=True, **kwargs): super().__init__(*args, **kwargs) self.enable_autocast = enable_autocast + self.put_gt_on_device = put_gt_on_device self.src_device_obj = torch.device("hpu", self.device_ids[0]) def scatter(self, inputs, kwargs, device_ids): @@ -153,6 +154,10 @@ def scatter(self, inputs, kwargs, device_ids): for val in x: if isinstance(val, dict): for k in val: + # don't put annotations on the HPU to proceed + # post-processing on the CPU + if not self.put_gt_on_device and k.startswith("gt_"): + continue if isinstance(val[k], torch.Tensor): val[k] = val[k].to(self.src_device_obj) elif isinstance(val[k], list): diff --git a/src/otx/algorithms/common/utils/__init__.py b/src/otx/algorithms/common/utils/__init__.py index 6395bd6e60d..80372c59b4b 100644 --- a/src/otx/algorithms/common/utils/__init__.py +++ b/src/otx/algorithms/common/utils/__init__.py @@ -14,6 +14,8 @@ # See the License for the specific language governing permissions # and limitations under the License. 
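# (Editorial aside.) The `put_gt_on_device` flag added to HPUDataParallel above
# encodes one rule: move network inputs to the accelerator, but leave "gt_*"
# annotation tensors on the host, because box and mask post-processing runs on
# the CPU. A self-contained sketch of that rule; `move_batch` is a hypothetical
# helper name, not an OTX API:
import torch

def move_batch(batch: dict, device: str, put_gt_on_device: bool = True) -> dict:
    """Move tensors in a batch dict to `device`, optionally keeping gt_* on CPU."""
    moved = {}
    for key, value in batch.items():
        if not put_gt_on_device and key.startswith("gt_"):
            moved[key] = value  # ground-truth stays on the host
        elif isinstance(value, torch.Tensor):
            moved[key] = value.to(device)
        elif isinstance(value, list):
            moved[key] = [v.to(device) if isinstance(v, torch.Tensor) else v for v in value]
        else:
            moved[key] = value
    return moved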
+import os + from .callback import ( InferenceProgressCallback, OptimizationProgressCallback, @@ -59,4 +61,5 @@ if is_hpu_available(): + os.environ["PT_HPU_LAZY_MODE"] = "1" import habana_frameworks.torch.gpu_migration # noqa: F401 diff --git a/src/otx/algorithms/detection/adapters/mmdet/apis/train.py b/src/otx/algorithms/detection/adapters/mmdet/apis/train.py index caf8720b59a..4565631880d 100644 --- a/src/otx/algorithms/detection/adapters/mmdet/apis/train.py +++ b/src/otx/algorithms/detection/adapters/mmdet/apis/train.py @@ -24,10 +24,12 @@ from torchvision.ops import nms as tv_nms from torchvision.ops import roi_align as tv_roi_align -from otx.algorithms.common.adapters.mmcv.utils import XPUDataParallel +from otx.algorithms.common.adapters.mmcv.utils import HPUDataParallel, XPUDataParallel +from otx.algorithms.common.adapters.mmcv.utils.hpu_optimizers import HABANA_OPTIMIZERS ext_module = ext_loader.load_ext("_ext", ["nms", "softnms", "nms_match", "nms_rotated", "nms_quadri"]) dp_factory["xpu"] = XPUDataParallel +dp_factory["hpu"] = HPUDataParallel def auto_scale_lr(cfg, distributed, logger): @@ -119,17 +121,27 @@ def train_detector(model, dataset, cfg, distributed=False, validate=False, times elif cfg.device == "xpu": model = build_dp(model, cfg.device, device_ids=cfg.gpu_ids, enable_autocast=bool(fp16_cfg)) model.to(f"xpu:{cfg.gpu_ids[0]}") + elif cfg.device == "hpu": + model = build_dp( + model, cfg.device, device_ids=cfg.gpu_ids, dim=0, enable_autocast=bool(fp16_cfg), put_gt_on_device=False + ) + # patch optimizer + if (new_type := "Fused" + cfg.optimizer.get("type", "SGD")) in HABANA_OPTIMIZERS: + cfg.optimizer["type"] = new_type else: model = build_dp(model, cfg.device, device_ids=cfg.gpu_ids) # build optimizer auto_scale_lr(cfg, distributed, logger) + + if cfg.device in ["hpu", "xpu"]: + # dynamic patch for nms and roi_align + NMSop.forward = monkey_patched_nms + RoIAlign.forward = monkey_patched_roi_align + optimizer = build_optimizer(model, cfg.optimizer) if cfg.device == "xpu": - # dynamic patch for nms and roi_align - NMSop.forward = monkey_patched_xpu_nms - RoIAlign.forward = monkey_patched_xpu_roi_align if fp16_cfg is not None: dtype = torch.bfloat16 else: @@ -198,7 +210,7 @@ def train_detector(model, dataset, cfg, distributed=False, validate=False, times runner.run(data_loaders, cfg.workflow) -def monkey_patched_xpu_nms(ctx, bboxes, scores, iou_threshold, offset, score_threshold, max_num): +def monkey_patched_nms(ctx, bboxes, scores, iou_threshold, offset, score_threshold, max_num): """Runs MMCVs NMS with torchvision.nms, or forces NMS from MMCV to run on CPU.""" is_filtering_by_score = score_threshold > 0 if is_filtering_by_score: @@ -228,7 +240,7 @@ def monkey_patched_xpu_nms(ctx, bboxes, scores, iou_threshold, offset, score_thr return inds -def monkey_patched_xpu_roi_align(self, input, rois): +def monkey_patched_roi_align(self, input, rois): """Replaces MMCVs roi align with the one from torchvision. Args: diff --git a/src/otx/algorithms/detection/adapters/mmdet/evaluation/evaluator.py b/src/otx/algorithms/detection/adapters/mmdet/evaluation/evaluator.py index b6e5e6ab2dd..36bda12206f 100644 --- a/src/otx/algorithms/detection/adapters/mmdet/evaluation/evaluator.py +++ b/src/otx/algorithms/detection/adapters/mmdet/evaluation/evaluator.py @@ -15,6 +15,7 @@ # and limitations under the License. 
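# (Editorial aside.) `monkey_patched_nms` above reroutes mmcv's NMSop through
# torchvision on XPU/HPU, where the native mmcv kernel is unavailable. Its
# essential logic as a standalone sketch (the `offset` handling and the CPU
# fallback mentioned in the docstring are omitted):
import torch
from torchvision.ops import nms as tv_nms

def nms_sketch(bboxes, scores, iou_threshold, score_threshold=0.0, max_num=0):
    """Score-filter, run torchvision NMS, and map kept indices back."""
    if score_threshold > 0:
        valid_inds = torch.nonzero(scores > score_threshold, as_tuple=False).squeeze(1)
    else:
        valid_inds = torch.arange(scores.numel(), device=scores.device)
    keep = tv_nms(bboxes[valid_inds].float(), scores[valid_inds].float(), iou_threshold)
    if max_num > 0:
        keep = keep[:max_num]
    return valid_inds[keep]  # indices into the original, unfiltered boxes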
import multiprocessing as mp +import time from typing import Dict, List, Tuple, Union import mmcv @@ -22,11 +23,13 @@ import pycocotools.mask as mask_util from mmcv.utils import print_log from mmdet.core import BitmapMasks, PolygonMasks, eval_map +from mmdet.core.evaluation import mean_ap from mmdet.core.evaluation.bbox_overlaps import bbox_overlaps from mmdet.core.evaluation.class_names import get_classes from mmdet.core.evaluation.mean_ap import average_precision from terminaltables import AsciiTable +from otx.algorithms.common.utils.utils import is_hpu_available from otx.api.entities.label import Domain from otx.api.utils.time_utils import timeit @@ -59,6 +62,7 @@ def print_map_summary( # pylint: disable=too-many-locals,too-many-branches if scale_ranges is not None: assert len(scale_ranges) == num_scales + segmentation = "miou" in results num_classes = len(results) recalls = np.zeros((num_scales, num_classes), dtype=np.float32) @@ -69,7 +73,8 @@ def print_map_summary( # pylint: disable=too-many-locals,too-many-branches if cls_result["recall"].size > 0: recalls[:, i] = np.array(cls_result["recall"], ndmin=2)[:, -1] aps[:, i] = cls_result["ap"] - mious[:, i] = cls_result["miou"] + if segmentation: + mious[:, i] = cls_result["miou"] num_gts[:, i] = cls_result["num_gts"] if dataset is None: @@ -82,7 +87,9 @@ def print_map_summary( # pylint: disable=too-many-locals,too-many-branches if not isinstance(mean_ap, list): mean_ap = [mean_ap] - header = ["class", "gts", "dets", "recall", "ap", "miou"] + header = ["class", "gts", "dets", "recall", "ap"] + if segmentation: + header.append("miou") for i in range(num_scales): if scale_ranges is not None: print_log(f"Scale range {scale_ranges[i]}", logger=logger) @@ -94,12 +101,20 @@ def print_map_summary( # pylint: disable=too-many-locals,too-many-branches results[j]["num_dets"], f"{recalls[i, j]:.3f}", f"{aps[i, j]:.3f}", - f"{mious[i, j]:.3f}", ] + if segmentation: + row_data.append(f"{mious[i, j]:.3f}") table_data.append(row_data) - table_data.append(["mAP", "", "", "", f"{mean_ap[i]:.3f}", f"{np.mean(mious[i]):.3f}"]) + table_ = ( + ["mAP", "", "", "", f"{mean_ap[i]:.3f}", f"{np.mean(mious[i]):.3f}"] + if segmentation + else ["mAP", "", "", "", f"{mean_ap[i]:.3f}"] + ) + table_data.append(table_) table = AsciiTable(table_data) table.inner_footing_row_border = True + if is_hpu_available(): + time.sleep(0.1) # prevent segmentation fault print_log("\n" + table.table, logger=logger) @@ -245,6 +260,7 @@ def __init__(self, annotation: List[Dict], domain: Domain, classes: List[str], n else: self.annotation = annotation self.nproc = nproc + mean_ap.print_map_summary = print_map_summary def get_gt_instance_masks(self, annotation: List[Dict]): """Format ground truth instance mask annotation. 
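For context on the evaluator change above: re-binding `mean_ap.print_map_summary` works because mmdet's `eval_map` resolves that name through the `mean_ap` module namespace at call time, so assigning the module attribute swaps the report function process-wide. A generic toy illustration of the technique (all names here are invented for the example):

import types

lib = types.ModuleType("lib")
lib.report = lambda value: print(f"default: {value}")
lib.evaluate = lambda: lib.report(0.42)  # looks up `report` when called

def custom_report(value):
    print(f"custom: {value:.1%}")

lib.report = custom_report  # same re-binding trick as in the evaluator
lib.evaluate()              # -> custom: 42.0%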
diff --git a/src/otx/algorithms/detection/adapters/mmdet/models/detectors/custom_single_stage_detector.py b/src/otx/algorithms/detection/adapters/mmdet/models/detectors/custom_single_stage_detector.py index f690c38d86b..a8e926cae5d 100644 --- a/src/otx/algorithms/detection/adapters/mmdet/models/detectors/custom_single_stage_detector.py +++ b/src/otx/algorithms/detection/adapters/mmdet/models/detectors/custom_single_stage_detector.py @@ -38,7 +38,6 @@ class CustomSingleStageDetector(SAMDetectorMixin, DetLossDynamicsTrackingMixin, def __init__(self, *args, task_adapt=None, **kwargs): super().__init__(*args, **kwargs) - # Hook for class-sensitive weight loading if task_adapt: self._register_load_state_dict_pre_hook( diff --git a/src/otx/algorithms/detection/adapters/mmdet/models/heads/__init__.py b/src/otx/algorithms/detection/adapters/mmdet/models/heads/__init__.py index 28da39d0a1b..e705d18bdc8 100644 --- a/src/otx/algorithms/detection/adapters/mmdet/models/heads/__init__.py +++ b/src/otx/algorithms/detection/adapters/mmdet/models/heads/__init__.py @@ -10,6 +10,7 @@ from .custom_fcn_mask_head import CustomFCNMaskHead from .custom_retina_head import CustomRetinaHead from .custom_roi_head import CustomRoIHead +from .custom_rpn_head import CustomRPNHead from .custom_ssd_head import CustomSSDHead from .custom_vfnet_head import CustomVFNetHead from .custom_yolox_head import CustomYOLOXHead @@ -27,6 +28,7 @@ "CustomVFNetHead", "CustomYOLOXHead", "DETRHeadExtension", + "CustomRPNHead", # Loss dynamics tracking "CustomATSSHeadTrackingLossDynamics", ] diff --git a/src/otx/algorithms/detection/adapters/mmdet/models/heads/custom_atss_head.py b/src/otx/algorithms/detection/adapters/mmdet/models/heads/custom_atss_head.py index 477790e0d4d..41b7fd3aa8b 100644 --- a/src/otx/algorithms/detection/adapters/mmdet/models/heads/custom_atss_head.py +++ b/src/otx/algorithms/detection/adapters/mmdet/models/heads/custom_atss_head.py @@ -49,6 +49,32 @@ def __init__(self, *args, bg_loss_weight=-1.0, use_qfl=False, qfl_cfg=None, **kw self.bg_loss_weight = bg_loss_weight self.use_qfl = use_qfl + def forward_single(self, x, scale): + """Forward feature of a single scale level. + + Args: + x (Tensor): Features of a single scale level. + scale (:obj: `mmcv.cnn.Scale`): Learnable scale module to resize + the bbox prediction. + + Returns: + tuple: + cls_score (Tensor): Cls scores for a single scale level + the channels number is num_anchors * num_classes. + bbox_pred (Tensor): Box energies / deltas for a single scale + level, the channels number is num_anchors * 4. + centerness (Tensor): Centerness for a single scale level, the + channel number is (N, num_anchors * 1, H, W). + """ + cls_score, bbox_pred, centerness = super().forward_single(x, scale) + if cls_score.device.type == "hpu": + # put further post-processing on cpu + cls_score = cls_score.cpu() + bbox_pred = bbox_pred.cpu() + centerness = centerness.cpu() + + return cls_score, bbox_pred, centerness + @force_fp32(apply_to=("cls_scores", "bbox_preds", "centernesses")) def loss(self, cls_scores, bbox_preds, centernesses, gt_bboxes, gt_labels, img_metas, gt_bboxes_ignore=None): """Compute losses of the head. 
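The `forward_single` override above is one instance of a pattern this commit applies across the detection heads (ATSS here; the RPN, SSD, RoI and YOLOX heads below): keep the convolutional math on the accelerator, then copy the raw predictions to the CPU, where the assignment and NMS post-processing kernels are supported. A condensed toy version of the idiom, runnable on CPU:

import torch
from torch import nn

class TinyHead(nn.Module):
    """Toy dense head mirroring the accelerator-compute / CPU-postprocess split."""

    def __init__(self):
        super().__init__()
        self.cls = nn.Conv2d(8, 4, kernel_size=1)
        self.reg = nn.Conv2d(8, 4, kernel_size=1)

    def forward(self, x):
        cls_score, bbox_pred = self.cls(x), self.reg(x)
        if cls_score.device.type == "hpu":  # post-processing kernels live on CPU
            cls_score, bbox_pred = cls_score.cpu(), bbox_pred.cpu()
        return cls_score, bbox_pred

# TinyHead()(torch.randn(1, 8, 16, 16))  # same behavior on CPU, no copy needed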
diff --git a/src/otx/algorithms/detection/adapters/mmdet/models/heads/custom_roi_head.py b/src/otx/algorithms/detection/adapters/mmdet/models/heads/custom_roi_head.py index 05902fc9e70..d8e546a5f91 100644 --- a/src/otx/algorithms/detection/adapters/mmdet/models/heads/custom_roi_head.py +++ b/src/otx/algorithms/detection/adapters/mmdet/models/heads/custom_roi_head.py @@ -54,6 +54,14 @@ def _bbox_forward_train(self, x, sampling_results, gt_bboxes, gt_labels, img_met bbox_results.update(loss_bbox=loss_bbox) return bbox_results + def _mask_forward(self, x, rois=None, pos_inds=None, bbox_feats=None): + """Mask head forward function used in both training and testing.""" + mask_results = super()._mask_forward(x, rois, pos_inds, bbox_feats) + if mask_results["mask_pred"].device.type == "hpu": + mask_results["mask_pred"] = mask_results["mask_pred"].cpu() + mask_results["mask_feats"] = mask_results["mask_feats"].cpu() + return mask_results + @HEADS.register_module() class CustomConvFCBBoxHead(Shared2FCBBoxHead, CrossDatasetDetectorHead): @@ -125,6 +133,16 @@ def get_targets(self, sampling_results, gt_bboxes, gt_labels, img_metas, rcnn_tr valid_label_mask = torch.cat(valid_label_mask, 0) return labels, label_weights, bbox_targets, bbox_weights, valid_label_mask + def forward(self, x): + """ConvFCBBoxHead forward.""" + # shared part + cls_score, bbox_pred = super().forward(x) + if cls_score.device.type == "hpu": + cls_score = cls_score.cpu() + bbox_pred = bbox_pred.cpu() + + return cls_score, bbox_pred + @force_fp32(apply_to=("cls_score", "bbox_pred")) def loss( self, diff --git a/src/otx/algorithms/detection/adapters/mmdet/models/heads/custom_rpn_head.py b/src/otx/algorithms/detection/adapters/mmdet/models/heads/custom_rpn_head.py new file mode 100644 index 00000000000..b5bb4184fe3 --- /dev/null +++ b/src/otx/algorithms/detection/adapters/mmdet/models/heads/custom_rpn_head.py @@ -0,0 +1,25 @@ +"""Custom RPN head for OTX template.""" +# Copyright (C) 2022 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 +# +from mmdet.models.builder import HEADS +from mmdet.models.dense_heads import RPNHead + + +@HEADS.register_module() +class CustomRPNHead(RPNHead): + """RPN head. + + Args: + in_channels (int): Number of channels in the input feature map. + init_cfg (dict or list[dict], optional): Initialization config dict. + num_convs (int): Number of convolution layers in the head. Default 1. + """ + + def forward_single(self, x): + """Forward feature map of a single scale level.""" + rpn_cls_score, rpn_bbox_pred = super().forward_single(x) + if rpn_cls_score.device.type == "hpu": + rpn_cls_score = rpn_cls_score.cpu() + rpn_bbox_pred = rpn_bbox_pred.cpu() + return rpn_cls_score, rpn_bbox_pred diff --git a/src/otx/algorithms/detection/adapters/mmdet/models/heads/custom_ssd_head.py b/src/otx/algorithms/detection/adapters/mmdet/models/heads/custom_ssd_head.py index 6d5f1ce8427..7aebbcb3173 100644 --- a/src/otx/algorithms/detection/adapters/mmdet/models/heads/custom_ssd_head.py +++ b/src/otx/algorithms/detection/adapters/mmdet/models/heads/custom_ssd_head.py @@ -81,6 +81,35 @@ def _init_layers(self): nn.Conv2d(in_channel, num_base_priors * self.cls_out_channels, kernel_size=3, padding=1) ) + def forward(self, feats): + """Forward features from the upstream network. + + Args: + feats (tuple[Tensor]): Features from the upstream network, each is + a 4D-tensor. 
+ + Returns: + tuple: + cls_scores (list[Tensor]): Classification scores for all scale + levels, each is a 4D-tensor, the channels number is + num_anchors * num_classes. + bbox_preds (list[Tensor]): Box energies / deltas for all scale + levels, each is a 4D-tensor, the channels number is + num_anchors * 4. + """ + cls_scores = [] + bbox_preds = [] + for feat, reg_conv, cls_conv in zip(feats, self.reg_convs, self.cls_convs): + cls_out = cls_conv(feat) + reg_out = reg_conv(feat) + if cls_out.device.type == "hpu": + cls_scores.append(cls_out.cpu()) + bbox_preds.append(reg_out.cpu()) + else: + cls_scores.append(cls_out) + bbox_preds.append(reg_out) + return cls_scores, bbox_preds + def loss_single( self, cls_score, diff --git a/src/otx/algorithms/detection/adapters/mmdet/models/heads/custom_yolox_head.py b/src/otx/algorithms/detection/adapters/mmdet/models/heads/custom_yolox_head.py index 5de9fc272ff..161d692e4f4 100644 --- a/src/otx/algorithms/detection/adapters/mmdet/models/heads/custom_yolox_head.py +++ b/src/otx/algorithms/detection/adapters/mmdet/models/heads/custom_yolox_head.py @@ -103,6 +103,16 @@ def loss(self, cls_scores, bbox_preds, objectnesses, gt_bboxes, gt_labels, img_m return loss_dict + def forward_single(self, x, cls_convs, reg_convs, conv_cls, conv_reg, conv_obj): + """Forward feature of a single scale level.""" + cls_score, bbox_pred, objectness = super().forward_single(x, cls_convs, reg_convs, conv_cls, conv_reg, conv_obj) + if cls_score.device.type == "hpu": + # put on cpu for further post-processing + cls_score = cls_score.cpu() + bbox_pred = bbox_pred.cpu() + objectness = objectness.cpu() + return cls_score, bbox_pred, objectness + @HEADS.register_module() class CustomYOLOXHeadTrackingLossDynamics(TrackingLossDynamicsMixIn, CustomYOLOXHead): @@ -245,6 +255,7 @@ def _get_target_single(self, cls_preds, objectness, priors, decoded_bboxes, gt_b num_priors = priors.size(0) num_gts = gt_labels.size(0) gt_bboxes = gt_bboxes.to(decoded_bboxes.dtype) + # No target if num_gts == 0: cls_target = cls_preds.new_zeros((0, self.num_classes)) diff --git a/src/otx/algorithms/detection/adapters/mmdet/task.py b/src/otx/algorithms/detection/adapters/mmdet/task.py index 3b8040408be..53c00ceaf32 100644 --- a/src/otx/algorithms/detection/adapters/mmdet/task.py +++ b/src/otx/algorithms/detection/adapters/mmdet/task.py @@ -44,8 +44,8 @@ from otx.algorithms.common.utils.data import get_dataset from otx.algorithms.common.utils.logger import get_logger from otx.algorithms.detection.adapters.mmdet.apis.train import ( - monkey_patched_xpu_nms, - monkey_patched_xpu_roi_align, + monkey_patched_nms, + monkey_patched_roi_align, train_detector, ) from otx.algorithms.detection.adapters.mmdet.configurer import ( @@ -348,9 +348,9 @@ def _infer_model( else: target_classes = mm_dataset.CLASSES - if cfg.device == "xpu": - NMSop.forward = monkey_patched_xpu_nms - RoIAlign.forward = monkey_patched_xpu_roi_align + if cfg.device in ["xpu", "hpu"]: + NMSop.forward = monkey_patched_nms + RoIAlign.forward = monkey_patched_roi_align # Model model = self.build_model(cfg, fp16=cfg.get("fp16", False)) diff --git a/src/otx/algorithms/detection/configs/instance_segmentation/efficientnetb2b_maskrcnn/model.py b/src/otx/algorithms/detection/configs/instance_segmentation/efficientnetb2b_maskrcnn/model.py index 72ca9481ef3..03cb21733dc 100644 --- a/src/otx/algorithms/detection/configs/instance_segmentation/efficientnetb2b_maskrcnn/model.py +++ 
b/src/otx/algorithms/detection/configs/instance_segmentation/efficientnetb2b_maskrcnn/model.py @@ -28,7 +28,7 @@ type="CustomMaskRCNN", # Use CustomMaskRCNN for Incremental Learning neck=dict(type="FPN", in_channels=[24, 48, 120, 352], out_channels=80, num_outs=5), rpn_head=dict( - type="RPNHead", + type="CustomRPNHead", in_channels=80, feat_channels=80, anchor_generator=dict(type="AnchorGenerator", scales=[8], ratios=[0.5, 1.0, 2.0], strides=[4, 8, 16, 32, 64]), diff --git a/src/otx/algorithms/detection/configs/instance_segmentation/maskrcnn_swin_t/model.py b/src/otx/algorithms/detection/configs/instance_segmentation/maskrcnn_swin_t/model.py index 66f7522bdee..203470d2fac 100644 --- a/src/otx/algorithms/detection/configs/instance_segmentation/maskrcnn_swin_t/model.py +++ b/src/otx/algorithms/detection/configs/instance_segmentation/maskrcnn_swin_t/model.py @@ -38,7 +38,7 @@ ), neck=dict(type="FPN", in_channels=[96, 192, 384, 768], out_channels=256, num_outs=5), rpn_head=dict( - type="RPNHead", + type="CustomRPNHead", in_channels=256, feat_channels=256, anchor_generator=dict(type="AnchorGenerator", scales=[8], ratios=[0.5, 1.0, 2.0], strides=[4, 8, 16, 32, 64]), diff --git a/src/otx/algorithms/detection/configs/instance_segmentation/resnet50_maskrcnn/model.py b/src/otx/algorithms/detection/configs/instance_segmentation/resnet50_maskrcnn/model.py index 6832028e425..d8918edc33f 100644 --- a/src/otx/algorithms/detection/configs/instance_segmentation/resnet50_maskrcnn/model.py +++ b/src/otx/algorithms/detection/configs/instance_segmentation/resnet50_maskrcnn/model.py @@ -33,7 +33,7 @@ num_outs=5, ), rpn_head=dict( - type="RPNHead", + type="CustomRPNHead", in_channels=256, feat_channels=256, anchor_generator=dict( From ecb786f0c898443cfa80361f3fde20b1e5f8e85a Mon Sep 17 00:00:00 2001 From: Vladislav Sovrasov Date: Mon, 13 Nov 2023 04:01:02 +0100 Subject: [PATCH 06/39] Update XPU detection (#2623) --- .../algorithms/common/adapters/mmcv/configurer.py | 8 +------- src/otx/algorithms/common/utils/utils.py | 14 ++++++++++++-- 2 files changed, 13 insertions(+), 9 deletions(-) diff --git a/src/otx/algorithms/common/adapters/mmcv/configurer.py b/src/otx/algorithms/common/adapters/mmcv/configurer.py index 83d43192ed7..2136ee91f72 100644 --- a/src/otx/algorithms/common/adapters/mmcv/configurer.py +++ b/src/otx/algorithms/common/adapters/mmcv/configurer.py @@ -182,13 +182,7 @@ def configure_device(self, cfg): elif torch.cuda.is_available(): cfg.device = "cuda" elif is_xpu_available(): - try: - import intel_extension_for_pytorch as ipex # noqa: F401 - - cfg.device = "xpu" - except ModuleNotFoundError: - cfg.device = "cpu" - cfg.gpu_ids = range(-1, 0) + cfg.device = "xpu" else: cfg.device = "cpu" cfg.gpu_ids = range(-1, 0) diff --git a/src/otx/algorithms/common/utils/utils.py b/src/otx/algorithms/common/utils/utils.py index 92e1b2f0853..675aab5bb9b 100644 --- a/src/otx/algorithms/common/utils/utils.py +++ b/src/otx/algorithms/common/utils/utils.py @@ -25,6 +25,13 @@ HPU_AVAILABLE = False htorch = None +XPU_AVAILABLE = None +try: + import intel_extension_for_pytorch as ipex +except ImportError: + XPU_AVAILABLE = False + ipex = None + class UncopiableDefaultDict(defaultdict): """Defauldict type object to avoid deepcopy.""" @@ -169,9 +176,12 @@ def embed_onnx_model_data(onnx_file: str, extra_model_data: Dict[Tuple[str, str] onnx.save(model, onnx_file) -def is_xpu_available(): +def is_xpu_available() -> bool: """Checks if XPU device is available.""" - return hasattr(torch, "xpu") and torch.xpu.is_available() + 
global XPU_AVAILABLE # noqa: PLW0603 + if XPU_AVAILABLE is None: + XPU_AVAILABLE = hasattr(torch, "xpu") and torch.xpu.is_available() + return XPU_AVAILABLE def is_hpu_available() -> bool: From 08917e16ca46b86dfed965df1a05222afe9a5ff9 Mon Sep 17 00:00:00 2001 From: "Kim, Sungchul" Date: Mon, 13 Nov 2023 16:57:30 +0900 Subject: [PATCH 07/39] Add `ModuleCacher` for HPU graphs (#2624) * Add `ModuleCacher` on classification and segmentation * Fix --- .../mmcls/models/heads/custom_vision_transformer_head.py | 4 ++++ src/otx/algorithms/classification/adapters/mmcls/task.py | 8 ++++++++ src/otx/algorithms/segmentation/adapters/mmseg/task.py | 9 +++++++++ 3 files changed, 21 insertions(+) diff --git a/src/otx/algorithms/classification/adapters/mmcls/models/heads/custom_vision_transformer_head.py b/src/otx/algorithms/classification/adapters/mmcls/models/heads/custom_vision_transformer_head.py index 405203de559..4c3713c66e6 100644 --- a/src/otx/algorithms/classification/adapters/mmcls/models/heads/custom_vision_transformer_head.py +++ b/src/otx/algorithms/classification/adapters/mmcls/models/heads/custom_vision_transformer_head.py @@ -38,3 +38,7 @@ def post_process(self, pred): """Post processing.""" pred = cast_bf16_to_fp32(pred) return super().post_process(pred) + + def forward(self, x): + """Forward function of CustomVisionTransformerClsHead class.""" + return self.simple_test(x) diff --git a/src/otx/algorithms/classification/adapters/mmcls/task.py b/src/otx/algorithms/classification/adapters/mmcls/task.py index be1eb6bc940..cd78a3ccdf7 100644 --- a/src/otx/algorithms/classification/adapters/mmcls/task.py +++ b/src/otx/algorithms/classification/adapters/mmcls/task.py @@ -52,6 +52,7 @@ from otx.algorithms.common.configs.configuration_enums import BatchSizeAdaptType from otx.algorithms.common.configs.training_base import TrainType from otx.algorithms.common.tasks.nncf_task import NNCFBaseTask +from otx.algorithms.common.utils import is_hpu_available from otx.algorithms.common.utils.data import get_dataset from otx.algorithms.common.utils.logger import get_logger from otx.api.entities.datasets import DatasetEntity @@ -69,6 +70,9 @@ ) from .utils import build_classifier +if is_hpu_available(): + import habana_frameworks.torch.core as htcore + logger = get_logger() # TODO Remove unnecessary pylint disable @@ -367,6 +371,10 @@ def _train_model( # Model model = self.build_model(cfg, fp16=cfg.get("fp16", False)) model.train() + if is_hpu_available(): + # TODO (sungchul): move it to appropriate location if needed + htcore.hpu.ModuleCacher(max_graphs=10)(model=model.backbone, inplace=True) + htcore.hpu.ModuleCacher(max_graphs=10)(model=model.head, inplace=True) if cfg.distributed: convert_sync_batchnorm(model) diff --git a/src/otx/algorithms/segmentation/adapters/mmseg/task.py b/src/otx/algorithms/segmentation/adapters/mmseg/task.py index 4e9c3544d2e..5d60580cabc 100644 --- a/src/otx/algorithms/segmentation/adapters/mmseg/task.py +++ b/src/otx/algorithms/segmentation/adapters/mmseg/task.py @@ -37,6 +37,7 @@ from otx.algorithms.common.configs.configuration_enums import BatchSizeAdaptType from otx.algorithms.common.configs.training_base import TrainType from otx.algorithms.common.tasks.nncf_task import NNCFBaseTask +from otx.algorithms.common.utils import is_hpu_available from otx.algorithms.common.utils.data import get_dataset from otx.algorithms.common.utils.logger import get_logger from otx.algorithms.segmentation.adapters.mmseg.apis.train import train_segmentor @@ -63,6 +64,9 @@ from
otx.api.serialization.label_mapper import label_schema_to_bytes from otx.api.usecases.tasks.interfaces.export_interface import ExportType +if is_hpu_available(): + import habana_frameworks.torch.core as htcore + logger = get_logger() # TODO Remove unnecessary pylint disable @@ -352,6 +356,11 @@ def _train_model( model.train() model.CLASSES = target_classes + if is_hpu_available(): + # TODO (sungchul): move it to appropriate location if needed + htcore.hpu.ModuleCacher(max_graphs=10)(model=model.backbone, inplace=True) + htcore.hpu.ModuleCacher(max_graphs=10)(model=model.decode_head, inplace=True) + if cfg.distributed: convert_sync_batchnorm(model) From 2239ac13a4b8f25c80e0ca972dfd9aa444062c25 Mon Sep 17 00:00:00 2001 From: Vladislav Sovrasov Date: Thu, 23 Nov 2023 05:12:08 +0100 Subject: [PATCH 08/39] Update Intel devices branch to the latest develop state (#2666) * Update base.txt updated dependency version of datumaro * Update __init__.py update version string * Update requirements.txt * Temporarily skip visual prompting openvino integration test (#2323) * Fix import dm.DatasetSubset (#2324) Signed-off-by: Kim, Vinnam * Fix semantic segmentation soft prediction dtype (#2322) * Fix semantic segmentation soft prediction dtype * relax ref sal vals check --------- Co-authored-by: Songki Choi * Contrain yapf verison lesser than 0.40.0 (#2328) contrain_yapf_version * Fix detection e2e tests (#2327) Fix for detection * Mergeback: Label addtion/deletion 1.2.4 --> 1.4.0 (#2326) * Make black happy * Fix conflicts * Merge-back: add test datasets and edit the test code * Make black happy * Fix mis-merge * Make balck happy * Fix typo * Fix typoi --------- Co-authored-by: Songki Choi * Bump datumaro up to 1.4.0rc2 (#2332) bump datumaro up to 1.4.0rc2 * Tiling Doc for releases 1.4.0 (#2333) * Add tiling documentation * Bump otx version to 1.4.0rc2 (#2341) * OTX deploy for visual prompting task (#2311) * Enable `otx deploy` * (WIP) integration test * Docstring * Update args for create_model * Manually set image embedding layout * Enable to use model api for preprocessing - `fit_to_window` doesn't work expectedly, so newly implemented `VisualPromptingOpenvinoAdapter` to use new resize function * Remove skipped test * Updated * Update unit tests on model wrappers * Update * Update configuration * Fix not to patch pretrained path * pylint & update model api version in docstring --------- Co-authored-by: Wonju Lee * Bump albumentations version in anomaly requirements (#2350) increment albumentations version * Update action detection (#2346) * Remove skip mark for PTQ test of action detection * Update action detection documentation * Fix e2e (#2348) * Change classification dataset from dummy to toy * Revert test changes * Change label name for multilabel dataset * Revert e2e test changes * Change ov test cases' threshold * Add parent's label * Update ModelAPI in 1.4 release (#2347) * Upgrade model API * Update otx in exportable code * Fix unit tests * Fix black * Fix detection inference * Fix det tiling * Fix mypy * Fix demo * Fix visualizer in demo * Fix black * Add OTX optimize for visual prompting task (#2318) * Initial commit * Update block * (WIP) otx optimize * Fix * WIP * Update configs & exported outputs * Remove unused modules for torch * Add unit tests * pre-commit * Update CHANGELOG * Update detection docs (#2335) * Update detection docs * Revert template id changes * Fix wrong template id * Update docs/source/guide/explanation/algorithms/object_detection/object_detection.rst Co-authored-by: 
Eunwoo Shin * Update docs/source/guide/explanation/algorithms/object_detection/object_detection.rst Co-authored-by: Eunwoo Shin --------- Co-authored-by: Eunwoo Shin * Add visual prompting documentation (#2354) * (WIP) write docs * Add visual prompting documentation * Update CHANGELOG --------- Co-authored-by: sungchul.kim * Remove custom modelapi patch in visual prompting (#2359) * Remove custom modelapi patch * Update test * Fix graph metric order and label issues (#2356) * Fix graph metric going backward issue * Add license notice * Fix pre-commit issue * Add rename items & logic for metric --------- Signed-off-by: Songki Choi * Update multi-label document and conversion script (#2358) Update docs, label convert script * Update third party programs (#2365) * Make anomaly task compatible with older albumentations versions (#2363) * fix transforms export in metadata * wrap transform dict * add todo for updating to_dict call * Fixing detection saliency map for one class case (#2368) * fix softmax * fix validity tests * Add e2e test for visual prompting (#2360) * (WIP) otx optimize * pre-commit * (WIP) set e2e * Remove nncf config * Add visual prompting requirement * Add visual prompting in tox * Add visual prompting in setup.py * Fix typo * Delete unused configuration.yaml * Edit test_name * Add to limit activation range * Update from `vp` to `visprompt` * Fix about no returning the first label * pre-commit * (WIP) otx optimize * pre-commit * (WIP) set e2e * Remove nncf config * Add visual prompting requirement * Add visual prompting in tox * Add visual prompting in setup.py * Fix typo * pre-commit * Add actions * Update tests/e2e/cli/visual_prompting/test_visual_prompting.py Co-authored-by: Jaeguk Hyun * Skip PTQ e2e test * Change task name * Remove skipped tc --------- Co-authored-by: Jaeguk Hyun * Fix e2e (#2366) * Change e2e reference name * Update openvino eval threshold for multiclass classification * Change comment message * Fix tiling e2e tests --------- Co-authored-by: GalyaZalesskaya * Add Dino head unit tests (#2344) Recover DINO head unit tests * Update for release 1.4.0rc2 (#2370) * update for release 1.4.0rc2 * Add skip mark for unstable unit tests --------- Co-authored-by: jaegukhyun * Fix NNCF training on CPU (#2373) * Align label order between Geti and OTX (#2369) * align label order * align with pre-commit * update CHANGELOG.md * deal with edge case * update type hint * Remove CenterCrop from Classification test pipeline and editing missing docs link (#2375) * Fix missing link for docs and removing centercrop for classification data pipeline * Revert the test threshold * Fix H-label classification (#2377) * Fix h-labelissue * Update unit tests * Make black happy * Fix unittests * Make black happy * Fix update heades information func * Update the logic: consider the loss per batch * Update for release 1.4 (#2380) * updated for 1.4.0rc3 * update changelog & release note * bump datumaro version up --------- Co-authored-by: Songki Choi * Switch to PTQ for sseg (#2374) * Switch to PTQ for sseg * Update log messages * Fix invalid import structures in otx.api (#2383) Update tiler.py * Update for 1.4.0rc4 (#2385) update for release 1.4.0rc4 * [release 1.4.0] XAI: Return saliency maps for Mask RCNN IR async infer (#2395) * Return saliency maps for openvino async infer * add workaround to fix yapf importing error --------- Co-authored-by: eunwoosh * Update for release 1.4.0 (#2399) update version string Co-authored-by: Sungman Cho * Fix broken links in documentation (#2405) * fix 
docs links to datumaro's docs * fix docs links to otx's docs * bump version to 1.4.1 * Update exportable code README (#2411) * Updated for release 1.4.1 (#2412) updated for release 1.4.1 * Add workaround for the incorrect meta info M-RCNN (used for XAI) (#2437) Add workaround for the incorrect mata info * Add model category attributes to model template (#2439) Add model category attributes to model template * Add model category & status fields in model template * Add is_default_for_task attr to model template * Update model templates with category attrs * Add integration tests for model templates consistency * Fix license & doc string * Fix typo * Refactor test cases * Refactor common tests by generator --------- Signed-off-by: Songki Choi * Update for 1.4.2rc1 (#2441) update for release 1.4.2rc1 * Fix label list order for h-label classification (#2440) * Fix label list for h-label cls * Fix unit tests * Modified fq numbers for lite HRNET (#2445) modified fq numbers for lite HRNET * Update PTQ ignored scope for hrnet 18 mod2 (#2449) Update ptq ignored scope for hrnet 18 mod2 * Fix OpenVINO inference for legacy models (#2450) * bug fix for legacy openvino models * Add tests * Specific exceptions --------- * Update for 1.4.2rc2 (#2455) update for release 1.4.2rc2 * Prevent zero-sized saliency map in tiling if tile size is too big (#2452) * Prevent zero-sized saliency map in tiling if tile size is too big * Prevent zero-sized saliency in tiling (PyTorch) * Add unit tests for Tiler merge features methods --------- Co-authored-by: Galina * Update pot fq reference number (#2456) update pot fq reference number to 15 * Bump datumaro version to 1.5.0rc0 (#2470) bump datumaro version to 1.5.0rc0 * Set tox version constraint (#2472) set tox version constraint - https://github.com/tox-dev/tox/issues/3110 * Bug fix for albumentations (#2467) * bug fix for legacy openvino models * Address albumentation issue --------- Co-authored-by: Ashwin Vaidya * update for release 1.4.2rc3 * Add a dummy hierarchical config required by MAPI (#2483) * bump version to 1.4.2rc4 * Bump datumaro version (#2502) * bump datumaro version * remove deprecated/reomved attribute usage of the datumaro * Upgrade nncf version for 1.4 release (#2459) * Upgrade nncf version * Fix nncf interface warning * Set the exact nncf version * Update FQ refs after NNCF upgrade * Use NNCF from pypi * Update version for release 1.4.2rc5 (#2507) update version for release 1.4.2rc5 * Update for 1.4.2 (#2514) update for release 1.4.2 * create branch release/1.5.0 * Delete mem cache handler after training is done (#2535) release mem cache handler after training is done * Fix bug that auto batch size doesn't consider distributed training (#2533) * consider distributed training while searching batch size * update unit test * reveret gpu memory upper bound * fix typo * change allocated to reserved * add unit test for distributed training * align with pre-commit * Apply fix progress hook to release 1.5.0 (#2539) * Fix hook's ordering issue. 
AdaptiveRepeatHook changes the runner.max_iters before the ProgressHook * Change the expression * Fix typo * Fix multi-label, h-label issue * Fix auto_bs issue * Apply suggestions from code review Co-authored-by: Eunwoo Shin * Reflecting reviews * Refactor the name of get_data_cfg * Revert adaptive hook sampler init * Refactor the function name: get_data_cfg -> get_subset_data_cfg * Fix unit test errors * Remove adding AdaptiveRepeatDataHook for autobs * Remove unused import * Fix detection and segmentation case in Geti scenario --------- Co-authored-by: Eunwoo Shin * Re introduce adaptive scheduling for training (#2541) * Re-introduce adaptive patience for training * Revert unit tests * Update for release 1.4.3rc1 (#2542) * Mirror Anomaly ModelAPI changes (#2531) * Migrate anomaly exportable code to modelAPI (#2432) * Fix license in PR template * Migrate to modelAPI * Remove color conversion in streamer * Remove reverse_input_channels * Add float * Remove test as metadata is no longer used * Remove metadata from load method * remove anomalib openvino inferencer * fix signature * Support logacy OpenVINO model * Transform image * add configs * Re-introduce adaptive training (#2543) * Re-introduce adaptive patience for training * Revert unit tests * Fix auto input size mismatch in eval & export (#2530) * Fix auto input size mismatch in eval & export * Re-enable E2E tests for Issue#2518 * Add input size check in export testing * Format float numbers in log * Fix NNCF export shape mismatch * Fix saliency map issue * Disable auto input size if tiling enabled --------- Signed-off-by: Songki Choi * Update ref. fq number for anomaly e2e2 (#2547) * Skip e2e det tests by issue2548 (#2550) * Add skip to chained TC for issue #2548 (#2552) * Update for release 1.4.3 (#2551) * Update MAPI for 1.5 release (#2555) Upgrade MAPI to v 0.1.6 (#2529) * Upgrade MAPI * Update exp code demo commit * Fix MAPI imports * Update ModelAPI configuration (#2564) * Update MAPI rt infor for detection * Upadte export info for cls, det and seg * Update unit tests * Disable QAT for SegNexts (#2565) * Disable NNCF QAT for SegNext * Del obsolete pot configs * Move NNCF skip marks to test commands to avoid duplication * Add Anomaly modelAPI changes to releases/1.4.0 (#2563) * bug fix for legacy openvino models * Apply otx anomaly 1.5 changes * Fix tests * Fix compression config * fix modelAPI imports * update integration tests * Edit config types * Update keys in deployed model --------- Co-authored-by: Ashwin Vaidya Co-authored-by: Kim, Sungchul * Fix the CustomNonLinearClsHead when the batch_size is set to 1 (#2571) Fix bn1d issue Co-authored-by: sungmanc * Update ModelAPI configuration (#2564 from 1.4) (#2568) Update ModelAPI configuration (#2564) * Update MAPI rt infor for detection * Upadte export info for cls, det and seg * Update unit tests * Update for 1.4.4rc1 (#2572) * Hotfix DatasetEntity.get_combined_subset function loop (#2577) Fix get_combined_subset function * Revert default input size to `Default` due to YOLOX perf regression (#2580) Signed-off-by: Songki Choi * Fix for the degradation issue of the classification task (#2585) * Revert to sync with 1.4.0 * Remove repeat data * Convert to the RGB value * Fix color conversion logic * Fix precommit * Bump datumaro version to 1.5.1rc3 (#2587) * Add label ids to anomaly OpenVINO model xml (#2590) * Add label ids to model xml --------- * Fix DeiT-Tiny model regression during class incremental training (#2594) * enable IBloss for DeiT-Tiny * update changelog * add 
docstring * Add label ids to model xml in release 1.5 (#2591) Add label ids to model xml * Fix DeiT-Tiny regression test for release/1.4.0 (#2595) * Fix DeiT regression test * update changelog * temp * Fix mmcls bug not wrapping model in DataParallel on CPUs (#2601) Wrap multi-label and h-label classification models by MMDataParallel in case of CPU training. --------- Signed-off-by: Songki Choi * Fix h-label loss normalization issue w/ exclusive label group of single label (#2604) * Fix h-label loss normalization issue w/ exclusive label group with single label * Fix non-linear version --------- Signed-off-by: Songki Choi * Boost up Image numpy accessing speed through PIL (#2586) * boost up numpy accessing speed through PIL * update CHANGELOG * resolve precommit error * resolve precommit error * add fallback logic with PIL open * use convert instead of draft * Add missing import pathlib for cls e2e testing (#2610) * Fix division by zero in class incremental learning for classification (#2606) * Add empty label to reproduce zero-division error Signed-off-by: Songki Choi * Fix minor typo Signed-off-by: Songki Choi * Fix empty label 4 -> 3 Signed-off-by: Songki Choi * Prevent division by zero Signed-off-by: Songki Choi * Update license Signed-off-by: Songki Choi * Update CHANGELOG.md Signed-off-by: Songki Choi * Fix inefficient sampling Signed-off-by: Songki Choi * Revert indexing Signed-off-by: Songki Choi * Fix minor typo Signed-off-by: Songki Choi --------- Signed-off-by: Songki Choi * Unify logger usage (#2612) * unify logger * align with pre-commit * unify anomaly logger to otx * change logger file path * align with pre-commit * change logger file path in missing file * configure logger after ConfigManager is initialized * configure logger when ConfigManager instance is initialized * update unit test code * move config_logger to each cli file * align with pre-commit * change part still using mmcv logger * Fix XAI algorithm for Detection (#2609) * Improve saliency maps algorithm for Detection * Remove extra changes * Update unit tests * Changes for 1 class * Fix pre-commit * Update CHANGELOG * Tighten dependency constraint only adapting latest patches (#2607) * tighten dependency constraint only adapting latest patches * adjust scikit-image version w.r.t python version * adjust tensorboard version w.r.t python version * remove version specifier for scikit-image * Add metadata to optimized model (#2618) * bug fix for legacy openvino models * Add metadata to optimized model * Revert formatting changes --------- Co-authored-by: Ashwin Vaidya * modify omegaconf version constraint * [release 1.5.0] Fix XAI algorithm for Detection (#2617) Update detection XAI algorithm * Update dependency constraint (#2622) * Update tpp (#2621) * Fix h-label bug of missing parent labels in output (#2626) * Fix h-label bug of missing parent labels in output * Fix h-label test data label schema * Update CHANGELOG.md --------- Signed-off-by: Songki Choi * Update publish workflow (#2625) update publish workflow to push whl to internal pypi * bump datumaro version to ~=1.5.0 * fixed mistake while merging back 1.4.4 * modify readme * remove openvino model wrapper class * remove openvino model wrapper tests * [release 1.5.0] DeiT: enable tests + add ViTFeatureVectorHook (#2630) Add ViT feature vector hook * Fix docs broken link to datumaro_h-label Signed-off-by: Songki Choi * Fix wrong label settings for non-anomaly task ModelAPIs Signed-off-by: Songki Choi * Update publish workflow for tag checking (#2632) *
Update e2e tests for XAI Detection (#2634) Fix e2e XAI ref value * Disable QAT for newly added models (#2636) * Update release note and readme (#2637) * update release note and readme * remove package upload step on internal publish wf * update release note, changelog, and readme * update version string to 1.6.0dev * fix datumaro version to 1.6.0rc0 * Mergeback 1.5.0 to develop (#2642) * Update publish workflow for tag checking (#2632) * Update e2e tests for XAI Detection (#2634) * Disable QAT for newly added models (#2636) * Update release note and readme (#2637) * remove package upload step on internal publish wf * update release note, changelog, and readme * update version string to 1.6.0dev --------- Co-authored-by: Galina Zalesskaya Co-authored-by: Jaeguk Hyun * Revert "Mergeback 1.5.0 to develop" (#2645) Revert "Mergeback 1.5.0 to develop (#2642)" This reverts commit 2f67686103df873d020681f6d504f9595ce4a963. * Add a tool to help conduct experiments (#2651) * implement run and experiment * implement experiment result aggregator * refactor experiment.py * refactor run.py * get export model speed * add var column * refactor experiment.py * refine a way to update argument in cmd * refine resource tracker * support anomaly on research framework * refine code aggregating exp result * bugfix * make other task available * eval task save avg_time_per_images as result * Add new argument to track CPU&GPU utilization and memory usage (#2500) * add argument to track resource usage * fix bug * fix a bug in a multi gpu case * use total cpu usage * add unit test * add mark to unit test * cover edge case * add pynvml in requirement * align with pre-commit * add license comment * update changelog * refine argument help * align with pre-commit * add version to requirement and raise an error if unsupported values are given * apply new resource tracker format * refactor run.py * support optimize in research framework * cover edge case * Handle a case where failed cases exist * make argparse raise error rather than exit if a problem exists * revert tensorboard aggregator * bugfix * save failed cases as yaml file * deal with integer in variables * add epoch to metric * use latest log.json file * align with otx logging method * move experiment.py from cli to tools * refactor experiment.py * merge otx run feature into experiment.py * move set_arguments_to_cmd definition into experiment.py * refactor experiment.py * bugfix * minor bugfix * use otx.cli instead of each otx entry * add feature to parse single workspace * add comments * fix bugs * align with pre-commit * revert parser argument * align with pre-commit * Revert inference batch size to 1 for instance segmentation (#2648) Signed-off-by: Songki Choi * Remove unnecessary log while building a model (#2658) * revert logger in otx/algorithms/detection/adapters/mmdet/utils/builder.py * revert logger in otx/algorithms/classification/adapters/mmcls/utils/builder.py * make change more readable * Fix a minor bug of experiment.py (#2662) fix bug * Not check avg_time_per_image during test (#2665) * ignore avg_time_per_image during test * do not call stdev when length of array is less than 2 * ignore avg_time_per_image during regression test * Update device selection logic in classification --------- Signed-off-by: Kim, Vinnam Signed-off-by: Songki Choi Co-authored-by: Yunchu Lee Co-authored-by: Kim, Sungchul Co-authored-by: Vinnam Kim Co-authored-by: Evgeny Tsykunov Co-authored-by: Songki Choi Co-authored-by: Eunwoo Shin Co-authored-by: Jaeguk Hyun
Co-authored-by: Sungman Cho Co-authored-by: Eugene Liu Co-authored-by: Wonju Lee Co-authored-by: Dick Ameln Co-authored-by: sungchul.kim Co-authored-by: GalyaZalesskaya Co-authored-by: Harim Kang Co-authored-by: Ashwin Vaidya Co-authored-by: Ashwin Vaidya Co-authored-by: sungmanc --- .github/workflows/publish_internal.yml | 83 ++ CHANGELOG.md | 43 +- README.md | 30 +- .../hierarhical_classification.rst | 4 +- .../object_detection/object_detection.rst | 20 + docs/source/guide/release_notes/index.rst | 154 +++- requirements/api.txt | 8 +- requirements/base.txt | 19 +- requirements/classification.txt | 4 +- requirements/detection.txt | 6 +- requirements/dev.txt | 14 +- requirements/docs.txt | 14 +- requirements/openvino.txt | 2 +- requirements/segmentation.txt | 6 +- requirements/visual_prompting.txt | 2 +- src/otx/__init__.py | 2 +- .../adapters/mmaction/data/det_dataset.py | 4 +- .../action/adapters/mmaction/task.py | 2 +- .../adapters/mmaction/utils/det_eval_utils.py | 9 +- .../action/adapters/openvino/task.py | 4 +- src/otx/algorithms/action/task.py | 2 +- .../action/tools/sample_classification.py | 5 +- .../action/tools/sample_detection.py | 5 +- .../adapters/anomalib/callbacks/inference.py | 4 +- .../anomaly/adapters/anomalib/data/data.py | 4 +- .../adapters/anomalib/logger/logger.py | 85 -- src/otx/algorithms/anomaly/tasks/inference.py | 19 +- src/otx/algorithms/anomaly/tasks/nncf.py | 4 +- src/otx/algorithms/anomaly/tasks/openvino.py | 56 +- src/otx/algorithms/anomaly/tasks/train.py | 4 +- src/otx/algorithms/anomaly/tools/sample.py | 4 +- .../adapters/mmcls/configurer.py | 2 +- .../adapters/mmcls/datasets/otx_datasets.py | 17 +- .../adapters/mmcls/models/classifiers/byol.py | 2 +- .../classifiers/custom_image_classifier.py | 2 +- .../mmcls/models/classifiers/mixin.py | 2 +- .../models/classifiers/semisl_classifier.py | 2 +- .../semisl_multilabel_classifier.py | 2 +- .../mmcls/models/heads/custom_cls_head.py | 4 + .../custom_hierarchical_linear_cls_head.py | 2 +- ...custom_hierarchical_non_linear_cls_head.py | 2 +- .../heads/custom_vision_transformer_head.py | 7 + .../adapters/mmcls/models/losses/ib_loss.py | 4 +- .../adapters/mmcls/nncf/builder.py | 4 +- .../adapters/mmcls/nncf/task.py | 2 +- .../classification/adapters/mmcls/task.py | 24 +- .../adapters/mmcls/utils/builder.py | 8 +- .../adapters/mmcls/utils/config_utils.py | 5 +- .../adapters/mmcls/utils/exporter.py | 2 +- .../classification/adapters/openvino/task.py | 13 +- .../classification/configs/configuration.yaml | 7 +- src/otx/algorithms/classification/task.py | 9 +- .../tools/classification_sample.py | 4 +- .../classification/utils/cls_utils.py | 8 +- .../common/adapters/mmcv/configurer.py | 2 +- .../mmcv/hooks/adaptive_repeat_data_hook.py | 2 +- .../mmcv/hooks/adaptive_training_hook.py | 2 +- .../common/adapters/mmcv/hooks/cancel_hook.py | 2 +- .../mmcv/hooks/composed_dataloaders_hook.py | 2 +- .../mmcv/hooks/custom_model_ema_hook.py | 2 +- .../mmcv/hooks/dual_model_ema_hook.py | 2 +- .../mmcv/hooks/early_stopping_hook.py | 2 +- .../adapters/mmcv/hooks/force_train_hook.py | 2 +- .../common/adapters/mmcv/hooks/logger_hook.py | 2 +- .../mmcv/hooks/loss_dynamics_tracking_hook.py | 2 +- .../adapters/mmcv/hooks/mean_teacher_hook.py | 2 +- .../adapters/mmcv/hooks/model_ema_v2_hook.py | 2 +- .../adapters/mmcv/hooks/no_bias_decay_hook.py | 2 +- .../adapters/mmcv/hooks/progress_hook.py | 2 +- .../mmcv/hooks/recording_forward_hook.py | 12 +- .../adapters/mmcv/hooks/task_adapt_hook.py | 2 +- .../mmcv/hooks/two_crop_transform_hook.py | 
2 +- .../mmcv/models/backbones/efficientnet.py | 2 +- .../mmcv/models/backbones/efficientnetv2.py | 2 +- .../mmcv/models/backbones/mobilenetv3.py | 2 +- .../common/adapters/mmcv/nncf/utils.py | 2 +- .../common/adapters/mmcv/tasks/exporter.py | 2 +- .../adapters/mmcv/utils/automatic_bs.py | 2 +- .../adapters/mmcv/utils/config_utils.py | 2 +- .../torch/dataloaders/composed_dataloader.py | 2 +- .../dataloaders/samplers/balanced_sampler.py | 20 +- .../torch/dataloaders/samplers/otx_sampler.py | 2 +- .../adapters/torch/utils/bs_search_algo.py | 2 +- src/otx/algorithms/common/tasks/base_task.py | 4 +- src/otx/algorithms/common/tasks/nncf_task.py | 2 +- src/otx/algorithms/common/utils/data.py | 4 +- src/otx/algorithms/common/utils/task_adapt.py | 2 +- .../detection/adapters/mmdet/configurer.py | 2 +- .../adapters/mmdet/datasets/dataset.py | 2 +- .../hooks/det_class_probability_map_hook.py | 14 +- .../mmdet/models/backbones/imgclsmob.py | 7 +- .../mmdet/models/dense_heads/mmov_rpn_head.py | 2 +- .../models/detectors/custom_atss_detector.py | 2 +- .../custom_deformable_detr_detector.py | 2 +- .../models/detectors/custom_dino_detector.py | 2 +- .../models/detectors/custom_lite_dino.py | 2 +- .../detectors/custom_maskrcnn_detector.py | 2 +- .../detectors/custom_single_stage_detector.py | 2 +- .../detectors/custom_two_stage_detector.py | 2 +- .../models/detectors/custom_vfnet_detector.py | 2 +- .../models/detectors/custom_yolox_detector.py | 2 +- .../models/detectors/loss_dynamics_mixin.py | 2 +- .../mmdet/models/detectors/mean_teacher.py | 2 +- .../detection/adapters/mmdet/nncf/builder.py | 4 +- .../detection/adapters/mmdet/nncf/task.py | 2 +- .../detection/adapters/mmdet/task.py | 2 +- .../detection/adapters/mmdet/utils/builder.py | 8 +- .../adapters/mmdet/utils/config_utils.py | 2 +- .../adapters/mmdet/utils/exporter.py | 2 +- .../detection/adapters/openvino/task.py | 11 +- .../configs/detection/configuration.yaml | 5 +- .../cspdarknet_yolox_l/template.yaml | 1 - .../cspdarknet_yolox_s/template.yaml | 1 - .../cspdarknet_yolox_x/template.yaml | 1 - .../detection/resnext101_atss/template.yaml | 1 - .../instance_segmentation/configuration.yaml | 5 +- .../template_experimental.yaml | 3 +- .../efficientnetb2b_maskrcnn/template.yaml | 2 +- .../maskrcnn_swin_t/template.yaml | 1 - .../rotated_detection/configuration.yaml | 5 +- src/otx/algorithms/detection/task.py | 2 +- .../detection/tools/detection_sample.py | 4 +- .../tools/detection_semisl_sample.py | 4 +- .../tools/instance_segmentation_sample.py | 4 +- src/otx/algorithms/detection/utils/data.py | 2 +- src/otx/algorithms/detection/utils/utils.py | 13 +- .../segmentation/adapters/mmseg/configurer.py | 2 +- .../mmseg/models/backbones/litehrnet.py | 5 +- .../mmseg/models/segmentors/detcon.py | 2 +- .../segmentors/mean_teacher_segmentor.py | 2 +- .../models/segmentors/otx_encoder_decoder.py | 5 +- .../adapters/mmseg/nncf/builder.py | 5 +- .../segmentation/adapters/mmseg/nncf/task.py | 2 +- .../segmentation/adapters/mmseg/task.py | 2 +- .../adapters/mmseg/utils/data_utils.py | 2 +- .../adapters/mmseg/utils/exporter.py | 2 +- .../segmentation/adapters/openvino/task.py | 11 +- .../pot_optimization_config.json | 14 - .../configs/ham_segnext_b/template.yaml | 1 - .../pot_optimization_config.json | 14 - .../configs/ham_segnext_s/template.yaml | 1 - .../pot_optimization_config.json | 14 - .../configs/ham_segnext_t/template.yaml | 1 - src/otx/algorithms/segmentation/task.py | 2 +- .../segmentation/tools/segmentation_sample.py | 4 +- 
.../algorithms/segmentation/utils/metadata.py | 7 +- .../config/visual_prompting_config.py | 2 +- .../pytorch_lightning/datasets/dataset.py | 2 +- .../visual_prompting/tasks/inference.py | 2 +- .../visual_prompting/tasks/openvino.py | 11 +- .../visual_prompting/tasks/train.py | 2 +- src/otx/api/entities/dataset_item.py | 4 +- src/otx/api/entities/datasets.py | 6 +- src/otx/api/entities/image.py | 8 +- src/otx/api/entities/label_schema.py | 4 +- src/otx/api/entities/metrics.py | 5 +- src/otx/api/usecases/evaluation/accuracy.py | 4 +- src/otx/api/usecases/evaluation/f_measure.py | 4 +- .../demo/demo_package/model_container.py | 23 +- .../prediction_to_annotation_converter.py | 2 +- .../reporting/time_monitor_callback.py | 4 +- src/otx/api/utils/tiler.py | 419 +++++++++ src/otx/cli/manager/config_manager.py | 6 +- src/otx/cli/tools/build.py | 2 + src/otx/cli/tools/deploy.py | 2 + src/otx/cli/tools/eval.py | 10 +- src/otx/cli/tools/explain.py | 3 +- src/otx/cli/tools/export.py | 2 + src/otx/cli/tools/optimize.py | 2 + src/otx/cli/tools/train.py | 2 + src/otx/cli/utils/experiment.py | 5 +- src/otx/cli/utils/hpo.py | 4 +- src/otx/cli/utils/multi_gpu.py | 4 +- .../adapter/segmentation_dataset_adapter.py | 2 +- .../core/data/caching/mem_cache_handler.py | 2 +- src/otx/core/ov/graph/graph.py | 2 +- .../ov/graph/parsers/cls/cls_base_parser.py | 2 +- src/otx/core/ov/graph/utils.py | 2 +- src/otx/core/ov/models/ov_model.py | 2 +- src/otx/core/ov/models/parser_mixin.py | 2 +- src/otx/core/ov/ops/infrastructures.py | 2 +- src/otx/core/ov/ops/utils.py | 4 - src/otx/hpo/hpo_base.py | 4 +- src/otx/hpo/hpo_runner.py | 4 +- src/otx/hpo/hyperband.py | 4 +- src/otx/hpo/resource_manager.py | 4 +- src/otx/hpo/search_space.py | 4 +- .../stages/classification/incremental.yaml | 4 - .../anomalib/logger => utils}/__init__.py | 8 +- .../{algorithms/common => }/utils/logger.py | 0 .../3/.gitignore | 4 + .../datumaro_h-label/annotations/train.json | 5 + .../datumaro_h-label/annotations/valid.json | 5 + .../annotations/train.json | 5 + .../annotations/valid.json | 5 + .../cli/classification/test_classification.py | 46 +- .../test_api_xai_sanity_detection.py | 2 +- .../test_segmentation.py | 15 - tests/e2e/test_api_xai_sanity.py | 440 ++++++++++ .../cli/classification/test_classification.py | 9 - .../test_rotated_detection.py | 1 + .../test_segmentation.py | 6 +- tests/regression/regression_command.py | 4 + tests/test_suite/run_test_command.py | 41 +- .../classification/utils/test_utils.py | 4 + .../detection/test_xai_detection_validity.py | 36 +- .../detection/utils/test_detection_utils.py | 4 + .../visual_prompting/tasks/test_inference.py | 2 +- third-party-programs.txt | 243 ++++++ tools/experiment.py | 801 ++++++++++++++++++ 210 files changed, 2755 insertions(+), 605 deletions(-) create mode 100644 .github/workflows/publish_internal.yml delete mode 100644 src/otx/algorithms/anomaly/adapters/anomalib/logger/logger.py delete mode 100644 src/otx/algorithms/segmentation/configs/ham_segnext_b/pot_optimization_config.json delete mode 100644 src/otx/algorithms/segmentation/configs/ham_segnext_s/pot_optimization_config.json delete mode 100644 src/otx/algorithms/segmentation/configs/ham_segnext_t/pot_optimization_config.json create mode 100644 src/otx/api/utils/tiler.py rename src/otx/{algorithms/anomaly/adapters/anomalib/logger => utils}/__init__.py (82%) rename src/otx/{algorithms/common => }/utils/logger.py (100%) create mode 100644 tests/assets/classification_dataset_class_incremental/3/.gitignore create mode 100644 
tests/e2e/test_api_xai_sanity.py create mode 100644 tools/experiment.py diff --git a/.github/workflows/publish_internal.yml b/.github/workflows/publish_internal.yml new file mode 100644 index 00000000000..800cc2c60ac --- /dev/null +++ b/.github/workflows/publish_internal.yml @@ -0,0 +1,83 @@ +name: Build and upload to internal PyPI + +on: + workflow_dispatch: # run on request (no need for PR) + +jobs: + build_wheels: + name: Build wheels + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v3 + - name: Build wheels + uses: pypa/cibuildwheel@v2.13.1 + - uses: actions/upload-artifact@v3 + with: + path: ./wheelhouse/*.whl + + build_sdist: + name: Build source distribution + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v3 + - name: Set up Python 3.10 + uses: actions/setup-python@v3 + with: + python-version: "3.10" + - name: Install pypa/build + run: python -m pip install build + - name: Build sdist + run: python -m build --sdist + - uses: actions/upload-artifact@v3 + with: + path: dist/*.tar.gz + + publish_package: + name: Publish package + needs: [build_wheels, build_sdist] + environment: pypi + runs-on: [self-hosted, linux, x64, dev] + permissions: write-all + steps: + - name: Set up Python + uses: actions/setup-python@v4 + with: + python-version: "3.10" + - name: Install dependencies + run: python -m pip install twine + - name: Download artifacts + uses: actions/download-artifact@v3 + with: + # unpacks default artifact into dist/ + # if `name: artifact` is omitted, the action will create extra parent dir + name: artifact + path: dist + - name: Check tag + id: check-tag + uses: actions-ecosystem/action-regex-match@v2 + with: + text: ${{ github.ref }} + regex: '^refs/heads/releases/[0-9]+\.[0-9]+\.[0-9]+(\.[0-9]+rc[0-9]+|rc[0-9]+)?$' + - name: Check dist contents + run: twine check dist/* + - name: Publish package dist to internal PyPI + if: ${{ steps.check-tag.outputs.match != '' }} + run: | + export no_proxy=${{ secrets.PYPI_HOST }} + export REPOSITORY_URL=http://${{ secrets.PYPI_HOST }}:${{ secrets.PYPI_PORT }} + twine upload --verbose --repository-url $REPOSITORY_URL dist/* -u ${{ secrets.PYPI_USER }} -p ${{ secrets.PYPI_PASSWORD }} + - name: Publish package distributions to TestPyPI + if: ${{ steps.check-tag.outputs.match == '' }} + run: | + export REPOSITORY_URL=https://test.pypi.org/legacy/ + twine upload --verbose --repository-url $REPOSITORY_URL dist/* -u __token__ -p ${{ secrets.TESTPYPI_API_TOKEN }} + - name: Clean up dist + if: ${{ always() }} + run: | + if OUTPUT=$(ls | grep -c dist) + then + echo "Cleaning up dist directory" + rm -r dist + fi diff --git a/CHANGELOG.md b/CHANGELOG.md index 393a2f10560..9953ee7d95b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,18 +2,20 @@ All notable changes to this project will be documented in this file. 
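The branch filter in the workflow above is what routes uploads: a ref matching the release pattern goes to the internal PyPI, anything else falls through to TestPyPI. A minimal sketch of the pattern's behavior (plain Python, illustration only, not part of the patch):

import re

# Same expression as the `check-tag` step of the publish workflow above.
RELEASE_RE = re.compile(r"^refs/heads/releases/[0-9]+\.[0-9]+\.[0-9]+(\.[0-9]+rc[0-9]+|rc[0-9]+)?$")

assert RELEASE_RE.match("refs/heads/releases/1.5.0")       # final release -> internal PyPI
assert RELEASE_RE.match("refs/heads/releases/1.5.0rc1")    # release candidate -> internal PyPI
assert RELEASE_RE.match("refs/heads/releases/1.4.2.1rc1")  # hotfix rc -> internal PyPI
assert not RELEASE_RE.match("refs/heads/develop")          # everything else -> TestPyPI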
-## \[v1.5.0 - unreleased\] +## \[unreleased\] + +## \[v1.5.0\] ### New features -- Enable configurable confidence threshold for otx eval and export() +- Enable configurable confidence threshold for otx eval and export () - Add YOLOX variants as new object detector models () -- Enable FeatureVectorHook to support action tasks() +- Enable FeatureVectorHook to support action tasks () - Add ONNX metadata to detection, instance segmentation, and segmentation models () -- Add a new feature to configure input size() +- Add a new feature to configure input size () - Introduce the OTXSampler and AdaptiveRepeatDataHook to achieve faster training at the small data regime () -- Add a new object detector Lite-DINO() -- Add Semi-SL Mean Teacher algorithm for Instance Segmentation task() +- Add a new object detector Lite-DINO () +- Add Semi-SL Mean Teacher algorithm for Instance Segmentation task () - Official support for YOLOX-X, YOLOX-L, YOLOX-S, ResNeXt101-ATSS () - Add new argument to track resource usage in train command () - Add Self-SL for semantic segmentation of SegNext families () @@ -30,17 +32,42 @@ All notable changes to this project will be documented in this file. ### Bug fixes - Fix F1 auto-threshold to choose best largest confidence () +- Fix IBLoss enablement with DeiT-Tiny during class incremental training () ### Known issues - OpenVINO(==2023.0) IR inference is not working well on 2-stage models (e.g. Mask-RCNN) exported from torch>=1.13.1 - NNCF QAT optimization is disabled for MaskRCNN models due to CUDA runtime error in ROIAlign kernel on torch==2.0.1 +## \[v1.4.4\] + +### Enhancements + +- Update ModelAPI configuration () +- Add Anomaly modelAPI changes () +- Update Image numpy access () + +### Bug fixes + +- Fix IBLoss enablement with DeiT-Tiny during class incremental training () +- Fix mmcls bug not wrapping model in DataParallel on CPUs () +- Fix h-label loss normalization issue w/ exclusive label group of single label () +- Fix division by zero in class incremental learning for classification () +- Fix saliency maps calculation issue for detection models () +- Fix h-label bug of missing parent labels in output () + +## \[v1.4.3\] + +### Enhancements + +- Re-introduce adaptive scheduling for training () + ## \[v1.4.2\] ### Enhancements -- Bump datumaro version to 1.5.0rc0 () +- Upgrade nncf version to 2.6.0 () +- Bump datumaro version to 1.5.0 (, ) - Set tox version constraint () - Add model category attributes to model template () @@ -304,7 +331,7 @@ All notable changes to this project will be documented in this file.
- Enhance `find` command to find configurations of supported tasks / algorithms / models / backbones - Introduce `build` command to customize task or model configurations in isolated workspace - Auto-config feature to automatically select the right algorithm and default model for the `train` & `build` command by detecting the task type of given input dataset -- Improve [documentation](https://openvinotoolkit.github.io/training_extensions/latest/guide/get_started/introduction.html) +- Improve [documentation](https://openvinotoolkit.github.io/training_extensions/1.0.0/guide/get_started/introduction.html) - Improve training performance by introducing enhanced loss for the few-shot transfer ### Bug fixes diff --git a/README.md b/README.md index 665dc2ae5bc..e8e3f424eeb 100644 --- a/README.md +++ b/README.md @@ -42,7 +42,7 @@ If you are an experienced user, you can configure your own model based on [torch Furthermore, OpenVINO™ Training Extensions provides automatic configuration for ease of use. The framework will analyze your dataset and identify the most suitable model and figure out the best input size setting and other hyper-parameters. -The development team is continuously extending this [Auto-configuration](https://openvinotoolkit.github.io/training_extensions/latest/guide/explanation/additional_features/auto_configuration.html) functionalities to make training as simple as possible so that single CLI command can obtain accurate, efficient and robust models ready to be integrated into your project. +The development team is continuously extending this [Auto-configuration](https://openvinotoolkit.github.io/training_extensions/stable/guide/explanation/additional_features/auto_configuration.html) functionalities to make training as simple as possible so that single CLI command can obtain accurate, efficient and robust models ready to be integrated into your project. ### Key Features @@ -63,11 +63,11 @@ OpenVINO™ Training Extensions supports the [following learning methods](https: OpenVINO™ Training Extensions provides the following usability features: -- [Auto-configuration](https://openvinotoolkit.github.io/training_extensions/latest/guide/explanation/additional_features/auto_configuration.html). OpenVINO™ Training Extensions analyzes provided dataset and selects the proper task and model with appropriate input size to provide the best accuracy/speed trade-off. It will also make a random auto-split of your dataset if there is no validation set provided. +- [Auto-configuration](https://openvinotoolkit.github.io/training_extensions/stable/guide/explanation/additional_features/auto_configuration.html). OpenVINO™ Training Extensions analyzes provided dataset and selects the proper task and model with appropriate input size to provide the best accuracy/speed trade-off. It will also make a random auto-split of your dataset if there is no validation set provided. - [Datumaro](https://openvinotoolkit.github.io/datumaro/stable/index.html) data frontend: OpenVINO™ Training Extensions supports the most common academic field dataset formats for each task. We are constantly working to extend supported formats to give more freedom of datasets format choice. - **Distributed training** to accelerate the training process when you have multiple GPUs - **Mixed-precision training** to save GPUs memory and use larger batch sizes -- Integrated, efficient [hyper-parameter optimization module (HPO)](https://openvinotoolkit.github.io/training_extensions/latest/guide/explanation/additional_features/hpo.html). 
Through dataset proxy and built-in hyper-parameter optimizer, you can get much faster hyper-parameter optimization compared to other off-the-shelf tools. The hyperparameter optimization is dynamically scheduled based on your resource budget. +- Integrated, efficient [hyper-parameter optimization module (HPO)](https://openvinotoolkit.github.io/training_extensions/stable/guide/explanation/additional_features/hpo.html). Through dataset proxy and built-in hyper-parameter optimizer, you can get much faster hyper-parameter optimization compared to other off-the-shelf tools. The hyperparameter optimization is dynamically scheduled based on your resource budget. --- @@ -97,16 +97,20 @@ You can find more details with examples in the [CLI command intro](https://openv ## Updates -### v1.4.0 (3Q23) - -- Support encrypted dataset training () -- Add custom max iou assigner to prevent CPU OOM when large annotations are used () -- Auto train type detection for Semi-SL, Self-SL and Incremental: "--train-type" now is optional () -- Add per-class XAI saliency maps for Mask R-CNN model () -- Add new object detector Deformable DETR () -- Add new object detector DINO () -- Add new visual prompting task (, , , , ) -- Add new object detector ResNeXt101-ATSS () +### v1.5.0 (4Q23) + +- Enable configurable confidence threshold for otx eval and export () +- Add YOLOX variants as new object detector models () +- Enable FeatureVectorHook to support action tasks () +- Add ONNX metadata to detection, instance segmentation, and segmentation models () +- Add a new feature to configure input size () +- Introduce the OTXSampler and AdaptiveRepeatDataHook to achieve faster training at the small data regime () +- Add a new object detector Lite-DINO () +- Add Semi-SL Mean Teacher algorithm for Instance Segmentation task () +- Official support for YOLOX-X, YOLOX-L, YOLOX-S, ResNeXt101-ATSS () +- Add new argument to track resource usage in train command () +- Add Self-SL for semantic segmentation of SegNext families () +- Adapt input size automatically based on dataset statistics () ### Release History diff --git a/docs/source/guide/explanation/algorithms/classification/hierarhical_classification.rst b/docs/source/guide/explanation/algorithms/classification/hierarhical_classification.rst index 6c4dc241610..ca1267f7bb3 100644 --- a/docs/source/guide/explanation/algorithms/classification/hierarhical_classification.rst +++ b/docs/source/guide/explanation/algorithms/classification/hierarhical_classification.rst @@ -39,7 +39,7 @@ Dataset Format .. _hierarchical_dataset: For hierarchical image classification, we created our custom dataset format that is supported by `Datumaro `_. -An example of the annotations format and dataset structure can be found in our `sample `_. +An example of the annotations format and dataset structure can be found in our `sample `_. To use OpenVINO™ Training Extensions with this format, it is required to pass dataset root paths directly to the CLI command: @@ -66,4 +66,4 @@ We use the same model templates as for Multi-class Classification. Please, refer .. Incremental Learning .. ******************** -.. To be added soon \ No newline at end of file +..
To be added soon diff --git a/docs/source/guide/explanation/algorithms/object_detection/object_detection.rst b/docs/source/guide/explanation/algorithms/object_detection/object_detection.rst index df3331e3510..7edc3065a41 100644 --- a/docs/source/guide/explanation/algorithms/object_detection/object_detection.rst +++ b/docs/source/guide/explanation/algorithms/object_detection/object_detection.rst @@ -117,6 +117,26 @@ In addition to these models, we supports experimental models for object detectio `Deformable_DETR `_ is `DETR `_ based model, and it solves slow convergence problem of DETR. `DINO `_ improves Deformable DETR based methods via denoising anchor boxes. Current SOTA models for object detection are based on DINO. `Lite-DINO `_ is efficient structure for DINO. It reduces FLOPS of transformer's encoder which takes the highest computational costs. +.. note:: + + For using experimental templates, you should specify full path of experimental template. Ex) otx build src/otx/algorithms/detection/configs/detection/resnet50_dino/template_experimental.yaml --task detection + +In addition to these models, we support experimental models for object detection. These experimental models will be changed to official models within a few releases. + ++---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+---------------------+---------------------+-----------------+ +| Template ID | Name | Complexity (GFLOPs) | Model size (MB) | ++===========================================================================================================================================================================================================================+=====================+=====================+=================+ +| `Custom_Object_Detection_Gen3_Deformable_DETR `_ | Deformable_DETR | 165 | 157.0 | ++---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+---------------------+---------------------+-----------------+ +| `Custom_Object_Detection_Gen3_DINO `_ | DINO | 235 | 182.0 | ++---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+---------------------+---------------------+-----------------+ +| `Custom_Object_Detection_Gen3_ResNeXt101_ATSS `_ | ResNeXt101-ATSS | 434.75 | 344.0 | ++---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+---------------------+---------------------+-----------------+ + +`Deformable_DETR `_ is `DETR `_ based model, and it solves slow convergence problem of DETR. `DINO `_ improves Deformable DETR based methods via denoising anchor boxes. Current SOTA models for object detection are based on DINO. +Although transformer based models show notable performance on various object detection benchmarks, CNN based models still show good performance with proper latency. +Therefore, we added a new experimental CNN based method, ResNeXt101-ATSS. ATSS still shows good performance among `RetinaNet `_ based models.
We integrated a large ResNeXt101 backbone into our Custom ATSS head, and it shows good transfer learning performance. + .. note:: For using experimental templates, you should specify full path of experimental template. Ex) otx build src/otx/algorithms/detection/configs/detection/resnet50_dino/template_experimental.yaml --task detection diff --git a/docs/source/guide/release_notes/index.rst b/docs/source/guide/release_notes/index.rst index ef0c1f6d721..133b7350c9e 100644 --- a/docs/source/guide/release_notes/index.rst +++ b/docs/source/guide/release_notes/index.rst @@ -1,9 +1,60 @@ Releases -======== +######## -*************** -[v1.4.0] (3Q23) -*************** +.. toctree:: + :maxdepth: 1 + +v1.5.0 (4Q23) +------------- + +- Enable configurable confidence threshold for otx eval and export +- Add YOLOX variants as new object detector models +- Enable FeatureVectorHook to support action tasks +- Add ONNX metadata to detection, instance segmentation, and segmentation models +- Add a new feature to configure input size +- Introduce the OTXSampler and AdaptiveRepeatDataHook to achieve faster training at the small data regime +- Add a new object detector Lite-DINO +- Add Semi-SL Mean Teacher algorithm for Instance Segmentation task +- Official support for YOLOX-X, YOLOX-L, YOLOX-S, ResNeXt101-ATSS +- Add new argument to track resource usage in train command +- Add Self-SL for semantic segmentation of SegNext families +- Adapt input size automatically based on dataset statistics +- Refine input data in-memory caching +- Adapt timeout value of initialization for distributed training +- Optimize data loading by merging load & resize operations w/ caching support for cls/det/iseg/sseg +- Support torch==2.0.1 +- Set "Auto" as default input size mode + + +v1.4.4 (4Q23) +------------- + +- Update ModelAPI configuration +- Add Anomaly modelAPI changes +- Update Image numpy access + +v1.4.3 (4Q23) +------------- + +- Re-introduce adaptive scheduling for training + +v1.4.2 (4Q23) +------------- + +- Upgrade nncf version to 2.6.0 +- Bump datumaro version to 1.5.0 +- Set tox version constraint +- Add model category attributes to model template +- Minor bug fixes + +v1.4.1 (3Q23) +------------- + +- Update the README file in exportable code +- Minor bug fixes + +v1.4.0 (3Q23) +------------- - Support encrypted dataset training - Add custom max iou assigner to prevent CPU OOM when large annotations are used @@ -13,10 +64,23 @@ Releases - Add new object detector DINO - Add new visual prompting task - Add new object detector ResNeXt101-ATSS +- Introduce channel_last parameter to improve the performance +- Decrease time for making a workspace +- Set persistent_workers and pin_memory as True in detection task +- New algorithm for Semi-SL semantic segmentation based on metric learning via class prototypes +- Self-SL for classification now can receive just a folder with any images to start contrastive pretraining +- Update OpenVINO version to 2023.0, and NNCF version to 2.5 +- Improve XAI saliency map generation for tiling detection and tiling instance segmentation +- Remove CenterCrop from Classification test pipeline and edit missing docs link +- Switch to PTQ for sseg +- Minor bug fixes + +v1.3.1 (2Q23) +------------- +- Minor bug fixes -*************** -[v1.3.0] (2Q23) -*************** +v1.3.0 (2Q23) +------------- - Support direct annotation input for COCO format - Action task supports multi GPU training @@ -25,10 +89,38 @@ Releases - Support auto adapting batch size - Support auto adapting num_workers - Support
noisy label detection for detection tasks +- Make semantic segmentation OpenVINO models compatible with ModelAPI +- Support label hierarchy through LabelTree in LabelSchema for classification task +- Enhance exportable code file structure, video inference and default value for demo +- Speedup OpenVINO inference in image classification, semantic segmentation, object detection and instance segmentation tasks +- Refactoring of ONNX export functionality +- Minor bug fixes + +v1.2.4 (3Q23) +------------- +- Per-class saliency maps for M-RCNN +- Disable semantic segmentation soft prediction processing +- Update export and nncf hyperparameters +- Minor bug fixes -************* -v1.2.0 (1Q23) -************* +v1.2.3 (2Q23) +------------- + +- Improve warning message for tiling configurable parameter +- Minor bug fixes + +v1.2.1 (2Q23) +------------- + +- Upgrade mmdeploy==0.14.0 from official PyPI +- Integrate new ignored loss in semantic segmentation +- Optimize YOLOX data pipeline +- Tiling Spatial Concatenation for OpenVINO IR +- Optimize counting train & inference speed and memory consumption +- Minor bug fixes + +v1.2.0 (2Q23) +------------- - Add generating feature cli_report.log in output for otx training - Support multiple python versions up to 3.10 @@ -36,10 +128,30 @@ v1.2.0 (1Q23) - Add option to save images after inference in OTX CLI demo together with demo in exportable code - Support storage cache in Apache Arrow using Datumaro for cls, det, seg tasks - Add noisy label detection for multi-class classification task +- Clean up and refactor the output of the OTX CLI +- Enhance DetCon logic and SupCon for semantic segmentation +- Detection task refactoring +- Classification task refactoring +- Extend OTX explain CLI +- Segmentation task refactoring +- Action task refactoring +- Optimize data preprocessing time and enhance overall performance in semantic segmentation +- Support automatic batch size decrease when there is not enough GPU memory +- Minor bug fixes + +v1.1.2 (2Q23) +------------- + +- Minor bug fixes + + +v1.1.1 (1Q23) +------------- + +- Minor bug fixes -************* v1.1.0 (1Q23) -************* +------------- - Add FP16 IR export support - Add in-memory caching in dataloader @@ -50,10 +162,24 @@ v1.1.0 (1Q23) - Add embedding of inference configuration to IR for classification - Enable VOC dataset in OTX - Add mmcls.VisionTransformer backbone support +- Parametrize saliency maps dumping in export +- Bring mmdeploy to action recognition model export & Test optimization of action tasks +- Update backbone lists +- Add explanation for XAI & minor doc fixes +- Refactor phase#1: MPA modules + + +v1.0.1 (1Q23) +------------- + +- Refine documents by proof review +- Separate installation for each task +- Improve POT efficiency by setting stat_requests_number parameter to 1 +- Minor bug fixes + -************* v1.0.0 (1Q23) -************* +------------- - Installation through PyPI - Package will be renamed as OpenVINO™ Training Extensions diff --git a/requirements/api.txt b/requirements/api.txt index c9968e9184f..4b1f9a8cfb0 100644 --- a/requirements/api.txt +++ b/requirements/api.txt @@ -1,12 +1,12 @@ # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # API Requirements.
# -attrs>=21.2.0 +attrs==23.1.* networkx>=2.6,<=2.8.0 numpy>=1.21.0,<=1.23.4 # np.bool was removed in 1.24.0 which was used in openvino runtime omegaconf>=2.1.1 opencv-python>=4.5 -pymongo -scikit-learn>=1.0.2 +pymongo==4.5.* +scikit-learn==1.3.* Shapely>=1.7.1,<=1.8.0 imagesize==1.4.1 -dill>=0.3.6 +dill==0.3.* diff --git a/requirements/base.txt b/requirements/base.txt index 859cc9f8ddf..538912b36cc 100644 --- a/requirements/base.txt +++ b/requirements/base.txt @@ -1,13 +1,14 @@ # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # -# Base Algo Requirements. # -natsort>=6.0.0 -prettytable -protobuf>=3.20.0 +# Base Algo Requirements. # +natsort==8.1.* +prettytable==3.9.* +protobuf==3.20.* pyyaml datumaro~=1.6.0rc0 -psutil -scipy>=1.8 -bayesian-optimization>=1.2.0 -tensorboard>=2.11.0 -multiprocess +psutil==5.9.* +scipy==1.10.* +bayesian-optimization==1.4.* +tensorboard==2.15.*; python_version >= '3.9' +tensorboard==2.14.*; python_version < '3.9' +multiprocess==0.70.* pynvml==11.* diff --git a/requirements/classification.txt b/requirements/classification.txt index e6ddaff425b..bfe35197d9c 100644 --- a/requirements/classification.txt +++ b/requirements/classification.txt @@ -4,5 +4,5 @@ mmcv-full==1.7.0 mmcls==0.25.0 timm==0.6.12 mmdeploy==0.14.0 -pytorchcv -yapf<0.40.0 # it should be removed after https://github.com/google/yapf/issues/1118 is solved +pytorchcv==0.0.67 +yapf<0.40.0 # it should be removed after https://github.com/google/yapf/issues/1118 is solved diff --git a/requirements/detection.txt b/requirements/detection.txt index 85195605308..ddcc33f1347 100644 --- a/requirements/detection.txt +++ b/requirements/detection.txt @@ -2,10 +2,10 @@ # Detection Requirements. mmcv-full==1.7.0 mmdet==2.28.1 -pytorchcv +pytorchcv==0.0.67 mmcls==0.25.0 timm==0.6.12 mmdeploy==0.14.0 mmengine==0.7.4 -scikit-image -yapf<0.40.0 # it should be removed after https://github.com/google/yapf/issues/1118 is solved +scikit-image # specifying a different version w.r.t. python_version has no effect +yapf<0.40.0 # it should be removed after https://github.com/google/yapf/issues/1118 is solved diff --git a/requirements/dev.txt b/requirements/dev.txt index d6648af378e..3966fdcf396 100644 --- a/requirements/dev.txt +++ b/requirements/dev.txt @@ -1,12 +1,12 @@ # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # Development Requirements.
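The pinning scheme above mixes `==X.Y.*` wildcard pins with PEP 508 environment markers (the split tensorboard pins in base.txt). A minimal sketch of how both are evaluated, assuming the `packaging` library is available (illustration only, not part of the patch):

from packaging.markers import Marker
from packaging.specifiers import SpecifierSet

# `tensorboard==2.15.*; python_version >= '3.9'` only applies when the
# marker is true for the running interpreter.
print(Marker("python_version >= '3.9'").evaluate())

# `==2.15.*` accepts any 2.15 patch release and nothing else.
spec = SpecifierSet("==2.15.*")
print("2.15.1" in spec, "2.16.0" in spec)  # -> True False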
# pre-commit==2.20.0 -pylint -pytest -coverage -pytest-timeout -pytest-mock +pylint==3.0.* +pytest==7.4.* +coverage==7.3.* +pytest-timeout==2.2.* +pytest-mock==3.12.* onnx==1.13.0 onnxruntime==1.14.1 -pytest-csv -tox>=4.5.1.1 +pytest-csv==3.0.* +tox==4.11.* diff --git a/requirements/docs.txt b/requirements/docs.txt index 3f5c106da4b..f3cb3ec5100 100644 --- a/requirements/docs.txt +++ b/requirements/docs.txt @@ -1,10 +1,10 @@ -furo -myst-parser +furo==2023.3.* +myst-parser==1.0.* sphinx==5.3.0 pydata-sphinx-theme==0.12.0 -sphinx-tabs -sphinx-panels +sphinx-tabs==3.4.* +sphinx-panels==0.4.* sphinx-copybutton==0.5.0 -sphinx-autoapi -sphinxemoji -nbsphinx +sphinx-autoapi==2.1.* +sphinxemoji==0.2.* +nbsphinx==0.9.* diff --git a/requirements/openvino.txt b/requirements/openvino.txt index 91447e1f721..90ee6025685 100644 --- a/requirements/openvino.txt +++ b/requirements/openvino.txt @@ -5,4 +5,4 @@ onnx==1.13.0 openvino-model-api==0.1.6 openvino==2023.1.0 openvino-dev==2023.1.0 -openvino-telemetry>=2022.1.0 +openvino-telemetry==2023.2.* diff --git a/requirements/segmentation.txt b/requirements/segmentation.txt index fc1346b1c9c..fdda6fa9cde 100644 --- a/requirements/segmentation.txt +++ b/requirements/segmentation.txt @@ -2,9 +2,9 @@ # Segmentation Requirements. mmcv-full==1.7.0 mmsegmentation==0.30.0 -scikit-image +scikit-image # specifying a different version w.r.t. python_version has no effect mmdeploy==0.14.0 timm==0.6.12 -pytorchcv +pytorchcv==0.0.67 einops==0.6.1 -yapf<0.40.0 # it should be removed after https://github.com/google/yapf/issues/1118 is solved +yapf<0.40.0 # it should be removed after https://github.com/google/yapf/issues/1118 is solved diff --git a/requirements/visual_prompting.txt b/requirements/visual_prompting.txt index 5daace82bef..ba53e936a7a 100644 --- a/requirements/visual_prompting.txt +++ b/requirements/visual_prompting.txt @@ -1,5 +1,5 @@ # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # Visual Prompting Requirements.
-scikit-image +scikit-image # specifying a different version w.r.t. python_version has no effect pytorch-lightning>=1.7.0,<1.10.0 timm==0.6.12 diff --git a/src/otx/__init__.py b/src/otx/__init__.py index 0dc07f3e3b2..ce673d0b5d0 100644 --- a/src/otx/__init__.py +++ b/src/otx/__init__.py @@ -3,5 +3,5 @@ # Copyright (C) 2021-2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 -__version__ = "1.5.0rc0" +__version__ = "1.6.0dev" # NOTE: Sync w/ src/otx/api/usecases/exportable_code/demo/requirements.txt on release diff --git a/src/otx/algorithms/action/adapters/mmaction/data/det_dataset.py b/src/otx/algorithms/action/adapters/mmaction/data/det_dataset.py index 7ee87e89f39..038e296e938 100644 --- a/src/otx/algorithms/action/adapters/mmaction/data/det_dataset.py +++ b/src/otx/algorithms/action/adapters/mmaction/data/det_dataset.py @@ -26,7 +26,6 @@ from mmaction.datasets.ava_dataset import AVADataset from mmaction.datasets.builder import DATASETS from mmaction.datasets.pipelines import Compose -from mmaction.utils import get_root_logger from mmcv.utils import print_log from otx.algorithms.action.adapters.mmaction.data.pipelines import RawFrameDecode @@ -36,8 +35,9 @@ from otx.api.entities.label import LabelEntity from otx.api.entities.metadata import VideoMetadata from otx.api.utils.shape_factory import ShapeFactory +from otx.utils.logger import get_logger -root_logger = get_root_logger() +root_logger = get_logger() # pylint: disable=too-many-instance-attributes, too-many-arguments, too-many-locals, super-init-not-called diff --git a/src/otx/algorithms/action/adapters/mmaction/task.py b/src/otx/algorithms/action/adapters/mmaction/task.py index 4e07e7ebc3a..66a5e981e35 100644 --- a/src/otx/algorithms/action/adapters/mmaction/task.py +++ b/src/otx/algorithms/action/adapters/mmaction/task.py @@ -57,7 +57,6 @@ from otx.algorithms.common.configs.configuration_enums import BatchSizeAdaptType from otx.algorithms.common.utils import append_dist_rank_suffix from otx.algorithms.common.utils.data import get_dataset -from otx.algorithms.common.utils.logger import get_logger from otx.api.entities.datasets import DatasetEntity from otx.api.entities.inference_parameters import InferenceParameters from otx.api.entities.model import ModelPrecision @@ -66,6 +65,7 @@ from otx.api.entities.task_environment import TaskEnvironment from otx.api.usecases.tasks.interfaces.export_interface import ExportType from otx.core.data import caching +from otx.utils.logger import get_logger logger = get_logger() diff --git a/src/otx/algorithms/action/adapters/mmaction/utils/det_eval_utils.py b/src/otx/algorithms/action/adapters/mmaction/utils/det_eval_utils.py index 1127a6eb958..9653158a609 100644 --- a/src/otx/algorithms/action/adapters/mmaction/utils/det_eval_utils.py +++ b/src/otx/algorithms/action/adapters/mmaction/utils/det_eval_utils.py @@ -14,7 +14,6 @@ # See the License for the specific language governing permissions # and limitations under the License.
-import logging import time from collections import defaultdict @@ -25,6 +24,10 @@ from mmaction.core.evaluation.ava_evaluation import standard_fields from mmaction.core.evaluation.ava_utils import print_time, read_exclusions +from otx.utils.logger import get_logger + +logger = get_logger() + # pylint: disable=too-many-locals, too-many-branches def det_eval(predictions, result_type, labels, video_infos, exclude_file, verbose=True, custom_classes=None): @@ -62,7 +65,7 @@ def det_eval(predictions, result_type, labels, video_infos, exclude_file, verbos start = time.time() for image_key in gt_boxes: if verbose and image_key in excluded_keys: - logging.info("Found excluded timestamp in detections: %s. It will be ignored.", image_key) + logger.info("Found excluded timestamp in detections: %s. It will be ignored.", image_key) continue pascal_evaluator.add_single_ground_truth_image_info( image_key, @@ -77,7 +80,7 @@ def det_eval(predictions, result_type, labels, video_infos, exclude_file, verbos start = time.time() for image_key in boxes: if verbose and image_key in excluded_keys: - logging.info("Found excluded timestamp in detections: %s. It will be ignored.", image_key) + logger.info("Found excluded timestamp in detections: %s. It will be ignored.", image_key) continue pascal_evaluator.add_single_detected_image_info( image_key, diff --git a/src/otx/algorithms/action/adapters/openvino/task.py b/src/otx/algorithms/action/adapters/openvino/task.py index 85f3480b64c..01c55538ac6 100644 --- a/src/otx/algorithms/action/adapters/openvino/task.py +++ b/src/otx/algorithms/action/adapters/openvino/task.py @@ -16,7 +16,6 @@ import io import json -import logging import os import random import tempfile @@ -72,8 +71,9 @@ IOptimizationTask, OptimizationType, ) +from otx.utils.logger import get_logger -logger = logging.getLogger(__name__) +logger = get_logger() # TODO: refactoring to Sphinx style. 
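Every adapter touched in these hunks follows the same substitution: module-level `logging.getLogger(__name__)`, mmcv's named `get_logger(name=...)`, and the per-framework root loggers are all replaced by the single accessor from the relocated `otx.utils.logger` module. A minimal sketch of the resulting call site (mirrors the imports added above; illustration only):

from otx.utils.logger import get_logger

# One shared OTX logger; no per-module name argument is needed anymore.
logger = get_logger()
logger.info("message routed through the unified OTX logger")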
diff --git a/src/otx/algorithms/action/task.py b/src/otx/algorithms/action/task.py index f5e3e0980ad..5622da6c6aa 100644 --- a/src/otx/algorithms/action/task.py +++ b/src/otx/algorithms/action/task.py @@ -29,7 +29,6 @@ InferenceProgressCallback, TrainingProgressCallback, ) -from otx.algorithms.common.utils.logger import get_logger from otx.api.configuration import cfg_helper from otx.api.configuration.helper.utils import config_to_bytes, ids_to_strings from otx.api.entities.annotation import Annotation @@ -66,6 +65,7 @@ from otx.api.usecases.tasks.interfaces.export_interface import ExportType from otx.api.utils.vis_utils import get_actmap from otx.cli.utils.multi_gpu import is_multigpu_child_process +from otx.utils.logger import get_logger logger = get_logger() diff --git a/src/otx/algorithms/action/tools/sample_classification.py b/src/otx/algorithms/action/tools/sample_classification.py index 931e923838a..36e86cefcd3 100644 --- a/src/otx/algorithms/action/tools/sample_classification.py +++ b/src/otx/algorithms/action/tools/sample_classification.py @@ -22,8 +22,6 @@ os.environ["FEATURE_FLAGS_OTX_ACTION_TASKS"] = "1" -from mmcv.utils import get_logger - from otx.algorithms.common.utils import get_task_class from otx.api.configuration.helper import create from otx.api.entities.inference_parameters import InferenceParameters @@ -36,8 +34,9 @@ from otx.api.usecases.tasks.interfaces.export_interface import ExportType from otx.api.usecases.tasks.interfaces.optimization_interface import OptimizationType from otx.core.data.adapter import get_dataset_adapter +from otx.utils.logger import get_logger -logger = get_logger(name="sample") +logger = get_logger() def parse_args(): diff --git a/src/otx/algorithms/action/tools/sample_detection.py b/src/otx/algorithms/action/tools/sample_detection.py index d39e197b214..1caf67c7ba0 100644 --- a/src/otx/algorithms/action/tools/sample_detection.py +++ b/src/otx/algorithms/action/tools/sample_detection.py @@ -22,8 +22,6 @@ os.environ["FEATURE_FLAGS_OTX_ACTION_TASKS"] = "1" -from mmcv.utils import get_logger - from otx.algorithms.common.utils import get_task_class from otx.api.configuration.helper import create from otx.api.entities.inference_parameters import InferenceParameters @@ -34,8 +32,9 @@ from otx.api.entities.task_environment import TaskEnvironment from otx.api.usecases.tasks.interfaces.export_interface import ExportType from otx.core.data.adapter import get_dataset_adapter +from otx.utils.logger import get_logger -logger = get_logger(name="sample") +logger = get_logger() def parse_args(): diff --git a/src/otx/algorithms/anomaly/adapters/anomalib/callbacks/inference.py b/src/otx/algorithms/anomaly/adapters/anomalib/callbacks/inference.py index d4d28741763..fdf5ddbd9bc 100644 --- a/src/otx/algorithms/anomaly/adapters/anomalib/callbacks/inference.py +++ b/src/otx/algorithms/anomaly/adapters/anomalib/callbacks/inference.py @@ -23,7 +23,6 @@ from pytorch_lightning.callbacks import Callback from torch import Tensor -from otx.algorithms.anomaly.adapters.anomalib.logger import get_logger from otx.api.entities.annotation import Annotation from otx.api.entities.datasets import DatasetEntity from otx.api.entities.label import LabelEntity @@ -32,8 +31,9 @@ from otx.api.entities.scored_label import ScoredLabel from otx.api.entities.shapes.rectangle import Rectangle from otx.api.utils.segmentation_utils import create_annotation_from_segmentation_map +from otx.utils.logger import get_logger -logger = get_logger(__name__) +logger = get_logger() class 
AnomalyInferenceCallback(Callback): diff --git a/src/otx/algorithms/anomaly/adapters/anomalib/data/data.py b/src/otx/algorithms/anomaly/adapters/anomalib/data/data.py index b037434f40c..5a026e47e2d 100644 --- a/src/otx/algorithms/anomaly/adapters/anomalib/data/data.py +++ b/src/otx/algorithms/anomaly/adapters/anomalib/data/data.py @@ -25,7 +25,6 @@ from torch import Tensor from torch.utils.data import DataLoader, Dataset -from otx.algorithms.anomaly.adapters.anomalib.logger import get_logger from otx.api.entities.datasets import DatasetEntity from otx.api.entities.model_template import TaskType from otx.api.entities.shapes.polygon import Polygon @@ -36,8 +35,9 @@ split_local_global_dataset, ) from otx.api.utils.segmentation_utils import mask_from_dataset_item +from otx.utils.logger import get_logger -logger = get_logger(__name__) +logger = get_logger() class OTXAnomalyDataset(Dataset): diff --git a/src/otx/algorithms/anomaly/adapters/anomalib/logger/logger.py b/src/otx/algorithms/anomaly/adapters/anomalib/logger/logger.py deleted file mode 100644 index 0504c37767f..00000000000 --- a/src/otx/algorithms/anomaly/adapters/anomalib/logger/logger.py +++ /dev/null @@ -1,85 +0,0 @@ -"""Logging.""" - -# Copyright (c) OpenMMLab. All rights reserved. -import logging -from logging import FileHandler, Handler, Logger, StreamHandler -from typing import Dict, List, Optional - -import torch.distributed as dist - -logger_initialized: Dict[str, bool] = {} - - -def get_logger( - name: str, log_file: Optional[str] = None, log_level: int = logging.INFO, file_mode: str = "w" -) -> Logger: - """Get logger. - - If the logger has not been initialized, this method will initialize the - logger by adding one or two handlers, otherwise the initialized logger will - be directly returned. During initialization, a StreamHandler will always be - added. If `log_file` is specified and the process rank is 0, a FileHandler - will also be added. - - Args: - name (str): Logger name. - log_file (str | None): The log filename. If specified, a FileHandler - will be added to the logger. - log_level (int): The logger level. Notx that only the process of - rank 0 is affected, and other processes will set the level to - "Error" thus be silent most of the time. - file_mode (str): The file mode used in opening log file. - Defaults to 'w'. - - Returns: - logging.Logger: The expected logger. - """ - logger = logging.getLogger(name) - if name in logger_initialized: - return logger - # handle hierarchical names - # e.g., logger "a" is initialized, then logger "a.b" will skip the - # initialization since it is a child of "a". - for logger_name in logger_initialized: - if name.startswith(logger_name): - return logger - - # handle duplicate logs to the console - # Starting in 1.8.0, PyTorch DDP attaches a StreamHandler (NOTSET) - # to the root logger. As logger.propagate is True by default, this root - # level handler causes logging messages from rank>0 processes to - # unexpectedly show up on the console, creating much unwanted clutter. - # To fix this issue, we set the root logger's StreamHandler, if any, to log - # at the ERROR level. 
- for handler in logger.root.handlers: # type: ignore - if isinstance(handler, StreamHandler): - handler.setLevel(logging.ERROR) - - handlers: List[Handler] = [StreamHandler()] - - if dist.is_available() and dist.is_initialized(): - rank = dist.get_rank() - else: - rank = 0 - - # only rank 0 will add a FileHandler - if rank == 0 and log_file is not None: - # Here, the default behaviour of the official logger is 'a'. Thus, we - # provide an interface to change the file mode to the default - # behaviour. - handlers.append(FileHandler(log_file, file_mode)) - - formatter = logging.Formatter("[%(levelname)s] %(asctime)s - %(name)s - %(message)s") - for handler in handlers: - handler.setFormatter(formatter) - handler.setLevel(log_level) - logger.addHandler(handler) - - if rank == 0: - logger.setLevel(log_level) - else: - logger.setLevel(logging.ERROR) - - logger_initialized[name] = True - - return logger diff --git a/src/otx/algorithms/anomaly/tasks/inference.py b/src/otx/algorithms/anomaly/tasks/inference.py index 50c5a4b81f7..bf430a5dbea 100644 --- a/src/otx/algorithms/anomaly/tasks/inference.py +++ b/src/otx/algorithms/anomaly/tasks/inference.py @@ -43,7 +43,6 @@ ) from otx.algorithms.anomaly.adapters.anomalib.config import get_anomalib_config from otx.algorithms.anomaly.adapters.anomalib.data import OTXAnomalyDataModule -from otx.algorithms.anomaly.adapters.anomalib.logger import get_logger from otx.algorithms.anomaly.configs.base.configuration import BaseAnomalyConfig from otx.algorithms.common.utils import embed_ir_model_data from otx.algorithms.common.utils.utils import embed_onnx_model_data @@ -69,8 +68,9 @@ from otx.api.usecases.tasks.interfaces.export_interface import ExportType, IExportTask from otx.api.usecases.tasks.interfaces.inference_interface import IInferenceTask from otx.api.usecases.tasks.interfaces.unload_interface import IUnload +from otx.utils.logger import get_logger -logger = get_logger(__name__) +logger = get_logger() # pylint: disable=too-many-instance-attributes @@ -359,7 +359,20 @@ def _add_metadata_to_ir(self, model_file: str, export_type: ExportType) -> None: extra_model_data[("model_info", "reverse_input_channels")] = False extra_model_data[("model_info", "model_type")] = "AnomalyDetection" - extra_model_data[("model_info", "labels")] = "Normal Anomaly" + + labels = [] + label_ids = [] + for label_entity in self.task_environment.label_schema.get_labels(include_empty=False): + label_name = label_entity.name.replace(" ", "_") + # There is a mismatch between labels in OTX and modelAPI + if label_name == "Anomalous": + label_name = "Anomaly" + labels.append(label_name) + label_ids.append(str(label_entity.id_)) + + extra_model_data[("model_info", "labels")] = " ".join(labels) + extra_model_data[("model_info", "label_ids")] = " ".join(label_ids) + if export_type == ExportType.OPENVINO: embed_ir_model_data(model_file, extra_model_data) elif export_type == ExportType.ONNX: diff --git a/src/otx/algorithms/anomaly/tasks/nncf.py b/src/otx/algorithms/anomaly/tasks/nncf.py index d8152d042ec..006ca11afad 100644 --- a/src/otx/algorithms/anomaly/tasks/nncf.py +++ b/src/otx/algorithms/anomaly/tasks/nncf.py @@ -41,7 +41,6 @@ from otx.algorithms.anomaly.adapters.anomalib.callbacks import ProgressCallback from otx.algorithms.anomaly.adapters.anomalib.data import OTXAnomalyDataModule -from otx.algorithms.anomaly.adapters.anomalib.logger import get_logger from otx.api.entities.datasets import DatasetEntity from otx.api.entities.model import ( ModelEntity, @@ -56,10 +55,11 @@ 
IOptimizationTask, OptimizationType, ) +from otx.utils.logger import get_logger from .inference import InferenceTask -logger = get_logger(__name__) +logger = get_logger() class NNCFTask(InferenceTask, IOptimizationTask): diff --git a/src/otx/algorithms/anomaly/tasks/openvino.py b/src/otx/algorithms/anomaly/tasks/openvino.py index 5a5fffb3f02..931f325f373 100644 --- a/src/otx/algorithms/anomaly/tasks/openvino.py +++ b/src/otx/algorithms/anomaly/tasks/openvino.py @@ -32,7 +32,6 @@ from openvino.model_api.models import AnomalyDetection, AnomalyResult from otx.algorithms.anomaly.adapters.anomalib.config import get_anomalib_config -from otx.algorithms.anomaly.adapters.anomalib.logger import get_logger from otx.algorithms.anomaly.configs.base.configuration import BaseAnomalyConfig from otx.algorithms.common.utils import embed_ir_model_data from otx.algorithms.common.utils.ir import check_if_quantized @@ -71,8 +70,9 @@ ) from otx.api.utils.anomaly_utils import create_detection_annotation_from_anomaly_heatmap from otx.api.utils.segmentation_utils import create_annotation_from_segmentation_map +from otx.utils.logger import get_logger -logger = get_logger(__name__) +logger = get_logger() class OTXNNCFAnomalyDataloader: @@ -361,6 +361,8 @@ def optimize( output_model.optimization_type = ModelOptimizationType.POT output_model.optimization_methods = [OptimizationMethod.QUANTIZATION] output_model.precision = [ModelPrecision.INT8] + metadata = self.get_metadata() + output_model.set_data("metadata", json.dumps(metadata).encode()) self.task_environment.model = output_model self.inference_model = self.get_openvino_model() @@ -401,6 +403,27 @@ def _create_from_legacy(self) -> None: Args: model_file (str): The XML model file. """ + extra_model_data = self._metadata_in_ir_format() + + for key, value in extra_model_data.items(): + if isinstance(value, np.ndarray): + extra_model_data[key] = value.tolist() + + with tempfile.TemporaryDirectory() as temp_dir: + xml_data = self.task_environment.model.get_data("openvino.xml") + bin_data = self.task_environment.model.get_data("openvino.bin") + with open(f"{temp_dir}/openvino.xml", "wb") as file: + file.write(xml_data) + with open(f"{temp_dir}/openvino.bin", "wb") as file: + file.write(bin_data) + embed_ir_model_data(f"{temp_dir}/openvino.xml", extra_model_data) + with open(f"{temp_dir}/openvino.xml", "rb") as file: + self.task_environment.model.set_data("openvino.xml", file.read()) + with open(f"{temp_dir}/openvino.bin", "rb") as file: + self.task_environment.model.set_data("openvino.bin", file.read()) + + def _metadata_in_ir_format(self) -> Dict[Tuple[str, str], Union[str, int, float, List[Union[int, float]]]]: + """Return metadata in format of tuple keys that are used in IR with modelAPI.""" metadata = self.get_metadata() extra_model_data: Dict[Tuple[str, str], Any] = {} for key, value in metadata.items(): @@ -430,23 +453,7 @@ def _create_from_legacy(self) -> None: extra_model_data[("model_info", "reverse_input_channels")] = False extra_model_data[("model_info", "model_type")] = "AnomalyDetection" extra_model_data[("model_info", "labels")] = "Normal Anomaly" - - for key, value in extra_model_data.items(): - if isinstance(value, np.ndarray): - extra_model_data[key] = value.tolist() - - with tempfile.TemporaryDirectory() as temp_dir: - xml_data = self.task_environment.model.get_data("openvino.xml") - bin_data = self.task_environment.model.get_data("openvino.bin") - with open(f"{temp_dir}/openvino.xml", "wb") as file: - file.write(xml_data) - with 
open(f"{temp_dir}/openvino.bin", "wb") as file: - file.write(bin_data) - embed_ir_model_data(f"{temp_dir}/openvino.xml", extra_model_data) - with open(f"{temp_dir}/openvino.xml", "rb") as file: - self.task_environment.model.set_data("openvino.xml", file.read()) - with open(f"{temp_dir}/openvino.bin", "rb") as file: - self.task_environment.model.set_data("openvino.bin", file.read()) + return extra_model_data def _serialize_list(self, arr: Union[Tuple, List]) -> str: """Converts a list to space separated string.""" @@ -483,6 +490,17 @@ def _get_openvino_configuration(self) -> Dict[str, Any]: configuration: Dict[str, Any] = { "labels": LabelSchemaMapper.forward(self.task_environment.label_schema), } + # Add new IR keys to parameters + for key, value in self._metadata_in_ir_format().items(): + # since the same key is used to store label info in OTX SDK format + if key[1] == "labels": + assert isinstance(value, str) + configuration["modelapi_labels"] = [name for name in value.split(" ")] + elif key[1] in ("mean_values", "scale_values"): + assert isinstance(value, str) + configuration[key[1]] = [float(x) for x in value.split(" ")] + else: + configuration[key[1]] = value return configuration diff --git a/src/otx/algorithms/anomaly/tasks/train.py b/src/otx/algorithms/anomaly/tasks/train.py index a1f4759ab1a..9e2f57f249f 100644 --- a/src/otx/algorithms/anomaly/tasks/train.py +++ b/src/otx/algorithms/anomaly/tasks/train.py @@ -29,15 +29,15 @@ from otx.algorithms.anomaly.adapters.anomalib.callbacks import ProgressCallback from otx.algorithms.anomaly.adapters.anomalib.data import OTXAnomalyDataModule -from otx.algorithms.anomaly.adapters.anomalib.logger import get_logger from otx.api.entities.datasets import DatasetEntity from otx.api.entities.model import ModelEntity from otx.api.entities.train_parameters import TrainParameters from otx.api.usecases.tasks.interfaces.training_interface import ITrainingTask +from otx.utils.logger import get_logger from .inference import InferenceTask -logger = get_logger(__name__) +logger = get_logger() class TrainingTask(InferenceTask, ITrainingTask): diff --git a/src/otx/algorithms/anomaly/tools/sample.py b/src/otx/algorithms/anomaly/tools/sample.py index 1cf7057b2ef..a8433defb51 100644 --- a/src/otx/algorithms/anomaly/tools/sample.py +++ b/src/otx/algorithms/anomaly/tools/sample.py @@ -29,7 +29,6 @@ AnomalyDetectionDataset, AnomalySegmentationDataset, ) -from otx.algorithms.anomaly.adapters.anomalib.logger import get_logger from otx.algorithms.anomaly.tasks import NNCFTask, OpenVINOTask from otx.api.configuration.helper import create as create_hyper_parameters from otx.api.entities.inference_parameters import InferenceParameters @@ -46,8 +45,9 @@ from otx.api.usecases.tasks.interfaces.export_interface import ExportType from otx.api.usecases.tasks.interfaces.inference_interface import IInferenceTask from otx.api.usecases.tasks.interfaces.optimization_interface import OptimizationType +from otx.utils.logger import get_logger -logger = get_logger(__name__) +logger = get_logger() # pylint: disable=too-many-instance-attributes diff --git a/src/otx/algorithms/classification/adapters/mmcls/configurer.py b/src/otx/algorithms/classification/adapters/mmcls/configurer.py index 397026d5760..873a6efdbe8 100644 --- a/src/otx/algorithms/classification/adapters/mmcls/configurer.py +++ b/src/otx/algorithms/classification/adapters/mmcls/configurer.py @@ -22,7 +22,7 @@ recursively_update_cfg, update_or_add_custom_hook, ) -from otx.algorithms.common.utils.logger import get_logger +from 
otx.utils.logger import get_logger logger = get_logger() diff --git a/src/otx/algorithms/classification/adapters/mmcls/datasets/otx_datasets.py b/src/otx/algorithms/classification/adapters/mmcls/datasets/otx_datasets.py index 6c0ec0ab2a9..ad950763948 100644 --- a/src/otx/algorithms/classification/adapters/mmcls/datasets/otx_datasets.py +++ b/src/otx/algorithms/classification/adapters/mmcls/datasets/otx_datasets.py @@ -1,6 +1,6 @@ """Base Dataset for Classification Task.""" -# Copyright (C) 2022 Intel Corporation +# Copyright (C) 2022-2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # @@ -18,10 +18,10 @@ from torch.utils.data import Dataset from otx.algorithms.common.utils import get_cls_img_indices, get_old_new_img_indices -from otx.algorithms.common.utils.logger import get_logger from otx.api.entities.datasets import DatasetEntity from otx.api.entities.id import ID from otx.api.entities.label import LabelEntity +from otx.utils.logger import get_logger logger = get_logger() @@ -174,7 +174,10 @@ def class_accuracy(self, results, gt_labels): for i in range(self.num_classes): cls_pred = pred_label == i cls_pred = cls_pred[gt_labels == i] - cls_acc = np.sum(cls_pred) / len(cls_pred) + if len(cls_pred) > 0: + cls_acc = np.sum(cls_pred) / len(cls_pred) + else: + cls_acc = 0.0 accracies.append(cls_acc) return accracies @@ -295,6 +298,7 @@ class OTXHierarchicalClsDataset(OTXMultilabelClsDataset): def __init__(self, **kwargs): self.hierarchical_info = kwargs.pop("hierarchical_info", None) + self.label_schema = kwargs.pop("label_schema", None) super().__init__(**kwargs) def load_annotations(self): @@ -303,6 +307,13 @@ def load_annotations(self): for i, _ in enumerate(self.otx_dataset): class_indices = [] item_labels = self.otx_dataset[i].get_roi_labels(self.labels, include_empty=include_empty) + if self.label_schema: + # NOTE: Parent labels might be missing in annotations. + # This code fills the gap just in case. 
+ full_item_labels = set() + for label in item_labels: + full_item_labels.update(self.label_schema.get_ancestors(label)) + item_labels = full_item_labels ignored_labels = self.otx_dataset[i].ignored_labels if item_labels: num_cls_heads = self.hierarchical_info["num_multiclass_heads"] diff --git a/src/otx/algorithms/classification/adapters/mmcls/models/classifiers/byol.py b/src/otx/algorithms/classification/adapters/mmcls/models/classifiers/byol.py index 9f8a2cb0dba..82e994a7a96 100644 --- a/src/otx/algorithms/classification/adapters/mmcls/models/classifiers/byol.py +++ b/src/otx/algorithms/classification/adapters/mmcls/models/classifiers/byol.py @@ -18,7 +18,7 @@ from mmcls.models.builder import CLASSIFIERS, build_backbone, build_head, build_neck from torch import nn -from otx.algorithms.common.utils.logger import get_logger +from otx.utils.logger import get_logger logger = get_logger() diff --git a/src/otx/algorithms/classification/adapters/mmcls/models/classifiers/custom_image_classifier.py b/src/otx/algorithms/classification/adapters/mmcls/models/classifiers/custom_image_classifier.py index 65e31d6bc53..d2e23dadda5 100644 --- a/src/otx/algorithms/classification/adapters/mmcls/models/classifiers/custom_image_classifier.py +++ b/src/otx/algorithms/classification/adapters/mmcls/models/classifiers/custom_image_classifier.py @@ -12,8 +12,8 @@ from otx.algorithms.common.adapters.mmcv.hooks.recording_forward_hook import ViTReciproCAMHook from otx.algorithms.common.adapters.mmdeploy.utils import is_mmdeploy_enabled -from otx.algorithms.common.utils.logger import get_logger from otx.algorithms.common.utils.task_adapt import map_class_names +from otx.utils.logger import get_logger from .mixin import ClsLossDynamicsTrackingMixin, SAMClassifierMixin diff --git a/src/otx/algorithms/classification/adapters/mmcls/models/classifiers/mixin.py b/src/otx/algorithms/classification/adapters/mmcls/models/classifiers/mixin.py index 1674a2d182b..99d899356e1 100644 --- a/src/otx/algorithms/classification/adapters/mmcls/models/classifiers/mixin.py +++ b/src/otx/algorithms/classification/adapters/mmcls/models/classifiers/mixin.py @@ -10,12 +10,12 @@ import numpy as np import pandas as pd -from otx.algorithms.common.utils.logger import get_logger from otx.api.entities.dataset_item import DatasetItemEntityWithID from otx.core.data.noisy_label_detection import ( LossDynamicsTracker, LossDynamicsTrackingMixin, ) +from otx.utils.logger import get_logger logger = get_logger() diff --git a/src/otx/algorithms/classification/adapters/mmcls/models/classifiers/semisl_classifier.py b/src/otx/algorithms/classification/adapters/mmcls/models/classifiers/semisl_classifier.py index 33850bff708..a4eddee15b8 100644 --- a/src/otx/algorithms/classification/adapters/mmcls/models/classifiers/semisl_classifier.py +++ b/src/otx/algorithms/classification/adapters/mmcls/models/classifiers/semisl_classifier.py @@ -6,7 +6,7 @@ import torch from mmcls.models.builder import CLASSIFIERS -from otx.algorithms.common.utils.logger import get_logger +from otx.utils.logger import get_logger from .custom_image_classifier import CustomImageClassifier diff --git a/src/otx/algorithms/classification/adapters/mmcls/models/classifiers/semisl_multilabel_classifier.py b/src/otx/algorithms/classification/adapters/mmcls/models/classifiers/semisl_multilabel_classifier.py index 1a773054384..aac9ed9688b 100644 --- a/src/otx/algorithms/classification/adapters/mmcls/models/classifiers/semisl_multilabel_classifier.py +++ 
b/src/otx/algorithms/classification/adapters/mmcls/models/classifiers/semisl_multilabel_classifier.py
@@ -5,7 +5,7 @@
 
 from mmcls.models.builder import CLASSIFIERS
 
-from otx.algorithms.common.utils.logger import get_logger
+from otx.utils.logger import get_logger
 
 from .custom_image_classifier import CustomImageClassifier
 
diff --git a/src/otx/algorithms/classification/adapters/mmcls/models/heads/custom_cls_head.py b/src/otx/algorithms/classification/adapters/mmcls/models/heads/custom_cls_head.py
index bc466c303dc..119be69caae 100644
--- a/src/otx/algorithms/classification/adapters/mmcls/models/heads/custom_cls_head.py
+++ b/src/otx/algorithms/classification/adapters/mmcls/models/heads/custom_cls_head.py
@@ -44,6 +44,10 @@ def forward(self, x):
 
     def forward_train(self, cls_score, gt_label):
         """Forward_train fuction of CustomNonLinearHead class."""
+        bs = cls_score.shape[0]
+        if bs == 1:
+            cls_score = torch.cat([cls_score, cls_score], dim=0)
+            gt_label = torch.cat([gt_label, gt_label], dim=0)
         logit = self.classifier(cls_score)
         losses = self.loss(logit, gt_label, feature=cls_score)
         return losses
diff --git a/src/otx/algorithms/classification/adapters/mmcls/models/heads/custom_hierarchical_linear_cls_head.py b/src/otx/algorithms/classification/adapters/mmcls/models/heads/custom_hierarchical_linear_cls_head.py
index 6776756bb61..3e0de200be2 100644
--- a/src/otx/algorithms/classification/adapters/mmcls/models/heads/custom_hierarchical_linear_cls_head.py
+++ b/src/otx/algorithms/classification/adapters/mmcls/models/heads/custom_hierarchical_linear_cls_head.py
@@ -105,7 +105,7 @@ def forward_train(self, cls_score, gt_label, **kwargs):
                 losses["loss"] += multiclass_loss
                 num_effective_heads_in_batch += 1
 
-        if self.hierarchical_info["num_multiclass_heads"] > 1:
+        if num_effective_heads_in_batch > 0:
             losses["loss"] /= num_effective_heads_in_batch
 
         if self.compute_multilabel_loss:
diff --git a/src/otx/algorithms/classification/adapters/mmcls/models/heads/custom_hierarchical_non_linear_cls_head.py b/src/otx/algorithms/classification/adapters/mmcls/models/heads/custom_hierarchical_non_linear_cls_head.py
index 5397818fbf3..69ea7bb1476 100644
--- a/src/otx/algorithms/classification/adapters/mmcls/models/heads/custom_hierarchical_non_linear_cls_head.py
+++ b/src/otx/algorithms/classification/adapters/mmcls/models/heads/custom_hierarchical_non_linear_cls_head.py
@@ -135,7 +135,7 @@ def forward_train(self, cls_score, gt_label, **kwargs):
                 losses["loss"] += multiclass_loss
                 num_effective_heads_in_batch += 1
 
-        if self.hierarchical_info["num_multiclass_heads"] > 1:
+        if num_effective_heads_in_batch > 0:
             losses["loss"] /= num_effective_heads_in_batch
 
         if self.compute_multilabel_loss:
diff --git a/src/otx/algorithms/classification/adapters/mmcls/models/heads/custom_vision_transformer_head.py b/src/otx/algorithms/classification/adapters/mmcls/models/heads/custom_vision_transformer_head.py
index 4c3713c66e6..b58d0803589 100644
--- a/src/otx/algorithms/classification/adapters/mmcls/models/heads/custom_vision_transformer_head.py
+++ b/src/otx/algorithms/classification/adapters/mmcls/models/heads/custom_vision_transformer_head.py
@@ -42,3 +42,10 @@ def post_process(self, pred):
     def forward(self, x):
         """Forward fuction of CustomVisionTransformerClsHead class."""
         return self.simple_test(x)
+
+    def forward_train(self, x, gt_label, **kwargs):
+        """Forward_train function of CustomVisionTransformerClsHead class."""
+        x = self.pre_logits(x)
+        cls_score = self.layers.head(x)
+        losses = self.loss(cls_score, gt_label, feature=x)
+        return 
losses diff --git a/src/otx/algorithms/classification/adapters/mmcls/models/losses/ib_loss.py b/src/otx/algorithms/classification/adapters/mmcls/models/losses/ib_loss.py index d890c1a55ac..8d585d79b24 100644 --- a/src/otx/algorithms/classification/adapters/mmcls/models/losses/ib_loss.py +++ b/src/otx/algorithms/classification/adapters/mmcls/models/losses/ib_loss.py @@ -1,5 +1,5 @@ """Module for defining IB Loss which alleviate effect of imbalanced dataset.""" -# Copyright (C) 2022 Intel Corporation +# Copyright (C) 2022-2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # @@ -48,7 +48,7 @@ def update_weight(self, cls_num_list): """Update loss weight per class.""" if len(cls_num_list) == 0: raise ValueError("Cannot compute the IB loss weight with empty cls_num_list.") - per_cls_weights = 1.0 / np.array(cls_num_list) + per_cls_weights = 1.0 / (np.array(cls_num_list) + self.epsilon) per_cls_weights = per_cls_weights / np.sum(per_cls_weights) * len(cls_num_list) per_cls_weights = torch.FloatTensor(per_cls_weights) self.weight.data = per_cls_weights.to(device=self.weight.device) diff --git a/src/otx/algorithms/classification/adapters/mmcls/nncf/builder.py b/src/otx/algorithms/classification/adapters/mmcls/nncf/builder.py index b8db070fb85..0b4f7b7fe28 100644 --- a/src/otx/algorithms/classification/adapters/mmcls/nncf/builder.py +++ b/src/otx/algorithms/classification/adapters/mmcls/nncf/builder.py @@ -7,7 +7,6 @@ from typing import Optional, Union import torch -from mmcls.utils import get_root_logger from mmcv.parallel import DataContainer from mmcv.runner import CheckpointLoader from mmcv.utils import Config, ConfigDict @@ -20,8 +19,9 @@ ) from otx.algorithms.common.adapters.nncf import is_accuracy_aware_training_set from otx.algorithms.common.adapters.nncf.compression import NNCFMetaState +from otx.utils.logger import get_logger -logger = get_root_logger() +logger = get_logger() def build_nncf_classifier( # pylint: disable=too-many-locals,too-many-statements diff --git a/src/otx/algorithms/classification/adapters/mmcls/nncf/task.py b/src/otx/algorithms/classification/adapters/mmcls/nncf/task.py index eefb7bf1de4..987a9438b43 100644 --- a/src/otx/algorithms/classification/adapters/mmcls/nncf/task.py +++ b/src/otx/algorithms/classification/adapters/mmcls/nncf/task.py @@ -13,7 +13,6 @@ ) from otx.algorithms.classification.adapters.mmcls.task import MMClassificationTask from otx.algorithms.common.tasks.nncf_task import NNCFBaseTask -from otx.algorithms.common.utils.logger import get_logger from otx.api.entities.datasets import DatasetEntity from otx.api.entities.metrics import ( CurveMetric, @@ -26,6 +25,7 @@ from otx.api.entities.model import ModelEntity # ModelStatus from otx.api.entities.optimization_parameters import OptimizationParameters from otx.api.entities.task_environment import TaskEnvironment +from otx.utils.logger import get_logger logger = get_logger() diff --git a/src/otx/algorithms/classification/adapters/mmcls/task.py b/src/otx/algorithms/classification/adapters/mmcls/task.py index cd78a3ccdf7..9ae0b5721e8 100644 --- a/src/otx/algorithms/classification/adapters/mmcls/task.py +++ b/src/otx/algorithms/classification/adapters/mmcls/task.py @@ -18,7 +18,6 @@ from mmcv.runner import wrap_fp16_model from mmcv.utils import Config, ConfigDict -from otx.algorithms import TRANSFORMER_BACKBONES from otx.algorithms.classification.adapters.mmcls.apis.train import train_model from otx.algorithms.classification.adapters.mmcls.utils.exporter import ( ClassificationExporter, @@ -31,6 
+30,7 @@
     EigenCamHook,
     FeatureVectorHook,
     ReciproCAMHook,
+    ViTFeatureVectorHook,
     ViTReciproCAMHook,
 )
 from otx.algorithms.common.adapters.mmcv.utils import (
@@ -54,7 +54,6 @@
 from otx.algorithms.common.tasks.nncf_task import NNCFBaseTask
 from otx.algorithms.common.utils import is_hpu_available
 from otx.algorithms.common.utils.data import get_dataset
-from otx.algorithms.common.utils.logger import get_logger
 from otx.api.entities.datasets import DatasetEntity
 from otx.api.entities.explain_parameters import ExplainParameters
 from otx.api.entities.inference_parameters import InferenceParameters
@@ -62,6 +61,7 @@
 from otx.api.entities.subset import Subset
 from otx.api.entities.task_environment import TaskEnvironment
 from otx.api.usecases.tasks.interfaces.export_interface import ExportType
+from otx.utils.logger import get_logger
 
 from .configurer import (
     ClassificationConfigurer,
@@ -174,6 +174,7 @@ def configure(
         elif self._hierarchical:
             options_for_patch_datasets["type"] = "OTXHierarchicalClsDataset"
             options_for_patch_datasets["hierarchical_info"] = self._hierarchical_info
+            options_for_patch_datasets["label_schema"] = self._task_environment.label_schema
             options_for_patch_evaluation["task"] = "hierarchical"
         elif self._selfsl:
             options_for_patch_datasets["type"] = "SelfSLDataset"
@@ -229,7 +230,6 @@ def _infer_model(
             )
         )
 
-        dump_features = True
         dump_saliency_map = not inference_parameters.is_evaluation if inference_parameters else True
 
         self._init_task()
@@ -278,16 +278,16 @@ def hook(module, inp, outp):
         forward_explainer_hook: Union[nullcontext, BaseRecordingForwardHook]
         if model_type == "VisionTransformer":
             forward_explainer_hook = ViTReciproCAMHook(feature_model)
-        elif (
-            not dump_saliency_map or model_type in TRANSFORMER_BACKBONES
-        ):  # TODO: remove latter "or" condition after resolving Issue#2098
+        elif not dump_saliency_map:
             forward_explainer_hook = nullcontext()
         else:
             forward_explainer_hook = ReciproCAMHook(feature_model)
-        if (
-            not dump_features or model_type in TRANSFORMER_BACKBONES
-        ):  # TODO: remove latter "or" condition after resolving Issue#2098
-            feature_vector_hook: Union[nullcontext, BaseRecordingForwardHook] = nullcontext()
+
+        feature_vector_hook: Union[nullcontext, BaseRecordingForwardHook]
+        if model_type == "VisionTransformer":
+            feature_vector_hook = ViTFeatureVectorHook(feature_model)
+        elif not dump_saliency_map:
+            feature_vector_hook = nullcontext()
         else:
             feature_vector_hook = FeatureVectorHook(feature_model)
@@ -370,6 +370,10 @@ def _train_model(
 
         # Model
         model = self.build_model(cfg, fp16=cfg.get("fp16", False))
+        if cfg.device == "cpu":
+            # NOTE: unlike mmdet, mmcls does not wrap models w/ DP for CPU training
+            # Raw DataContainer "img_metas" is exposed, which results in errors
+            model = build_data_parallel(model, cfg, distributed=False)
         model.train()
         if is_hpu_available():
             # TODO (sungchul): move it to appropriate location if needed
diff --git a/src/otx/algorithms/classification/adapters/mmcls/utils/builder.py b/src/otx/algorithms/classification/adapters/mmcls/utils/builder.py
index 36974ce3851..9496930cce5 100644
--- a/src/otx/algorithms/classification/adapters/mmcls/utils/builder.py
+++ b/src/otx/algorithms/classification/adapters/mmcls/utils/builder.py
@@ -10,9 +10,9 @@
 from mmcv.runner import load_checkpoint
 from mmcv.utils import Config, ConfigDict, get_logger
 
-from otx.algorithms.common.utils.logger import LEVEL
+from otx.utils.logger import LEVEL
 
-logger = get_logger("mmcls")
+mmcls_logger = get_logger("mmcls")
 
 
 def build_classifier(
@@ -35,9 +35,9 @@ 
def build_classifier( model_cfg = deepcopy(config.model) model = origin_build_classifier(model_cfg) - logger.setLevel("WARNING") + mmcls_logger.setLevel("WARNING") # make logger less verbose temporally model.init_weights() - logger.setLevel(LEVEL) + mmcls_logger.setLevel(LEVEL) model = model.to(device) checkpoint = checkpoint if checkpoint else config.pop("load_from", None) diff --git a/src/otx/algorithms/classification/adapters/mmcls/utils/config_utils.py b/src/otx/algorithms/classification/adapters/mmcls/utils/config_utils.py index 4595e1482cc..710eb2e605f 100644 --- a/src/otx/algorithms/classification/adapters/mmcls/utils/config_utils.py +++ b/src/otx/algorithms/classification/adapters/mmcls/utils/config_utils.py @@ -20,9 +20,8 @@ from otx.algorithms.common.adapters.mmcv.utils import ( get_dataset_configs, - patch_color_conversion, ) -from otx.algorithms.common.utils.logger import get_logger +from otx.utils.logger import get_logger logger = get_logger() @@ -49,8 +48,6 @@ def patch_datasets( for cfg in cfgs: cfg.update(kwargs) - patch_color_conversion(config) - def patch_evaluation(config: Config, task: str): """Patch evaluation.""" diff --git a/src/otx/algorithms/classification/adapters/mmcls/utils/exporter.py b/src/otx/algorithms/classification/adapters/mmcls/utils/exporter.py index f0406d6cdb6..ecc4dbfe35e 100644 --- a/src/otx/algorithms/classification/adapters/mmcls/utils/exporter.py +++ b/src/otx/algorithms/classification/adapters/mmcls/utils/exporter.py @@ -11,7 +11,7 @@ from otx.algorithms.common.adapters.mmdeploy.utils.utils import ( sync_batchnorm_2_batchnorm, ) -from otx.algorithms.common.utils.logger import get_logger +from otx.utils.logger import get_logger logger = get_logger() diff --git a/src/otx/algorithms/classification/adapters/openvino/task.py b/src/otx/algorithms/classification/adapters/openvino/task.py index 91cfb3de5fa..2c87e96a3c5 100644 --- a/src/otx/algorithms/classification/adapters/openvino/task.py +++ b/src/otx/algorithms/classification/adapters/openvino/task.py @@ -16,7 +16,6 @@ import io import json -import logging import os import tempfile import time @@ -79,8 +78,9 @@ OptimizationType, ) from otx.api.utils.dataset_utils import add_saliency_maps_to_dataset_item +from otx.utils.logger import get_logger -logger = logging.getLogger(__name__) +logger = get_logger() # TODO: refactoring to Sphinx style. 
@@ -176,6 +176,12 @@ def __init__(self, task_environment: TaskEnvironment): self.inferencer = self.load_inferencer() template_file_path = self.task_environment.model_template.model_template_path self._base_dir = os.path.abspath(os.path.dirname(template_file_path)) + self._avg_time_per_image: Optional[float] = None + + @property + def avg_time_per_image(self) -> Optional[float]: + """Average inference time per image.""" + return self._avg_time_per_image def load_inferencer(self) -> ClassificationOpenVINOInferencer: """load_inferencer function of ClassificationOpenVINOTask.""" @@ -270,7 +276,8 @@ def add_prediction(id: int, predicted_scene: AnnotationSceneEntity, aux_data: tu self.inferencer.await_all() - logger.info(f"Avg time per image: {total_time/len(dataset)} secs") + self._avg_time_per_image = total_time / len(dataset) + logger.info(f"Avg time per image: {self._avg_time_per_image} secs") logger.info(f"Total time: {total_time} secs") logger.info("Classification OpenVINO inference completed") diff --git a/src/otx/algorithms/classification/configs/configuration.yaml b/src/otx/algorithms/classification/configs/configuration.yaml index 099c20af4f8..03c327f88b2 100644 --- a/src/otx/algorithms/classification/configs/configuration.yaml +++ b/src/otx/algorithms/classification/configs/configuration.yaml @@ -210,7 +210,7 @@ learning_parameters: warning: This is applied exclusively when early stopping is enabled. use_adaptive_interval: affects_outcome_of: TRAINING - default_value: false + default_value: true description: Depending on the size of iteration per epoch, adaptively update the validation interval and related values. editable: true header: Use adaptive validation interval @@ -277,11 +277,12 @@ learning_parameters: warning: null input_size: affects_outcome_of: INFERENCE - default_value: Auto + default_value: Default description: The input size of the given model could be configured to one of the predefined resolutions. Reduced training and inference time could be expected by using smaller input size. - Defaults to Auto, in which input size is automatically determined based on dataset statistics. + In Auto mode, the input size is automatically determined based on dataset statistics. + Defaults to per-model default resolution. editable: true enum_name: InputSizePreset header: Configure model input size. 
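Note on the input_size change above: the default preset moves from Auto to Default, so each model template now keeps its own input resolution unless the user explicitly opts into dataset-driven sizing. Below is a rough, self-contained sketch of how such a preset enum can map the YAML string values to concrete resolutions; the member set and parsing logic are illustrative stand-ins, not the actual otx InputSizePreset implementation.

from enum import Enum
from typing import Optional, Tuple


class InputSizePresetSketch(Enum):
    """Illustrative stand-in for the InputSizePreset enum named in the config above."""

    DEFAULT = "Default"   # keep the model template's own input resolution
    AUTO = "Auto"         # derive the resolution from dataset statistics
    SIZE_512 = "512x512"  # hypothetical fixed-size preset

    @property
    def tuple(self) -> Optional[Tuple[int, int]]:
        # "Default" and "Auto" carry no fixed resolution; "WxH" strings parse to ints.
        if "x" not in self.value:
            return None
        width, height = self.value.split("x")
        return int(width), int(height)


print(InputSizePresetSketch("Default").tuple)  # None -> the model's own default is kept
print(InputSizePresetSketch("512x512").tuple)  # (512, 512)
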
diff --git a/src/otx/algorithms/classification/task.py b/src/otx/algorithms/classification/task.py index 9050e736f0c..107cc31c225 100644 --- a/src/otx/algorithms/classification/task.py +++ b/src/otx/algorithms/classification/task.py @@ -27,7 +27,6 @@ from otx.algorithms.common.tasks.base_task import TRAIN_TYPE_DIR_PATH, OTXTask from otx.algorithms.common.utils import embed_ir_model_data from otx.algorithms.common.utils.callback import TrainingProgressCallback -from otx.algorithms.common.utils.logger import get_logger from otx.algorithms.common.utils.utils import embed_onnx_model_data from otx.api.configuration import cfg_helper from otx.api.configuration.helper.utils import ids_to_strings @@ -71,6 +70,7 @@ from otx.api.utils.labels_utils import get_empty_label from otx.cli.utils.multi_gpu import is_multigpu_child_process from otx.core.data.caching.mem_cache_handler import MemCacheHandlerSingleton +from otx.utils.logger import get_logger logger = get_logger() RECIPE_TRAIN_TYPE = { @@ -119,6 +119,11 @@ def __init__(self, task_environment: TaskEnvironment, output_path: Optional[str] if self._task_environment.model is not None: self._load_model() + if hasattr(self._hyperparams.learning_parameters, "input_size"): + input_size_cfg = InputSizePreset(self._hyperparams.learning_parameters.input_size.value) + else: + input_size_cfg = InputSizePreset.DEFAULT + self._input_size = input_size_cfg.tuple if hasattr(self._hyperparams.learning_parameters, "input_size"): input_size_cfg = InputSizePreset(self._hyperparams.learning_parameters.input_size.value) @@ -129,7 +134,7 @@ def __init__(self, task_environment: TaskEnvironment, output_path: Optional[str] def _is_multi_label(self, label_groups: List[LabelGroup], all_labels: List[LabelEntity]): """Check whether the current training mode is multi-label or not.""" # NOTE: In the current Geti, multi-label should have `___` symbol for all group names. 
- find_multilabel_symbol = ["___" in i.name for i in label_groups] + find_multilabel_symbol = ["___" in getattr(i, "name", "") for i in label_groups] return ( (len(label_groups) > 1) and (len(label_groups) == len(all_labels)) and (False not in find_multilabel_symbol) ) diff --git a/src/otx/algorithms/classification/tools/classification_sample.py b/src/otx/algorithms/classification/tools/classification_sample.py index c82636f5fbe..45104ddc19b 100644 --- a/src/otx/algorithms/classification/tools/classification_sample.py +++ b/src/otx/algorithms/classification/tools/classification_sample.py @@ -12,7 +12,6 @@ import numpy as np import torch -from mmcv.utils import get_logger from otx.algorithms.common.utils import get_task_class from otx.api.configuration.helper import create @@ -33,6 +32,7 @@ from otx.api.entities.task_environment import TaskEnvironment from otx.api.usecases.tasks.interfaces.export_interface import ExportType from otx.api.usecases.tasks.interfaces.optimization_interface import OptimizationType +from otx.utils.logger import get_logger SEED = 5 random.seed(SEED) @@ -42,7 +42,7 @@ torch.backends.cudnn.deterministic = True torch.backends.cudnn.benchmark = False -logger = get_logger(name="mmcls") +logger = get_logger() parser = argparse.ArgumentParser(description="Sample showcasing the new API") parser.add_argument("template_file_path", help="path to template file") diff --git a/src/otx/algorithms/classification/utils/cls_utils.py b/src/otx/algorithms/classification/utils/cls_utils.py index 88db1ed2ecb..67e3b1c5601 100644 --- a/src/otx/algorithms/classification/utils/cls_utils.py +++ b/src/otx/algorithms/classification/utils/cls_utils.py @@ -98,16 +98,20 @@ def get_cls_model_api_configuration(label_schema: LabelSchemaEntity, inference_c """Get ModelAPI config.""" mapi_config = {} mapi_config[("model_info", "model_type")] = "Classification" + mapi_config[("model_info", "task_type")] = "classification" mapi_config[("model_info", "confidence_threshold")] = str(inference_config["confidence_threshold"]) mapi_config[("model_info", "multilabel")] = str(inference_config["multilabel"]) mapi_config[("model_info", "hierarchical")] = str(inference_config["hierarchical"]) mapi_config[("model_info", "output_raw_scores")] = str(True) all_labels = "" + all_label_ids = "" for lbl in label_schema.get_labels(include_empty=False): all_labels += lbl.name.replace(" ", "_") + " " - all_labels = all_labels.strip() - mapi_config[("model_info", "labels")] = all_labels + all_label_ids += f"{lbl.id_} " + + mapi_config[("model_info", "labels")] = all_labels.strip() + mapi_config[("model_info", "label_ids")] = all_label_ids.strip() hierarchical_config = {} hierarchical_config["cls_heads_info"] = get_multihead_class_info(label_schema) diff --git a/src/otx/algorithms/common/adapters/mmcv/configurer.py b/src/otx/algorithms/common/adapters/mmcv/configurer.py index 2136ee91f72..f013c45c7ef 100644 --- a/src/otx/algorithms/common/adapters/mmcv/configurer.py +++ b/src/otx/algorithms/common/adapters/mmcv/configurer.py @@ -34,9 +34,9 @@ is_xpu_available, ) from otx.algorithms.common.utils.data import compute_robust_dataset_statistics -from otx.algorithms.common.utils.logger import get_logger from otx.api.usecases.reporting.time_monitor_callback import TimeMonitorCallback from otx.core.data import caching +from otx.utils.logger import get_logger logger = get_logger() diff --git a/src/otx/algorithms/common/adapters/mmcv/hooks/adaptive_repeat_data_hook.py 
b/src/otx/algorithms/common/adapters/mmcv/hooks/adaptive_repeat_data_hook.py index 310e7316ba6..a04657fd324 100644 --- a/src/otx/algorithms/common/adapters/mmcv/hooks/adaptive_repeat_data_hook.py +++ b/src/otx/algorithms/common/adapters/mmcv/hooks/adaptive_repeat_data_hook.py @@ -8,7 +8,7 @@ from otx.algorithms.common.adapters.mmcv.utils.config_utils import get_proper_repeat_times from otx.algorithms.common.adapters.torch.dataloaders.samplers import OTXSampler -from otx.algorithms.common.utils.logger import get_logger +from otx.utils.logger import get_logger logger = get_logger() diff --git a/src/otx/algorithms/common/adapters/mmcv/hooks/adaptive_training_hook.py b/src/otx/algorithms/common/adapters/mmcv/hooks/adaptive_training_hook.py index f9e6fb345ff..747b638fb3a 100644 --- a/src/otx/algorithms/common/adapters/mmcv/hooks/adaptive_training_hook.py +++ b/src/otx/algorithms/common/adapters/mmcv/hooks/adaptive_training_hook.py @@ -12,7 +12,7 @@ from otx.algorithms.common.adapters.mmcv.hooks.early_stopping_hook import ( EarlyStoppingHook, ) -from otx.algorithms.common.utils.logger import get_logger +from otx.utils.logger import get_logger logger = get_logger() diff --git a/src/otx/algorithms/common/adapters/mmcv/hooks/cancel_hook.py b/src/otx/algorithms/common/adapters/mmcv/hooks/cancel_hook.py index c2300184f42..9f92c73caa0 100644 --- a/src/otx/algorithms/common/adapters/mmcv/hooks/cancel_hook.py +++ b/src/otx/algorithms/common/adapters/mmcv/hooks/cancel_hook.py @@ -10,7 +10,7 @@ from mmcv.runner import BaseRunner, EpochBasedRunner from mmcv.runner.hooks import HOOKS, Hook -from otx.algorithms.common.utils.logger import get_logger +from otx.utils.logger import get_logger logger = get_logger() diff --git a/src/otx/algorithms/common/adapters/mmcv/hooks/composed_dataloaders_hook.py b/src/otx/algorithms/common/adapters/mmcv/hooks/composed_dataloaders_hook.py index ab2e32414c6..a70a01b68b1 100644 --- a/src/otx/algorithms/common/adapters/mmcv/hooks/composed_dataloaders_hook.py +++ b/src/otx/algorithms/common/adapters/mmcv/hooks/composed_dataloaders_hook.py @@ -9,7 +9,7 @@ from torch.utils.data import DataLoader from otx.algorithms.common.adapters.torch.dataloaders import ComposedDL -from otx.algorithms.common.utils.logger import get_logger +from otx.utils.logger import get_logger logger = get_logger() diff --git a/src/otx/algorithms/common/adapters/mmcv/hooks/custom_model_ema_hook.py b/src/otx/algorithms/common/adapters/mmcv/hooks/custom_model_ema_hook.py index b73fe48bae1..b30b080c052 100644 --- a/src/otx/algorithms/common/adapters/mmcv/hooks/custom_model_ema_hook.py +++ b/src/otx/algorithms/common/adapters/mmcv/hooks/custom_model_ema_hook.py @@ -10,7 +10,7 @@ from mmcv.runner import HOOKS, BaseRunner, Hook from mmcv.runner.hooks.ema import EMAHook -from otx.algorithms.common.utils.logger import get_logger +from otx.utils.logger import get_logger logger = get_logger() diff --git a/src/otx/algorithms/common/adapters/mmcv/hooks/dual_model_ema_hook.py b/src/otx/algorithms/common/adapters/mmcv/hooks/dual_model_ema_hook.py index f1c02adc658..9708c8cb05a 100644 --- a/src/otx/algorithms/common/adapters/mmcv/hooks/dual_model_ema_hook.py +++ b/src/otx/algorithms/common/adapters/mmcv/hooks/dual_model_ema_hook.py @@ -9,7 +9,7 @@ from mmcv.parallel import is_module_wrapper from mmcv.runner import HOOKS, Hook -from otx.algorithms.common.utils.logger import get_logger +from otx.utils.logger import get_logger logger = get_logger() diff --git 
a/src/otx/algorithms/common/adapters/mmcv/hooks/early_stopping_hook.py b/src/otx/algorithms/common/adapters/mmcv/hooks/early_stopping_hook.py index 3d55ab15fbc..4be96b9516c 100644 --- a/src/otx/algorithms/common/adapters/mmcv/hooks/early_stopping_hook.py +++ b/src/otx/algorithms/common/adapters/mmcv/hooks/early_stopping_hook.py @@ -10,7 +10,7 @@ from mmcv.runner.hooks import HOOKS, Hook from mmcv.utils import print_log -from otx.algorithms.common.utils.logger import get_logger +from otx.utils.logger import get_logger logger = get_logger() diff --git a/src/otx/algorithms/common/adapters/mmcv/hooks/force_train_hook.py b/src/otx/algorithms/common/adapters/mmcv/hooks/force_train_hook.py index 0d47e67841c..b79c7b89a2e 100644 --- a/src/otx/algorithms/common/adapters/mmcv/hooks/force_train_hook.py +++ b/src/otx/algorithms/common/adapters/mmcv/hooks/force_train_hook.py @@ -16,7 +16,7 @@ from mmcv.runner.hooks import HOOKS, Hook -from otx.algorithms.common.utils.logger import get_logger +from otx.utils.logger import get_logger logger = get_logger() diff --git a/src/otx/algorithms/common/adapters/mmcv/hooks/logger_hook.py b/src/otx/algorithms/common/adapters/mmcv/hooks/logger_hook.py index a7ce6a35f48..6889db20b5d 100644 --- a/src/otx/algorithms/common/adapters/mmcv/hooks/logger_hook.py +++ b/src/otx/algorithms/common/adapters/mmcv/hooks/logger_hook.py @@ -10,7 +10,7 @@ from mmcv.runner.dist_utils import master_only from mmcv.runner.hooks import HOOKS, Hook, LoggerHook -from otx.algorithms.common.utils.logger import get_logger +from otx.utils.logger import get_logger logger = get_logger() diff --git a/src/otx/algorithms/common/adapters/mmcv/hooks/loss_dynamics_tracking_hook.py b/src/otx/algorithms/common/adapters/mmcv/hooks/loss_dynamics_tracking_hook.py index 0623f13621d..041445bf22b 100644 --- a/src/otx/algorithms/common/adapters/mmcv/hooks/loss_dynamics_tracking_hook.py +++ b/src/otx/algorithms/common/adapters/mmcv/hooks/loss_dynamics_tracking_hook.py @@ -12,9 +12,9 @@ from otx.algorithms.common.adapters.mmcv.utils.config_utils import ( update_or_add_custom_hook, ) -from otx.algorithms.common.utils.logger import get_logger from otx.api.entities.datasets import DatasetEntity from otx.core.data.noisy_label_detection.base import LossDynamicsTracker, LossDynamicsTrackingMixin +from otx.utils.logger import get_logger logger = get_logger() diff --git a/src/otx/algorithms/common/adapters/mmcv/hooks/mean_teacher_hook.py b/src/otx/algorithms/common/adapters/mmcv/hooks/mean_teacher_hook.py index 01be81b842f..e34b36fc7e6 100644 --- a/src/otx/algorithms/common/adapters/mmcv/hooks/mean_teacher_hook.py +++ b/src/otx/algorithms/common/adapters/mmcv/hooks/mean_teacher_hook.py @@ -8,7 +8,7 @@ from otx.algorithms.common.adapters.mmcv.hooks.dual_model_ema_hook import ( DualModelEMAHook, ) -from otx.algorithms.common.utils.logger import get_logger +from otx.utils.logger import get_logger logger = get_logger() diff --git a/src/otx/algorithms/common/adapters/mmcv/hooks/model_ema_v2_hook.py b/src/otx/algorithms/common/adapters/mmcv/hooks/model_ema_v2_hook.py index b22f7989776..f9ef09f69df 100644 --- a/src/otx/algorithms/common/adapters/mmcv/hooks/model_ema_v2_hook.py +++ b/src/otx/algorithms/common/adapters/mmcv/hooks/model_ema_v2_hook.py @@ -9,7 +9,7 @@ from mmcv.runner import HOOKS, Hook from torch import nn -from otx.algorithms.common.utils.logger import get_logger +from otx.utils.logger import get_logger logger = get_logger() diff --git a/src/otx/algorithms/common/adapters/mmcv/hooks/no_bias_decay_hook.py 
b/src/otx/algorithms/common/adapters/mmcv/hooks/no_bias_decay_hook.py index 06e1e06e485..b930029a1ca 100644 --- a/src/otx/algorithms/common/adapters/mmcv/hooks/no_bias_decay_hook.py +++ b/src/otx/algorithms/common/adapters/mmcv/hooks/no_bias_decay_hook.py @@ -6,7 +6,7 @@ from mmcv.runner import HOOKS, Hook from torch import nn -from otx.algorithms.common.utils.logger import get_logger +from otx.utils.logger import get_logger logger = get_logger() diff --git a/src/otx/algorithms/common/adapters/mmcv/hooks/progress_hook.py b/src/otx/algorithms/common/adapters/mmcv/hooks/progress_hook.py index 1725d855438..03115f32f5b 100644 --- a/src/otx/algorithms/common/adapters/mmcv/hooks/progress_hook.py +++ b/src/otx/algorithms/common/adapters/mmcv/hooks/progress_hook.py @@ -19,8 +19,8 @@ from mmcv.runner import BaseRunner from mmcv.runner.hooks import HOOKS, Hook -from otx.algorithms.common.utils.logger import get_logger from otx.api.usecases.reporting.time_monitor_callback import TimeMonitorCallback +from otx.utils.logger import get_logger logger = get_logger() diff --git a/src/otx/algorithms/common/adapters/mmcv/hooks/recording_forward_hook.py b/src/otx/algorithms/common/adapters/mmcv/hooks/recording_forward_hook.py index a3b2698babb..9ba901f8fde 100644 --- a/src/otx/algorithms/common/adapters/mmcv/hooks/recording_forward_hook.py +++ b/src/otx/algorithms/common/adapters/mmcv/hooks/recording_forward_hook.py @@ -16,7 +16,7 @@ from __future__ import annotations from abc import ABC -from typing import List, Optional, Sequence, Union +from typing import List, Optional, Sequence, Tuple, Union import numpy as np import torch @@ -173,6 +173,16 @@ def func(feature_map: Union[torch.Tensor, Sequence[torch.Tensor]], fpn_idx: int return feature_vector +class ViTFeatureVectorHook(BaseRecordingForwardHook): + """FeatureVectorHook for transformer-based classifiers.""" + + @staticmethod + def func(features: Tuple[List[torch.Tensor]], fpn_idx: int = -1) -> torch.Tensor: + """Generate the feature vector for transformer-based classifiers by returning the cls token.""" + _, cls_token = features[0] + return cls_token + + class ReciproCAMHook(BaseRecordingForwardHook): """Implementation of recipro-cam for class-wise saliency map. 
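For context on the ViTFeatureVectorHook introduced above: where FeatureVectorHook pools a CNN feature map, the ViT variant simply unpacks the (patch_tokens, cls_token) pair the transformer backbone is expected to emit per stage and keeps the cls token as the per-image feature vector. A minimal sketch of that unpacking, with toy tensors standing in for a real backbone output (shapes are illustrative only):

import torch


def vit_feature_vector(features):
    """Mirror ViTFeatureVectorHook.func(): keep the cls token of the first stage output."""
    _, cls_token = features[0]  # each stage output is a (patch_tokens, cls_token) pair
    return cls_token


# Toy stage output: batch of 2 images, 196 patch tokens, 768-dim embeddings.
stage_output = (torch.randn(2, 196, 768), torch.randn(2, 768))
print(vit_feature_vector((stage_output,)).shape)  # torch.Size([2, 768]), one vector per image
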
diff --git a/src/otx/algorithms/common/adapters/mmcv/hooks/task_adapt_hook.py b/src/otx/algorithms/common/adapters/mmcv/hooks/task_adapt_hook.py index 193f156fafd..d3e4f9ad68b 100644 --- a/src/otx/algorithms/common/adapters/mmcv/hooks/task_adapt_hook.py +++ b/src/otx/algorithms/common/adapters/mmcv/hooks/task_adapt_hook.py @@ -11,7 +11,7 @@ ClsIncrSampler, OTXSampler, ) -from otx.algorithms.common.utils.logger import get_logger +from otx.utils.logger import get_logger logger = get_logger() diff --git a/src/otx/algorithms/common/adapters/mmcv/hooks/two_crop_transform_hook.py b/src/otx/algorithms/common/adapters/mmcv/hooks/two_crop_transform_hook.py index 138ae1bf6c7..d225e3b44e4 100644 --- a/src/otx/algorithms/common/adapters/mmcv/hooks/two_crop_transform_hook.py +++ b/src/otx/algorithms/common/adapters/mmcv/hooks/two_crop_transform_hook.py @@ -4,7 +4,7 @@ from mmcv.runner import BaseRunner from mmcv.runner.hooks import HOOKS, Hook -from otx.algorithms.common.utils.logger import get_logger +from otx.utils.logger import get_logger logger = get_logger() diff --git a/src/otx/algorithms/common/adapters/mmcv/models/backbones/efficientnet.py b/src/otx/algorithms/common/adapters/mmcv/models/backbones/efficientnet.py index 13ca6eb1c3c..ce341d13a87 100644 --- a/src/otx/algorithms/common/adapters/mmcv/models/backbones/efficientnet.py +++ b/src/otx/algorithms/common/adapters/mmcv/models/backbones/efficientnet.py @@ -22,7 +22,7 @@ from torch import nn from torch.nn import init -from otx.algorithms.common.utils.logger import get_logger +from otx.utils.logger import get_logger from ..builder import BACKBONES diff --git a/src/otx/algorithms/common/adapters/mmcv/models/backbones/efficientnetv2.py b/src/otx/algorithms/common/adapters/mmcv/models/backbones/efficientnetv2.py index 11fbf4c04f5..631e2fc612f 100644 --- a/src/otx/algorithms/common/adapters/mmcv/models/backbones/efficientnetv2.py +++ b/src/otx/algorithms/common/adapters/mmcv/models/backbones/efficientnetv2.py @@ -17,7 +17,7 @@ from mmcv.runner import load_checkpoint from torch import nn -from otx.algorithms.common.utils.logger import get_logger +from otx.utils.logger import get_logger from ..builder import BACKBONES diff --git a/src/otx/algorithms/common/adapters/mmcv/models/backbones/mobilenetv3.py b/src/otx/algorithms/common/adapters/mmcv/models/backbones/mobilenetv3.py index 8d5630a1a51..cd63173afc1 100644 --- a/src/otx/algorithms/common/adapters/mmcv/models/backbones/mobilenetv3.py +++ b/src/otx/algorithms/common/adapters/mmcv/models/backbones/mobilenetv3.py @@ -19,7 +19,7 @@ from mmcv.runner import load_checkpoint from torch import nn -from otx.algorithms.common.utils.logger import get_logger +from otx.utils.logger import get_logger from ..builder import BACKBONES diff --git a/src/otx/algorithms/common/adapters/mmcv/nncf/utils.py b/src/otx/algorithms/common/adapters/mmcv/nncf/utils.py index cb82d26cb26..08bd33a97fa 100644 --- a/src/otx/algorithms/common/adapters/mmcv/nncf/utils.py +++ b/src/otx/algorithms/common/adapters/mmcv/nncf/utils.py @@ -26,7 +26,7 @@ no_nncf_trace, ) from otx.algorithms.common.utils import get_arg_spec -from otx.algorithms.common.utils.logger import get_logger +from otx.utils.logger import get_logger logger = get_logger() diff --git a/src/otx/algorithms/common/adapters/mmcv/tasks/exporter.py b/src/otx/algorithms/common/adapters/mmcv/tasks/exporter.py index 969e5ffdf81..995f46d5399 100644 --- a/src/otx/algorithms/common/adapters/mmcv/tasks/exporter.py +++ b/src/otx/algorithms/common/adapters/mmcv/tasks/exporter.py @@ 
-6,7 +6,7 @@ import os import traceback -from otx.algorithms.common.utils.logger import get_logger +from otx.utils.logger import get_logger logger = get_logger() diff --git a/src/otx/algorithms/common/adapters/mmcv/utils/automatic_bs.py b/src/otx/algorithms/common/adapters/mmcv/utils/automatic_bs.py index 3ccd2c81f3f..cfc4b6eb07d 100644 --- a/src/otx/algorithms/common/adapters/mmcv/utils/automatic_bs.py +++ b/src/otx/algorithms/common/adapters/mmcv/utils/automatic_bs.py @@ -11,7 +11,7 @@ from torch.cuda import is_available as cuda_available from otx.algorithms.common.adapters.torch.utils import BsSearchAlgo -from otx.algorithms.common.utils.logger import get_logger +from otx.utils.logger import get_logger logger = get_logger() diff --git a/src/otx/algorithms/common/adapters/mmcv/utils/config_utils.py b/src/otx/algorithms/common/adapters/mmcv/utils/config_utils.py index 007c64dfa30..3930fc22bb6 100644 --- a/src/otx/algorithms/common/adapters/mmcv/utils/config_utils.py +++ b/src/otx/algorithms/common/adapters/mmcv/utils/config_utils.py @@ -25,8 +25,8 @@ from mmcv.utils.path import check_file_exist from otx.algorithms.common.configs.configuration_enums import InputSizePreset -from otx.algorithms.common.utils.logger import get_logger from otx.api.entities.datasets import DatasetEntity +from otx.utils.logger import get_logger from ._config_utils_get_configs_by_keys import get_configs_by_keys from ._config_utils_get_configs_by_pairs import get_configs_by_pairs diff --git a/src/otx/algorithms/common/adapters/torch/dataloaders/composed_dataloader.py b/src/otx/algorithms/common/adapters/torch/dataloaders/composed_dataloader.py index 5fb30db8ead..463f5771333 100644 --- a/src/otx/algorithms/common/adapters/torch/dataloaders/composed_dataloader.py +++ b/src/otx/algorithms/common/adapters/torch/dataloaders/composed_dataloader.py @@ -3,7 +3,7 @@ # SPDX-License-Identifier: Apache-2.0 # -from otx.algorithms.common.utils.logger import get_logger +from otx.utils.logger import get_logger logger = get_logger() diff --git a/src/otx/algorithms/common/adapters/torch/dataloaders/samplers/balanced_sampler.py b/src/otx/algorithms/common/adapters/torch/dataloaders/samplers/balanced_sampler.py index 1ddaaab7884..12492a46b31 100644 --- a/src/otx/algorithms/common/adapters/torch/dataloaders/samplers/balanced_sampler.py +++ b/src/otx/algorithms/common/adapters/torch/dataloaders/samplers/balanced_sampler.py @@ -9,7 +9,7 @@ import numpy as np from torch.utils.data import Dataset -from otx.algorithms.common.utils.logger import get_logger +from otx.utils.logger import get_logger from .otx_sampler import OTXSampler @@ -58,21 +58,19 @@ def __init__( super().__init__(dataset, samples_per_gpu, n_repeats=n_repeats) - self.img_indices = self.dataset.img_indices # type: ignore[attr-defined] + self.img_indices = {k: v for k, v in self.dataset.img_indices.items() if len(v) > 0} self.num_cls = len(self.img_indices.keys()) self.data_length = len(self.dataset) + self.num_trials = int(self.data_length / self.num_cls) if efficient_mode: # Reduce the # of sampling (sampling data for a single epoch) - self.num_tail = min(len(cls_indices) for cls_indices in self.img_indices.values()) - base = 1 - (1 / self.num_tail) - if base == 0: - raise ValueError("Required more than one sample per class") - self.num_trials = int(math.log(0.001, base)) - if int(self.data_length / self.num_cls) < self.num_trials: - self.num_trials = int(self.data_length / self.num_cls) - else: - self.num_trials = int(self.data_length / self.num_cls) + num_tail = 
min(len(cls_indices) for cls_indices in self.img_indices.values()) + if num_tail > 1: + base = 1 - (1 / num_tail) + num_reduced_trials = int(math.log(0.001, base)) + self.num_trials = min(num_reduced_trials, self.num_trials) + self.num_samples = self._calculate_num_samples() logger.info( diff --git a/src/otx/algorithms/common/adapters/torch/dataloaders/samplers/otx_sampler.py b/src/otx/algorithms/common/adapters/torch/dataloaders/samplers/otx_sampler.py index b01f2aaef66..ada7250c65c 100644 --- a/src/otx/algorithms/common/adapters/torch/dataloaders/samplers/otx_sampler.py +++ b/src/otx/algorithms/common/adapters/torch/dataloaders/samplers/otx_sampler.py @@ -12,8 +12,8 @@ from torch.utils.data.sampler import Sampler from otx.algorithms.common.adapters.mmcv.utils.config_utils import get_proper_repeat_times -from otx.algorithms.common.utils.logger import get_logger from otx.algorithms.common.utils.task_adapt import unwrap_dataset +from otx.utils.logger import get_logger logger = get_logger() diff --git a/src/otx/algorithms/common/adapters/torch/utils/bs_search_algo.py b/src/otx/algorithms/common/adapters/torch/utils/bs_search_algo.py index 5b1457c6ede..eaf8c1116e6 100644 --- a/src/otx/algorithms/common/adapters/torch/utils/bs_search_algo.py +++ b/src/otx/algorithms/common/adapters/torch/utils/bs_search_algo.py @@ -8,7 +8,7 @@ import torch import torch.distributed as dist -from otx.algorithms.common.utils.logger import get_logger +from otx.utils.logger import get_logger logger = get_logger() diff --git a/src/otx/algorithms/common/tasks/base_task.py b/src/otx/algorithms/common/tasks/base_task.py index f390c5fd260..93f0435240c 100644 --- a/src/otx/algorithms/common/tasks/base_task.py +++ b/src/otx/algorithms/common/tasks/base_task.py @@ -30,7 +30,6 @@ from otx.algorithms.common.adapters.mmcv.hooks.cancel_hook import CancelInterfaceHook from otx.algorithms.common.configs.training_base import TrainType from otx.algorithms.common.utils import UncopiableDefaultDict, append_dist_rank_suffix, set_random_seed -from otx.algorithms.common.utils.logger import get_logger from otx.api.entities.datasets import DatasetEntity from otx.api.entities.explain_parameters import ExplainParameters from otx.api.entities.inference_parameters import InferenceParameters @@ -46,6 +45,7 @@ from otx.api.usecases.tasks.interfaces.export_interface import ExportType, IExportTask from otx.api.usecases.tasks.interfaces.inference_interface import IInferenceTask from otx.api.usecases.tasks.interfaces.unload_interface import IUnload +from otx.utils.logger import get_logger TRAIN_TYPE_DIR_PATH = { TrainType.Incremental.name: ".", @@ -107,7 +107,7 @@ def __init__(self, task_environment: TaskEnvironment, output_path: Optional[str] self._learning_curves = UncopiableDefaultDict(OTXLoggerHook.Curve) self._model_label_schema: List[LabelEntity] = [] self._resume = False - self._should_stop = False + self._should_stop: bool = False self.cancel_interface: Optional[CancelInterfaceHook] = None self.reserved_cancel = False self._model_ckpt = None diff --git a/src/otx/algorithms/common/tasks/nncf_task.py b/src/otx/algorithms/common/tasks/nncf_task.py index a3810e80370..30d11750b04 100644 --- a/src/otx/algorithms/common/tasks/nncf_task.py +++ b/src/otx/algorithms/common/tasks/nncf_task.py @@ -37,7 +37,6 @@ from otx.algorithms.common.adapters.nncf.config import compose_nncf_config from otx.algorithms.common.utils.callback import OptimizationProgressCallback from otx.algorithms.common.utils.data import get_dataset -from 
otx.algorithms.common.utils.logger import get_logger from otx.api.configuration import cfg_helper from otx.api.configuration.helper.utils import ids_to_strings from otx.api.entities.datasets import DatasetEntity @@ -58,6 +57,7 @@ IOptimizationTask, OptimizationType, ) +from otx.utils.logger import get_logger logger = get_logger() diff --git a/src/otx/algorithms/common/utils/data.py b/src/otx/algorithms/common/utils/data.py index 75fa6f2a201..0297700045c 100644 --- a/src/otx/algorithms/common/utils/data.py +++ b/src/otx/algorithms/common/utils/data.py @@ -5,7 +5,6 @@ # pylint: disable=invalid-name import glob -import logging import os import random from typing import Any, Dict, List, Optional, Union @@ -19,8 +18,9 @@ from otx.api.entities.image import Image from otx.api.entities.subset import Subset from otx.api.utils.argument_checks import IMAGE_FILE_EXTENSIONS +from otx.utils.logger import get_logger -logger = logging.getLogger(__name__) +logger = get_logger() def get_unlabeled_filename(base_root: str, file_list_path: str): diff --git a/src/otx/algorithms/common/utils/task_adapt.py b/src/otx/algorithms/common/utils/task_adapt.py index 1f726a3f1c1..b720de811cb 100644 --- a/src/otx/algorithms/common/utils/task_adapt.py +++ b/src/otx/algorithms/common/utils/task_adapt.py @@ -5,7 +5,7 @@ import numpy as np -from otx.algorithms.common.utils.logger import get_logger +from otx.utils.logger import get_logger logger = get_logger() diff --git a/src/otx/algorithms/detection/adapters/mmdet/configurer.py b/src/otx/algorithms/detection/adapters/mmdet/configurer.py index a176d64e3a3..e21947ea19c 100644 --- a/src/otx/algorithms/detection/adapters/mmdet/configurer.py +++ b/src/otx/algorithms/detection/adapters/mmdet/configurer.py @@ -13,12 +13,12 @@ from otx.algorithms.common.adapters.mmcv.utils.config_utils import ( InputSizeManager, ) -from otx.algorithms.common.utils.logger import get_logger from otx.algorithms.detection.adapters.mmdet.utils import ( cluster_anchors, patch_tiling, should_cluster_anchors, ) +from otx.utils.logger import get_logger logger = get_logger() diff --git a/src/otx/algorithms/detection/adapters/mmdet/datasets/dataset.py b/src/otx/algorithms/detection/adapters/mmdet/datasets/dataset.py index cce2b5dda77..7f1fb146311 100644 --- a/src/otx/algorithms/detection/adapters/mmdet/datasets/dataset.py +++ b/src/otx/algorithms/detection/adapters/mmdet/datasets/dataset.py @@ -276,7 +276,7 @@ def evaluate( # pylint: disable=too-many-branches assert isinstance(iou_thrs, list) mean_aps = [] for iou_thr in iou_thrs: # pylint: disable=redefined-argument-from-local - print_log(f'\n{"-" * 15}iou_thr: {iou_thr}{"-" * 15}') + print_log(f'\n{"-" * 15}iou_thr: {iou_thr}{"-" * 15}', logger) mean_ap, _ = self.evaluator.evaluate(results, logger, iou_thr, scale_ranges) mean_aps.append(mean_ap) eval_results[f"AP{int(iou_thr * 100):02d}"] = round(mean_ap, 3) diff --git a/src/otx/algorithms/detection/adapters/mmdet/hooks/det_class_probability_map_hook.py b/src/otx/algorithms/detection/adapters/mmdet/hooks/det_class_probability_map_hook.py index 7931e234091..2847f1c573a 100644 --- a/src/otx/algorithms/detection/adapters/mmdet/hooks/det_class_probability_map_hook.py +++ b/src/otx/algorithms/detection/adapters/mmdet/hooks/det_class_probability_map_hook.py @@ -60,12 +60,9 @@ def func( else: cls_scores = self._get_cls_scores_from_feature_map(feature_map) - # Don't use softmax for tiles in tiling detection, if the tile doesn't contain objects, - # it would highlight one of the class maps as a background class - if 
self.use_cls_softmax and self._num_cls_out_channels > 1: - cls_scores = [torch.softmax(t, dim=1) for t in cls_scores] - - batch_size, _, height, width = cls_scores[-1].size() + middle_idx = len(cls_scores) // 2 + # resize to the middle feature map + batch_size, _, height, width = cls_scores[middle_idx].size() saliency_maps = torch.empty(batch_size, self._num_cls_out_channels, height, width) for batch_idx in range(batch_size): cls_scores_anchorless = [] @@ -82,6 +79,11 @@ def func( ) saliency_maps[batch_idx] = torch.cat(cls_scores_anchorless_resized, dim=0).mean(dim=0) + # Don't use softmax for tiles in tiling detection, if the tile doesn't contain objects, + # it would highlight one of the class maps as a background class + if self.use_cls_softmax: + saliency_maps[0] = torch.stack([torch.softmax(t, dim=1) for t in saliency_maps[0]]) + if self._norm_saliency_maps: saliency_maps = saliency_maps.reshape((batch_size, self._num_cls_out_channels, -1)) saliency_maps = self._normalize_map(saliency_maps) diff --git a/src/otx/algorithms/detection/adapters/mmdet/models/backbones/imgclsmob.py b/src/otx/algorithms/detection/adapters/mmdet/models/backbones/imgclsmob.py index 7bfdd26f55c..e9c93e1fa84 100644 --- a/src/otx/algorithms/detection/adapters/mmdet/models/backbones/imgclsmob.py +++ b/src/otx/algorithms/detection/adapters/mmdet/models/backbones/imgclsmob.py @@ -8,15 +8,18 @@ from mmcv.cnn import build_activation_layer, build_norm_layer from mmcv.runner import get_dist_info from mmdet.models.builder import BACKBONES -from mmdet.utils.logger import get_root_logger from pytorchcv.model_provider import _models from pytorchcv.models.model_store import download_model from torch import distributed, nn from torch.nn.modules.batchnorm import _BatchNorm +from otx.utils.logger import get_logger + # TODO: Need to fix pylint issues # pylint: disable=protected-access, abstract-method, no-value-for-parameter, assignment-from-no-return +logger = get_logger() + def replace_activation(model, activation_cfg): """Replace activate funtion.""" @@ -95,8 +98,6 @@ def init_weights(self, pretrained=True): def generate_backbones(): """Generate backbones of pytorchcv funtion.""" - logger = get_root_logger() - for model_name, model_getter in _models.items(): def closure(model_name, model_getter): diff --git a/src/otx/algorithms/detection/adapters/mmdet/models/dense_heads/mmov_rpn_head.py b/src/otx/algorithms/detection/adapters/mmdet/models/dense_heads/mmov_rpn_head.py index 853840d7f6e..7befbb7ca7a 100644 --- a/src/otx/algorithms/detection/adapters/mmdet/models/dense_heads/mmov_rpn_head.py +++ b/src/otx/algorithms/detection/adapters/mmdet/models/dense_heads/mmov_rpn_head.py @@ -11,8 +11,8 @@ from mmdet.models.builder import HEADS from mmdet.models.dense_heads.rpn_head import RPNHead -from otx.algorithms.common.utils.logger import get_logger from otx.core.ov.models.mmov_model import MMOVModel +from otx.utils.logger import get_logger logger = get_logger() diff --git a/src/otx/algorithms/detection/adapters/mmdet/models/detectors/custom_atss_detector.py b/src/otx/algorithms/detection/adapters/mmdet/models/detectors/custom_atss_detector.py index 789aa146e32..63d94f894d9 100644 --- a/src/otx/algorithms/detection/adapters/mmdet/models/detectors/custom_atss_detector.py +++ b/src/otx/algorithms/detection/adapters/mmdet/models/detectors/custom_atss_detector.py @@ -13,12 +13,12 @@ FeatureVectorHook, ) from otx.algorithms.common.adapters.mmdeploy.utils import is_mmdeploy_enabled -from otx.algorithms.common.utils.logger import get_logger 
from otx.algorithms.common.utils.task_adapt import map_class_names from otx.algorithms.detection.adapters.mmdet.hooks.det_class_probability_map_hook import ( DetClassProbabilityMapHook, ) from otx.algorithms.detection.adapters.mmdet.models.loss_dyns import TrackingLossType +from otx.utils.logger import get_logger from .l2sp_detector_mixin import L2SPDetectorMixin from .loss_dynamics_mixin import DetLossDynamicsTrackingMixin diff --git a/src/otx/algorithms/detection/adapters/mmdet/models/detectors/custom_deformable_detr_detector.py b/src/otx/algorithms/detection/adapters/mmdet/models/detectors/custom_deformable_detr_detector.py index f5a5ee33c7b..4a9a18c312d 100644 --- a/src/otx/algorithms/detection/adapters/mmdet/models/detectors/custom_deformable_detr_detector.py +++ b/src/otx/algorithms/detection/adapters/mmdet/models/detectors/custom_deformable_detr_detector.py @@ -14,8 +14,8 @@ FeatureVectorHook, ) from otx.algorithms.common.adapters.mmdeploy.utils import is_mmdeploy_enabled -from otx.algorithms.common.utils.logger import get_logger from otx.algorithms.common.utils.task_adapt import map_class_names +from otx.utils.logger import get_logger logger = get_logger() diff --git a/src/otx/algorithms/detection/adapters/mmdet/models/detectors/custom_dino_detector.py b/src/otx/algorithms/detection/adapters/mmdet/models/detectors/custom_dino_detector.py index 2f210922e6d..3d739fcd292 100644 --- a/src/otx/algorithms/detection/adapters/mmdet/models/detectors/custom_dino_detector.py +++ b/src/otx/algorithms/detection/adapters/mmdet/models/detectors/custom_dino_detector.py @@ -11,8 +11,8 @@ FeatureVectorHook, ) from otx.algorithms.common.adapters.mmdeploy.utils import is_mmdeploy_enabled -from otx.algorithms.common.utils.logger import get_logger from otx.algorithms.detection.adapters.mmdet.models.detectors import CustomDeformableDETR +from otx.utils.logger import get_logger logger = get_logger() diff --git a/src/otx/algorithms/detection/adapters/mmdet/models/detectors/custom_lite_dino.py b/src/otx/algorithms/detection/adapters/mmdet/models/detectors/custom_lite_dino.py index b2f973187bb..be71f1b8b7c 100644 --- a/src/otx/algorithms/detection/adapters/mmdet/models/detectors/custom_lite_dino.py +++ b/src/otx/algorithms/detection/adapters/mmdet/models/detectors/custom_lite_dino.py @@ -6,8 +6,8 @@ from mmdet.models.builder import DETECTORS -from otx.algorithms.common.utils.logger import get_logger from otx.algorithms.detection.adapters.mmdet.models.detectors import CustomDINO +from otx.utils.logger import get_logger logger = get_logger() diff --git a/src/otx/algorithms/detection/adapters/mmdet/models/detectors/custom_maskrcnn_detector.py b/src/otx/algorithms/detection/adapters/mmdet/models/detectors/custom_maskrcnn_detector.py index afd8b0bcf0e..3d80c497a4c 100644 --- a/src/otx/algorithms/detection/adapters/mmdet/models/detectors/custom_maskrcnn_detector.py +++ b/src/otx/algorithms/detection/adapters/mmdet/models/detectors/custom_maskrcnn_detector.py @@ -13,8 +13,8 @@ FeatureVectorHook, ) from otx.algorithms.common.adapters.mmdeploy.utils import is_mmdeploy_enabled -from otx.algorithms.common.utils.logger import get_logger from otx.algorithms.common.utils.task_adapt import map_class_names +from otx.utils.logger import get_logger from .l2sp_detector_mixin import L2SPDetectorMixin from .sam_detector_mixin import SAMDetectorMixin diff --git a/src/otx/algorithms/detection/adapters/mmdet/models/detectors/custom_single_stage_detector.py 
b/src/otx/algorithms/detection/adapters/mmdet/models/detectors/custom_single_stage_detector.py index a8e926cae5d..f98b67c631d 100644 --- a/src/otx/algorithms/detection/adapters/mmdet/models/detectors/custom_single_stage_detector.py +++ b/src/otx/algorithms/detection/adapters/mmdet/models/detectors/custom_single_stage_detector.py @@ -13,13 +13,13 @@ FeatureVectorHook, ) from otx.algorithms.common.adapters.mmdeploy.utils import is_mmdeploy_enabled -from otx.algorithms.common.utils.logger import get_logger from otx.algorithms.common.utils.task_adapt import map_class_names from otx.algorithms.detection.adapters.mmdet.hooks.det_class_probability_map_hook import ( DetClassProbabilityMapHook, ) from otx.algorithms.detection.adapters.mmdet.models.detectors.loss_dynamics_mixin import DetLossDynamicsTrackingMixin from otx.algorithms.detection.adapters.mmdet.models.loss_dyns import TrackingLossType +from otx.utils.logger import get_logger from .l2sp_detector_mixin import L2SPDetectorMixin from .sam_detector_mixin import SAMDetectorMixin diff --git a/src/otx/algorithms/detection/adapters/mmdet/models/detectors/custom_two_stage_detector.py b/src/otx/algorithms/detection/adapters/mmdet/models/detectors/custom_two_stage_detector.py index 552891c8978..1e9f663fffc 100644 --- a/src/otx/algorithms/detection/adapters/mmdet/models/detectors/custom_two_stage_detector.py +++ b/src/otx/algorithms/detection/adapters/mmdet/models/detectors/custom_two_stage_detector.py @@ -8,8 +8,8 @@ from mmdet.models.builder import DETECTORS from mmdet.models.detectors.two_stage import TwoStageDetector -from otx.algorithms.common.utils.logger import get_logger from otx.algorithms.common.utils.task_adapt import map_class_names +from otx.utils.logger import get_logger from .l2sp_detector_mixin import L2SPDetectorMixin from .sam_detector_mixin import SAMDetectorMixin diff --git a/src/otx/algorithms/detection/adapters/mmdet/models/detectors/custom_vfnet_detector.py b/src/otx/algorithms/detection/adapters/mmdet/models/detectors/custom_vfnet_detector.py index d1120387077..14e746f76f7 100644 --- a/src/otx/algorithms/detection/adapters/mmdet/models/detectors/custom_vfnet_detector.py +++ b/src/otx/algorithms/detection/adapters/mmdet/models/detectors/custom_vfnet_detector.py @@ -8,10 +8,10 @@ from mmdet.models.builder import DETECTORS from mmdet.models.detectors.vfnet import VFNet -from otx.algorithms.common.utils.logger import get_logger from otx.algorithms.common.utils.task_adapt import map_class_names from otx.algorithms.detection.adapters.mmdet.models.detectors.loss_dynamics_mixin import DetLossDynamicsTrackingMixin from otx.algorithms.detection.adapters.mmdet.models.loss_dyns import TrackingLossType +from otx.utils.logger import get_logger from .l2sp_detector_mixin import L2SPDetectorMixin from .sam_detector_mixin import SAMDetectorMixin diff --git a/src/otx/algorithms/detection/adapters/mmdet/models/detectors/custom_yolox_detector.py b/src/otx/algorithms/detection/adapters/mmdet/models/detectors/custom_yolox_detector.py index 114e6b3c065..b53cf2777db 100644 --- a/src/otx/algorithms/detection/adapters/mmdet/models/detectors/custom_yolox_detector.py +++ b/src/otx/algorithms/detection/adapters/mmdet/models/detectors/custom_yolox_detector.py @@ -13,7 +13,6 @@ FeatureVectorHook, ) from otx.algorithms.common.adapters.mmdeploy.utils import is_mmdeploy_enabled -from otx.algorithms.common.utils.logger import get_logger from otx.algorithms.common.utils.task_adapt import map_class_names from 
otx.algorithms.detection.adapters.mmdet.hooks.det_class_probability_map_hook import ( DetClassProbabilityMapHook, @@ -22,6 +21,7 @@ DetLossDynamicsTrackingMixin, ) from otx.algorithms.detection.adapters.mmdet.models.loss_dyns import TrackingLossType +from otx.utils.logger import get_logger from .l2sp_detector_mixin import L2SPDetectorMixin from .sam_detector_mixin import SAMDetectorMixin diff --git a/src/otx/algorithms/detection/adapters/mmdet/models/detectors/loss_dynamics_mixin.py b/src/otx/algorithms/detection/adapters/mmdet/models/detectors/loss_dynamics_mixin.py index 5463409cfe4..8f3fe5ddfd0 100644 --- a/src/otx/algorithms/detection/adapters/mmdet/models/detectors/loss_dynamics_mixin.py +++ b/src/otx/algorithms/detection/adapters/mmdet/models/detectors/loss_dynamics_mixin.py @@ -10,7 +10,6 @@ import numpy as np import pandas as pd -from otx.algorithms.common.utils.logger import get_logger from otx.algorithms.detection.adapters.mmdet.models.loss_dyns import TrackingLossType from otx.api.entities.dataset_item import DatasetItemEntityWithID from otx.api.entities.datasets import DatasetEntity @@ -19,6 +18,7 @@ LossDynamicsTracker, LossDynamicsTrackingMixin, ) +from otx.utils.logger import get_logger logger = get_logger() diff --git a/src/otx/algorithms/detection/adapters/mmdet/models/detectors/mean_teacher.py b/src/otx/algorithms/detection/adapters/mmdet/models/detectors/mean_teacher.py index 05c112a3db4..ac8f99e5240 100644 --- a/src/otx/algorithms/detection/adapters/mmdet/models/detectors/mean_teacher.py +++ b/src/otx/algorithms/detection/adapters/mmdet/models/detectors/mean_teacher.py @@ -15,7 +15,7 @@ from mmdet.models import DETECTORS, build_detector from mmdet.models.detectors import BaseDetector -from otx.algorithms.common.utils.logger import get_logger +from otx.utils.logger import get_logger from .sam_detector_mixin import SAMDetectorMixin diff --git a/src/otx/algorithms/detection/adapters/mmdet/nncf/builder.py b/src/otx/algorithms/detection/adapters/mmdet/nncf/builder.py index d520580c46a..9befb61cbea 100644 --- a/src/otx/algorithms/detection/adapters/mmdet/nncf/builder.py +++ b/src/otx/algorithms/detection/adapters/mmdet/nncf/builder.py @@ -10,7 +10,6 @@ from mmcv.parallel import DataContainer from mmcv.runner import CheckpointLoader from mmcv.utils import Config, ConfigDict -from mmdet.utils import get_root_logger from otx.algorithms.common.adapters.mmcv.nncf.runners import NNCF_META_KEY from otx.algorithms.common.adapters.mmcv.utils import ( @@ -21,8 +20,9 @@ from otx.algorithms.common.adapters.nncf.compression import NNCFMetaState from otx.algorithms.common.adapters.nncf.utils import no_nncf_trace from otx.algorithms.detection.adapters.mmdet.utils import build_detector +from otx.utils.logger import get_logger -logger = get_root_logger() +logger = get_logger() def build_nncf_detector( # pylint: disable=too-many-locals,too-many-statements diff --git a/src/otx/algorithms/detection/adapters/mmdet/nncf/task.py b/src/otx/algorithms/detection/adapters/mmdet/nncf/task.py index 01b912d3791..265e0d5ac36 100644 --- a/src/otx/algorithms/detection/adapters/mmdet/nncf/task.py +++ b/src/otx/algorithms/detection/adapters/mmdet/nncf/task.py @@ -8,7 +8,6 @@ import otx.algorithms.detection.adapters.mmdet.nncf.patches # noqa: F401 # pylint: disable=unused-import from otx.algorithms.common.tasks.nncf_task import NNCFBaseTask -from otx.algorithms.common.utils.logger import get_logger from otx.algorithms.detection.adapters.mmdet.nncf import build_nncf_detector from 
otx.algorithms.detection.adapters.mmdet.task import MMDetectionTask from otx.algorithms.detection.adapters.mmdet.utils.config_utils import ( @@ -22,6 +21,7 @@ from otx.api.entities.subset import Subset from otx.api.entities.task_environment import TaskEnvironment from otx.api.usecases.evaluation.metrics_helper import MetricsHelper +from otx.utils.logger import get_logger logger = get_logger() diff --git a/src/otx/algorithms/detection/adapters/mmdet/task.py b/src/otx/algorithms/detection/adapters/mmdet/task.py index 53c00ceaf32..247775c6de4 100644 --- a/src/otx/algorithms/detection/adapters/mmdet/task.py +++ b/src/otx/algorithms/detection/adapters/mmdet/task.py @@ -42,7 +42,6 @@ from otx.algorithms.common.configs.training_base import TrainType from otx.algorithms.common.tasks.nncf_task import NNCFBaseTask from otx.algorithms.common.utils.data import get_dataset -from otx.algorithms.common.utils.logger import get_logger from otx.algorithms.detection.adapters.mmdet.apis.train import ( monkey_patched_nms, monkey_patched_roi_align, @@ -82,6 +81,7 @@ from otx.api.entities.task_environment import TaskEnvironment from otx.api.serialization.label_mapper import label_schema_to_bytes from otx.api.usecases.tasks.interfaces.export_interface import ExportType +from otx.utils.logger import get_logger logger = get_logger() diff --git a/src/otx/algorithms/detection/adapters/mmdet/utils/builder.py b/src/otx/algorithms/detection/adapters/mmdet/utils/builder.py index 61a50ab80db..c2e1ee54db2 100644 --- a/src/otx/algorithms/detection/adapters/mmdet/utils/builder.py +++ b/src/otx/algorithms/detection/adapters/mmdet/utils/builder.py @@ -10,9 +10,9 @@ from mmcv.runner import load_checkpoint from mmcv.utils import Config, ConfigDict, get_logger -from otx.algorithms.common.utils.logger import LEVEL +from otx.utils.logger import LEVEL -logger = get_logger("mmdet") +mmdet_logger = get_logger("mmdet") def build_detector( @@ -37,9 +37,9 @@ def build_detector( model_cfg = deepcopy(config.model) model = origin_build_detector(model_cfg, train_cfg=train_cfg, test_cfg=test_cfg) - logger.setLevel("WARNING") + mmdet_logger.setLevel("WARNING") # make logger less verbose temporarily model.init_weights() - logger.setLevel(LEVEL) + mmdet_logger.setLevel(LEVEL) model = model.to(device) checkpoint = checkpoint if checkpoint else config.pop("load_from", None) diff --git a/src/otx/algorithms/detection/adapters/mmdet/utils/config_utils.py b/src/otx/algorithms/detection/adapters/mmdet/utils/config_utils.py index 6935eb65e46..3c2431adae3 100644 --- a/src/otx/algorithms/detection/adapters/mmdet/utils/config_utils.py +++ b/src/otx/algorithms/detection/adapters/mmdet/utils/config_utils.py @@ -8,7 +8,6 @@ InputSizeManager, get_configs_by_pairs, ) -from otx.algorithms.common.utils.logger import get_logger from otx.algorithms.detection.configs.base import DetectionConfig from otx.algorithms.detection.utils.data import ( adaptive_tile_params, @@ -18,6 +17,7 @@ ) from otx.api.entities.datasets import DatasetEntity, DatasetPurpose from otx.api.entities.subset import Subset +from otx.utils.logger import get_logger try: from sklearn.cluster import KMeans diff --git a/src/otx/algorithms/detection/adapters/mmdet/utils/exporter.py b/src/otx/algorithms/detection/adapters/mmdet/utils/exporter.py index 2d82c980fbf..a2c4b71c68c 100644 --- a/src/otx/algorithms/detection/adapters/mmdet/utils/exporter.py +++ b/src/otx/algorithms/detection/adapters/mmdet/utils/exporter.py @@ -9,8 +9,8 @@ from otx.algorithms.common.adapters.mmdeploy.utils.utils import (
sync_batchnorm_2_batchnorm, ) -from otx.algorithms.common.utils.logger import get_logger from otx.algorithms.detection.adapters.mmdet.utils.builder import build_detector +from otx.utils.logger import get_logger logger = get_logger() diff --git a/src/otx/algorithms/detection/adapters/openvino/task.py b/src/otx/algorithms/detection/adapters/openvino/task.py index bc624027217..a0e7eb9998c 100644 --- a/src/otx/algorithms/detection/adapters/openvino/task.py +++ b/src/otx/algorithms/detection/adapters/openvino/task.py @@ -36,7 +36,6 @@ from otx.algorithms.common.utils import OTXOpenVinoDataLoader from otx.algorithms.common.utils.ir import check_if_quantized -from otx.algorithms.common.utils.logger import get_logger from otx.algorithms.common.utils.utils import get_default_async_reqs_num from otx.algorithms.detection.adapters.openvino import model_wrappers from otx.algorithms.detection.configs.base import DetectionConfig @@ -86,6 +85,7 @@ OptimizationType, ) from otx.api.utils.dataset_utils import add_saliency_maps_to_dataset_item +from otx.utils.logger import get_logger logger = get_logger() @@ -387,6 +387,7 @@ def __init__(self, task_environment: TaskEnvironment): self.confidence_threshold: float = 0.0 self.config = self.load_config() self.inferencer = self.load_inferencer() + self._avg_time_per_image: Optional[float] = None logger.info("OpenVINO task initialization completed") @property @@ -394,6 +395,11 @@ def hparams(self): """Hparams of OpenVINO Detection Task.""" return self.task_environment.get_hyper_parameters(DetectionConfig) + @property + def avg_time_per_image(self) -> Optional[float]: + """Average inference time per image.""" + return self._avg_time_per_image + def load_config(self) -> ADDict: """Load configurable parameters from model adapter. @@ -557,7 +563,8 @@ def add_prediction(id: int, predicted_scene: AnnotationSceneEntity, aux_data: tu self.inferencer.await_all() - logger.info(f"Avg time per image: {total_time/len(dataset)} secs") + self._avg_time_per_image = total_time / len(dataset) + logger.info(f"Avg time per image: {self._avg_time_per_image} secs") logger.info(f"Total time: {total_time} secs") logger.info("OpenVINO inference completed") return dataset diff --git a/src/otx/algorithms/detection/configs/detection/configuration.yaml b/src/otx/algorithms/detection/configs/detection/configuration.yaml index d36b0d941bc..ef3a46315bc 100644 --- a/src/otx/algorithms/detection/configs/detection/configuration.yaml +++ b/src/otx/algorithms/detection/configs/detection/configuration.yaml @@ -245,11 +245,12 @@ learning_parameters: warning: null input_size: affects_outcome_of: INFERENCE - default_value: Auto + default_value: Default description: The input size of the given model could be configured to one of the predefined resolutions. Reduced training and inference time could be expected by using smaller input size. - Defaults to Auto, in which input size is automatically determined based on dataset statistics. + In Auto mode, the input size is automatically determined based on dataset statistics. + Defaults to per-model default resolution. editable: true enum_name: InputSizePreset header: Configure model input size. 
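# A minimal sketch of the input_size presets described in the configuration above,
# assuming the illustrative names resolve_input_size/dataset_median_hw; the actual
# selection logic lives in InputSizeManager and is more involved. "Default" keeps the
# per-model resolution, while "Auto" derives one from dataset statistics.
from typing import Optional, Tuple


def resolve_input_size(
    preset: str,
    model_default: Tuple[int, int],
    dataset_median_hw: Optional[Tuple[int, int]] = None,
) -> Tuple[int, int]:
    """Resolve a (height, width) resolution from the configured preset."""
    if preset == "Default":
        # New default: keep the per-model default resolution.
        return model_default
    if preset == "Auto":
        # Auto mode: derive the size from dataset statistics, e.g. the median
        # image size snapped down to a multiple of 32 (an assumption here;
        # the real heuristic is more elaborate).
        assert dataset_median_hw is not None
        height, width = ((side // 32) * 32 for side in dataset_median_hw)
        return max(32, height), max(32, width)
    # Explicit presets such as "512x512" parse directly.
    height, width = (int(v) for v in preset.split("x"))
    return height, width


print(resolve_input_size("Default", (736, 992)))           # -> (736, 992)
print(resolve_input_size("Auto", (736, 992), (623, 871)))  # -> (608, 864)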
diff --git a/src/otx/algorithms/detection/configs/detection/cspdarknet_yolox_l/template.yaml b/src/otx/algorithms/detection/configs/detection/cspdarknet_yolox_l/template.yaml index 83a90ecfcca..f06013bba10 100644 --- a/src/otx/algorithms/detection/configs/detection/cspdarknet_yolox_l/template.yaml +++ b/src/otx/algorithms/detection/configs/detection/cspdarknet_yolox_l/template.yaml @@ -14,7 +14,6 @@ framework: OTXDetection v2.9.1 entrypoints: base: otx.algorithms.detection.adapters.mmdet.task.MMDetectionTask openvino: otx.algorithms.detection.adapters.openvino.task.OpenVINODetectionTask - nncf: otx.algorithms.detection.adapters.mmdet.nncf.task.DetectionNNCFTask # Capabilities. capabilities: diff --git a/src/otx/algorithms/detection/configs/detection/cspdarknet_yolox_s/template.yaml b/src/otx/algorithms/detection/configs/detection/cspdarknet_yolox_s/template.yaml index 0c94a081ce3..335b07f8099 100644 --- a/src/otx/algorithms/detection/configs/detection/cspdarknet_yolox_s/template.yaml +++ b/src/otx/algorithms/detection/configs/detection/cspdarknet_yolox_s/template.yaml @@ -14,7 +14,6 @@ framework: OTXDetection v2.9.1 entrypoints: base: otx.algorithms.detection.adapters.mmdet.task.MMDetectionTask openvino: otx.algorithms.detection.adapters.openvino.task.OpenVINODetectionTask - nncf: otx.algorithms.detection.adapters.mmdet.nncf.task.DetectionNNCFTask # Capabilities. capabilities: diff --git a/src/otx/algorithms/detection/configs/detection/cspdarknet_yolox_x/template.yaml b/src/otx/algorithms/detection/configs/detection/cspdarknet_yolox_x/template.yaml index 50e07835a96..1fdf665d533 100644 --- a/src/otx/algorithms/detection/configs/detection/cspdarknet_yolox_x/template.yaml +++ b/src/otx/algorithms/detection/configs/detection/cspdarknet_yolox_x/template.yaml @@ -14,7 +14,6 @@ framework: OTXDetection v2.9.1 entrypoints: base: otx.algorithms.detection.adapters.mmdet.task.MMDetectionTask openvino: otx.algorithms.detection.adapters.openvino.task.OpenVINODetectionTask - nncf: otx.algorithms.detection.adapters.mmdet.nncf.task.DetectionNNCFTask # Capabilities. capabilities: diff --git a/src/otx/algorithms/detection/configs/detection/resnext101_atss/template.yaml b/src/otx/algorithms/detection/configs/detection/resnext101_atss/template.yaml index 27fe398fd1b..79308f5388a 100644 --- a/src/otx/algorithms/detection/configs/detection/resnext101_atss/template.yaml +++ b/src/otx/algorithms/detection/configs/detection/resnext101_atss/template.yaml @@ -14,7 +14,6 @@ framework: OTXDetection v2.9.1 entrypoints: base: otx.algorithms.detection.adapters.mmdet.task.MMDetectionTask openvino: otx.algorithms.detection.adapters.openvino.task.OpenVINODetectionTask - nncf: otx.algorithms.detection.adapters.mmdet.nncf.task.DetectionNNCFTask # Capabilities. capabilities: diff --git a/src/otx/algorithms/detection/configs/instance_segmentation/configuration.yaml b/src/otx/algorithms/detection/configs/instance_segmentation/configuration.yaml index f0672ae5ff8..d18107fbb33 100644 --- a/src/otx/algorithms/detection/configs/instance_segmentation/configuration.yaml +++ b/src/otx/algorithms/detection/configs/instance_segmentation/configuration.yaml @@ -245,11 +245,12 @@ learning_parameters: warning: null input_size: affects_outcome_of: INFERENCE - default_value: Auto + default_value: Default description: The input size of the given model could be configured to one of the predefined resolutions. Reduced training and inference time could be expected by using smaller input size. 
- Defaults to Auto, in which input size is automatically determined based on dataset statistics. + In Auto mode, the input size is automatically determined based on dataset statistics. + Defaults to per-model default resolution. editable: true enum_name: InputSizePreset header: Configure model input size. diff --git a/src/otx/algorithms/detection/configs/instance_segmentation/convnext_maskrcnn/template_experimental.yaml b/src/otx/algorithms/detection/configs/instance_segmentation/convnext_maskrcnn/template_experimental.yaml index 6baef6921d3..b12d8f7e5fb 100644 --- a/src/otx/algorithms/detection/configs/instance_segmentation/convnext_maskrcnn/template_experimental.yaml +++ b/src/otx/algorithms/detection/configs/instance_segmentation/convnext_maskrcnn/template_experimental.yaml @@ -14,7 +14,6 @@ framework: OTXDetection v2.9.1 entrypoints: base: otx.algorithms.detection.adapters.mmdet.task.MMDetectionTask openvino: otx.algorithms.detection.adapters.openvino.task.OpenVINODetectionTask - nncf: otx.algorithms.detection.adapters.mmdet.nncf.task.DetectionNNCFTask # Capabilities. capabilities: @@ -29,7 +28,7 @@ hyper_parameters: default_value: 2 auto_hpo_state: POSSIBLE inference_batch_size: - default_value: 2 + default_value: 1 learning_rate: default_value: 0.001 auto_hpo_state: POSSIBLE diff --git a/src/otx/algorithms/detection/configs/instance_segmentation/efficientnetb2b_maskrcnn/template.yaml b/src/otx/algorithms/detection/configs/instance_segmentation/efficientnetb2b_maskrcnn/template.yaml index 272a648c551..f825fbac61d 100644 --- a/src/otx/algorithms/detection/configs/instance_segmentation/efficientnetb2b_maskrcnn/template.yaml +++ b/src/otx/algorithms/detection/configs/instance_segmentation/efficientnetb2b_maskrcnn/template.yaml @@ -29,7 +29,7 @@ hyper_parameters: default_value: 4 auto_hpo_state: POSSIBLE inference_batch_size: - default_value: 4 + default_value: 1 learning_rate: default_value: 0.015 auto_hpo_state: POSSIBLE diff --git a/src/otx/algorithms/detection/configs/instance_segmentation/maskrcnn_swin_t/template.yaml b/src/otx/algorithms/detection/configs/instance_segmentation/maskrcnn_swin_t/template.yaml index ed02d0f7e9f..61f359406e9 100644 --- a/src/otx/algorithms/detection/configs/instance_segmentation/maskrcnn_swin_t/template.yaml +++ b/src/otx/algorithms/detection/configs/instance_segmentation/maskrcnn_swin_t/template.yaml @@ -14,7 +14,6 @@ framework: OTXDetection v2.9.1 entrypoints: base: otx.algorithms.detection.adapters.mmdet.task.MMDetectionTask openvino: otx.algorithms.detection.adapters.openvino.task.OpenVINODetectionTask - nncf: otx.algorithms.detection.adapters.mmdet.nncf.task.DetectionNNCFTask # Capabilities. capabilities: diff --git a/src/otx/algorithms/detection/configs/rotated_detection/configuration.yaml b/src/otx/algorithms/detection/configs/rotated_detection/configuration.yaml index 52c232aaa7f..eb2cecbb289 100644 --- a/src/otx/algorithms/detection/configs/rotated_detection/configuration.yaml +++ b/src/otx/algorithms/detection/configs/rotated_detection/configuration.yaml @@ -245,11 +245,12 @@ learning_parameters: warning: null input_size: affects_outcome_of: INFERENCE - default_value: Auto + default_value: Default description: The input size of the given model could be configured to one of the predefined resolutions. Reduced training and inference time could be expected by using smaller input size. - Defaults to Auto, in which input size is automatically determined based on dataset statistics. 
+ In Auto mode, the input size is automatically determined based on dataset statistics. + Defaults to per-model default resolution. editable: true enum_name: InputSizePreset header: Configure model input size. diff --git a/src/otx/algorithms/detection/task.py b/src/otx/algorithms/detection/task.py index a3bd184f690..db50fecbb1e 100644 --- a/src/otx/algorithms/detection/task.py +++ b/src/otx/algorithms/detection/task.py @@ -20,7 +20,6 @@ TrainingProgressCallback, ) from otx.algorithms.common.utils.ir import embed_ir_model_data -from otx.algorithms.common.utils.logger import get_logger from otx.algorithms.common.utils.utils import embed_onnx_model_data from otx.algorithms.detection.configs.base import DetectionConfig from otx.algorithms.detection.utils import create_detection_shapes, create_mask_shapes, get_det_model_api_configuration @@ -56,6 +55,7 @@ from otx.api.utils.dataset_utils import add_saliency_maps_to_dataset_item from otx.cli.utils.multi_gpu import is_multigpu_child_process from otx.core.data.caching.mem_cache_handler import MemCacheHandlerSingleton +from otx.utils.logger import get_logger logger = get_logger() diff --git a/src/otx/algorithms/detection/tools/detection_sample.py b/src/otx/algorithms/detection/tools/detection_sample.py index d55598af7d7..ace9058df0f 100644 --- a/src/otx/algorithms/detection/tools/detection_sample.py +++ b/src/otx/algorithms/detection/tools/detection_sample.py @@ -18,7 +18,6 @@ import sys import numpy as np -from mmcv.utils import get_logger from otx.algorithms.common.utils import get_task_class from otx.api.configuration.helper import create @@ -43,8 +42,9 @@ from otx.api.entities.task_environment import TaskEnvironment from otx.api.usecases.tasks.interfaces.export_interface import ExportType from otx.api.usecases.tasks.interfaces.optimization_interface import OptimizationType +from otx.utils.logger import get_logger -logger = get_logger(name="mmdet") +logger = get_logger() def parse_args(): diff --git a/src/otx/algorithms/detection/tools/detection_semisl_sample.py b/src/otx/algorithms/detection/tools/detection_semisl_sample.py index 421c5f6055f..a4ed1c1d652 100644 --- a/src/otx/algorithms/detection/tools/detection_semisl_sample.py +++ b/src/otx/algorithms/detection/tools/detection_semisl_sample.py @@ -19,7 +19,6 @@ from random import randint import numpy as np -from mmcv.utils import get_logger from otx.algorithms.common.utils import get_task_class from otx.api.configuration.helper import create @@ -45,8 +44,9 @@ from otx.api.entities.task_environment import TaskEnvironment from otx.api.usecases.tasks.interfaces.export_interface import ExportType from otx.api.usecases.tasks.interfaces.optimization_interface import OptimizationType +from otx.utils.logger import get_logger -logger = get_logger(name="mmdet") +logger = get_logger() def parse_args(): diff --git a/src/otx/algorithms/detection/tools/instance_segmentation_sample.py b/src/otx/algorithms/detection/tools/instance_segmentation_sample.py index 956ea904b7b..354bb40e673 100644 --- a/src/otx/algorithms/detection/tools/instance_segmentation_sample.py +++ b/src/otx/algorithms/detection/tools/instance_segmentation_sample.py @@ -19,7 +19,6 @@ import cv2 import numpy as np -from mmcv.utils import get_logger from otx.algorithms.common.utils import get_task_class from otx.api.configuration.helper import create @@ -44,8 +43,9 @@ from otx.api.entities.task_environment import TaskEnvironment from otx.api.usecases.tasks.interfaces.export_interface import ExportType from 
otx.api.usecases.tasks.interfaces.optimization_interface import OptimizationType +from otx.utils.logger import get_logger -logger = get_logger(name="mmdet") +logger = get_logger() # pylint: disable=too-many-locals, too-many-statements diff --git a/src/otx/algorithms/detection/utils/data.py b/src/otx/algorithms/detection/utils/data.py index 5830e067ea1..3dcd7a741af 100644 --- a/src/otx/algorithms/detection/utils/data.py +++ b/src/otx/algorithms/detection/utils/data.py @@ -10,7 +10,6 @@ from mmdet.datasets.api_wrappers.coco_api import COCO from otx.algorithms.common.utils.data import compute_robust_dataset_statistics -from otx.algorithms.common.utils.logger import get_logger from otx.algorithms.detection.configs.base.configuration import DetectionConfig from otx.api.entities.annotation import ( Annotation, @@ -27,6 +26,7 @@ from otx.api.entities.shapes.rectangle import Rectangle from otx.api.entities.subset import Subset from otx.api.utils.shape_factory import ShapeFactory +from otx.utils.logger import get_logger logger = get_logger() diff --git a/src/otx/algorithms/detection/utils/utils.py b/src/otx/algorithms/detection/utils/utils.py index 0297b148be5..5c68ecfa31e 100644 --- a/src/otx/algorithms/detection/utils/utils.py +++ b/src/otx/algorithms/detection/utils/utils.py @@ -110,16 +110,22 @@ def get_det_model_api_configuration( """Get ModelAPI config.""" omz_config = {} all_labels = "" + all_label_ids = "" if task_type == TaskType.DETECTION: omz_config[("model_info", "model_type")] = "ssd" + omz_config[("model_info", "task_type")] = "detection" if task_type == TaskType.INSTANCE_SEGMENTATION: omz_config[("model_info", "model_type")] = "MaskRCNN" + omz_config[("model_info", "task_type")] = "instance_segmentation" all_labels = "otx_empty_lbl " + all_label_ids = "None " if tiling_parameters.enable_tiling: omz_config[("model_info", "resize_type")] = "fit_to_window_letterbox" if task_type == TaskType.ROTATED_DETECTION: - omz_config[("model_info", "model_type")] = "rotated_detection" + omz_config[("model_info", "model_type")] = "MaskRCNN" + omz_config[("model_info", "task_type")] = "rotated_detection" all_labels = "otx_empty_lbl " + all_label_ids = "None " if tiling_parameters.enable_tiling: omz_config[("model_info", "resize_type")] = "fit_to_window_letterbox" @@ -137,9 +143,10 @@ def get_det_model_api_configuration( for lbl in label_schema.get_labels(include_empty=False): all_labels += lbl.name.replace(" ", "_") + " " - all_labels = all_labels.strip() + all_label_ids += f"{lbl.id_} " - omz_config[("model_info", "labels")] = all_labels + omz_config[("model_info", "labels")] = all_labels.strip() + omz_config[("model_info", "label_ids")] = all_label_ids.strip() return omz_config diff --git a/src/otx/algorithms/segmentation/adapters/mmseg/configurer.py b/src/otx/algorithms/segmentation/adapters/mmseg/configurer.py index b1c45dfab6c..4a28eb80a5f 100644 --- a/src/otx/algorithms/segmentation/adapters/mmseg/configurer.py +++ b/src/otx/algorithms/segmentation/adapters/mmseg/configurer.py @@ -19,8 +19,8 @@ remove_custom_hook, ) from otx.algorithms.common.utils import append_dist_rank_suffix -from otx.algorithms.common.utils.logger import get_logger from otx.algorithms.segmentation.adapters.mmseg.models.heads import otx_head_factory +from otx.utils.logger import get_logger logger = get_logger() diff --git a/src/otx/algorithms/segmentation/adapters/mmseg/models/backbones/litehrnet.py b/src/otx/algorithms/segmentation/adapters/mmseg/models/backbones/litehrnet.py index 70a27f390c8..7faeaafc518 100644 --- 
a/src/otx/algorithms/segmentation/adapters/mmseg/models/backbones/litehrnet.py +++ b/src/otx/algorithms/segmentation/adapters/mmseg/models/backbones/litehrnet.py @@ -30,7 +30,6 @@ from mmcv.utils.parrots_wrapper import _BatchNorm from mmseg.models.backbones.resnet import BasicBlock, Bottleneck from mmseg.models.builder import BACKBONES -from mmseg.utils import get_root_logger from torch import nn from otx.algorithms.segmentation.adapters.mmseg.models.utils import ( @@ -39,6 +38,9 @@ LocalAttentionModule, channel_shuffle, ) +from otx.utils.logger import get_logger + +logger = get_logger() # pylint: disable=invalid-name, too-many-lines, too-many-instance-attributes, too-many-locals, too-many-arguments @@ -1432,7 +1434,6 @@ def init_weights(self, pretrained=None): """ if isinstance(pretrained, str): - logger = get_root_logger() load_checkpoint(self, pretrained, strict=False, logger=logger) elif pretrained is None: for m in self.modules(): diff --git a/src/otx/algorithms/segmentation/adapters/mmseg/models/segmentors/detcon.py b/src/otx/algorithms/segmentation/adapters/mmseg/models/segmentors/detcon.py index 41dcc8448d5..46e119d9a9c 100644 --- a/src/otx/algorithms/segmentation/adapters/mmseg/models/segmentors/detcon.py +++ b/src/otx/algorithms/segmentation/adapters/mmseg/models/segmentors/detcon.py @@ -24,7 +24,7 @@ from mmseg.ops import resize from torch import nn -from otx.algorithms.common.utils.logger import get_logger +from otx.utils.logger import get_logger from .otx_encoder_decoder import OTXEncoderDecoder diff --git a/src/otx/algorithms/segmentation/adapters/mmseg/models/segmentors/mean_teacher_segmentor.py b/src/otx/algorithms/segmentation/adapters/mmseg/models/segmentors/mean_teacher_segmentor.py index 1107cc651be..01bafd40ec9 100644 --- a/src/otx/algorithms/segmentation/adapters/mmseg/models/segmentors/mean_teacher_segmentor.py +++ b/src/otx/algorithms/segmentation/adapters/mmseg/models/segmentors/mean_teacher_segmentor.py @@ -8,8 +8,8 @@ from mmseg.models.segmentors.base import BaseSegmentor from mmseg.ops import resize -from otx.algorithms.common.utils.logger import get_logger from otx.algorithms.segmentation.adapters.mmseg.models.heads.proto_head import ProtoNet +from otx.utils.logger import get_logger logger = get_logger() diff --git a/src/otx/algorithms/segmentation/adapters/mmseg/models/segmentors/otx_encoder_decoder.py b/src/otx/algorithms/segmentation/adapters/mmseg/models/segmentors/otx_encoder_decoder.py index 8b7766f29a4..14b5cfa6bad 100644 --- a/src/otx/algorithms/segmentation/adapters/mmseg/models/segmentors/otx_encoder_decoder.py +++ b/src/otx/algorithms/segmentation/adapters/mmseg/models/segmentors/otx_encoder_decoder.py @@ -8,10 +8,12 @@ import torch from mmseg.models import SEGMENTORS from mmseg.models.segmentors.encoder_decoder import EncoderDecoder -from mmseg.utils import get_root_logger from otx.algorithms.common.adapters.mmdeploy.utils import is_mmdeploy_enabled from otx.algorithms.common.utils.task_adapt import map_class_names +from otx.utils.logger import get_logger + +logger = get_logger() # pylint: disable=unused-argument, line-too-long @@ -57,7 +59,6 @@ def load_state_dict_pre_hook( model, model_classes, chkpt_classes, chkpt_dict, prefix, *args, **kwargs ): # pylint: disable=too-many-locals, unused-argument """Modify input state_dict according to class name matching before weight loading.""" - logger = get_root_logger("INFO") logger.info(f"----------------- OTXEncoderDecoder.load_state_dict_pre_hook() called w/ prefix: {prefix}") # Dst to src mapping index 
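# A minimal sketch of the class-name matching idea behind the
# load_state_dict_pre_hook above: per-class rows of a checkpoint head are
# copied into the new head wherever class names match, so training can resume
# with a changed label set. The helper name and the (num_classes, C, 1, 1)
# head layout are assumptions for illustration; the real hook rewrites the
# checkpoint state_dict in place before the weights are loaded.
import torch


def remap_head_weights(model_classes, chkpt_classes, chkpt_weight):
    """Reorder per-class rows of a head tensor to match the new class list."""
    remapped = torch.zeros(len(model_classes), *chkpt_weight.shape[1:])
    for dst, name in enumerate(model_classes):
        if name in chkpt_classes:
            remapped[dst] = chkpt_weight[chkpt_classes.index(name)]
    return remapped  # rows for brand-new classes keep their zero init here


old_head = torch.randn(3, 16, 1, 1)  # head trained on ["bg", "car", "dog"]
new_head = remap_head_weights(["bg", "dog", "cat"], ["bg", "car", "dog"], old_head)
assert torch.equal(new_head[1], old_head[2])  # the "dog" row carried over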
diff --git a/src/otx/algorithms/segmentation/adapters/mmseg/nncf/builder.py b/src/otx/algorithms/segmentation/adapters/mmseg/nncf/builder.py index 3f516e6d7a9..d7c2d3ac78b 100644 --- a/src/otx/algorithms/segmentation/adapters/mmseg/nncf/builder.py +++ b/src/otx/algorithms/segmentation/adapters/mmseg/nncf/builder.py @@ -12,8 +12,6 @@ from mmcv.utils import Config, ConfigDict # pylint: disable=no-name-in-module -from mmseg.utils import get_root_logger # type: ignore - from otx.algorithms.common.adapters.mmcv.nncf.runners import NNCF_META_KEY from otx.algorithms.common.adapters.mmcv.utils import ( get_configs_by_pairs, @@ -22,8 +20,9 @@ from otx.algorithms.common.adapters.nncf import is_accuracy_aware_training_set from otx.algorithms.common.adapters.nncf.compression import NNCFMetaState from otx.algorithms.segmentation.adapters.mmseg.utils import build_segmentor +from otx.utils.logger import get_logger -logger = get_root_logger() +logger = get_logger() def build_nncf_segmentor( # noqa: C901 # pylint: disable=too-many-locals,too-many-statements diff --git a/src/otx/algorithms/segmentation/adapters/mmseg/nncf/task.py b/src/otx/algorithms/segmentation/adapters/mmseg/nncf/task.py index c4979e607d7..bab4bd206fa 100644 --- a/src/otx/algorithms/segmentation/adapters/mmseg/nncf/task.py +++ b/src/otx/algorithms/segmentation/adapters/mmseg/nncf/task.py @@ -8,7 +8,6 @@ import otx.algorithms.segmentation.adapters.mmseg.nncf.patches # noqa: F401 # pylint: disable=unused-import from otx.algorithms.common.tasks.nncf_task import NNCFBaseTask -from otx.algorithms.common.utils.logger import get_logger from otx.algorithms.segmentation.adapters.mmseg.nncf import build_nncf_segmentor from otx.algorithms.segmentation.adapters.mmseg.task import MMSegmentationTask from otx.api.entities.datasets import DatasetEntity @@ -27,6 +26,7 @@ ) from otx.api.entities.optimization_parameters import OptimizationParameters from otx.api.entities.task_environment import TaskEnvironment +from otx.utils.logger import get_logger logger = get_logger() diff --git a/src/otx/algorithms/segmentation/adapters/mmseg/task.py b/src/otx/algorithms/segmentation/adapters/mmseg/task.py index 5d60580cabc..ade39b64224 100644 --- a/src/otx/algorithms/segmentation/adapters/mmseg/task.py +++ b/src/otx/algorithms/segmentation/adapters/mmseg/task.py @@ -39,7 +39,6 @@ from otx.algorithms.common.tasks.nncf_task import NNCFBaseTask from otx.algorithms.common.utils import is_hpu_available from otx.algorithms.common.utils.data import get_dataset -from otx.algorithms.common.utils.logger import get_logger from otx.algorithms.segmentation.adapters.mmseg.apis.train import train_segmentor from otx.algorithms.segmentation.adapters.mmseg.configurer import ( IncrSegmentationConfigurer, @@ -63,6 +62,7 @@ from otx.api.entities.task_environment import TaskEnvironment from otx.api.serialization.label_mapper import label_schema_to_bytes from otx.api.usecases.tasks.interfaces.export_interface import ExportType +from otx.utils.logger import get_logger if is_hpu_available(): import habana_frameworks.torch.core as htcore diff --git a/src/otx/algorithms/segmentation/adapters/mmseg/utils/data_utils.py b/src/otx/algorithms/segmentation/adapters/mmseg/utils/data_utils.py index 3964a00af36..bdfe9ce32cd 100644 --- a/src/otx/algorithms/segmentation/adapters/mmseg/utils/data_utils.py +++ b/src/otx/algorithms/segmentation/adapters/mmseg/utils/data_utils.py @@ -25,7 +25,6 @@ from mmseg.datasets.custom import CustomDataset from skimage.segmentation import felzenszwalb -from 
otx.algorithms.common.utils.logger import get_logger from otx.api.entities.annotation import ( Annotation, AnnotationSceneEntity, @@ -38,6 +37,7 @@ from otx.api.entities.scored_label import ScoredLabel from otx.api.entities.shapes.polygon import Point, Polygon from otx.api.entities.subset import Subset +from otx.utils.logger import get_logger logger = get_logger() diff --git a/src/otx/algorithms/segmentation/adapters/mmseg/utils/exporter.py b/src/otx/algorithms/segmentation/adapters/mmseg/utils/exporter.py index 0a13e5f01f3..b7317c8b090 100644 --- a/src/otx/algorithms/segmentation/adapters/mmseg/utils/exporter.py +++ b/src/otx/algorithms/segmentation/adapters/mmseg/utils/exporter.py @@ -8,8 +8,8 @@ from otx.algorithms.common.adapters.mmcv.tasks.exporter import Exporter from otx.algorithms.common.adapters.mmdeploy.utils import sync_batchnorm_2_batchnorm -from otx.algorithms.common.utils.logger import get_logger from otx.algorithms.segmentation.adapters.mmseg.utils.builder import build_segmentor +from otx.utils.logger import get_logger logger = get_logger() diff --git a/src/otx/algorithms/segmentation/adapters/openvino/task.py b/src/otx/algorithms/segmentation/adapters/openvino/task.py index 51b27286910..0a4e9693192 100644 --- a/src/otx/algorithms/segmentation/adapters/openvino/task.py +++ b/src/otx/algorithms/segmentation/adapters/openvino/task.py @@ -34,7 +34,6 @@ from otx.algorithms.common.utils import OTXOpenVinoDataLoader, get_default_async_reqs_num, read_py_config from otx.algorithms.common.utils.ir import check_if_quantized -from otx.algorithms.common.utils.logger import get_logger from otx.algorithms.segmentation.adapters.openvino import model_wrappers from otx.algorithms.segmentation.configs.base import SegmentationConfig from otx.algorithms.segmentation.utils import get_activation_map @@ -72,6 +71,7 @@ IOptimizationTask, OptimizationType, ) +from otx.utils.logger import get_logger logger = get_logger() @@ -162,6 +162,7 @@ def __init__(self, task_environment: TaskEnvironment): self.model = self.task_environment.model self.model_name = self.task_environment.model_template.model_template_id self.inferencer = self.load_inferencer() + self._avg_time_per_image: Optional[float] = None labels = task_environment.get_labels(include_empty=False) self._label_dictionary = dict(enumerate(labels, 1)) @@ -173,6 +174,11 @@ def hparams(self): """Hparams of OpenVINO Segmentation Task.""" return self.task_environment.get_hyper_parameters(SegmentationConfig) + @property + def avg_time_per_image(self) -> Optional[float]: + """Average inference time per image.""" + return self._avg_time_per_image + def load_inferencer(self) -> OpenVINOSegmentationInferencer: """load_inferencer function of OpenVINO Segmentation Task.""" if self.model is None: @@ -248,7 +254,8 @@ def add_prediction( self.inferencer.await_all() - logger.info(f"Avg time per image: {total_time/len(dataset)} secs") + self._avg_time_per_image = total_time / len(dataset) + logger.info(f"Avg time per image: {self._avg_time_per_image} secs") logger.info(f"Total time: {total_time} secs") logger.info("Segmentation OpenVINO inference completed") diff --git a/src/otx/algorithms/segmentation/configs/ham_segnext_b/pot_optimization_config.json b/src/otx/algorithms/segmentation/configs/ham_segnext_b/pot_optimization_config.json deleted file mode 100644 index f9b9b854e30..00000000000 --- a/src/otx/algorithms/segmentation/configs/ham_segnext_b/pot_optimization_config.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "algorithms": [ - { - "name": 
"DefaultQuantization", - "params": { - "preset": "mixed", - "target_device": "ANY", - "range_estimator": { - "preset": "quantile" - } - } - } - ] -} diff --git a/src/otx/algorithms/segmentation/configs/ham_segnext_b/template.yaml b/src/otx/algorithms/segmentation/configs/ham_segnext_b/template.yaml index bb0cf4a7e85..50ec187dd37 100644 --- a/src/otx/algorithms/segmentation/configs/ham_segnext_b/template.yaml +++ b/src/otx/algorithms/segmentation/configs/ham_segnext_b/template.yaml @@ -14,7 +14,6 @@ framework: OTXSegmentation v0.14.0 entrypoints: base: otx.algorithms.segmentation.adapters.mmseg.task.MMSegmentationTask openvino: otx.algorithms.segmentation.adapters.openvino.task.OpenVINOSegmentationTask - nncf: otx.algorithms.segmentation.adapters.mmseg.nncf.task.SegmentationNNCFTask # Capabilities. capabilities: diff --git a/src/otx/algorithms/segmentation/configs/ham_segnext_s/pot_optimization_config.json b/src/otx/algorithms/segmentation/configs/ham_segnext_s/pot_optimization_config.json deleted file mode 100644 index f9b9b854e30..00000000000 --- a/src/otx/algorithms/segmentation/configs/ham_segnext_s/pot_optimization_config.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "algorithms": [ - { - "name": "DefaultQuantization", - "params": { - "preset": "mixed", - "target_device": "ANY", - "range_estimator": { - "preset": "quantile" - } - } - } - ] -} diff --git a/src/otx/algorithms/segmentation/configs/ham_segnext_s/template.yaml b/src/otx/algorithms/segmentation/configs/ham_segnext_s/template.yaml index d0ca51655cf..f28a01c3464 100644 --- a/src/otx/algorithms/segmentation/configs/ham_segnext_s/template.yaml +++ b/src/otx/algorithms/segmentation/configs/ham_segnext_s/template.yaml @@ -14,7 +14,6 @@ framework: OTXSegmentation v0.14.0 entrypoints: base: otx.algorithms.segmentation.adapters.mmseg.task.MMSegmentationTask openvino: otx.algorithms.segmentation.adapters.openvino.task.OpenVINOSegmentationTask - nncf: otx.algorithms.segmentation.adapters.mmseg.nncf.task.SegmentationNNCFTask # Capabilities. capabilities: diff --git a/src/otx/algorithms/segmentation/configs/ham_segnext_t/pot_optimization_config.json b/src/otx/algorithms/segmentation/configs/ham_segnext_t/pot_optimization_config.json deleted file mode 100644 index f9b9b854e30..00000000000 --- a/src/otx/algorithms/segmentation/configs/ham_segnext_t/pot_optimization_config.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "algorithms": [ - { - "name": "DefaultQuantization", - "params": { - "preset": "mixed", - "target_device": "ANY", - "range_estimator": { - "preset": "quantile" - } - } - } - ] -} diff --git a/src/otx/algorithms/segmentation/configs/ham_segnext_t/template.yaml b/src/otx/algorithms/segmentation/configs/ham_segnext_t/template.yaml index c2403b3723b..ad041eea837 100644 --- a/src/otx/algorithms/segmentation/configs/ham_segnext_t/template.yaml +++ b/src/otx/algorithms/segmentation/configs/ham_segnext_t/template.yaml @@ -14,7 +14,6 @@ framework: OTXSegmentation v0.14.0 entrypoints: base: otx.algorithms.segmentation.adapters.mmseg.task.MMSegmentationTask openvino: otx.algorithms.segmentation.adapters.openvino.task.OpenVINOSegmentationTask - nncf: otx.algorithms.segmentation.adapters.mmseg.nncf.task.SegmentationNNCFTask # Capabilities. 
capabilities: diff --git a/src/otx/algorithms/segmentation/task.py b/src/otx/algorithms/segmentation/task.py index cca2befe81d..dac8fe574cb 100644 --- a/src/otx/algorithms/segmentation/task.py +++ b/src/otx/algorithms/segmentation/task.py @@ -20,7 +20,6 @@ TrainingProgressCallback, ) from otx.algorithms.common.utils.ir import embed_ir_model_data -from otx.algorithms.common.utils.logger import get_logger from otx.algorithms.common.utils.utils import embed_onnx_model_data from otx.algorithms.segmentation.configs.base import SegmentationConfig from otx.algorithms.segmentation.utils import get_activation_map @@ -61,6 +60,7 @@ ) from otx.cli.utils.multi_gpu import is_multigpu_child_process from otx.core.data.caching.mem_cache_handler import MemCacheHandlerSingleton +from otx.utils.logger import get_logger logger = get_logger() RECIPE_TRAIN_TYPE = { diff --git a/src/otx/algorithms/segmentation/tools/segmentation_sample.py b/src/otx/algorithms/segmentation/tools/segmentation_sample.py index ce1ed269286..0f5bb730939 100644 --- a/src/otx/algorithms/segmentation/tools/segmentation_sample.py +++ b/src/otx/algorithms/segmentation/tools/segmentation_sample.py @@ -19,7 +19,6 @@ import cv2 import numpy as np -from mmcv.utils import get_logger from otx.algorithms.common.utils import get_task_class from otx.api.configuration.helper import create @@ -44,8 +43,9 @@ from otx.api.entities.task_environment import TaskEnvironment from otx.api.usecases.tasks.interfaces.export_interface import ExportType from otx.api.usecases.tasks.interfaces.optimization_interface import OptimizationType +from otx.utils.logger import get_logger -logger = get_logger(name="mmseg") +logger = get_logger() def parse_args(): diff --git a/src/otx/algorithms/segmentation/utils/metadata.py b/src/otx/algorithms/segmentation/utils/metadata.py index f234c3ad65e..0245d3de03c 100644 --- a/src/otx/algorithms/segmentation/utils/metadata.py +++ b/src/otx/algorithms/segmentation/utils/metadata.py @@ -12,14 +12,17 @@ def get_seg_model_api_configuration(label_schema: LabelSchemaEntity, hyperparams: ConfigDict): """Get ModelAPI config.""" all_labels = "" + all_label_ids = "" for lbl in label_schema.get_labels(include_empty=False): all_labels += lbl.name.replace(" ", "_") + " " - all_labels = all_labels.strip() + all_label_ids += f"{lbl.id_} " return { ("model_info", "model_type"): "Segmentation", ("model_info", "soft_threshold"): str(hyperparams.postprocessing.soft_threshold), ("model_info", "blur_strength"): str(hyperparams.postprocessing.blur_strength), - ("model_info", "labels"): all_labels, ("model_info", "return_soft_prediction"): "True", + ("model_info", "labels"): all_labels.strip(), + ("model_info", "label_ids"): all_label_ids.strip(), + ("model_info", "task_type"): "segmentation", } diff --git a/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/config/visual_prompting_config.py b/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/config/visual_prompting_config.py index e3382f25526..6a212c9cbb8 100644 --- a/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/config/visual_prompting_config.py +++ b/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/config/visual_prompting_config.py @@ -20,8 +20,8 @@ from omegaconf import DictConfig, ListConfig, OmegaConf -from otx.algorithms.common.utils.logger import get_logger from otx.api.configuration.configurable_parameters import ConfigurableParameters +from otx.utils.logger import get_logger logger = get_logger() diff --git 
a/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/datasets/dataset.py b/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/datasets/dataset.py index 6b527cf6d22..9f79eeda019 100644 --- a/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/datasets/dataset.py +++ b/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/datasets/dataset.py @@ -24,7 +24,6 @@ from torch.utils.data import DataLoader, Dataset from torchvision import transforms -from otx.algorithms.common.utils.logger import get_logger from otx.algorithms.visual_prompting.adapters.pytorch_lightning.datasets.pipelines import ( MultipleInputsCompose, Pad, @@ -39,6 +38,7 @@ from otx.api.entities.shapes.polygon import Polygon from otx.api.entities.subset import Subset from otx.api.utils.shape_factory import ShapeFactory +from otx.utils.logger import get_logger logger = get_logger() diff --git a/src/otx/algorithms/visual_prompting/tasks/inference.py b/src/otx/algorithms/visual_prompting/tasks/inference.py index 6ff23ee9050..9358bd93242 100644 --- a/src/otx/algorithms/visual_prompting/tasks/inference.py +++ b/src/otx/algorithms/visual_prompting/tasks/inference.py @@ -32,7 +32,6 @@ from pytorch_lightning.callbacks import TQDMProgressBar from otx.algorithms.common.utils import set_random_seed -from otx.algorithms.common.utils.logger import get_logger from otx.algorithms.visual_prompting.adapters.pytorch_lightning.callbacks import ( InferenceCallback, ) @@ -62,6 +61,7 @@ from otx.api.usecases.tasks.interfaces.export_interface import ExportType, IExportTask from otx.api.usecases.tasks.interfaces.inference_interface import IInferenceTask from otx.api.usecases.tasks.interfaces.unload_interface import IUnload +from otx.utils.logger import get_logger logger = get_logger() diff --git a/src/otx/algorithms/visual_prompting/tasks/openvino.py b/src/otx/algorithms/visual_prompting/tasks/openvino.py index de244698837..fe499300970 100644 --- a/src/otx/algorithms/visual_prompting/tasks/openvino.py +++ b/src/otx/algorithms/visual_prompting/tasks/openvino.py @@ -35,7 +35,6 @@ from otx.algorithms.common.utils import get_default_async_reqs_num, read_py_config from otx.algorithms.common.utils.ir import check_if_quantized -from otx.algorithms.common.utils.logger import get_logger from otx.algorithms.visual_prompting.adapters.openvino import model_wrappers from otx.algorithms.visual_prompting.adapters.pytorch_lightning.datasets.dataset import ( OTXVisualPromptingDataset, @@ -76,6 +75,7 @@ IOptimizationTask, OptimizationType, ) +from otx.utils.logger import get_logger logger = get_logger() @@ -258,6 +258,7 @@ def __init__(self, task_environment: TaskEnvironment) -> None: self.model = self.task_environment.model self.model_name = self.task_environment.model_template.model_template_id self.inferencer = self.load_inferencer() + self._avg_time_per_image: Optional[float] = None labels = task_environment.get_labels(include_empty=False) self._label_dictionary = dict(enumerate(labels, 1)) @@ -270,6 +271,11 @@ def hparams(self): """Hparams of OpenVINO Visual Prompting Task.""" return self.task_environment.get_hyper_parameters(VisualPromptingBaseConfig) + @property + def avg_time_per_image(self) -> Optional[float]: + """Average inference time per image.""" + return self._avg_time_per_image + def load_inferencer(self) -> OpenVINOVisualPromptingInferencer: """Load OpenVINO Visual Prompting Inferencer.""" if self.model is None: @@ -328,7 +334,8 @@ def add_prediction(id: int, annotations: List[Annotation]): 
self.inferencer.await_all() - logger.info(f"Avg time per image: {total_time/len(dataset)} secs") + self._avg_time_per_image = total_time / len(dataset) + logger.info(f"Avg time per image: {self._avg_time_per_image} secs") logger.info(f"Total time: {total_time} secs") logger.info("Visual Prompting OpenVINO inference completed") diff --git a/src/otx/algorithms/visual_prompting/tasks/train.py b/src/otx/algorithms/visual_prompting/tasks/train.py index 67b734a767b..344601b7b01 100644 --- a/src/otx/algorithms/visual_prompting/tasks/train.py +++ b/src/otx/algorithms/visual_prompting/tasks/train.py @@ -25,7 +25,6 @@ ) from pytorch_lightning.loggers import CSVLogger -from otx.algorithms.common.utils.logger import get_logger from otx.algorithms.visual_prompting.adapters.pytorch_lightning.datasets import ( OTXVisualPromptingDataModule, ) @@ -34,6 +33,7 @@ from otx.api.entities.model import ModelEntity from otx.api.entities.train_parameters import TrainParameters from otx.api.usecases.tasks.interfaces.training_interface import ITrainingTask +from otx.utils.logger import get_logger from .inference import InferenceTask diff --git a/src/otx/api/entities/dataset_item.py b/src/otx/api/entities/dataset_item.py index 7975a6a5436..77a0d119fe5 100644 --- a/src/otx/api/entities/dataset_item.py +++ b/src/otx/api/entities/dataset_item.py @@ -9,12 +9,12 @@ import copy from inspect import signature import itertools -import logging from threading import Lock from typing import List, Optional, Sequence, Set, Tuple, TypeVar, Union from bson import ObjectId import numpy as np +from otx.utils.logger import get_logger from otx.api.entities.annotation import Annotation, AnnotationSceneEntity from otx.api.entities.id import ID from otx.api.entities.label import LabelEntity @@ -26,7 +26,7 @@ from otx.api.entities.subset import Subset from otx.api.utils.shape_factory import ShapeFactory -logger = logging.getLogger(__name__) +logger = get_logger() T = TypeVar("T", bound="DatasetItemEntity") diff --git a/src/otx/api/entities/datasets.py b/src/otx/api/entities/datasets.py index 1a37458ebe9..aa5f82f373b 100644 --- a/src/otx/api/entities/datasets.py +++ b/src/otx/api/entities/datasets.py @@ -14,13 +14,14 @@ from bson.objectid import ObjectId +from otx.utils.logger import get_logger from otx.api.entities.annotation import AnnotationSceneEntity, AnnotationSceneKind from otx.api.entities.dataset_item import DatasetItemEntity from otx.api.entities.id import ID from otx.api.entities.label import LabelEntity from otx.api.entities.subset import Subset -logger = logging.getLogger(__name__) +logger = get_logger() class DatasetPurpose(Enum): @@ -349,8 +350,9 @@ def get_combined_subset(self, subsets: List[Subset]) -> "DatasetEntity": Returns: DatasetEntity: DatasetEntity with items matching subsets """ + to_keep = set(subsets) dataset = DatasetEntity( - items=[item for item in self._items if item.subset in set(subsets)], + items=[item for item in self if item.subset in to_keep], purpose=self.purpose, ) return dataset diff --git a/src/otx/api/entities/image.py b/src/otx/api/entities/image.py index a241a9511bb..e841820c92d 100644 --- a/src/otx/api/entities/image.py +++ b/src/otx/api/entities/image.py @@ -10,6 +10,7 @@ import cv2 import imagesize import numpy as np +from PIL import Image as PILImage from otx.api.entities.annotation import Annotation from otx.api.entities.media import IMedia2DEntity @@ -91,7 +92,12 @@ def numpy(self) -> np.ndarray: np.ndarray: NumPy representation of the image. 
""" if self.__data is None: - return cv2.cvtColor(cv2.imread(self.__file_path), cv2.COLOR_BGR2RGB) + try: + image = PILImage.open(self.__file_path) + image = np.asarray(image.convert("RGB")) + except ValueError: + image = cv2.cvtColor(cv2.imread(self.__file_path), cv2.COLOR_BGR2RGB) + return image if callable(self.__data): return self.__data() return self.__data diff --git a/src/otx/api/entities/label_schema.py b/src/otx/api/entities/label_schema.py index 2d69c5469ff..fdb3bf047fc 100644 --- a/src/otx/api/entities/label_schema.py +++ b/src/otx/api/entities/label_schema.py @@ -5,7 +5,7 @@ # import copy -import logging +from otx.utils.logger import get_logger import re from enum import Enum from typing import Dict, List, Optional, Sequence, Union @@ -18,7 +18,7 @@ from otx.api.entities.label import LabelEntity from otx.api.entities.scored_label import ScoredLabel -logger = logging.getLogger(__name__) +logger = get_logger() def natural_sort_label_id(target: Union[ID, LabelEntity, ScoredLabel]) -> List[Union[int, str]]: diff --git a/src/otx/api/entities/metrics.py b/src/otx/api/entities/metrics.py index 2abbd3b1240..f33ba288832 100644 --- a/src/otx/api/entities/metrics.py +++ b/src/otx/api/entities/metrics.py @@ -6,15 +6,17 @@ import abc import datetime -import logging import math from enum import Enum from typing import Generic, List, Optional, Sequence, TypeVar, Union +from otx.utils.logger import get_logger import numpy as np from otx.api.utils.time_utils import now +logger = get_logger() + class MetricEntity(metaclass=abc.ABCMeta): """This interface represents a metric, which is the smallest building block for the performance statistics. @@ -370,7 +372,6 @@ def normalize(self): if not np.all(self.__matrix_values.sum(axis=1, keepdims=True) > 0): self.__matrix_values = np.nan_to_num(self.__matrix_values) - logger = logging.getLogger(__name__) logger.warning("Replacing NaN in the matrix with zeroes since the sum of one (or more) row(s) was zero.") def __repr__(self): diff --git a/src/otx/api/usecases/evaluation/accuracy.py b/src/otx/api/usecases/evaluation/accuracy.py index 25797bc3fa5..2344a7d3cfa 100644 --- a/src/otx/api/usecases/evaluation/accuracy.py +++ b/src/otx/api/usecases/evaluation/accuracy.py @@ -6,12 +6,12 @@ import copy -import logging from typing import List, Set, Tuple import numpy as np from sklearn.metrics import confusion_matrix as sklearn_confusion_matrix +from otx.utils.logger import get_logger from otx.api.entities.dataset_item import DatasetItemEntity from otx.api.entities.datasets import DatasetEntity from otx.api.entities.label import LabelEntity @@ -37,7 +37,7 @@ IPerformanceProvider, ) -logger = logging.getLogger(__name__) +logger = get_logger() class Accuracy(IPerformanceProvider): diff --git a/src/otx/api/usecases/evaluation/f_measure.py b/src/otx/api/usecases/evaluation/f_measure.py index b8f07522020..cc845ef7609 100644 --- a/src/otx/api/usecases/evaluation/f_measure.py +++ b/src/otx/api/usecases/evaluation/f_measure.py @@ -3,8 +3,8 @@ # SPDX-License-Identifier: Apache-2.0 # -import logging from typing import Dict, List, Optional, Tuple +from otx.utils.logger import get_logger import numpy as np @@ -32,7 +32,7 @@ ) from otx.api.utils.shape_factory import ShapeFactory -logger = logging.getLogger(__name__) +logger = get_logger() ALL_CLASSES_NAME = "All Classes" diff --git a/src/otx/api/usecases/exportable_code/demo/demo_package/model_container.py b/src/otx/api/usecases/exportable_code/demo/demo_package/model_container.py index 69eac7a8011..4723de654de 100644 --- 
a/src/otx/api/usecases/exportable_code/demo/demo_package/model_container.py
+++ b/src/otx/api/usecases/exportable_code/demo/demo_package/model_container.py
@@ -1,7 +1,6 @@
 """ModelContainer class used for loading the model in the model wrapper."""
 
-# Copyright (C) 2022 Intel Corporation
+# Copyright (C) 2022-2023 Intel Corporation
 # SPDX-License-Identifier: Apache-2.0
-#
 
 import importlib
 import json
@@ -17,6 +16,7 @@
 from otx.api.entities.model_template import TaskType
 from otx.api.serialization.label_mapper import LabelSchemaMapper
 from otx.api.utils.detection_utils import detection2array
+from otx.api.utils.tiler import Tiler
 
 from .utils import get_model_path, get_parameters
 
@@ -47,9 +47,24 @@ def __init__(self, model_dir: Path, device="CPU") -> None:
             self._task_type is TaskType.ROTATED_DETECTION or self._task_type is TaskType.INSTANCE_SEGMENTATION
         )
 
+        # Labels for ModelAPI wrappers can be empty, because they are unused in pre- and post-processing
        self.model_parameters = self.parameters["model_parameters"]
-        # model already contains correct labels
-        self.model_parameters.pop("labels")
+
+        if self._task_type in (
+            TaskType.ANOMALY_CLASSIFICATION,
+            TaskType.ANOMALY_DETECTION,
+            TaskType.ANOMALY_SEGMENTATION,
+        ):
+            # The anomaly tasks require non-empty labels.
+            # The modelapi_labels key is used as a workaround, since the labels key stores labels in OTX SDK format
+            self.model_parameters["labels"] = (
+                self.model_parameters.pop("modelapi_labels")
+                if "modelapi_labels" in self.model_parameters
+                else ["Normal", "Anomaly"]
+            )
+        else:
+            # model already contains correct labels
+            self.model_parameters.pop("labels")
 
         self._initialize_wrapper()
         self.core_model = Model.create_model(
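Two of the hunks earlier in this patch are easy to miss: datasets.py now builds the subset-filter set once instead of per item, and image.py tries PIL before falling back to OpenCV. Below is a standalone sketch of both behaviors; the file name and stand-in data are illustrative, not from the codebase.

import cv2
import numpy as np
from PIL import Image as PILImage

# datasets.py: hoist set construction out of the comprehension condition --
# `item.subset in set(subsets)` rebuilt the set for every item; `to_keep` is
# built once, so each membership check is O(1).
subsets = ["TRAINING", "VALIDATION"]
items = [("img_0", "TRAINING"), ("img_1", "TESTING"), ("img_2", "VALIDATION")]
to_keep = set(subsets)  # built once
print([name for name, subset in items if subset in to_keep])  # ['img_0', 'img_2']

# image.py: PIL-first load with an OpenCV BGR->RGB fallback, mirroring the
# try/except added above (a generated file stands in for a dataset image)
path = "sample.png"
cv2.imwrite(path, np.zeros((4, 4, 3), dtype=np.uint8))
try:
    image = np.asarray(PILImage.open(path).convert("RGB"))
except ValueError:
    image = cv2.cvtColor(cv2.imread(path), cv2.COLOR_BGR2RGB)
print(image.shape)  # (4, 4, 3)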
diff --git a/src/otx/api/usecases/exportable_code/prediction_to_annotation_converter.py b/src/otx/api/usecases/exportable_code/prediction_to_annotation_converter.py
index 7ea4c568b63..40d1f4beec2 100644
--- a/src/otx/api/usecases/exportable_code/prediction_to_annotation_converter.py
+++ b/src/otx/api/usecases/exportable_code/prediction_to_annotation_converter.py
@@ -371,7 +371,7 @@ def convert_to_annotation(self, predictions: AnomalyResult, metadata: Dict[str,
         """Convert predictions to OTX Annotation Scene using the metadata.
 
         Args:
-            predictions (tuple): Raw predictions from the model.
+            predictions (AnomalyResult): Raw predictions from the model.
             metadata (Dict[str, Any]): Variable containing metadata information.
 
         Returns:
diff --git a/src/otx/api/usecases/reporting/time_monitor_callback.py b/src/otx/api/usecases/reporting/time_monitor_callback.py
index 9965bc4a217..8d032a6acf1 100644
--- a/src/otx/api/usecases/reporting/time_monitor_callback.py
+++ b/src/otx/api/usecases/reporting/time_monitor_callback.py
@@ -6,8 +6,8 @@
 
 # pylint: disable=too-many-instance-attributes,too-many-arguments
 
-import logging
 import math
+from otx.utils.logger import get_logger
 import time
 from copy import deepcopy
 from typing import List
@@ -20,7 +20,7 @@
 )
 from otx.api.usecases.reporting.callback import Callback
 
-logger = logging.getLogger(__name__)
+logger = get_logger()
 
 
 class TimeMonitorCallback(Callback):
diff --git a/src/otx/api/utils/tiler.py b/src/otx/api/utils/tiler.py
new file mode 100644
index 00000000000..f645b2ace77
--- /dev/null
+++ b/src/otx/api/utils/tiler.py
@@ -0,0 +1,419 @@
+"""Tiling Module."""
+
+# Copyright (C) 2021-2022 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+#
+
+import cv2
+from itertools import product
+from typing import Dict, List, Optional, Tuple, Union
+
+import numpy as np
+from openvino.model_api.models import Model, ImageModel
+
+from openvino.model_api.models.utils import DetectionResult
+
+from otx.api.utils.async_pipeline import OTXDetectionAsyncPipeline
+from otx.api.utils.detection_utils import detection2array
+from otx.api.utils.nms import multiclass_nms
+from otx.api.utils.dataset_utils import non_linear_normalization
+
+
+class Tiler:
+    """Tile an image into (non-)overlapping patches so that large images can be processed efficiently.
+
+    Args:
+        tile_size: Tile dimension for each patch
+        overlap: Overlap ratio between adjacent tiles
+        max_number: Maximum number of predictions per image
+        detector: OpenVINO adapter model
+        classifier: Tile classifier OpenVINO adapter model
+        segm: Enable instance segmentation mask output
+        mode: Inference mode, "async" or "sync"
+        num_classes: Number of classes (needed to build Mask RCNN saliency maps from IR outputs)
+    """
+
+    def __init__(
+        self,
+        tile_size: int,
+        overlap: float,
+        max_number: int,
+        detector: Model,
+        classifier: Optional[ImageModel] = None,
+        segm: bool = False,
+        mode: str = "async",
+        num_classes: int = 0,
+    ):  # pylint: disable=too-many-arguments
+        self.tile_size = tile_size
+        self.overlap = overlap
+        self.max_number = max_number
+        self.model = detector
+        self.classifier = classifier
+        # needed to create saliency maps for IRs for Mask RCNN
+        self.num_classes = num_classes
+        self.segm = segm
+        if self.segm:
+            self.model.disable_mask_resizing()
+        if mode == "async":
+            self.async_pipeline = OTXDetectionAsyncPipeline(self.model)
+
+    def tile(self, image: np.ndarray) -> List[List[int]]:
+        """Tile an input image into overlapping (or non-overlapping) patches, prepending the full-image box.
+
+        Args:
+            image: Input image to tile.
+
+        Returns:
+            List of [x1, y1, x2, y2] tile coordinates, with the full-image box first
+        """
+        height, width = image.shape[:2]
+
+        coords = [[0, 0, width, height]]
+        for (loc_j, loc_i) in product(
+            range(0, width, int(self.tile_size * (1 - self.overlap))),
+            range(0, height, int(self.tile_size * (1 - self.overlap))),
+        ):
+            x2 = min(loc_j + self.tile_size, width)
+            y2 = min(loc_i + self.tile_size, height)
+            coords.append([loc_j, loc_i, x2, y2])
+        return coords
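As a quick sanity check of the grid arithmetic in tile() above, here is a standalone sketch (plain function, hypothetical sizes) that reproduces the stride computation and the prepended full-image box:

from itertools import product

def tile_coords(height, width, tile_size, overlap):
    stride = int(tile_size * (1 - overlap))
    coords = [[0, 0, width, height]]  # the full-image box always comes first
    for x1, y1 in product(range(0, width, stride), range(0, height, stride)):
        coords.append([x1, y1, min(x1 + tile_size, width), min(y1 + tile_size, height)])
    return coords

coords = tile_coords(height=1024, width=1024, tile_size=512, overlap=0.25)
print(len(coords))  # 10: a 3x3 grid with stride 384, plus the full image
print(coords[1])    # [0, 0, 512, 512]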
+    def filter_tiles_by_objectness(
+        self, image: np.ndarray, tile_coords: List[List[int]], confidence_threshold: float = 0.35
+    ):
+        """Filter tiles by the objectness score obtained by running the tile classifier.
+
+        Args:
+            image (np.ndarray): full size image
+            tile_coords (List[List[int]]): tile coordinates
+            confidence_threshold (float): threshold on the classifier's objectness probability
+
+        Returns:
+            keep_coords: tile coordinates to keep
+        """
+        keep_coords = []
+        for i, coord in enumerate(tile_coords):
+            tile_img = self.crop_tile(image, coord)
+            tile_dict, _ = self.model.preprocess(tile_img)
+            objectness_score = self.classifier.infer_sync(tile_dict)
+            if i == 0 or objectness_score["tile_prob"] > confidence_threshold:
+                keep_coords.append(coord)
+        return keep_coords
+
+    def predict(self, image: np.ndarray, mode: str = "async"):
+        """Predict by cropping the full image into tiles.
+
+        Args:
+            image (np.ndarray): full size image
+            mode (str): "async" or "sync" inference mode
+
+        Returns:
+            detection: prediction results
+            features: saliency map and feature vector
+        """
+        tile_coords = self.tile(image)
+        if self.classifier is not None:
+            tile_coords = self.filter_tiles_by_objectness(image, tile_coords)
+
+        if mode == "sync":
+            return self.predict_sync(image, tile_coords)
+        return self.predict_async(image, tile_coords)
+
+    def predict_sync(self, image: np.ndarray, tile_coords: List[List[int]]):
+        """Predict by cropping the full image into tiles synchronously.
+
+        Args:
+            image (np.ndarray): full size image
+            tile_coords (List[List[int]]): tile coordinates
+
+        Returns:
+            detection: prediction results
+            features: saliency map and feature vector
+        """
+        features = []
+        tile_results = []
+
+        for coord in tile_coords:
+            tile_img = self.crop_tile(image, coord)
+            tile_dict, tile_meta = self.model.preprocess(tile_img)
+            raw_predictions = self.model.infer_sync(tile_dict)
+            predictions = self.model.postprocess(raw_predictions, tile_meta)
+            tile_result = self.postprocess_tile(predictions, *coord[:2])
+            # cache each tile feature vector and saliency map
+            if "feature_vector" in raw_predictions or "saliency_map" in raw_predictions:
+                tile_meta.update({"coord": coord})
+                features.append(
+                    (
+                        (raw_predictions["feature_vector"].reshape(-1), raw_predictions["saliency_map"][0]),
+                        tile_meta,
+                    )
+                )
+
+            tile_results.append(tile_result)
+
+        merged_results = self.merge_results(tile_results, image.shape)
+        merged_features = self.merge_features(features, merged_results)
+        return merged_results, merged_features
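filter_tiles_by_objectness above keeps tile 0 (the full image) unconditionally and drops other tiles whose classifier probability falls below the threshold. A minimal sketch of that selection logic, with a dictionary standing in for the tile classifier:

def filter_tiles(coords, score_fn, threshold=0.35):
    # index 0 is the full-image box and is always kept
    return [c for i, c in enumerate(coords) if i == 0 or score_fn(c) > threshold]

scores = {(0, 0, 512, 512): 0.9, (384, 0, 896, 512): 0.1}  # stand-in scores
coords = [[0, 0, 1024, 1024], [0, 0, 512, 512], [384, 0, 896, 512]]
print(filter_tiles(coords, lambda c: scores.get(tuple(c), 0.0)))
# [[0, 0, 1024, 1024], [0, 0, 512, 512]]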
+    def predict_async(self, image: np.ndarray, tile_coords: List[List[int]]):
+        """Predict by cropping the full image into tiles asynchronously.
+
+        Args:
+            image (np.ndarray): full size image
+            tile_coords (List[List[int]]): tile coordinates
+
+        Returns:
+            detection: prediction results
+            features: saliency map and feature vector
+        """
+        num_tiles = len(tile_coords)
+
+        processed_tiles = 0
+        tile_results = []
+        features = []
+        for i, coord in enumerate(tile_coords):
+            pred = self.async_pipeline.get_result(processed_tiles)
+            while pred:
+                tile_prediction, meta, feats = pred
+                if isinstance(feats[0], np.ndarray):
+                    features.append((feats, meta))
+                tile_result = self.postprocess_tile(tile_prediction, *meta["coord"][:2])
+                tile_results.append(tile_result)
+                processed_tiles += 1
+                pred = self.async_pipeline.get_result(processed_tiles)
+            self.async_pipeline.submit_data(self.crop_tile(image, coord), i, {"coord": coord, "tile_i": i})
+
+        self.async_pipeline.await_all()
+        for j in range(processed_tiles, num_tiles):
+            tile_prediction, meta, feats = self.async_pipeline.get_result(j)
+            if isinstance(feats[0], np.ndarray):
+                features.append((feats, meta))
+            tile_result = self.postprocess_tile(tile_prediction, *meta["coord"][:2])
+            tile_results.append(tile_result)
+        assert j == num_tiles - 1, "Number of tiles processed does not match number of tiles"
+        merged_results = self.merge_results(tile_results, image.shape)
+        merged_features = self.merge_features(features, merged_results)
+        return merged_results, merged_features
+
+    def postprocess_tile(self, predictions: DetectionResult, offset_x: int, offset_y: int) -> Dict[str, List]:
+        """Postprocess a single tile prediction.
+
+        Args:
+            predictions (DetectionResult or Tuple): predictions from the model
+            offset_x (int): tile offset in x direction
+            offset_y (int): tile offset in y direction
+
+        Returns:
+            Dict[str, List]: postprocessed predictions - bboxes and masks
+        """
+        output_dict: dict = {"bboxes": [], "masks": []}
+        if self.segm:
+            tile_scores, tile_labels, tile_boxes, tile_masks = predictions
+            tile_boxes += np.tile([offset_x, offset_y], 2)
+            out = np.concatenate(
+                (
+                    tile_labels[:, np.newaxis],
+                    tile_scores[:, np.newaxis],
+                    tile_boxes,
+                ),
+                -1,
+            )
+            output_dict["masks"] = tile_masks
+        else:
+            assert isinstance(predictions.objects, list)
+            out = detection2array(predictions.objects)
+            out[:, 2:] += np.tile([offset_x, offset_y], 2)
+        output_dict["bboxes"] = out
+        return output_dict
+
+    def crop_tile(self, image: np.ndarray, coord: List[int]) -> np.ndarray:
+        """Crop a tile from the full image.
+
+        Args:
+            image (np.ndarray): full-res image
+            coord (List): tile coordinates
+
+        Returns:
+            np.ndarray: cropped tile
+        """
+        x1, y1, x2, y2 = coord
+        return image[y1:y2, x1:x2]
+
+    @staticmethod
+    def detection2tuple(detections: np.ndarray):
+        """Convert a detection array into (scores, labels, boxes).
+
+        Args:
+            detections (np.ndarray): prediction results in numpy array
+
+        Returns:
+            scores (np.ndarray): scores between 0-1
+            labels (np.ndarray): label indices
+            boxes (np.ndarray): boxes
+        """
+        labels = detections[:, 0]
+        scores = detections[:, 1]
+        boxes = detections[:, 2:]
+        return scores, labels, boxes
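postprocess_tile above translates tile-local boxes into full-image coordinates by adding the tile origin to both corners; np.tile broadcasts the offset pair over [x1, y1, x2, y2]. A tiny numeric check:

import numpy as np

boxes = np.array([[10.0, 20.0, 50.0, 60.0]])  # box inside the tile
offset_x, offset_y = 384, 768                 # tile origin in the full image
boxes_global = boxes + np.tile([offset_x, offset_y], 2)
print(boxes_global)  # [[394. 788. 434. 828.]]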
+    def merge_results(self, results: List[Dict], shape: List[int]):
+        """Merge results from all tiles.
+
+        Args:
+            results (List[Dict]): list of tile results
+            shape (List[int]): original full-res image shape
+
+        Returns:
+            merged detections, as a (scores, labels, boxes, masks) tuple when masks are enabled
+        """
+
+        detections = np.empty((0, 6), dtype=np.float32)
+        masks = []
+        for result in results:
+            if len(result["bboxes"]):
+                detections = np.concatenate((detections, result["bboxes"]))
+            if self.segm:
+                masks.extend(result["masks"])
+
+        if np.prod(detections.shape):
+            detections, keep = multiclass_nms(detections, max_num=self.max_number)
+            if self.segm:
+                masks = [masks[keep_idx] for keep_idx in keep]
+                self.resize_masks(masks, detections, shape)
+                detections = *Tiler.detection2tuple(detections), masks
+        return detections
+
+    def merge_features(
+        self, features: List, predictions: Union[Tuple, np.ndarray]
+    ) -> Union[Tuple[None, None], List[np.ndarray]]:
+        """Merge tile-level feature vectors into image-level features.
+
+        Args:
+            features: tile-level features.
+            predictions: predictions with masks for the whole image.
+
+        Returns:
+            image_vector (np.ndarray): Merged feature vector for the entire image.
+            image_saliency_map (List): Merged saliency map for the entire image
+        """
+        if len(features) == 0:
+            return (None, None)
+        image_vector = self.merge_vectors(features)
+
+        (_, image_saliency_map), _ = features[0]
+        if isinstance(image_saliency_map, np.ndarray):
+            image_saliency_map = self.merge_maps(features)
+        else:
+            # saliency maps weren't returned by the hook (Mask RCNN case)
+            image_saliency_map = self.get_tiling_saliency_map_from_segm_masks(predictions)
+
+        return image_vector, image_saliency_map
+
+    def merge_vectors(self, features: List) -> np.ndarray:
+        """Merge tile-level feature vectors into a single image-level feature vector.
+
+        Args:
+            features: tile-level features.
+
+        Returns:
+            merged_vectors (np.ndarray): Merged vector for the entire image.
+        """
+        vectors = [vector for (vector, _), _ in features]
+        return np.average(vectors, axis=0)
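merge_vectors above reduces the per-tile feature vectors to one image-level descriptor by plain unweighted averaging; a two-line numeric check:

import numpy as np

tile_vectors = [np.array([1.0, 2.0]), np.array([3.0, 4.0]), np.array([5.0, 6.0])]
print(np.average(tile_vectors, axis=0))  # [3. 4.]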
+ """ + (_, image_saliency_map), image_meta = features[0] + + num_classes, feat_h, feat_w = image_saliency_map.shape + dtype = image_saliency_map[0][0].dtype + + image_h, image_w, _ = image_meta["original_shape"] + ratio = np.array([feat_h / min(self.tile_size, image_h), feat_w / min(self.tile_size, image_w)]) + + image_map_h = int(image_h * ratio[0]) + image_map_w = int(image_w * ratio[1]) + # happens because of the bug then tile_size for IR in a few times more than original image + if image_map_h == 0 or image_map_w == 0: + return [None] * num_classes + merged_map = [np.zeros((image_map_h, image_map_w)) for _ in range(num_classes)] + + for (_, saliency_map), meta in features[1:]: + x_1, y_1, x_2, y_2 = meta["coord"] + y_1, x_1 = ((y_1, x_1) * ratio).astype(np.uint16) + y_2, x_2 = ((y_2, x_2) * ratio).astype(np.uint16) + + map_h, map_w = saliency_map[0].shape + # resize feature map if it got from the tile which width and height is less the tile_size + if (map_h > y_2 - y_1 > 0) and (map_w > x_2 - x_1 > 0): + saliency_map = np.array([cv2.resize(cls_map, (x_2 - x_1, y_2 - y_1)) for cls_map in saliency_map]) + # cut the rest of the feature map that went out of the image borders + map_h, map_w = y_2 - y_1, x_2 - x_1 + + for ci, hi, wi in [(c_, h_, w_) for c_ in range(num_classes) for h_ in range(map_h) for w_ in range(map_w)]: + map_pixel = saliency_map[ci, hi, wi] + # on tile overlap add 0.5 value of each tile + if merged_map[ci][y_1 + hi, x_1 + wi] != 0: + merged_map[ci][y_1 + hi, x_1 + wi] = 0.5 * (map_pixel + merged_map[ci][y_1 + hi, x_1 + wi]) + else: + merged_map[ci][y_1 + hi, x_1 + wi] = map_pixel + + for class_idx in range(num_classes): + image_map_cls = image_saliency_map[class_idx] + # resize the feature map for whole image to add it to merged saliency maps + image_map_cls = cv2.resize(image_map_cls, (image_map_w, image_map_h)) + merged_map[class_idx] += (0.5 * image_map_cls).astype(dtype) + merged_map[class_idx] = non_linear_normalization(merged_map[class_idx]) + return merged_map + + def get_tiling_saliency_map_from_segm_masks(self, detections: Union[Tuple, np.ndarray]) -> List: + """Post process function for saliency map of OTX MaskRCNN model for tiling.""" + + # No detection case + if isinstance(detections, np.ndarray) and detections.size == 0: + return [None] + # Exportable demo case + if self.num_classes == 0: + return [None] + + classes = [int(cls) - 1 for cls in detections[1]] + saliency_maps: List = [None for _ in range(self.num_classes)] + scores = detections[0].reshape(-1, 1, 1) + masks = detections[3] + weighted_masks = masks * scores + for mask, cls in zip(weighted_masks, classes): + if saliency_maps[cls] is None: + saliency_maps[cls] = [mask] + else: + saliency_maps[cls].append(mask) + saliency_maps = self._merge_and_normalize(saliency_maps, self.num_classes) + return saliency_maps + + @staticmethod + def _merge_and_normalize(saliency_maps: List, num_classes: int) -> List: + for i in range(num_classes): + if saliency_maps[i] is not None: + # combine masks for all objects within one class + saliency_maps[i] = np.max(np.array(saliency_maps[i]), axis=0) + + for i in range(num_classes): + per_class_map = saliency_maps[i] + if per_class_map is not None: + max_values = np.max(per_class_map) + per_class_map = 255 * (per_class_map) / (max_values + 1e-12) + per_class_map = per_class_map.astype(np.uint8) + saliency_maps[i] = per_class_map + return saliency_maps + + def resize_masks(self, masks: List, dets: np.ndarray, shape: List[int]): + """Resize Masks. 
+    def resize_masks(self, masks: List, dets: np.ndarray, shape: List[int]):
+        """Resize masks to the original image resolution.
+
+        Args:
+            masks (List): list of raw np.ndarray masks
+            dets (np.ndarray): detections including labels, scores, and boxes
+            shape (List[int]): original full-res image shape
+        """
+        for i, (det, mask) in enumerate(zip(dets, masks)):
+            masks[i] = self.model.segm_postprocess(det[2:], mask, *shape[:-1])
diff --git a/src/otx/cli/manager/config_manager.py b/src/otx/cli/manager/config_manager.py
index 344bc0484fd..1143010cd33 100644
--- a/src/otx/cli/manager/config_manager.py
+++ b/src/otx/cli/manager/config_manager.py
@@ -3,7 +3,6 @@
 # Copyright (C) 2023 Intel Corporation
 # SPDX-License-Identifier: Apache-2.0
 #
-import logging
 import os
 import shutil
 from collections import defaultdict
@@ -30,6 +29,9 @@
 from otx.cli.utils.multi_gpu import is_multigpu_child_process
 from otx.cli.utils.parser import gen_param_help, gen_params_dict_from_args
 from otx.core.data.manager.dataset_manager import DatasetManager
+from otx.utils.logger import get_logger
+
+logger = get_logger()
 
 DEFAULT_MODEL_TEMPLATE_ID = {
     "CLASSIFICATION": "Custom_Image_Classification_EfficinetNet-B0",
@@ -293,7 +295,7 @@ def _check_semisl_requirements(unlabeled_dir):
         if all_unlabeled_images > 1:
             return unlabeled_dir
 
-        logging.warning(
-            "WARNING: There are none or too litle images to start Semi-SL training. "
-            "It should be more than relative threshold (at least 7% of labeled images) "
-            "Start Supervised training instead."
+        logger.warning(
+            "WARNING: There are no images, or too few, to start Semi-SL training. "
+            "The number of unlabeled images should exceed the relative threshold (at least 7% of labeled images). "
+            "Starting supervised training instead."
         )
diff --git a/src/otx/cli/tools/build.py b/src/otx/cli/tools/build.py
index 12007c8d0d5..2357534cc85 100644
--- a/src/otx/cli/tools/build.py
+++ b/src/otx/cli/tools/build.py
@@ -19,6 +19,7 @@
 from otx.cli.manager.config_manager import TASK_TYPE_TO_SUB_DIR_NAME, ConfigManager
 from otx.cli.utils.parser import get_parser_and_hprams_data
+from otx.utils.logger import config_logger
 
 SUPPORTED_TASKS = (
     "CLASSIFICATION",
@@ -101,6 +102,7 @@ def main():
     args = get_args()
 
     config_manager = ConfigManager(args, workspace_root=args.workspace, mode="build")
+    config_logger(config_manager.output_path / "otx.log", "INFO")
 
     if args.task:
         config_manager.task_type = args.task.upper()
diff --git a/src/otx/cli/tools/deploy.py b/src/otx/cli/tools/deploy.py
index 4809abff01e..a4321e0cd0f 100644
--- a/src/otx/cli/tools/deploy.py
+++ b/src/otx/cli/tools/deploy.py
@@ -25,6 +25,7 @@
 from otx.cli.utils.importing import get_impl_class
 from otx.cli.utils.io import read_label_schema, read_model
 from otx.cli.utils.parser import get_parser_and_hprams_data
+from otx.utils.logger import config_logger
 
 
 def get_args():
@@ -50,6 +51,7 @@ def main():
     # Parses input arguments.
args = get_args() config_manager = ConfigManager(args, mode="deploy") + config_logger(config_manager.output_path / "otx.log", "INFO") # Auto-Configuration for model template config_manager.configure_template() diff --git a/src/otx/cli/tools/eval.py b/src/otx/cli/tools/eval.py index 00a533510d0..2ed3b22a477 100644 --- a/src/otx/cli/tools/eval.py +++ b/src/otx/cli/tools/eval.py @@ -34,6 +34,7 @@ get_parser_and_hprams_data, ) from otx.core.data.adapter import get_dataset_adapter +from otx.utils.logger import config_logger # pylint: disable=too-many-locals @@ -94,6 +95,7 @@ def main(): args, override_param = get_args() config_manager = ConfigManager(args, workspace_root=args.workspace, mode="eval") + config_logger(config_manager.output_path / "otx.log", "INFO") # Auto-Configuration for model template config_manager.configure_template() @@ -154,11 +156,11 @@ def main(): print(resultset.performance) output_path = Path(args.output) if args.output else config_manager.output_path + performance = {resultset.performance.score.name: resultset.performance.score.value} + if hasattr(task, "avg_time_per_image"): + performance["avg_time_per_image"] = task.avg_time_per_image with open(output_path / "performance.json", "w", encoding="UTF-8") as write_file: - json.dump( - {resultset.performance.score.name: resultset.performance.score.value}, - write_file, - ) + json.dump(performance, write_file) return dict(retcode=0, template=template.name) diff --git a/src/otx/cli/tools/explain.py b/src/otx/cli/tools/explain.py index ec7735acbdf..b6e70cb9dc7 100644 --- a/src/otx/cli/tools/explain.py +++ b/src/otx/cli/tools/explain.py @@ -18,7 +18,6 @@ # Update environment variables for CLI use import otx.cli # noqa: F401 -from otx.algorithms.common.utils.logger import get_logger from otx.api.entities.explain_parameters import ExplainParameters from otx.api.entities.task_environment import TaskEnvironment from otx.cli.manager import ConfigManager @@ -36,6 +35,7 @@ get_override_param, get_parser_and_hprams_data, ) +from otx.utils.logger import config_logger, get_logger logger = get_logger() @@ -135,6 +135,7 @@ def main(): args, override_param = get_args() config_manager = ConfigManager(args, mode="explain") + config_logger(config_manager.output_path / "otx.log", "INFO") # Auto-Configuration for model template config_manager.configure_template() diff --git a/src/otx/cli/tools/export.py b/src/otx/cli/tools/export.py index 8ec6c0f92b5..019c855c9ba 100644 --- a/src/otx/cli/tools/export.py +++ b/src/otx/cli/tools/export.py @@ -27,6 +27,7 @@ from otx.cli.utils.io import read_binary, read_label_schema, save_model_data from otx.cli.utils.nncf import is_checkpoint_nncf from otx.cli.utils.parser import add_hyper_parameters_sub_parser, get_override_param, get_parser_and_hprams_data +from otx.utils.logger import config_logger def get_args(): @@ -73,6 +74,7 @@ def main(): """Main function that is used for model exporting.""" args, override_param = get_args() config_manager = ConfigManager(args, mode="export", workspace_root=args.workspace) + config_logger(config_manager.output_path / "otx.log", "INFO") # Auto-Configuration for model template config_manager.configure_template() diff --git a/src/otx/cli/tools/optimize.py b/src/otx/cli/tools/optimize.py index 312abe78e3c..c94c723243d 100644 --- a/src/otx/cli/tools/optimize.py +++ b/src/otx/cli/tools/optimize.py @@ -36,6 +36,7 @@ get_parser_and_hprams_data, ) from otx.core.data.adapter import get_dataset_adapter +from otx.utils.logger import config_logger # pylint: disable=too-many-locals @@ 
-87,6 +88,7 @@ def main(): args, override_param = get_args() config_manager = ConfigManager(args, workspace_root=args.workspace, mode="optimize") + config_logger(config_manager.output_path / "otx.log", "INFO") # Auto-Configuration for model template config_manager.configure_template() diff --git a/src/otx/cli/tools/train.py b/src/otx/cli/tools/train.py index cb7c3fb4830..dfb3fe1c4a1 100644 --- a/src/otx/cli/tools/train.py +++ b/src/otx/cli/tools/train.py @@ -44,6 +44,7 @@ ) from otx.cli.utils.report import get_otx_report from otx.core.data.adapter import get_dataset_adapter +from otx.utils.logger import config_logger def get_args(): @@ -199,6 +200,7 @@ def train(exit_stack: Optional[ExitStack] = None): # pylint: disable=too-many-b args, override_param = get_args() config_manager = ConfigManager(args, workspace_root=args.workspace, mode=mode) + config_logger(config_manager.output_path / "otx.log", "INFO") # Auto-Configuration for model template config_manager.configure_template() diff --git a/src/otx/cli/utils/experiment.py b/src/otx/cli/utils/experiment.py index cc4af013f80..591d69fdee5 100644 --- a/src/otx/cli/utils/experiment.py +++ b/src/otx/cli/utils/experiment.py @@ -3,7 +3,6 @@ # SPDX-License-Identifier: Apache-2.0 # -import logging import multiprocessing as mp import os import time @@ -15,12 +14,14 @@ import psutil import yaml +from otx.utils.logger import get_logger + try: import pynvml except ImportError: pynvml = None -logger = logging.getLogger(__name__) +logger = get_logger() GIB = 1024**3 AVAILABLE_RESOURCE_TYPE = ["cpu", "gpu"] diff --git a/src/otx/cli/utils/hpo.py b/src/otx/cli/utils/hpo.py index dbdcfb23d4a..5a0a82d50af 100644 --- a/src/otx/cli/utils/hpo.py +++ b/src/otx/cli/utils/hpo.py @@ -5,7 +5,6 @@ # import json -import logging import os import re import shutil @@ -31,8 +30,9 @@ from otx.cli.utils.io import read_model, save_model_data from otx.core.data.adapter import get_dataset_adapter from otx.hpo import HyperBand, TrialStatus, run_hpo_loop +from otx.utils.logger import get_logger -logger = logging.getLogger(__name__) +logger = get_logger() def _check_hpo_enabled_task(task_type): diff --git a/src/otx/cli/utils/multi_gpu.py b/src/otx/cli/utils/multi_gpu.py index f1ffe7774bc..834a9aa6087 100644 --- a/src/otx/cli/utils/multi_gpu.py +++ b/src/otx/cli/utils/multi_gpu.py @@ -15,7 +15,6 @@ # and limitations under the License. 
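Between the logger hunks sits a small behavioral change to the eval tool (above): performance.json now carries the task's average per-image inference time next to the score whenever the task exposes it. A runnable sketch with stand-in values:

import json

class FakeTask:  # stand-in for the real task object
    avg_time_per_image = 0.042  # seconds, as measured during inference

performance = {"Accuracy": 0.91}
task = FakeTask()
if hasattr(task, "avg_time_per_image"):
    performance["avg_time_per_image"] = task.avg_time_per_image

with open("performance.json", "w", encoding="UTF-8") as write_file:
    json.dump(performance, write_file)  # {"Accuracy": 0.91, "avg_time_per_image": 0.042}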
import datetime -import logging import os import signal import socket @@ -31,8 +30,9 @@ import torch.multiprocessing as mp from otx.api.configuration import ConfigurableParameters +from otx.utils.logger import get_logger -logger = logging.getLogger(__name__) +logger = get_logger() def _get_free_port(): diff --git a/src/otx/core/data/adapter/segmentation_dataset_adapter.py b/src/otx/core/data/adapter/segmentation_dataset_adapter.py index 4f6ff7c29ba..2ccca25e24c 100644 --- a/src/otx/core/data/adapter/segmentation_dataset_adapter.py +++ b/src/otx/core/data/adapter/segmentation_dataset_adapter.py @@ -23,7 +23,6 @@ from datumaro.util.meta_file_util import parse_meta_file from skimage.segmentation import felzenszwalb -from otx.algorithms.common.utils.logger import get_logger from otx.api.entities.annotation import Annotation from otx.api.entities.dataset_item import DatasetItemEntity from otx.api.entities.datasets import DatasetEntity @@ -31,6 +30,7 @@ from otx.api.entities.image import Image from otx.api.entities.subset import Subset from otx.core.data.adapter.base_dataset_adapter import BaseDatasetAdapter +from otx.utils.logger import get_logger # pylint: disable=invalid-name, too-many-locals, no-member, too-many-nested-blocks, too-many-branches, # pylint: too-many-arguments diff --git a/src/otx/core/data/caching/mem_cache_handler.py b/src/otx/core/data/caching/mem_cache_handler.py index b9925e88772..02138b7b481 100644 --- a/src/otx/core/data/caching/mem_cache_handler.py +++ b/src/otx/core/data/caching/mem_cache_handler.py @@ -12,7 +12,7 @@ import psutil from multiprocess.synchronize import Lock -from otx.algorithms.common.utils.logger import get_logger +from otx.utils.logger import get_logger logger = get_logger() GIB = 1024**3 diff --git a/src/otx/core/ov/graph/graph.py b/src/otx/core/ov/graph/graph.py index e6c197fdaec..e51e1a431ad 100644 --- a/src/otx/core/ov/graph/graph.py +++ b/src/otx/core/ov/graph/graph.py @@ -15,7 +15,7 @@ import networkx as nx from openvino.runtime import Model -from otx.algorithms.common.utils.logger import get_logger +from otx.utils.logger import get_logger from ..ops.op import Operation from ..ops.utils import convert_op_to_torch diff --git a/src/otx/core/ov/graph/parsers/cls/cls_base_parser.py b/src/otx/core/ov/graph/parsers/cls/cls_base_parser.py index 2e9c37c3266..0f0ceadb2ae 100644 --- a/src/otx/core/ov/graph/parsers/cls/cls_base_parser.py +++ b/src/otx/core/ov/graph/parsers/cls/cls_base_parser.py @@ -5,7 +5,7 @@ from typing import Dict, List, Optional -from otx.algorithms.common.utils.logger import get_logger +from otx.utils.logger import get_logger from ..builder import PARSERS from ..parser import parameter_parser diff --git a/src/otx/core/ov/graph/utils.py b/src/otx/core/ov/graph/utils.py index c297fd00e6d..c6bf2b69675 100644 --- a/src/otx/core/ov/graph/utils.py +++ b/src/otx/core/ov/graph/utils.py @@ -7,11 +7,11 @@ import torch -from otx.algorithms.common.utils.logger import get_logger from otx.core.ov.graph import Graph from otx.core.ov.ops.builder import OPS from otx.core.ov.ops.infrastructures import ConstantV0 from otx.core.ov.ops.op import Operation +from otx.utils.logger import get_logger # pylint: disable=too-many-locals, protected-access, too-many-branches, too-many-statements, too-many-nested-blocks logger = get_logger() diff --git a/src/otx/core/ov/models/ov_model.py b/src/otx/core/ov/models/ov_model.py index e71fea1a609..aeca7db0397 100644 --- a/src/otx/core/ov/models/ov_model.py +++ b/src/otx/core/ov/models/ov_model.py @@ -16,7 +16,7 @@ 
import torch from torch.nn import init -from otx.algorithms.common.utils.logger import get_logger +from otx.utils.logger import get_logger from ..graph import Graph from ..graph.utils import ( diff --git a/src/otx/core/ov/models/parser_mixin.py b/src/otx/core/ov/models/parser_mixin.py index 2ec165fc484..bb49e0ae36f 100644 --- a/src/otx/core/ov/models/parser_mixin.py +++ b/src/otx/core/ov/models/parser_mixin.py @@ -9,7 +9,7 @@ import openvino.runtime as ov -from otx.algorithms.common.utils.logger import get_logger +from otx.utils.logger import get_logger from ..graph.parsers.builder import PARSERS from .ov_model import OVModel diff --git a/src/otx/core/ov/ops/infrastructures.py b/src/otx/core/ov/ops/infrastructures.py index 2572ac7af01..44b39b9d120 100644 --- a/src/otx/core/ov/ops/infrastructures.py +++ b/src/otx/core/ov/ops/infrastructures.py @@ -10,7 +10,7 @@ import numpy as np import torch -from otx.algorithms.common.utils.logger import get_logger +from otx.utils.logger import get_logger from ..utils import get_op_name # type: ignore[attr-defined] from .builder import OPS diff --git a/src/otx/core/ov/ops/utils.py b/src/otx/core/ov/ops/utils.py index 25222a18830..b74d37bbb02 100644 --- a/src/otx/core/ov/ops/utils.py +++ b/src/otx/core/ov/ops/utils.py @@ -28,10 +28,6 @@ def convert_op_to_torch(op_node: Node): try: torch_module = OPS.get_by_type_version(op_type, op_version).from_ov(op_node) except Exception as e: - # logger.error(e) - # logger.error(op_type) - # logger.error(op_version) - # logger.error(op_node.get_attributes()) raise e return torch_module diff --git a/src/otx/hpo/hpo_base.py b/src/otx/hpo/hpo_base.py index dc03f5cb501..17ebc9da4be 100644 --- a/src/otx/hpo/hpo_base.py +++ b/src/otx/hpo/hpo_base.py @@ -15,7 +15,6 @@ # and limitations under the License. import json -import logging import tempfile from abc import ABC, abstractmethod from enum import IntEnum @@ -23,8 +22,9 @@ from otx.hpo.search_space import SearchSpace from otx.hpo.utils import check_mode_input, check_positive +from otx.utils.logger import get_logger -logger = logging.getLogger(__name__) +logger = get_logger() class HpoBase(ABC): diff --git a/src/otx/hpo/hpo_runner.py b/src/otx/hpo/hpo_runner.py index 27565329625..27ecc0a84a9 100644 --- a/src/otx/hpo/hpo_runner.py +++ b/src/otx/hpo/hpo_runner.py @@ -14,7 +14,6 @@ # See the License for the specific language governing permissions # and limitations under the License. -import logging import multiprocessing import os import queue @@ -28,8 +27,9 @@ from otx.hpo.hpo_base import HpoBase, Trial, TrialStatus from otx.hpo.resource_manager import get_resource_manager +from otx.utils.logger import get_logger -logger = logging.getLogger(__name__) +logger = get_logger() @dataclass diff --git a/src/otx/hpo/hyperband.py b/src/otx/hpo/hyperband.py index eea8bac0f57..49e5b5003ed 100644 --- a/src/otx/hpo/hyperband.py +++ b/src/otx/hpo/hyperband.py @@ -15,7 +15,6 @@ # and limitations under the License. 
import json -import logging import math import os from os import path as osp @@ -30,8 +29,9 @@ check_positive, left_vlaue_is_better, ) +from otx.utils.logger import get_logger -logger = logging.getLogger(__name__) +logger = get_logger() def _check_reduction_factor_value(reduction_factor: int): diff --git a/src/otx/hpo/resource_manager.py b/src/otx/hpo/resource_manager.py index c514577ab9d..a6df2d9930a 100644 --- a/src/otx/hpo/resource_manager.py +++ b/src/otx/hpo/resource_manager.py @@ -14,7 +14,6 @@ # See the License for the specific language governing permissions # and limitations under the License. -import logging import os from abc import ABC, abstractmethod from typing import Any, Dict, List, Literal, Optional @@ -22,8 +21,9 @@ import torch from otx.hpo.utils import check_positive +from otx.utils.logger import get_logger -logger = logging.getLogger(__name__) +logger = get_logger() class BaseResourceManager(ABC): diff --git a/src/otx/hpo/search_space.py b/src/otx/hpo/search_space.py index 81698b56578..acdcf657f0b 100644 --- a/src/otx/hpo/search_space.py +++ b/src/otx/hpo/search_space.py @@ -15,14 +15,14 @@ # and limitations under the License. -import logging import math import typing from typing import Any, Dict, List, Optional, Tuple, Union from otx.hpo.utils import check_positive +from otx.utils.logger import get_logger -logger = logging.getLogger(__name__) +logger = get_logger() AVAILABLE_SEARCH_SPACE_TYPE = ["uniform", "quniform", "loguniform", "qloguniform", "choice"] diff --git a/src/otx/recipes/stages/classification/incremental.yaml b/src/otx/recipes/stages/classification/incremental.yaml index 5835f4f5ba5..b40a5fdfd5b 100644 --- a/src/otx/recipes/stages/classification/incremental.yaml +++ b/src/otx/recipes/stages/classification/incremental.yaml @@ -42,10 +42,6 @@ custom_hooks: [ start: 3, min_delta_ratio: 0.01, priority: 75, - }, - { - type: AdaptiveRepeatDataHook, - priority: ABOVE_NORMAL } ] diff --git a/src/otx/algorithms/anomaly/adapters/anomalib/logger/__init__.py b/src/otx/utils/__init__.py similarity index 82% rename from src/otx/algorithms/anomaly/adapters/anomalib/logger/__init__.py rename to src/otx/utils/__init__.py index c39b63f72c3..1a7b41db1f0 100644 --- a/src/otx/algorithms/anomaly/adapters/anomalib/logger/__init__.py +++ b/src/otx/utils/__init__.py @@ -1,6 +1,6 @@ -"""Logging.""" +"""Collection of tools to run common OTX algorithms.""" -# Copyright (C) 2021 Intel Corporation +# Copyright (C) 2022 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,7 +13,3 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions # and limitations under the License. 
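Every logging hunk above, together with the module rename just below, applies the same mechanical migration. The before/after pair, with get_logger's zero-argument signature taken from the call sites in this patch:

# before: each module owned a stdlib logger in its own namespace
import logging
logger = logging.getLogger(__name__)

# after: all modules share the single OTX logger from the relocated module
from otx.utils.logger import get_logger
logger = get_logger()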
- -from .logger import get_logger - -__all__ = ["get_logger"] diff --git a/src/otx/algorithms/common/utils/logger.py b/src/otx/utils/logger.py similarity index 100% rename from src/otx/algorithms/common/utils/logger.py rename to src/otx/utils/logger.py diff --git a/tests/assets/classification_dataset_class_incremental/3/.gitignore b/tests/assets/classification_dataset_class_incremental/3/.gitignore new file mode 100644 index 00000000000..5e7d2734cfc --- /dev/null +++ b/tests/assets/classification_dataset_class_incremental/3/.gitignore @@ -0,0 +1,4 @@ +# Ignore everything in this directory +* +# Except this file +!.gitignore diff --git a/tests/assets/datumaro_h-label/annotations/train.json b/tests/assets/datumaro_h-label/annotations/train.json index dc7994026dc..f641fbb2352 100644 --- a/tests/assets/datumaro_h-label/annotations/train.json +++ b/tests/assets/datumaro_h-label/annotations/train.json @@ -3,6 +3,11 @@ "categories": { "label": { "label_groups": [ + { + "name": "shape", + "group_type": "exclusive", + "labels": ["blue", "green"] + }, { "name": "blue", "group_type": "exclusive", diff --git a/tests/assets/datumaro_h-label/annotations/valid.json b/tests/assets/datumaro_h-label/annotations/valid.json index dc7994026dc..f641fbb2352 100644 --- a/tests/assets/datumaro_h-label/annotations/valid.json +++ b/tests/assets/datumaro_h-label/annotations/valid.json @@ -3,6 +3,11 @@ "categories": { "label": { "label_groups": [ + { + "name": "shape", + "group_type": "exclusive", + "labels": ["blue", "green"] + }, { "name": "blue", "group_type": "exclusive", diff --git a/tests/assets/datumaro_h-label_class_decremental/annotations/train.json b/tests/assets/datumaro_h-label_class_decremental/annotations/train.json index be96929c774..dbd6dfa0702 100644 --- a/tests/assets/datumaro_h-label_class_decremental/annotations/train.json +++ b/tests/assets/datumaro_h-label_class_decremental/annotations/train.json @@ -3,6 +3,11 @@ "categories": { "label": { "label_groups": [ + { + "name": "shape", + "group_type": "exclusive", + "labels": ["blue", "green"] + }, { "name": "blue", "group_type": "exclusive", diff --git a/tests/assets/datumaro_h-label_class_decremental/annotations/valid.json b/tests/assets/datumaro_h-label_class_decremental/annotations/valid.json index be96929c774..dbd6dfa0702 100644 --- a/tests/assets/datumaro_h-label_class_decremental/annotations/valid.json +++ b/tests/assets/datumaro_h-label_class_decremental/annotations/valid.json @@ -3,6 +3,11 @@ "categories": { "label": { "label_groups": [ + { + "name": "shape", + "group_type": "exclusive", + "labels": ["blue", "green"] + }, { "name": "blue", "group_type": "exclusive", diff --git a/tests/e2e/cli/classification/test_classification.py b/tests/e2e/cli/classification/test_classification.py index e48385ca9bf..909858ef524 100644 --- a/tests/e2e/cli/classification/test_classification.py +++ b/tests/e2e/cli/classification/test_classification.py @@ -5,6 +5,7 @@ import copy import os +from pathlib import Path import pytest import torch @@ -223,9 +224,6 @@ def test_otx_hpo(self, template, tmp_dir_path): @pytest.mark.parametrize("template", templates, ids=templates_ids) def test_nncf_optimize(self, template, tmp_dir_path): tmp_dir_path = tmp_dir_path / "multi_class_cls" - if template.entrypoints.nncf is None: - pytest.skip("nncf entrypoint is none") - nncf_optimize_testing(template, tmp_dir_path, otx_dir, args) @e2e_pytest_component @@ -233,9 +231,6 @@ def test_nncf_optimize(self, template, tmp_dir_path): @pytest.mark.parametrize("template", templates, 
ids=templates_ids) def test_nncf_export(self, template, tmp_dir_path): tmp_dir_path = tmp_dir_path / "multi_class_cls" - if template.entrypoints.nncf is None: - pytest.skip("nncf entrypoint is none") - nncf_export_testing(template, tmp_dir_path) @e2e_pytest_component @@ -243,9 +238,6 @@ def test_nncf_export(self, template, tmp_dir_path): @pytest.mark.parametrize("template", templates, ids=templates_ids) def test_nncf_validate_fq(self, template, tmp_dir_path): tmp_dir_path = tmp_dir_path / "multi_class_cls" - if template.entrypoints.nncf is None: - pytest.skip("nncf entrypoint is none") - nncf_validate_fq_testing(template, tmp_dir_path, otx_dir, "classification", type(self).__name__) @e2e_pytest_component @@ -253,9 +245,6 @@ def test_nncf_validate_fq(self, template, tmp_dir_path): @pytest.mark.parametrize("template", templates, ids=templates_ids) def test_nncf_eval(self, template, tmp_dir_path): tmp_dir_path = tmp_dir_path / "multi_class_cls" - if template.entrypoints.nncf is None: - pytest.skip("nncf entrypoint is none") - nncf_eval_testing(template, tmp_dir_path, otx_dir, args, threshold=0.01) @e2e_pytest_component @@ -263,9 +252,6 @@ def test_nncf_eval(self, template, tmp_dir_path): @pytest.mark.parametrize("template", templates, ids=templates_ids) def test_nncf_eval_openvino(self, template, tmp_dir_path): tmp_dir_path = tmp_dir_path / "multi_class_cls" - if template.entrypoints.nncf is None: - pytest.skip("nncf entrypoint is none") - nncf_eval_openvino_testing(template, tmp_dir_path, otx_dir, args) @e2e_pytest_component @@ -448,9 +434,6 @@ def test_otx_hpo(self, template, tmp_dir_path): @pytest.mark.parametrize("template", templates, ids=templates_ids) def test_nncf_optimize(self, template, tmp_dir_path): tmp_dir_path = tmp_dir_path / "multi_label_cls" - if template.entrypoints.nncf is None: - pytest.skip("nncf entrypoint is none") - nncf_optimize_testing(template, tmp_dir_path, otx_dir, args_m) @e2e_pytest_component @@ -458,9 +441,6 @@ def test_nncf_optimize(self, template, tmp_dir_path): @pytest.mark.parametrize("template", templates, ids=templates_ids) def test_nncf_export(self, template, tmp_dir_path): tmp_dir_path = tmp_dir_path / "multi_label_cls" - if template.entrypoints.nncf is None: - pytest.skip("nncf entrypoint is none") - nncf_export_testing(template, tmp_dir_path) @e2e_pytest_component @@ -468,9 +448,6 @@ def test_nncf_export(self, template, tmp_dir_path): @pytest.mark.parametrize("template", templates, ids=templates_ids) def test_nncf_validate_fq(self, template, tmp_dir_path): tmp_dir_path = tmp_dir_path / "multi_label_cls" - if template.entrypoints.nncf is None: - pytest.skip("nncf entrypoint is none") - nncf_validate_fq_testing(template, tmp_dir_path, otx_dir, "classification", type(self).__name__) @e2e_pytest_component @@ -478,9 +455,6 @@ def test_nncf_validate_fq(self, template, tmp_dir_path): @pytest.mark.parametrize("template", templates, ids=templates_ids) def test_nncf_eval(self, template, tmp_dir_path): tmp_dir_path = tmp_dir_path / "multi_label_cls" - if template.entrypoints.nncf is None: - pytest.skip("nncf entrypoint is none") - nncf_eval_testing(template, tmp_dir_path, otx_dir, args_m, threshold=0.01) @e2e_pytest_component @@ -488,9 +462,6 @@ def test_nncf_eval(self, template, tmp_dir_path): @pytest.mark.parametrize("template", templates, ids=templates_ids) def test_nncf_eval_openvino(self, template, tmp_dir_path): tmp_dir_path = tmp_dir_path / "multi_label_cls" - if template.entrypoints.nncf is None: - pytest.skip("nncf entrypoint is none") - 
nncf_eval_openvino_testing(template, tmp_dir_path, otx_dir, args_m) @e2e_pytest_component @@ -622,9 +593,6 @@ def test_otx_hpo(self, template, tmp_dir_path): @pytest.mark.parametrize("template", templates, ids=templates_ids) def test_nncf_optimize(self, template, tmp_dir_path): tmp_dir_path = tmp_dir_path / "h_label_cls" - if template.entrypoints.nncf is None: - pytest.skip("nncf entrypoint is none") - nncf_optimize_testing(template, tmp_dir_path, otx_dir, args_h) @e2e_pytest_component @@ -632,9 +600,6 @@ def test_nncf_optimize(self, template, tmp_dir_path): @pytest.mark.parametrize("template", templates, ids=templates_ids) def test_nncf_export(self, template, tmp_dir_path): tmp_dir_path = tmp_dir_path / "h_label_cls" - if template.entrypoints.nncf is None: - pytest.skip("nncf entrypoint is none") - nncf_export_testing(template, tmp_dir_path) @e2e_pytest_component @@ -642,9 +607,6 @@ def test_nncf_export(self, template, tmp_dir_path): @pytest.mark.parametrize("template", templates, ids=templates_ids) def test_nncf_eval(self, template, tmp_dir_path): tmp_dir_path = tmp_dir_path / "h_label_cls" - if template.entrypoints.nncf is None: - pytest.skip("nncf entrypoint is none") - nncf_eval_testing(template, tmp_dir_path, otx_dir, args_h, threshold=0.01) @e2e_pytest_component @@ -652,9 +614,6 @@ def test_nncf_eval(self, template, tmp_dir_path): @pytest.mark.parametrize("template", templates, ids=templates_ids) def test_nncf_validate_fq(self, template, tmp_dir_path): tmp_dir_path = tmp_dir_path / "h_label_cls" - if template.entrypoints.nncf is None: - pytest.skip("nncf entrypoint is none") - nncf_validate_fq_testing(template, tmp_dir_path, otx_dir, "classification", type(self).__name__) @e2e_pytest_component @@ -662,9 +621,6 @@ def test_nncf_validate_fq(self, template, tmp_dir_path): @pytest.mark.parametrize("template", templates, ids=templates_ids) def test_nncf_eval_openvino(self, template, tmp_dir_path): tmp_dir_path = tmp_dir_path / "h_label_cls" - if template.entrypoints.nncf is None: - pytest.skip("nncf entrypoint is none") - nncf_eval_openvino_testing(template, tmp_dir_path, otx_dir, args_h) @e2e_pytest_component diff --git a/tests/e2e/cli/detection/test_api_xai_sanity_detection.py b/tests/e2e/cli/detection/test_api_xai_sanity_detection.py index 02024d54bfd..4cd11fa937c 100644 --- a/tests/e2e/cli/detection/test_api_xai_sanity_detection.py +++ b/tests/e2e/cli/detection/test_api_xai_sanity_detection.py @@ -32,7 +32,7 @@ class TestOVDetXAIAPI(DetectionTaskAPIBase): ref_raw_saliency_shapes = { - "MobileNetV2-ATSS": (4, 4), # Need to be adapted to configurable or adaptive input size + "MobileNetV2-ATSS": (16, 16), # Need to be adapted to configurable or adaptive input size } @e2e_pytest_api diff --git a/tests/e2e/cli/semantic_segmentation/test_segmentation.py b/tests/e2e/cli/semantic_segmentation/test_segmentation.py index 0315e3e051d..d85698a1394 100644 --- a/tests/e2e/cli/semantic_segmentation/test_segmentation.py +++ b/tests/e2e/cli/semantic_segmentation/test_segmentation.py @@ -188,9 +188,6 @@ def test_otx_hpo(self, template, tmp_dir_path): @pytest.mark.parametrize("template", templates, ids=templates_ids) def test_nncf_optimize(self, template, tmp_dir_path): tmp_dir_path = tmp_dir_path / "segmentation" - if template.entrypoints.nncf is None: - pytest.skip("nncf entrypoint is none") - nncf_optimize_testing(template, tmp_dir_path, otx_dir, args) @e2e_pytest_component @@ -198,9 +195,6 @@ def test_nncf_optimize(self, template, tmp_dir_path): @pytest.mark.parametrize("template", templates, 
ids=templates_ids) def test_nncf_export(self, template, tmp_dir_path): tmp_dir_path = tmp_dir_path / "segmentation" - if template.entrypoints.nncf is None: - pytest.skip("nncf entrypoint is none") - nncf_export_testing(template, tmp_dir_path) @e2e_pytest_component @@ -208,9 +202,6 @@ def test_nncf_export(self, template, tmp_dir_path): @pytest.mark.parametrize("template", templates, ids=templates_ids) def test_nncf_validate_fq(self, template, tmp_dir_path): tmp_dir_path = tmp_dir_path / "segmentation" - if template.entrypoints.nncf is None: - pytest.skip("nncf entrypoint is none") - nncf_validate_fq_testing(template, tmp_dir_path, otx_dir, "semantic_segmentation", type(self).__name__) @e2e_pytest_component @@ -218,9 +209,6 @@ def test_nncf_validate_fq(self, template, tmp_dir_path): @pytest.mark.parametrize("template", templates, ids=templates_ids) def test_nncf_eval(self, template, tmp_dir_path): tmp_dir_path = tmp_dir_path / "segmentation" - if template.entrypoints.nncf is None: - pytest.skip("nncf entrypoint is none") - nncf_eval_testing(template, tmp_dir_path, otx_dir, args, threshold=0.01) @e2e_pytest_component @@ -228,9 +216,6 @@ def test_nncf_eval(self, template, tmp_dir_path): @pytest.mark.parametrize("template", templates, ids=templates_ids) def test_nncf_eval_openvino(self, template, tmp_dir_path): tmp_dir_path = tmp_dir_path / "segmentation" - if template.entrypoints.nncf is None: - pytest.skip("nncf entrypoint is none") - nncf_eval_openvino_testing(template, tmp_dir_path, otx_dir, args) @e2e_pytest_component diff --git a/tests/e2e/test_api_xai_sanity.py b/tests/e2e/test_api_xai_sanity.py new file mode 100644 index 00000000000..8534276839e --- /dev/null +++ b/tests/e2e/test_api_xai_sanity.py @@ -0,0 +1,440 @@ +# Copyright (C) 2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 +# + +import os +import os.path as osp +import tempfile +from copy import deepcopy + +import pytest +import torch + +from otx.algorithms.classification.adapters.mmcls.task import MMClassificationTask +from otx.algorithms.classification.adapters.openvino.task import ClassificationOpenVINOTask + +from otx.algorithms.detection.adapters.mmdet.task import MMDetectionTask +from otx.algorithms.detection.adapters.openvino.task import OpenVINODetectionTask +from otx.algorithms.detection.configs.base import DetectionConfig +from otx.api.configuration.helper import create +from otx.api.entities.inference_parameters import InferenceParameters +from otx.api.entities.model import ( + ModelConfiguration, + ModelEntity, +) +from otx.api.entities.result_media import ResultMediaEntity +from otx.api.entities.subset import Subset +from otx.api.entities.train_parameters import TrainParameters +from otx.api.entities.model_template import parse_model_template, TaskType +from otx.api.entities.label_schema import LabelGroup, LabelGroupType, LabelSchemaEntity +from otx.api.usecases.tasks.interfaces.export_interface import ExportType +from otx.cli.utils.io import read_model, save_model_data +from tests.integration.api.classification.test_api_classification import ( + DEFAULT_CLS_TEMPLATE_DIR, + ClassificationTaskAPIBase, +) +from tests.integration.api.detection.api_detection import DetectionTaskAPIBase, DEFAULT_DET_TEMPLATE_DIR +from tests.test_suite.e2e_test_system import e2e_pytest_api +from tests.unit.algorithms.detection.test_helpers import ( + DEFAULT_ISEG_TEMPLATE_DIR, + init_environment, + generate_det_dataset, +) + +torch.manual_seed(0) + +assert_text_explain_all = "The number of saliency maps should be equal to the 
number of all classes." +assert_text_explain_predicted = "The number of saliency maps should be equal to the number of predicted classes." + + +def saliency_maps_check( + predicted_dataset, task_labels, raw_sal_map_shape, processed_saliency_maps=False, only_predicted=True +): + for data_point in predicted_dataset: + saliency_map_counter = 0 + metadata_list = data_point.get_metadata() + for metadata in metadata_list: + if isinstance(metadata.data, ResultMediaEntity): + if metadata.data.type == "saliency_map": + saliency_map_counter += 1 + if processed_saliency_maps: + assert metadata.data.numpy.ndim == 3 + assert metadata.data.numpy.shape == (data_point.height, data_point.width, 3) + else: + assert metadata.data.numpy.ndim == 2 + assert metadata.data.numpy.shape == raw_sal_map_shape + if only_predicted: + assert saliency_map_counter == len(data_point.annotation_scene.get_labels()), assert_text_explain_predicted + else: + assert saliency_map_counter == len(task_labels), assert_text_explain_all + + +class TestOVClsXAIAPI(ClassificationTaskAPIBase): + ref_raw_saliency_shapes = { + "EfficientNet-B0": (7, 7), + } + + @e2e_pytest_api + @pytest.mark.parametrize( + "multilabel,hierarchical", + [(False, False), (True, False), (False, True)], + ids=["multiclass", "multilabel", "hierarchical"], + ) + def test_inference_xai(self, multilabel, hierarchical): + with tempfile.TemporaryDirectory() as temp_dir: + hyper_parameters, model_template = self.setup_configurable_parameters(DEFAULT_CLS_TEMPLATE_DIR, num_iters=1) + task_environment, dataset = self.init_environment( + hyper_parameters, model_template, multilabel, hierarchical, 20 + ) + + # Train and save a model + task = MMClassificationTask(task_environment=task_environment) + train_parameters = TrainParameters() + output_model = ModelEntity( + dataset, + task_environment.get_model_configuration(), + ) + task.train(dataset, output_model, train_parameters) + save_model_data(output_model, temp_dir) + + for processed_saliency_maps, only_predicted in [[True, False], [False, True]]: + task_environment, dataset = self.init_environment( + hyper_parameters, model_template, multilabel, hierarchical, 20 + ) + + # Infer torch model + task = MMClassificationTask(task_environment=task_environment) + inference_parameters = InferenceParameters( + is_evaluation=False, + process_saliency_maps=processed_saliency_maps, + explain_predicted_classes=only_predicted, + ) + predicted_dataset = task.infer(dataset.with_empty_annotations(), inference_parameters) + + # Check saliency maps torch task + task_labels = output_model.configuration.get_label_schema().get_labels(include_empty=False) + saliency_maps_check( + predicted_dataset, + task_labels, + self.ref_raw_saliency_shapes[model_template.name], + processed_saliency_maps=processed_saliency_maps, + only_predicted=only_predicted, + ) + + # Save OV IR model + task._model_ckpt = osp.join(temp_dir, "weights.pth") + exported_model = ModelEntity(None, task_environment.get_model_configuration()) + task.export(ExportType.OPENVINO, exported_model, dump_features=True) + os.makedirs(temp_dir, exist_ok=True) + save_model_data(exported_model, temp_dir) + + # Infer OV IR model + load_weights_ov = osp.join(temp_dir, "openvino.xml") + task_environment.model = read_model(task_environment.get_model_configuration(), load_weights_ov, None) + task = ClassificationOpenVINOTask(task_environment=task_environment) + _, dataset = self.init_environment(hyper_parameters, model_template, multilabel, hierarchical, 20) + predicted_dataset_ov = 
task.infer(dataset.with_empty_annotations(), inference_parameters) + + # Check saliency maps OV task + saliency_maps_check( + predicted_dataset_ov, + task_labels, + self.ref_raw_saliency_shapes[model_template.name], + processed_saliency_maps=processed_saliency_maps, + only_predicted=only_predicted, + ) + + +class TestOVDetXAIAPI(DetectionTaskAPIBase): + ref_raw_saliency_shapes = { + "MobileNetV2-ATSS": (6, 8), + } + + @e2e_pytest_api + def test_inference_xai(self): + with tempfile.TemporaryDirectory() as temp_dir: + hyper_parameters, model_template = self.setup_configurable_parameters( + DEFAULT_DET_TEMPLATE_DIR, num_iters=15 + ) + task_env, dataset = self.init_environment(hyper_parameters, model_template, 10) + + train_task = MMDetectionTask(task_environment=task_env) + trained_model = ModelEntity( + dataset, + task_env.get_model_configuration(), + ) + train_task.train(dataset, trained_model, TrainParameters()) + save_model_data(trained_model, temp_dir) + + for processed_saliency_maps, only_predicted in [[True, False], [False, True]]: + task_env, dataset = self.init_environment(hyper_parameters, model_template, 10) + inference_parameters = InferenceParameters( + is_evaluation=False, + process_saliency_maps=processed_saliency_maps, + explain_predicted_classes=only_predicted, + ) + + # Infer torch model + task_env.model = trained_model + inference_task = MMDetectionTask(task_environment=task_env) + val_dataset = dataset.get_subset(Subset.VALIDATION) + predicted_dataset = inference_task.infer(val_dataset.with_empty_annotations(), inference_parameters) + + # Check saliency maps torch task + task_labels = trained_model.configuration.get_label_schema().get_labels(include_empty=False) + saliency_maps_check( + predicted_dataset, + task_labels, + self.ref_raw_saliency_shapes[model_template.name], + processed_saliency_maps=processed_saliency_maps, + only_predicted=only_predicted, + ) + + # Save OV IR model + inference_task._model_ckpt = osp.join(temp_dir, "weights.pth") + exported_model = ModelEntity(None, task_env.get_model_configuration()) + inference_task.export(ExportType.OPENVINO, exported_model, dump_features=True) + os.makedirs(temp_dir, exist_ok=True) + save_model_data(exported_model, temp_dir) + + # Infer OV IR model + load_weights_ov = osp.join(temp_dir, "openvino.xml") + task_env.model = read_model(task_env.get_model_configuration(), load_weights_ov, None) + task = OpenVINODetectionTask(task_environment=task_env) + _, dataset = self.init_environment(hyper_parameters, model_template, 10) + predicted_dataset_ov = task.infer(dataset.with_empty_annotations(), inference_parameters) + + # Check saliency maps OV task + saliency_maps_check( + predicted_dataset_ov, + task_labels, + self.ref_raw_saliency_shapes[model_template.name], + processed_saliency_maps=processed_saliency_maps, + only_predicted=only_predicted, + ) + + +class TestOVDetTilXAIAPI(DetectionTaskAPIBase): + ref_raw_saliency_shapes = { + "MobileNetV2-ATSS": (6, 8), + } + + @e2e_pytest_api + def test_inference_xai(self): + with tempfile.TemporaryDirectory() as temp_dir: + hyper_parameters, model_template = self.setup_configurable_parameters( + DEFAULT_DET_TEMPLATE_DIR, num_iters=10, tiling=True + ) + task_env, dataset = self.init_environment(hyper_parameters, model_template, 10) + + train_task = MMDetectionTask(task_environment=task_env) + trained_model = ModelEntity( + dataset, + task_env.get_model_configuration(), + ) + train_task.train(dataset, trained_model, TrainParameters()) + save_model_data(trained_model, temp_dir) + + for 
processed_saliency_maps, only_predicted in [[True, False], [False, True]]: + task_env, dataset = self.init_environment(hyper_parameters, model_template, 10) + inference_parameters = InferenceParameters( + is_evaluation=False, + process_saliency_maps=processed_saliency_maps, + explain_predicted_classes=only_predicted, + ) + + # Infer torch model + task_env.model = trained_model + inference_task = MMDetectionTask(task_environment=task_env) + val_dataset = dataset.get_subset(Subset.VALIDATION) + predicted_dataset = inference_task.infer(val_dataset.with_empty_annotations(), inference_parameters) + + # Check saliency maps torch task + task_labels = trained_model.configuration.get_label_schema().get_labels(include_empty=False) + saliency_maps_check( + predicted_dataset, + task_labels, + self.ref_raw_saliency_shapes[model_template.name], + processed_saliency_maps=processed_saliency_maps, + only_predicted=only_predicted, + ) + + # Save OV IR model + inference_task._model_ckpt = osp.join(temp_dir, "weights.pth") + exported_model = ModelEntity(None, task_env.get_model_configuration()) + inference_task.export(ExportType.OPENVINO, exported_model, dump_features=True) + os.makedirs(temp_dir, exist_ok=True) + save_model_data(exported_model, temp_dir) + + # Infer OV IR model + load_weights_ov = osp.join(temp_dir, "openvino.xml") + task_env.model = read_model(task_env.get_model_configuration(), load_weights_ov, None) + task = OpenVINODetectionTask(task_environment=task_env) + _, dataset = self.init_environment(hyper_parameters, model_template, 10) + predicted_dataset_ov = task.infer(dataset.with_empty_annotations(), inference_parameters) + + # Check saliency maps OV task + saliency_maps_check( + predicted_dataset_ov, + task_labels, + self.ref_raw_saliency_shapes[model_template.name], + processed_saliency_maps=processed_saliency_maps, + only_predicted=only_predicted, + ) + + +class TestOVISegmXAIAPI: + @e2e_pytest_api + def test_inference_xai(self): + with tempfile.TemporaryDirectory() as temp_dir: + model_template = parse_model_template(os.path.join(DEFAULT_ISEG_TEMPLATE_DIR, "template.yaml")) + hyper_parameters = create(model_template.hyper_parameters.data) + hyper_parameters.learning_parameters.num_iters = 3 + task_env = init_environment(hyper_parameters, model_template, task_type=TaskType.INSTANCE_SEGMENTATION) + + train_task = MMDetectionTask(task_env) + + iseg_dataset, iseg_labels = generate_det_dataset(TaskType.INSTANCE_SEGMENTATION, 100) + iseg_label_schema = LabelSchemaEntity() + iseg_label_group = LabelGroup( + name="labels", + labels=iseg_labels, + group_type=LabelGroupType.EXCLUSIVE, + ) + iseg_label_schema.add_group(iseg_label_group) + + _config = ModelConfiguration(DetectionConfig(), iseg_label_schema) + trained_model = ModelEntity( + iseg_dataset, + _config, + ) + + train_task.train(iseg_dataset, trained_model, TrainParameters()) + + save_model_data(trained_model, temp_dir) + + processed_saliency_maps, only_predicted = False, True + task_env = init_environment(hyper_parameters, model_template, task_type=TaskType.INSTANCE_SEGMENTATION) + inference_parameters = InferenceParameters( + is_evaluation=False, + process_saliency_maps=processed_saliency_maps, + explain_predicted_classes=only_predicted, + ) + + # Infer torch model + task_env.model = trained_model + inference_task = MMDetectionTask(task_environment=task_env) + val_dataset = iseg_dataset.get_subset(Subset.VALIDATION) + val_dataset_copy = deepcopy(val_dataset) + predicted_dataset = 
inference_task.infer(val_dataset.with_empty_annotations(), inference_parameters) + + # Check saliency maps torch task + task_labels = trained_model.configuration.get_label_schema().get_labels(include_empty=False) + saliency_maps_check( + predicted_dataset, + task_labels, + (224, 224), + processed_saliency_maps=processed_saliency_maps, + only_predicted=only_predicted, + ) + + # Save OV IR model + inference_task._model_ckpt = osp.join(temp_dir, "weights.pth") + exported_model = ModelEntity(None, task_env.get_model_configuration()) + inference_task.export(ExportType.OPENVINO, exported_model, dump_features=True) + os.makedirs(temp_dir, exist_ok=True) + save_model_data(exported_model, temp_dir) + + # Infer OV IR model + load_weights_ov = osp.join(temp_dir, "openvino.xml") + task_env.model = read_model(task_env.get_model_configuration(), load_weights_ov, None) + task = OpenVINODetectionTask(task_environment=task_env) + predicted_dataset_ov = task.infer(val_dataset_copy.with_empty_annotations(), inference_parameters) + + # Check saliency maps OV task + saliency_maps_check( + predicted_dataset_ov, + task_labels, + (480, 640), + processed_saliency_maps=processed_saliency_maps, + only_predicted=only_predicted, + ) + + +class TestOVISegmTilXAIAPI: + @e2e_pytest_api + def test_inference_xai(self): + with tempfile.TemporaryDirectory() as temp_dir: + model_template = parse_model_template(os.path.join(DEFAULT_ISEG_TEMPLATE_DIR, "template.yaml")) + hyper_parameters = create(model_template.hyper_parameters.data) + hyper_parameters.learning_parameters.num_iters = 5 + hyper_parameters.tiling_parameters.enable_tiling = True + task_env = init_environment(hyper_parameters, model_template, task_type=TaskType.INSTANCE_SEGMENTATION) + + train_task = MMDetectionTask(task_env) + + iseg_dataset, iseg_labels = generate_det_dataset(TaskType.INSTANCE_SEGMENTATION, 100) + iseg_label_schema = LabelSchemaEntity() + iseg_label_group = LabelGroup( + name="labels", + labels=iseg_labels, + group_type=LabelGroupType.EXCLUSIVE, + ) + iseg_label_schema.add_group(iseg_label_group) + + _config = ModelConfiguration(DetectionConfig(), iseg_label_schema) + trained_model = ModelEntity( + iseg_dataset, + _config, + ) + + train_task.train(iseg_dataset, trained_model, TrainParameters()) + + save_model_data(trained_model, temp_dir) + + processed_saliency_maps, only_predicted = False, True + task_env = init_environment(hyper_parameters, model_template, task_type=TaskType.INSTANCE_SEGMENTATION) + inference_parameters = InferenceParameters( + is_evaluation=False, + process_saliency_maps=processed_saliency_maps, + explain_predicted_classes=only_predicted, + ) + + # Infer torch model + task_env.model = trained_model + inference_task = MMDetectionTask(task_environment=task_env) + val_dataset = iseg_dataset.get_subset(Subset.VALIDATION) + val_dataset_copy = deepcopy(val_dataset) + predicted_dataset = inference_task.infer(val_dataset.with_empty_annotations(), inference_parameters) + + # Check saliency maps torch task + task_labels = trained_model.configuration.get_label_schema().get_labels(include_empty=False) + saliency_maps_check( + predicted_dataset, + task_labels, + (33, 44), + processed_saliency_maps=processed_saliency_maps, + only_predicted=only_predicted, + ) + + # Save OV IR model + inference_task._model_ckpt = osp.join(temp_dir, "weights.pth") + exported_model = ModelEntity(None, task_env.get_model_configuration()) + inference_task.export(ExportType.OPENVINO, exported_model, dump_features=True) + os.makedirs(temp_dir, exist_ok=True) + 
save_model_data(exported_model, temp_dir) + + # Infer OV IR model + load_weights_ov = osp.join(temp_dir, "openvino.xml") + task_env.model = read_model(task_env.get_model_configuration(), load_weights_ov, None) + task = OpenVINODetectionTask(task_environment=task_env) + predicted_dataset_ov = task.infer(val_dataset_copy.with_empty_annotations(), inference_parameters) + + # Check saliency maps OV task + saliency_maps_check( + predicted_dataset_ov, + task_labels, + (480, 640), + processed_saliency_maps=processed_saliency_maps, + only_predicted=only_predicted, + ) diff --git a/tests/integration/cli/classification/test_classification.py b/tests/integration/cli/classification/test_classification.py index 81ca3b085ca..186613de79f 100644 --- a/tests/integration/cli/classification/test_classification.py +++ b/tests/integration/cli/classification/test_classification.py @@ -210,9 +210,6 @@ def test_otx_hpo(self, template, tmp_dir_path): @pytest.mark.parametrize("template", templates, ids=templates_ids) def test_nncf_optimize(self, template, tmp_dir_path): tmp_dir_path = tmp_dir_path / "multi_class_cls" - if template.entrypoints.nncf is None: - pytest.skip("nncf entrypoint is none") - nncf_optimize_testing(template, tmp_dir_path, otx_dir, args) @e2e_pytest_component @@ -410,9 +407,6 @@ def test_otx_eval_deployment(self, template, tmp_dir_path): @pytest.mark.parametrize("template", default_templates, ids=default_templates_ids) def test_nncf_optimize(self, template, tmp_dir_path): tmp_dir_path = tmp_dir_path / "multi_label_cls" - if template.entrypoints.nncf is None: - pytest.skip("nncf entrypoint is none") - nncf_optimize_testing(template, tmp_dir_path, otx_dir, args_m) @e2e_pytest_component @@ -538,7 +532,4 @@ def test_otx_eval_deployment(self, template, tmp_dir_path): @pytest.mark.parametrize("template", default_templates, ids=default_templates_ids) def test_nncf_optimize(self, template, tmp_dir_path): tmp_dir_path = tmp_dir_path / "h_label_cls" - if template.entrypoints.nncf is None: - pytest.skip("nncf entrypoint is none") - nncf_optimize_testing(template, tmp_dir_path, otx_dir, args_h) diff --git a/tests/integration/cli/instance_segmentation/test_rotated_detection.py b/tests/integration/cli/instance_segmentation/test_rotated_detection.py index 3abab1c9719..e51966eb866 100644 --- a/tests/integration/cli/instance_segmentation/test_rotated_detection.py +++ b/tests/integration/cli/instance_segmentation/test_rotated_detection.py @@ -33,6 +33,7 @@ TestRotatedDetectionModelTemplates = generate_model_template_testing(templates) + # NOTE: Most of implementation parts are same with the ISeg tasks. 
# So, currently just added the `test_otx_train` function to check # Whether further modifications make Rotated detection fails or not diff --git a/tests/integration/cli/semantic_segmentation/test_segmentation.py b/tests/integration/cli/semantic_segmentation/test_segmentation.py index 9a5cfad80d6..7f1b286e0df 100644 --- a/tests/integration/cli/semantic_segmentation/test_segmentation.py +++ b/tests/integration/cli/semantic_segmentation/test_segmentation.py @@ -86,6 +86,7 @@ default_template = parse_model_template( Path("src/otx/algorithms/segmentation/configs") / "ocr_lite_hrnet_18_mod2" / "template.yaml" ) + # add integration test for semi-sl with new SegNext model and prototype based approach segnext_template = parse_model_template( Path("src/otx/algorithms/segmentation/configs") / "ham_segnext_s" / "template.yaml" @@ -181,9 +182,6 @@ def test_otx_hpo(self, template, tmp_dir_path): @pytest.mark.parametrize("template", default_templates, ids=default_templates_ids) def test_nncf_optimize(self, template, tmp_dir_path): tmp_dir_path = tmp_dir_path / "segmentation" - if template.entrypoints.nncf is None: - pytest.skip("nncf entrypoint is none") - nncf_optimize_testing(template, tmp_dir_path, otx_dir, args) @e2e_pytest_component @@ -245,5 +243,5 @@ def test_otx_train_auto_adapt_batch_size(self, template, tmp_dir_path, bs_adapt_ def test_otx_train_auto_adapt_num_workers(self, template, tmp_dir_path): adapting_num_workers_args = copy.deepcopy(args) adapting_num_workers_args["train_params"].extend(["--learning_parameters.auto_num_workers", "True"]) - tmp_dir_path = tmp_dir_path / f"segmentation_auto_adapt_num_workers" + tmp_dir_path = tmp_dir_path / "segmentation_auto_adapt_num_workers" otx_train_testing(template, tmp_dir_path, otx_dir, adapting_num_workers_args) diff --git a/tests/regression/regression_command.py b/tests/regression/regression_command.py index 5ffd95d3053..121f2c33efb 100644 --- a/tests/regression/regression_command.py +++ b/tests/regression/regression_command.py @@ -130,6 +130,8 @@ def regression_openvino_testing( model_criteria = criteria[template.name] * (1.0 - reg_threshold) for k in trained_performance.keys(): + if k == "avg_time_per_image": + continue result_dict[k] = round(exported_performance[k], 3) if exported_performance[k] < model_criteria: regression_result["passed"] = False @@ -180,6 +182,8 @@ def regression_deployment_testing( modified_criteria = model_criteria - (model_criteria * reg_threshold) for k in exported_performance.keys(): + if k == "avg_time_per_image": + continue if isinstance(criteria, dict) and template.name in criteria.keys(): result_dict[k] = round(deployed_performance[k], 3) if deployed_performance[k] < modified_criteria: diff --git a/tests/test_suite/run_test_command.py b/tests/test_suite/run_test_command.py index 0b2d6ebbfe2..c40d092bffd 100644 --- a/tests/test_suite/run_test_command.py +++ b/tests/test_suite/run_test_command.py @@ -10,7 +10,7 @@ import sys import torch from pathlib import Path -from typing import Dict +from typing import Dict, Union import onnx import onnxruntime @@ -349,11 +349,7 @@ def otx_eval_openvino_testing( with open(perf_path) as read_file: exported_performance = json.load(read_file) - for k in trained_performance.keys(): - assert ( - exported_performance[k] >= trained_performance[k] - or abs(trained_performance[k] - exported_performance[k]) / (trained_performance[k] + 1e-10) <= threshold - ), f"{trained_performance[k]=}, {exported_performance[k]=}" + compare_model_accuracy(exported_performance, trained_performance, 
threshold) def otx_demo_testing(template, root, otx_dir, args): @@ -494,11 +490,7 @@ def otx_eval_deployment_testing(template, root, otx_dir, args, threshold=0.0): with open(f"{template_work_dir}/deployed_{template.model_template_id}/performance.json") as read_file: deployed_performance = json.load(read_file) - for k in exported_performance.keys(): - assert ( - deployed_performance[k] >= exported_performance[k] - or abs(exported_performance[k] - deployed_performance[k]) / (exported_performance[k] + 1e-10) <= threshold - ), f"{exported_performance[k]=}, {deployed_performance[k]=}" + compare_model_accuracy(deployed_performance, deployed_performance, threshold) def otx_demo_deployment_testing(template, root, otx_dir, args): @@ -654,6 +646,9 @@ def ptq_eval_testing(template, root, otx_dir, args, is_visual_prompting=False): def nncf_optimize_testing(template, root, otx_dir, args): + if template.entrypoints.nncf is None: + pytest.skip("NNCF QAT is disabled: entrypoints.nncf in template is not specified") + template_work_dir = get_template_dir(template, root) command_line = [ "otx", @@ -676,6 +671,8 @@ def nncf_optimize_testing(template, root, otx_dir, args): def nncf_export_testing(template, root): + if template.entrypoints.nncf is None: + pytest.skip("NNCF QAT is disabled: entrypoints.nncf in template is not specified") template_work_dir = get_template_dir(template, root) command_line = [ "otx", @@ -706,6 +703,8 @@ def nncf_export_testing(template, root): def nncf_validate_fq_testing(template, root, otx_dir, task_type, test_name): + if template.entrypoints.nncf is None: + pytest.skip("NNCF QAT is disabled: entrypoints.nncf in template is not specified") template_work_dir = get_template_dir(template, root) xml_path = f"{template_work_dir}/exported_nncf_{template.model_template_id}/openvino.xml" path_to_ref_data = os.path.join( @@ -716,6 +715,8 @@ def nncf_validate_fq_testing(template, root, otx_dir, task_type, test_name): def nncf_eval_testing(template, root, otx_dir, args, threshold=0.01): + if template.entrypoints.nncf is None: + pytest.skip("NNCF QAT is disabled: entrypoints.nncf in template is not specified") template_work_dir = get_template_dir(template, root) command_line = [ "otx", @@ -736,14 +737,12 @@ def nncf_eval_testing(template, root, otx_dir, args, threshold=0.01): with open(f"{template_work_dir}/nncf_{template.model_template_id}/performance.json") as read_file: evaluated_performance = json.load(read_file) - for k in trained_performance.keys(): - assert ( - evaluated_performance[k] >= trained_performance[k] - or abs(trained_performance[k] - evaluated_performance[k]) / (trained_performance[k] + 1e-10) <= threshold - ), f"{trained_performance[k]=}, {evaluated_performance[k]=}" + compare_model_accuracy(evaluated_performance, trained_performance, threshold) def nncf_eval_openvino_testing(template, root, otx_dir, args): + if template.entrypoints.nncf is None: + pytest.skip("NNCF QAT is disabled: entrypoints.nncf in template is not specified") template_work_dir = get_template_dir(template, root) command_line = [ "otx", @@ -1163,3 +1162,13 @@ def test_default_for_task(self): assert num_default_model == 1 return _TestModelTemplates + + +def compare_model_accuracy(performance_to_test: Dict, target_performance: Dict, threshold: Union[float, int]): + for k in target_performance.keys(): + if k == "avg_time_per_image": + continue + assert ( + performance_to_test[k] >= target_performance[k] + or abs(target_performance[k] - performance_to_test[k]) / (target_performance[k] + 1e-10) <= threshold + 
), f"{target_performance[k]=}, {performance_to_test[k]=}" diff --git a/tests/unit/algorithms/classification/utils/test_utils.py b/tests/unit/algorithms/classification/utils/test_utils.py index bbff0ccfdc3..f0f3819f009 100644 --- a/tests/unit/algorithms/classification/utils/test_utils.py +++ b/tests/unit/algorithms/classification/utils/test_utils.py @@ -93,3 +93,7 @@ def test_get_cls_model_api_configuration(default_hierarchical_data): assert len(model_api_cfg) > 0 assert model_api_cfg[("model_info", "confidence_threshold")] == str(config["confidence_threshold"]) assert ("model_info", "hierarchical_config") in model_api_cfg + assert ("model_info", "labels") in model_api_cfg + assert ("model_info", "label_ids") in model_api_cfg + assert len(label_schema.get_labels(include_empty=False)) == len(model_api_cfg[("model_info", "labels")].split()) + assert len(label_schema.get_labels(include_empty=False)) == len(model_api_cfg[("model_info", "label_ids")].split()) diff --git a/tests/unit/algorithms/detection/test_xai_detection_validity.py b/tests/unit/algorithms/detection/test_xai_detection_validity.py index 89c28fd83a1..0b38853397e 100644 --- a/tests/unit/algorithms/detection/test_xai_detection_validity.py +++ b/tests/unit/algorithms/detection/test_xai_detection_validity.py @@ -24,31 +24,31 @@ class TestExplainMethods: ref_saliency_shapes = { - "MobileNetV2-ATSS": (2, 4, 4), - "ResNeXt101-ATSS": (2, 4, 4), + "MobileNetV2-ATSS": (2, 13, 13), + "ResNeXt101-ATSS": (2, 13, 13), "SSD": (81, 13, 13), - "YOLOX-TINY": (80, 13, 13), - "YOLOX-S": (80, 13, 13), - "YOLOX-L": (80, 13, 13), - "YOLOX-X": (80, 13, 13), + "YOLOX-TINY": (80, 26, 26), + "YOLOX-S": (80, 26, 26), + "YOLOX-L": (80, 26, 26), + "YOLOX-X": (80, 26, 26), } ref_saliency_vals_det = { - "MobileNetV2-ATSS": np.array([67, 216, 255, 57], dtype=np.uint8), - "ResNeXt101-ATSS": np.array([75, 214, 229, 173], dtype=np.uint8), - "YOLOX-TINY": np.array([80, 28, 42, 53, 49, 68, 72, 75, 69, 57, 65, 6, 157], dtype=np.uint8), - "YOLOX-S": np.array([75, 178, 151, 159, 150, 148, 144, 144, 147, 144, 147, 142, 189], dtype=np.uint8), - "YOLOX-L": np.array([43, 28, 0, 6, 7, 19, 22, 17, 14, 18, 25, 7, 34], dtype=np.uint8), - "YOLOX-X": np.array([255, 144, 83, 76, 83, 86, 82, 90, 91, 93, 110, 104, 83], dtype=np.uint8), - "SSD": np.array([119, 72, 118, 35, 39, 30, 31, 31, 36, 27, 44, 23, 61], dtype=np.uint8), + "MobileNetV2-ATSS": np.array([34, 67, 148, 132, 172, 147, 146, 155, 167, 159], dtype=np.uint8), + "ResNeXt101-ATSS": np.array([52, 75, 68, 76, 89, 94, 101, 111, 125, 123], dtype=np.uint8), + "YOLOX-TINY": np.array([177, 94, 147, 147, 161, 162, 164, 164, 163, 166], dtype=np.uint8), + "YOLOX-S": np.array([158, 170, 180, 158, 152, 148, 153, 153, 148, 145], dtype=np.uint8), + "YOLOX-L": np.array([255, 80, 97, 88, 73, 71, 72, 76, 75, 76], dtype=np.uint8), + "YOLOX-X": np.array([185, 218, 189, 103, 83, 70, 62, 66, 66, 67], dtype=np.uint8), + "SSD": np.array([255, 178, 212, 90, 93, 79, 79, 80, 87, 83], dtype=np.uint8), } ref_saliency_vals_det_wo_postprocess = { - "MobileNetV2-ATSS": -0.10465062, - "ResNeXt101-ATSS": -0.073549636, + "MobileNetV2-ATSS": -0.014513552, + "ResNeXt101-ATSS": -0.055565584, "YOLOX-TINY": 0.04948914, - "YOLOX-S": 0.01133332, - "YOLOX-L": 0.01870133, + "YOLOX-S": 0.011557617, + "YOLOX-L": 0.020231, "YOLOX-X": 0.0043506604, "SSD": 0.6629989, } @@ -93,7 +93,7 @@ def test_saliency_map_det(self, template): assert saliency_maps[0].ndim == 3 assert saliency_maps[0].shape == self.ref_saliency_shapes[template.name] # convert to int16 in case of 
negative value difference - actual_sal_vals = saliency_maps[0][0][0].astype(np.int16) + actual_sal_vals = saliency_maps[0][0][0][:10].astype(np.int16) ref_sal_vals = self.ref_saliency_vals_det[template.name].astype(np.uint8) assert np.all(np.abs(actual_sal_vals - ref_sal_vals) <= 1) diff --git a/tests/unit/algorithms/detection/utils/test_detection_utils.py b/tests/unit/algorithms/detection/utils/test_detection_utils.py index 77c46a8c855..0a3a645e29e 100644 --- a/tests/unit/algorithms/detection/utils/test_detection_utils.py +++ b/tests/unit/algorithms/detection/utils/test_detection_utils.py @@ -34,3 +34,7 @@ def test_get_det_model_api_configuration(): tiling_parameters.tile_overlap / tiling_parameters.tile_ir_scale_factor ) assert model_api_cfg[("model_info", "max_pred_number")] == str(tiling_parameters.tile_max_number) + assert ("model_info", "labels") in model_api_cfg + assert ("model_info", "label_ids") in model_api_cfg + assert len(label_schema.get_labels(include_empty=False)) == len(model_api_cfg[("model_info", "labels")].split()) + assert len(label_schema.get_labels(include_empty=False)) == len(model_api_cfg[("model_info", "label_ids")].split()) diff --git a/tests/unit/algorithms/visual_prompting/tasks/test_inference.py b/tests/unit/algorithms/visual_prompting/tasks/test_inference.py index b89d59f86a8..12bfb36817c 100644 --- a/tests/unit/algorithms/visual_prompting/tasks/test_inference.py +++ b/tests/unit/algorithms/visual_prompting/tasks/test_inference.py @@ -16,7 +16,7 @@ from otx.api.entities.model import ModelEntity, ModelFormat, ModelOptimizationType from otx.api.entities.resultset import ResultSetEntity from tests.test_suite.e2e_test_system import e2e_pytest_unit -from otx.algorithms.common.utils.logger import get_logger +from otx.utils.logger import get_logger from tests.unit.algorithms.visual_prompting.test_helpers import ( generate_visual_prompting_dataset, init_environment, diff --git a/third-party-programs.txt b/third-party-programs.txt index e6ddfe9006c..c22526b2a73 100644 --- a/third-party-programs.txt +++ b/third-party-programs.txt @@ -828,6 +828,7 @@ ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ------------------------------------------------------------- + mmengine Apache-2.0 @@ -1035,3 +1036,245 @@ Apache-2.0 limitations under the License. ------------------------------------------------------------- + +pynvml + +BSD-3-Clause + +Copyright (c) 2011-2021, NVIDIA Corporation. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of staged-recipes nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +------------------------------------------------------------- + +segment-anything + +Apache-2.0 + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + ------------------------------------------------------------- diff --git a/tools/experiment.py b/tools/experiment.py new file mode 100644 index 00000000000..311b7641c2d --- /dev/null +++ b/tools/experiment.py @@ -0,0 +1,801 @@ +"""OTX experiment helper.""" +# Copyright (C) 2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 +# + +import argparse +import csv +import dataclasses +import json +import os +import re +import shutil +import statistics +import sys +from abc import ABC, abstractmethod +from copy import copy, deepcopy +from dataclasses import dataclass, field +from datetime import datetime, timedelta +from itertools import product +from pathlib import Path +from typing import Any, Dict, List, Optional, Union + +import yaml +from otx.cli.tools.cli import main as otx_cli +from rich.console import Console +from rich.table import Table + + +def get_parser() -> argparse.ArgumentParser: + """Parses command line arguments.""" + parser = argparse.ArgumentParser() + parser.add_argument("-f", "--file", type=str, help="Experiment recipe file.") + parser.add_argument("-p", "--parse", type=str, help="Workspace path to parse.") + return parser + + +def parse_time_delta_fmt(time_str: str, format: str) -> timedelta: + """Convert datetime to timedelta. + + Args: + time_str (str): datetime format string. + format (str): datetime format. + + Returns: + timedelta: timedelta converted from datetime. + """ + return datetime.strptime(time_str, format) - datetime(1900, 1, 1) + + +def find_latest_file(root_dir: Union[Path, str], file_name: str) -> Union[None, Path]: + """Find a latest file of matched files. + + Args: + root_dir (Union[Path, str]): Root directory for searching. + file_name (str): File name to search. It can constain shell style wild card. + + Returns: + Union[None, Path]: Latest file path. If file can't be found, return None. + """ + root_dir = Path(root_dir) + train_record_files = sorted((root_dir).glob(file_name), reverse=True, key=lambda x: x.stat().st_mtime) + if not train_record_files: + return None + return train_record_files[0] + + +@dataclass +class ExperimentResult: + """Dataclass to manage experiment result. + + It serves not only storing values but also various features. + For example, it can be added by other ExperimentResult instance and also divided by integer. 
+    It can produce a dictionary-format result and can also parse a dictionary in the same format as itself.
+    """
+
+    val_score: Union[float, None] = None
+    test_score: Union[float, None] = None
+    train_e2e_time: Union[timedelta, None] = None
+    avg_iter_time: Union[float, None] = None
+    std_iter_time: Union[float, None] = None
+    avg_data_time: Union[float, None] = None
+    std_data_time: Union[float, None] = None
+    export_model_score: Union[float, None] = None
+    avg_ov_infer_time: Union[float, None] = None
+    max_cpu_mem: Union[float, None] = None
+    avg_cpu_util: Union[float, None] = None
+    max_gpu_mem: Union[float, None] = None
+    avg_gpu_util: Union[float, None] = None
+    optimize_model_score: Union[float, None] = None
+    epoch: Union[int, None] = None
+
+    def get_formatted_result(self) -> Dict:
+        """Return a dictionary-format result."""
+        result = dataclasses.asdict(self)
+
+        for attr_name in ["max_cpu_mem", "max_gpu_mem"]:
+            max_mem = result.pop(attr_name)
+            result[f"{attr_name}(GiB)"] = max_mem
+
+        for attr_name in ["avg_cpu_util", "avg_gpu_util"]:
+            res_util = result.pop(attr_name)
+            result[f"{attr_name}(%)"] = res_util
+
+        if self.train_e2e_time is not None:
+            result["train_e2e_time"] = str(self.train_e2e_time).split(".")[0]
+
+        # delete None values
+        for key in list(result.keys()):
+            if result[key] is None:
+                del result[key]
+            elif isinstance(result[key], float):
+                result[key] = round(result[key], 4)
+
+        return result
+
+    def __add__(self, obj: "ExperimentResult"):
+        """Add to another instance of the same class. Attributes whose value is None are skipped."""
+        new_obj = deepcopy(self)
+
+        for attr in dataclasses.fields(self):
+            self._add_if_not_none(new_obj, obj, attr.name)
+
+        return new_obj
+
+    @staticmethod
+    def _add_if_not_none(dst_obj: "ExperimentResult", src_obj: "ExperimentResult", attr: str):
+        dst_obj_val = getattr(dst_obj, attr)
+        src_obj_val = getattr(src_obj, attr)
+        if dst_obj_val is not None and src_obj_val is not None:
+            setattr(dst_obj, attr, dst_obj_val + src_obj_val)
+        else:
+            setattr(dst_obj, attr, None)
+
+    def __truediv__(self, divisor: Union[int, float]):
+        """Divide by a scalar. Attributes whose value is None are skipped."""
+        new_obj = deepcopy(self)
+
+        for attr in dataclasses.fields(self):
+            self._divide_if_not_none(new_obj, attr.name, divisor)
+
+        return new_obj
+
+    @staticmethod
+    def _divide_if_not_none(obj: "ExperimentResult", attr: str, divisor: Union[int, float]):
+        obj_val = getattr(obj, attr)
+        if obj_val is not None:
+            setattr(obj, attr, obj_val / divisor)
+
+    def parse_formatted_dict(self, formatted_dict: Dict):
+        """Parse a dictionary with the same format as get_formatted_result produces."""
+        max_mem_pat = re.compile(r"max_.*_mem")
+        cpu_util_pat = re.compile(r"avg.*_util")
+        for key, val in formatted_dict.items():
+            max_mem_name = max_mem_pat.search(key)
+            cpu_util_name = cpu_util_pat.search(key)
+            if max_mem_name is not None:
+                max_mem_name = max_mem_name.group(0)
+                setattr(self, max_mem_name, val)
+            elif cpu_util_name is not None:
+                cpu_util_name = cpu_util_name.group(0)
+                setattr(self, cpu_util_name, val)
+            elif key == "train_e2e_time":
+                setattr(self, key, parse_time_delta_fmt(val, "%H:%M:%S"))
+            else:
+                setattr(self, key, val)
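+
+
+# NOTE: A minimal usage sketch (illustrative values only) of the aggregation
+# semantics above: results from repeated runs are summed with `+` and then
+# averaged with `/`, and any attribute that is None in either operand stays
+# None in the sum.
+#
+#   r1 = ExperimentResult(val_score=0.5, avg_iter_time=0.20)
+#   r2 = ExperimentResult(val_score=0.75, avg_iter_time=0.30)
+#   avg = (r1 + r2) / 2
+#   assert avg.val_score == 0.625  # test_score etc. remain None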
+ """ + + def __init__(self, workspace: Path): + self._workspace = workspace + self._exp_result = ExperimentResult() + self._iter_time_arr = [] + self._data_time_arr = [] + + @abstractmethod + def parse_exp_log(self): + """Abstract method to parse experiment log.""" + raise NotImplementedError + + def get_exp_result(self): + """Get experiment result.""" + self._calculate_avg_std_per_iter() + + return self._exp_result.get_formatted_result() + + def _calculate_avg_std_per_iter(self): + if self._iter_time_arr: + self._exp_result.avg_iter_time = statistics.mean(self._iter_time_arr) + self._exp_result.std_iter_time = ( + statistics.stdev(self._iter_time_arr) if len(self._iter_time_arr) > 1 else 0 + ) + + if self._data_time_arr: + self._exp_result.avg_data_time = statistics.mean(self._data_time_arr) + self._exp_result.std_data_time = ( + statistics.stdev(self._data_time_arr) if len(self._data_time_arr) > 1 else 0 + ) + + def _parse_eval_output(self, file_path: Path): + # NOTE: It is assumed that performance.json has key named either score or avg_time_per_image + with file_path.open("r") as f: + eval_output: Dict = json.load(f) + + if "train" in str(file_path.parent.name): + self._exp_result.test_score = list(eval_output.values())[0] + elif "export" in str(file_path.parent.name): + for key, val in eval_output.items(): + if key == "avg_time_per_image": + self._exp_result.avg_ov_infer_time = val + else: + self._exp_result.export_model_score = val + elif "optimize" in str(file_path.parent.name): + self._exp_result.optimize_model_score = list(eval_output.values())[0] + + def _parse_resource_usage(self, file_path: Path): + with file_path.open("r") as f: + resource_usage = yaml.safe_load(f) + + if "cpu" in resource_usage: + self._exp_result.max_cpu_mem = float(resource_usage["cpu"]["max_memory_usage"].split()[0]) + self._exp_result.avg_cpu_util = float(resource_usage["cpu"]["avg_util"].split()[0]) + + if "gpu" in resource_usage: + self._exp_result.max_gpu_mem = float(resource_usage["gpu"]["total_max_mem"].split()[0]) + self._exp_result.avg_gpu_util = float(resource_usage["gpu"]["total_avg_util"].split()[0]) + + def _parse_cli_report(self, file_path: Path, save_val_score=True): + with file_path.open("r") as f: + lines = f.readlines() + + val_score_pattern = re.compile(r"score: Performance\(score: ([-+]?\d+(\.\d*)?|\.\d+)") + e2e_time_pattern = re.compile(r"time elapsed: '(\d+:\d+:\d+(\.\d*)?)'") + for line in lines: + if save_val_score: + val_score = val_score_pattern.search(line) + if val_score is not None: + self._exp_result.val_score = float(val_score.group(1)) + + e2e_time = e2e_time_pattern.search(line) + if e2e_time is not None: + self._exp_result.train_e2e_time = parse_time_delta_fmt(e2e_time.group(1), "%H:%M:%S.%f") + + +class MMCVExpParser(BaseExpParser): + """MMCV experiment parser class.""" + + def parse_exp_log(self): + """Parse experiment log.""" + for task_dir in (self._workspace / "outputs").iterdir(): + if task_dir.is_symlink(): + continue + + if "train" in str(task_dir.name): + # test score + eval_files = list(task_dir.glob("performance.json")) + if eval_files: + self._parse_eval_output(eval_files[0]) + + # iter, data time, epoch + train_record_file = find_latest_file(task_dir / "logs", "*.log.json") + if train_record_file is not None: + self._parse_train_record(train_record_file) + + # train e2e time & val score + cli_report_files = list(task_dir.glob("cli_report.log")) + if cli_report_files: + self._parse_cli_report(cli_report_files[0]) + + # get resource info + resource_file = 
task_dir / "resource_usage.yaml" + if resource_file.exists(): + self._parse_resource_usage(resource_file) + + elif "export" in str(task_dir) or "optimize" in str(task_dir): + eval_files = list(task_dir.glob("performance.json")) + if eval_files: + self._parse_eval_output(eval_files[0]) + + def _parse_train_record(self, file_path: Path): + with file_path.open("r") as f: + lines = f.readlines() + + last_epoch = 0 + for line in lines: + iter_history = json.loads(line) + if iter_history.get("mode") == "train": + self._iter_time_arr.append(iter_history["time"]) + self._data_time_arr.append(iter_history["data_time"]) + if iter_history["epoch"] > last_epoch: + last_epoch = iter_history["epoch"] + + self._exp_result.epoch = last_epoch + + +class AnomalibExpParser(BaseExpParser): + """Anomalib experiment parser class.""" + + def parse_exp_log(self): + """Parse experiment log.""" + for task_dir in (self._workspace / "outputs").iterdir(): + if task_dir.is_symlink(): + continue + + if "train" in str(task_dir.name): + # test score + eval_files = list(task_dir.glob("performance.json")) + if eval_files: + self._parse_eval_output(eval_files[0]) + + # val score and train e2e time + cli_report_files = list(task_dir.glob("cli_report.log")) + if cli_report_files: + self._parse_cli_report(cli_report_files[0]) + + # get resource info + resource_file = task_dir / "resource_usage.yaml" + if resource_file.exists(): + self._parse_resource_usage(resource_file) + + elif "export" in str(task_dir) or "optimize" in str(task_dir): + eval_files = list(task_dir.glob("performance.json")) + if eval_files: + self._parse_eval_output(eval_files[0]) + + +def get_exp_parser(workspace: Path) -> BaseExpParser: + """Get experiment parser depending on framework. + + Args: + workspace (Path): Workspace to parse. + + Returns: + BaseExpParser: Experiment parser. + """ + with (workspace / "template.yaml").open("r") as f: + template = yaml.safe_load(f) + + if "anomaly" in template["task_type"].lower(): + return AnomalibExpParser(workspace) + return MMCVExpParser(workspace) + + +def organize_exp_result(workspace: Union[str, Path], exp_meta: Optional[Dict[str, str]] = None): + """Organize experiment result and save it as a file named exp_result.yaml. + + Args: + workspace (Union[str, Path]): Workspace to organize an expeirment result. + exp_meta (Dict[str, str], optional): + Experiment meta information. If it exists, it's saved together. Defaults to None. + """ + if isinstance(workspace, str): + workspace = Path(workspace) + + exp_parser = get_exp_parser(workspace) + exp_parser.parse_exp_log() + + exp_result = exp_parser.get_exp_result() + + if not exp_result: + print(f"There is no experiment result in {workspace}") + + with (workspace / "exp_result.yaml").open("w") as f: + yaml.dump({"meta": exp_meta, "exp_result": exp_result}, f, default_flow_style=False) + + +def write_csv(output_path: Union[str, Path], header: List[str], rows: List[Dict[str, Any]]): + """Write csv file based on header and rows. + + Args: + output_path (Union[str, Path]): Where file is saved. + header (List[str]): List of header. + rows (List[Dict[str, Any]]): Each row of csv. + """ + if isinstance(output_path, str): + output_path = Path(output_path) + + with output_path.open("w") as f: + writer = csv.DictWriter(f, fieldnames=header) + writer.writeheader() + writer.writerows(rows) + + +def print_table(headers: List[str], rows: List[Dict[str, Any]], table_title: str = "Table"): + """Print a table to console. + + Args: + headers (List[str]): List of headers. 
+ rows (List[Dict[str, Any]]): Rows of table. + table_title (str, optional): Table title. Defaults to "Table". + """ + # print experiment summary to console + table = Table(title=table_title) + for header in headers: + table.add_column(header, justify="center", no_wrap=True) + for each_exp_result_summary in rows: + table_row = [] + for header in headers: + val = each_exp_result_summary.get(header) + table_row.append(str(val)) + + table.add_row(*table_row) + + console = Console() + console.print(table) + + +def aggregate_all_exp_result(exp_dir: Union[str, Path]): + """Aggregate all experiment results. + + Args: + exp_dir (Union[str, Path]): Experiment directory. + """ + if isinstance(exp_dir, str): + exp_dir = Path(exp_dir) + + tensorboard_dir = exp_dir / "tensorboard" + tensorboard_dir.mkdir(exist_ok=True) + + meta_header: Union[List[str], None] = None + metric_header = set() + all_exp_result: List[Dict[str, str]] = [] + exp_result_aggregation = {} + for each_exp in exp_dir.iterdir(): + # parse single experiment + exp_result_file = each_exp / "exp_result.yaml" + if not exp_result_file.exists(): + continue + + with exp_result_file.open("r") as f: + exp_yaml_result: Dict[str, Dict] = yaml.safe_load(f) + + each_exp_result = copy(exp_yaml_result["meta"]) + each_exp_result.update(exp_yaml_result["exp_result"]) + all_exp_result.append(each_exp_result) + + if meta_header is None: + meta_header = list(exp_yaml_result["meta"].keys()) + + metric_header = metric_header | set(exp_yaml_result["exp_result"].keys()) + + exp_meta = copy(exp_yaml_result["meta"]) + exp_meta.pop("repeat") + + exp_result = ExperimentResult() + exp_result.parse_formatted_dict(exp_yaml_result["exp_result"]) + + # Sum experiments with same variables. + exp_name = json.dumps(exp_meta, sort_keys=True).encode() # get unique hash based on variable + if exp_name in exp_result_aggregation: + exp_result_aggregation[exp_name]["result"] += exp_result + exp_result_aggregation[exp_name]["num"] += 1 + else: + exp_result_aggregation[exp_name] = {"result": exp_result, "num": 1, "meta": exp_meta} + + # copy tensorboard log into tensorboard dir + exp_tb_dir = list(each_exp.rglob("tf_logs")) + if exp_tb_dir: + shutil.copytree(exp_tb_dir[0], tensorboard_dir / each_exp.name, dirs_exist_ok=True) + + if not all_exp_result: + print("There aren't any experiment results.") + return + + # print and save the experiment aggregation + headers = sorted(meta_header) + sorted(metric_header) + write_csv(exp_dir / "all_exp_result.csv", headers, all_exp_result) + + for key in ["repeat", "std_iter_time", "std_data_time"]: # average of std is distorted value + if key in headers: + headers.remove(key) + + rows = [] + for val in exp_result_aggregation.values(): + exp_result = val["result"] / val["num"] + exp_result.std_iter_time = None + exp_result.std_data_time = None + each_exp_result = copy(val["meta"]) + + each_exp_result.update(exp_result.get_formatted_result()) + rows.append(each_exp_result) + write_csv(exp_dir / "exp_summary.csv", headers, rows) + + print_table(headers, rows, "Experiment Summary") + + +@dataclass +class Command: + """Command dataclass.""" + + command: List[str] + variable: Dict[str, str] = field(default_factory=dict) + + +class ExpRecipeParser: + """Class to parse an experiment recipe. + + Args: + recipe_file (Union[str, Path]): Recipe file to parse. 
+ """ + + def __init__(self, recipe_file: Union[str, Path]): + if not os.path.exists(recipe_file): + raise RuntimeError(f"{recipe_file} doesn't exist.") + + with open(recipe_file, "r") as f: + self._exp_recipe: Dict = yaml.safe_load(f) + constants = self._exp_recipe.get("constants", {}) + self._cvt_number_to_str(constants) + self._constants: Dict[str, str] = constants + self._variables: Optional[Dict[str, str]] = None + self._commands: Optional[List[Command]] = None + self.output_path: Path = Path( + self._exp_recipe.get("output_path", f"experiment_{datetime.now().strftime('%Y%m%d_%H%M%S')}") + ) + self.repeat: int = self._exp_recipe.get("repeat", 1) + self._replace_pat = re.compile(r"\$\{(\w+)\}") + + @property + def constants(self) -> Dict[str, str]: + """Constants in recipe file.""" + return self._constants + + @property + def variables(self) -> Dict[str, Union[str, List[str]]]: + """Variables in recipe file. If it contains constants, they're replaced by real value.""" + if self._variables is None: + variables = self._exp_recipe.get("variables", {}) + self._cvt_number_to_str(variables) + self._variables = self._replace_var_in_target(self.constants, variables) + return self._variables + + @property + def commands(self) -> List[Command]: + """List of commands from experiment recipe. + + It counts all available cases and makes Command instance per each case. + + Returns: + List[Command]: List of Command instances. + """ + if self._commands is None: + command = self._exp_recipe.get("command", []) + if isinstance(command, str): + command = [command] + command = self._replace_var_in_target(self.constants, command) + var_combinations = self._product_all_cases(self.variables, command) + if not var_combinations: + self._commands = [Command(command=command)] + + command_arr = [] + for var_combination in var_combinations: + command_arr.append(Command(self._replace_var_in_target(var_combination, command), var_combination)) + self._commands = command_arr + return self._commands + + def _product_all_cases( + self, + variable: Dict[str, Union[str, List[str]]], + target_str: Union[str, List[str]], + ) -> List[Dict[str, str]]: + if isinstance(target_str, str): + target_str = [target_str] + found_keys = [] + for each_str in target_str: + found_keys.extend([x for x in set(self._replace_pat.findall(each_str)) if x in variable]) + if not found_keys: + return [] + + values_of_found_key = [] + for key in found_keys: + if isinstance(variable[key], list): + values_of_found_key.append(variable[key]) + else: + values_of_found_key.append([variable[key]]) + + all_cases = [] + for value_of_key_found in product(*values_of_found_key): + all_cases.append(dict(zip(found_keys, value_of_key_found))) + + return all_cases + + def _replace_var_in_target( + self, + variable: Dict[str, str], + target: Union[str, List, Dict], + ) -> Union[str, List, Dict]: + if isinstance(target, str): + for key, val in variable.items(): + target = target.replace(f"${{{key}}}", val) + elif isinstance(target, list): + for i in range(len(target)): + target[i] = self._replace_var_in_target(variable, target[i]) + elif isinstance(target, dict): + for key in target.keys(): + target[key] = self._replace_var_in_target(variable, target[key]) + else: + raise TypeError(f"{type(target)} isn't supported type. 
target should be a str, list, or dict type.")
+
+        return target
+
+    @staticmethod
+    def _cvt_number_to_str(target: Dict):
+        """Convert int or float values in a dict to strings."""
+        for key, val in target.items():
+            if isinstance(val, (int, float)):
+                target[key] = str(val)
+            elif isinstance(val, list):
+                for i in range(len(val)):
+                    if isinstance(val[i], (int, float)):
+                        val[i] = str(val[i])
+
+
+@dataclass
+class CommandFailInfo:
+    """Dataclass to store command failure information."""
+
+    exception: Exception
+    variable: Dict[str, str]
+    command: str
+
+    def get_formatted_result(self) -> Dict:
+        """Return a dictionary-format result."""
+        result = dataclasses.asdict(self)
+        result["exception"] = str(result["exception"])
+        return result
+
+
+def log_fail_cases(fail_cases: List[CommandFailInfo], output_path: Path):
+    """Print fail cases and save them to a file.
+
+    Args:
+        fail_cases (List[CommandFailInfo]): Fail cases.
+        output_path (Path): Where fail cases are saved.
+    """
+    console = Console()
+    console.rule("[bold red]List of failed cases")
+    for each_fail_case in fail_cases:
+        console.print(f"Case : {each_fail_case.variable}", crop=False)
+        console.print(f"command : {each_fail_case.command}", crop=False)
+        console.print("Error log:", str(each_fail_case.exception), crop=False)
+        console.print()
+    console.rule()
+
+    with (output_path / "failed_cases.yaml").open("w") as f:
+        yaml.safe_dump([fail_case.get_formatted_result() for fail_case in fail_cases], f)
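+
+
+# NOTE: For illustration, a minimal sketch of the recipe file this module
+# consumes. Recognized top-level keys follow ExpRecipeParser above:
+# output_path, constants, variables, command and repeat; ${...} placeholders
+# are expanded into every combination of the listed variable values. The
+# template names and dataset path below are hypothetical examples.
+#
+#   output_path: otx_xpu_experiment
+#   constants:
+#     dataset_path: /path/to/dataset
+#   variables:
+#     model: [MobileNetV2-ATSS, ResNeXt101-ATSS]
+#   repeat: 2
+#   command:
+#     - otx train ${model} --train-data-roots ${dataset_path}
+#     - otx eval
+#     - otx export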
+ """ + + OUTPUT_FILE_NAME = {"export": "openvino.bin", "optimize": "weights.pth"} + + def __init__(self, command_ins: Command, repeat_idx: int): + self._command_ins = command_ins + self._repeat_idx = repeat_idx + self._command_var = copy(command_ins.variable) + self._workspace = Path("_".join(self._command_var.values()).replace("/", "_") + f"_repeat_{repeat_idx}") + self._command_var["repeat"] = str(repeat_idx) + self._fail_logs: List[CommandFailInfo] = [] + self._previous_cmd_entry: Optional[str] = None + + @property + def fail_logs(self) -> List[CommandFailInfo]: + """Information of all failed cases.""" + return self._fail_logs + + def run_command_list(self): + """Run all commands and organize experiment results.""" + for command in self._command_ins.command: + command = command.split() + if not self._prepare_run_command(command): + print(f"otx {command[1]} is skipped.") + continue + + self._run_otx_command(command) + + self._previous_cmd_entry = command[1] + + organize_exp_result(self._workspace, self._command_var) + + def _prepare_run_command(self, command: List[str]) -> bool: + self.set_arguments_to_cmd(command, "--workspace", str(self._workspace)) + cmd_entry = command[1] + if cmd_entry == "train": + self.set_arguments_to_cmd(command, "--seed", str(self._repeat_idx)) + elif cmd_entry == "eval": + if self._previous_cmd_entry in self.OUTPUT_FILE_NAME: + file_path = self._find_model_path(self._previous_cmd_entry) + if file_path is None: + return False + self.set_arguments_to_cmd(command, "--load-weights", str(file_path)) + output_path = str(file_path.parents[1]) + else: + output_path = str(self._workspace / "outputs" / "latest_trained_model") + self.set_arguments_to_cmd(command, "--output", output_path) + + return True + + def _run_otx_command(self, command: List[str]): + sys.argv = copy(command) + try: + otx_cli() + except Exception as e: + self._fail_logs.append(CommandFailInfo(variable=self._command_var, exception=e, command=" ".join(command))) + + def _find_model_path(self, cmd_entry: str): + output_dir = find_latest_file(self._workspace / "outputs", f"*{cmd_entry}") + if output_dir is None: + print(f"There is no {cmd_entry} output directory.") + return None + file_path = list(output_dir.rglob(self.OUTPUT_FILE_NAME[cmd_entry])) + if not file_path: + print(f"{self.OUTPUT_FILE_NAME[cmd_entry]} can't be found.") + return None + return file_path[0] + + @staticmethod + def set_arguments_to_cmd(command: List[str], key: str, value: Optional[str] = None, before_params: bool = True): + """Add arguments at proper position in command. + + Args: + command (List[str]): list includng a otx command entry and arguments. + key (str): arguement key. + value (str or None): argument value. + before_params (bool): whether argument should be after `param` or not. + """ + if key in command: + if value is not None: + command[command.index(key) + 1] = value + return + + if before_params and "params" in command: + index = command.index("params") + else: + index = len(command) + + if value is not None: + command.insert(index, value) + command.insert(index, key) + + +def run_experiment_recipe(recipe_file: Union[str, Path]): + """Run experiments based on the recipe. + + Args: + recipe_file (Union[str, Path]): Recipe file to run. 
+ """ + exp_recipe = ExpRecipeParser(recipe_file) + output_path = exp_recipe.output_path + output_path.mkdir(exist_ok=True) + current_dir = os.getcwd() + os.chdir(output_path) + + fail_cases: List[CommandFailInfo] = [] + for command_ins in exp_recipe.commands: + for repeat_idx in range(exp_recipe.repeat): + otx_cmd_runner = OtxCommandRunner(command_ins, repeat_idx) + otx_cmd_runner.run_command_list() + fail_cases.extend(otx_cmd_runner.fail_logs) + + os.chdir(current_dir) + + if fail_cases: + log_fail_cases(fail_cases, output_path) + + aggregate_all_exp_result(output_path) + + +def main(): + """Main function to decide which function to execute.""" + parser = get_parser() + args = parser.parse_args() + + if args.file is not None and args.parse is not None: + print("Please give either --file or --parse argument.") + elif args.file is not None: + run_experiment_recipe(args.file) + elif args.parse is not None: + organize_exp_result(args.parse) + else: + parser.print_help() + + +if __name__ == "__main__": + main() From 03e87f533c257cbe6afded7773cc0a9beecb48f6 Mon Sep 17 00:00:00 2001 From: "Kim, Sungchul" Date: Wed, 29 Nov 2023 09:25:02 +0900 Subject: [PATCH 09/39] Fix mixed & lower precision training (#2668) * remove dtype argument in torch.xpu.optimize * Add `custom_auto_fp16` to use xpu autocast * Update `forward`s to use `custom_auto_fp16` * precommit * Disable FP16 training * Add `custom_force_fp32` * Removed what force casting tensors to bf16 * Add `XPUOptimizerHook` and `XPUGradScaler` * precommit * Enable lower precision training * Remove dtype check for lower precision * Add `bf16_training` in recipe * fix * Remove unused module * Change `XPUOptimizerHook` to `BFp16XPUOptimizerHook` * Fix for common devices which don't use bf16 * precommit * Enable to use `auto_fp16` as it is * Add try-except avoiding mmcv import error * Fix error type Co-authored-by: Eunwoo Shin --------- Co-authored-by: Shin, Eunwoo --- .../adapters/mmcls/apis/train.py | 9 +- .../classification/configs/deit_tiny/model.py | 2 +- .../configs/efficientnet_b0_cls_incr/model.py | 2 +- .../efficientnet_v2_s_cls_incr/model.py | 2 +- .../mobilenet_v3_large_1_cls_incr/model.py | 2 +- .../common/adapters/mmcv/configurer.py | 15 +- .../common/adapters/mmcv/hooks/__init__.py | 7 + .../adapters/mmcv/hooks/xpu_optimizer_hook.py | 38 +++++ .../common/adapters/mmcv/utils/fp16_utils.py | 133 ++++++++++++++++++ .../common/adapters/torch/amp/__init__.py | 9 ++ .../adapters/torch/amp/xpu_grad_scaler.py | 114 +++++++++++++++ src/otx/algorithms/common/utils/__init__.py | 12 ++ .../detection/adapters/mmdet/apis/train.py | 9 +- .../models/detectors/custom_yolox_detector.py | 3 - .../detection/cspdarknet_yolox_l/model.py | 2 +- .../detection/cspdarknet_yolox_s/model.py | 2 +- .../detection/cspdarknet_yolox_tiny/model.py | 2 +- .../detection/cspdarknet_yolox_x/model.py | 2 +- .../detection/mobilenetv2_atss/model.py | 2 +- .../detection/mobilenetv2_ssd/model.py | 2 +- .../detection/resnext101_atss/model.py | 2 +- .../efficientnetb2b_maskrcnn/model.py | 2 +- .../maskrcnn_swin_t/model.py | 2 +- .../segmentation/adapters/mmseg/apis/train.py | 10 +- 24 files changed, 355 insertions(+), 30 deletions(-) create mode 100644 src/otx/algorithms/common/adapters/mmcv/hooks/xpu_optimizer_hook.py create mode 100644 src/otx/algorithms/common/adapters/mmcv/utils/fp16_utils.py create mode 100644 src/otx/algorithms/common/adapters/torch/amp/__init__.py create mode 100644 src/otx/algorithms/common/adapters/torch/amp/xpu_grad_scaler.py diff --git 
a/src/otx/algorithms/classification/adapters/mmcls/apis/train.py b/src/otx/algorithms/classification/adapters/mmcls/apis/train.py index de22e38087f..5527e19a847 100644 --- a/src/otx/algorithms/classification/adapters/mmcls/apis/train.py +++ b/src/otx/algorithms/classification/adapters/mmcls/apis/train.py @@ -93,13 +93,14 @@ def train_model(model, dataset, cfg, distributed=False, validate=False, timestam optimizer = build_optimizer(model, cfg.optimizer) if cfg.device == "xpu": - if fp16_cfg is not None: - dtype = torch.bfloat16 - else: - dtype = torch.float32 + dtype = torch.bfloat16 if cfg.optimizer_config.get("bf16_training", False) else torch.float32 model.train() model, optimizer = torch.xpu.optimize(model, optimizer=optimizer, dtype=dtype) + if "bf16_training" in cfg.optimizer_config: + # Remove unused parameters in runner + cfg.optimizer_config.pop("bf16_training") + if cfg.get("runner") is None: cfg.runner = {"type": "EpochBasedRunner", "max_epochs": cfg.total_epochs} warnings.warn( diff --git a/src/otx/algorithms/classification/configs/deit_tiny/model.py b/src/otx/algorithms/classification/configs/deit_tiny/model.py index f69c8cdbbb6..083166b9e49 100644 --- a/src/otx/algorithms/classification/configs/deit_tiny/model.py +++ b/src/otx/algorithms/classification/configs/deit_tiny/model.py @@ -14,7 +14,7 @@ backbone=dict(arch="deit-tiny", init_cfg=dict(type="Pretrained", checkpoint=ckpt_url, prefix="backbone")), ) -fp16 = dict(loss_scale=512.0) +fp16 = dict(loss_scale=512.0, bf16_training=False) optimizer = dict(_delete_=True, type="AdamW", lr=0.01, weight_decay=0.05) optimizer_config = dict(_delete_=True) diff --git a/src/otx/algorithms/classification/configs/efficientnet_b0_cls_incr/model.py b/src/otx/algorithms/classification/configs/efficientnet_b0_cls_incr/model.py index 4055b7ff90b..c397f704649 100644 --- a/src/otx/algorithms/classification/configs/efficientnet_b0_cls_incr/model.py +++ b/src/otx/algorithms/classification/configs/efficientnet_b0_cls_incr/model.py @@ -22,4 +22,4 @@ ), ) -fp16 = dict(loss_scale=512.0) +fp16 = dict(loss_scale=512.0, bf16_training=False) diff --git a/src/otx/algorithms/classification/configs/efficientnet_v2_s_cls_incr/model.py b/src/otx/algorithms/classification/configs/efficientnet_v2_s_cls_incr/model.py index b0a278536cd..ea5ef1ff773 100644 --- a/src/otx/algorithms/classification/configs/efficientnet_v2_s_cls_incr/model.py +++ b/src/otx/algorithms/classification/configs/efficientnet_v2_s_cls_incr/model.py @@ -16,4 +16,4 @@ head=dict(type="CustomLinearClsHead", loss=dict(type="CrossEntropyLoss", loss_weight=1.0)), ) -fp16 = dict(loss_scale=512.0) +fp16 = dict(loss_scale=512.0, bf16_training=False) diff --git a/src/otx/algorithms/classification/configs/mobilenet_v3_large_1_cls_incr/model.py b/src/otx/algorithms/classification/configs/mobilenet_v3_large_1_cls_incr/model.py index 6441b503ee8..f8cbfe4b01a 100644 --- a/src/otx/algorithms/classification/configs/mobilenet_v3_large_1_cls_incr/model.py +++ b/src/otx/algorithms/classification/configs/mobilenet_v3_large_1_cls_incr/model.py @@ -22,4 +22,4 @@ ), ) -fp16 = dict(loss_scale=512.0) +fp16 = dict(loss_scale=512.0, bf16_training=False) diff --git a/src/otx/algorithms/common/adapters/mmcv/configurer.py b/src/otx/algorithms/common/adapters/mmcv/configurer.py index f013c45c7ef..7342735f43d 100644 --- a/src/otx/algorithms/common/adapters/mmcv/configurer.py +++ b/src/otx/algorithms/common/adapters/mmcv/configurer.py @@ -263,7 +263,20 @@ def configure_fp16(cfg: Config): logger.warning("SAMOptimizerHook is not 
supported on HPU. Changed to OptimizerHook.") opts["type"] = "HPUOptimizerHook" cfg.optimizer_config.update(opts) - elif torch.cuda.is_available() or is_xpu_available(): + elif is_xpu_available(): + opts.update({"distributed": distributed, **fp16_config}) + if optim_type == "SAMOptimizerHook": + logger.warning("SAMOptimizerHook is not supported on XPU yet, changed to OptimizerHook.") + opts["type"] = "OptimizerHook" + if optim_type == "OptimizerHook": + opts["type"] = "BFp16XPUOptimizerHook" + else: + # does not support optimizerhook type + # let mm library handle it + cfg.fp16 = fp16_config + opts = dict() + cfg.optimizer_config.update(opts) + elif torch.cuda.is_available(): opts.update({"distributed": distributed, **fp16_config}) if optim_type == "SAMOptimizerHook": opts["type"] = "Fp16SAMOptimizerHook" diff --git a/src/otx/algorithms/common/adapters/mmcv/hooks/__init__.py b/src/otx/algorithms/common/adapters/mmcv/hooks/__init__.py index a7c41d80fee..4aed0db6e6d 100644 --- a/src/otx/algorithms/common/adapters/mmcv/hooks/__init__.py +++ b/src/otx/algorithms/common/adapters/mmcv/hooks/__init__.py @@ -98,3 +98,10 @@ __all__ += ["HPUOptimizerHook"] except: # noqa: E722 pass + +try: + from .xpu_optimizer_hook import BFp16XPUOptimizerHook + + __all__ += ["BFp16XPUOptimizerHook"] +except: # noqa: E722 + pass diff --git a/src/otx/algorithms/common/adapters/mmcv/hooks/xpu_optimizer_hook.py b/src/otx/algorithms/common/adapters/mmcv/hooks/xpu_optimizer_hook.py new file mode 100644 index 00000000000..2f3bd5d944a --- /dev/null +++ b/src/otx/algorithms/common/adapters/mmcv/hooks/xpu_optimizer_hook.py @@ -0,0 +1,38 @@ +"""Custom Optimizer Hook for mixed precision training on XPU.""" +# Copyright (C) 2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + + +from typing import Optional, Union + +from mmcv.runner.hooks import HOOKS, Fp16OptimizerHook + +from otx.algorithms.common.adapters.torch.amp import XPUGradScaler + + +@HOOKS.register_module() +class BFp16XPUOptimizerHook(Fp16OptimizerHook): + """Custom Optimizer Hook for mixed & lower precision training on XPU.""" + + def __init__( + self, + grad_clip: Optional[dict] = None, + coalesce: bool = True, + bucket_size_mb: int = -1, + loss_scale: Union[float, str, dict] = 512.0, + distributed: bool = True, + ) -> None: + self.grad_clip = grad_clip + self.coalesce = coalesce + self.bucket_size_mb = bucket_size_mb + self.distributed = distributed + self._scale_update_param = None + if loss_scale == "dynamic": + self.loss_scaler = XPUGradScaler() + elif isinstance(loss_scale, float): + self._scale_update_param = loss_scale + self.loss_scaler = XPUGradScaler(init_scale=loss_scale) + elif isinstance(loss_scale, dict): + self.loss_scaler = XPUGradScaler(**loss_scale) + else: + raise ValueError("loss_scale must be of type float, dict, or " f'"dynamic", got {loss_scale}') diff --git a/src/otx/algorithms/common/adapters/mmcv/utils/fp16_utils.py b/src/otx/algorithms/common/adapters/mmcv/utils/fp16_utils.py new file mode 100644 index 00000000000..b5961575db7 --- /dev/null +++ b/src/otx/algorithms/common/adapters/mmcv/utils/fp16_utils.py @@ -0,0 +1,133 @@ +"""Custom fp16 related modules to enable XPU modules.""" +# Copyright (C) 2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import functools +from inspect import getfullargspec +from typing import Callable, Iterable, Optional + +import torch +from mmcv.runner.fp16_utils import cast_tensor_type +from mmcv.utils import IS_NPU_AVAILABLE, TORCH_VERSION, digit_version +from torch import nn + 
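+# NOTE: `autocast` is imported from the AMP namespace matching the available
+# device below; it backs the `custom_auto_fp16` / `custom_force_fp32`
+# decorators defined in this module. Usage sketch (illustrative only;
+# `MyHead` and `x` are hypothetical names):
+#
+#     class MyHead(nn.Module):
+#         fp16_enabled = True  # opt-in flag checked by the decorators
+#
+#         @custom_auto_fp16(apply_to=("x",))
+#         def forward(self, x):
+#             # `x` arrives cast to bf16 on XPU (fp16 elsewhere) under autocast
+#             ...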
+from otx.algorithms.common.utils import is_xpu_available + +try: + if is_xpu_available(): + from torch.xpu.amp import autocast + elif IS_NPU_AVAILABLE: + from torch.npu.amp import autocast + else: + from torch.cuda.amp import autocast +except ImportError: + pass + + +def custom_auto_fp16( + apply_to: Optional[Iterable] = None, + out_fp32: bool = False, + supported_types: tuple = (nn.Module,), +) -> Callable: + """Custom decorator to enable fp16 training automatically on XPU as well.""" + + def auto_fp16_wrapper(old_func: Callable) -> Callable: + @functools.wraps(old_func) + def new_func(*args, **kwargs) -> Callable: + # check if the module has set the attribute `fp16_enabled`, if not, + # just fallback to the original method. + if not isinstance(args[0], supported_types): + raise TypeError( + "@auto_fp16 can only be used to decorate the " f"method of those classes {supported_types}" + ) + if not (hasattr(args[0], "fp16_enabled") and args[0].fp16_enabled): + return old_func(*args, **kwargs) + + target_dtype = torch.bfloat16 if is_xpu_available() else torch.half + # get the arg spec of the decorated method + args_info = getfullargspec(old_func) + # get the argument names to be casted + args_to_cast = args_info.args if apply_to is None else apply_to + # convert the args that need to be processed + new_args = [] + # NOTE: default args are not taken into consideration + if args: + arg_names = args_info.args[: len(args)] + for i, arg_name in enumerate(arg_names): + if arg_name in args_to_cast: + new_args.append(cast_tensor_type(args[i], torch.float, target_dtype)) + else: + new_args.append(args[i]) + # convert the kwargs that need to be processed + new_kwargs = {} + if kwargs: + for arg_name, arg_value in kwargs.items(): + if arg_name in args_to_cast: + new_kwargs[arg_name] = cast_tensor_type(arg_value, torch.float, target_dtype) + else: + new_kwargs[arg_name] = arg_value + # apply converted arguments to the decorated method + if TORCH_VERSION != "parrots" and digit_version(TORCH_VERSION) >= digit_version("1.6.0"): + with autocast(enabled=True): + output = old_func(*new_args, **new_kwargs) + else: + output = old_func(*new_args, **new_kwargs) + # cast the results back to fp32 if necessary + if out_fp32: + output = cast_tensor_type(output, target_dtype, torch.float) + return output + + return new_func + + return auto_fp16_wrapper + + +def custom_force_fp32(apply_to: Optional[Iterable] = None, out_fp16: bool = False) -> Callable: + """Custom decorator to convert input arguments to fp32 in force on XPU as well.""" + + def force_fp32_wrapper(old_func): + @functools.wraps(old_func) + def new_func(*args, **kwargs) -> Callable: + # check if the module has set the attribute `fp16_enabled`, if not, + # just fallback to the original method. 
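+            # force_fp32 mirrors custom_auto_fp16 in reverse: listed args are
+            # cast from the low-precision dtype back to fp32 and the wrapped
+            # method runs with autocast disabled.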
+ if not isinstance(args[0], torch.nn.Module): + raise TypeError("@force_fp32 can only be used to decorate the " "method of nn.Module") + if not (hasattr(args[0], "fp16_enabled") and args[0].fp16_enabled): + return old_func(*args, **kwargs) + + source_dtype = torch.bfloat16 if is_xpu_available() else torch.half + # get the arg spec of the decorated method + args_info = getfullargspec(old_func) + # get the argument names to be casted + args_to_cast = args_info.args if apply_to is None else apply_to + # convert the args that need to be processed + new_args = [] + if args: + arg_names = args_info.args[: len(args)] + for i, arg_name in enumerate(arg_names): + if arg_name in args_to_cast: + new_args.append(cast_tensor_type(args[i], source_dtype, torch.float)) + else: + new_args.append(args[i]) + # convert the kwargs that need to be processed + new_kwargs = dict() + if kwargs: + for arg_name, arg_value in kwargs.items(): + if arg_name in args_to_cast: + new_kwargs[arg_name] = cast_tensor_type(arg_value, source_dtype, torch.float) + else: + new_kwargs[arg_name] = arg_value + # apply converted arguments to the decorated method + if TORCH_VERSION != "parrots" and digit_version(TORCH_VERSION) >= digit_version("1.6.0"): + with autocast(enabled=False): + output = old_func(*new_args, **new_kwargs) + else: + output = old_func(*new_args, **new_kwargs) + # cast the results back to fp32 if necessary + if out_fp16: + output = cast_tensor_type(output, torch.float, source_dtype) + return output + + return new_func + + return force_fp32_wrapper diff --git a/src/otx/algorithms/common/adapters/torch/amp/__init__.py b/src/otx/algorithms/common/adapters/torch/amp/__init__.py new file mode 100644 index 00000000000..1b0ce69ed34 --- /dev/null +++ b/src/otx/algorithms/common/adapters/torch/amp/__init__.py @@ -0,0 +1,9 @@ +"""Custom AMP (Automatic Mixed Precision package) in OTX.""" +# Copyright (C) 2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 +# + +try: + from .xpu_grad_scaler import XPUGradScaler # noqa: F401 +except: # noqa: E722 + pass diff --git a/src/otx/algorithms/common/adapters/torch/amp/xpu_grad_scaler.py b/src/otx/algorithms/common/adapters/torch/amp/xpu_grad_scaler.py new file mode 100644 index 00000000000..f3994050cae --- /dev/null +++ b/src/otx/algorithms/common/adapters/torch/amp/xpu_grad_scaler.py @@ -0,0 +1,114 @@ +"""Custom GradScaler to scale loss.""" +# Copyright (C) 2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 +# + +from collections import abc, defaultdict +from typing import List + +import torch +from intel_extension_for_pytorch.cpu.autocast._grad_scaler import _MultiDeviceReplicator +from torch.cuda.amp.grad_scaler import GradScaler, _refresh_per_optimizer_state + + +class XPUGradScaler(GradScaler): + """GradScaler for XPU.""" + + def __init__(self, init_scale=2.0**16, growth_factor=2.0, backoff_factor=0.5, growth_interval=2000, enabled=True): + self._enabled = enabled + + if self._enabled: + assert growth_factor > 1.0, "The growth factor must be > 1.0." + assert backoff_factor < 1.0, "The backoff factor must be < 1.0." 
+ + self._init_scale = init_scale + # self._scale will be lazily initialized during the first call to scale() + self._scale = None + self._growth_factor = growth_factor + self._backoff_factor = backoff_factor + self._growth_interval = growth_interval + self._init_growth_tracker = 0 + # self._growth_tracker will be lazily initialized during the first call to scale() + self._growth_tracker = None + self._per_optimizer_states = defaultdict(_refresh_per_optimizer_state) + + def scale(self, outputs): + """Multiplies ('scales') a tensor or list of tensors by the scale factor. + + Returns scaled outputs. If this instance of :class:`GradScaler` is not enabled, outputs are returned + unmodified. + + Args: + outputs (Tensor or iterable of Tensors): Outputs to scale. + """ + if not self._enabled: + return outputs + + # Short-circuit for the common case. + if isinstance(outputs, torch.Tensor): + assert outputs.device.type == "xpu" + if self._scale is None: + self._lazy_init_scale_growth_tracker(outputs.device) + assert self._scale is not None + return outputs * self._scale.to(device=outputs.device, non_blocking=True) + + # Invoke the more complex machinery only if we're treating multiple outputs. + stash: List[_MultiDeviceReplicator] = [] # holds a reference that can be overwritten by apply_scale + + def apply_scale(val): + if isinstance(val, torch.Tensor): + assert val.device.type == "xpu" + if len(stash) == 0: + if self._scale is None: + self._lazy_init_scale_growth_tracker(val.device) + assert self._scale is not None + stash.append(_MultiDeviceReplicator(self._scale)) + return val * stash[0].get(val.device) + elif isinstance(val, abc.Iterable): + iterable = map(apply_scale, val) + if isinstance(val, (list, tuple)): + return type(val)(iterable) + else: + return iterable + else: + raise ValueError("outputs must be a Tensor or an iterable of Tensors") + + return apply_scale(outputs) + + def _unscale_grads_(self, optimizer, inv_scale, found_inf, allow_bf16=False): + per_device_inv_scale = _MultiDeviceReplicator(inv_scale) + per_device_found_inf = _MultiDeviceReplicator(found_inf) + + # To set up _amp_foreach_non_finite_check_and_unscale_, split grads by device and dtype. + # There could be hundreds of grads, so we'd like to iterate through them just once. + # However, we don't know their devices or dtypes in advance. + + # https://stackoverflow.com/questions/5029934/defaultdict-of-defaultdict + # Google says mypy struggles with defaultdicts type annotations. + per_device_and_dtype_grads = defaultdict(lambda: defaultdict(list)) # type: ignore[var-annotated] + with torch.no_grad(): + for group in optimizer.param_groups: + for param in group["params"]: + if param.grad is None: + continue + if param.grad.is_sparse: + # is_coalesced() == False means the sparse grad has values with duplicate indices. + # coalesce() deduplicates indices and adds all values that have the same index. + # For scaled bf16 values, there's a good chance coalescing will cause overflow, + # so we should check the coalesced _values(). + if param.grad.dtype is torch.bfloat16: + param.grad = param.grad.coalesce() + to_unscale = param.grad._values() + else: + to_unscale = param.grad + + # TODO: is there a way to split by device and dtype without appending in the inner loop? 
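+                        # Grads are bucketed as {device: {dtype: [grads]}} so the
+                        # foreach non-finite check below runs once per bucket.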
+ per_device_and_dtype_grads[to_unscale.device][to_unscale.dtype].append(to_unscale) + + for device, per_dtype_grads in per_device_and_dtype_grads.items(): + for grads in per_dtype_grads.values(): + torch._amp_foreach_non_finite_check_and_unscale_( + grads, per_device_found_inf.get(device), per_device_inv_scale.get(device) + ) + + return per_device_found_inf._per_device_tensors diff --git a/src/otx/algorithms/common/utils/__init__.py b/src/otx/algorithms/common/utils/__init__.py index 80372c59b4b..fd5bccd3657 100644 --- a/src/otx/algorithms/common/utils/__init__.py +++ b/src/otx/algorithms/common/utils/__init__.py @@ -63,3 +63,15 @@ if is_hpu_available(): os.environ["PT_HPU_LAZY_MODE"] = "1" import habana_frameworks.torch.gpu_migration # noqa: F401 + + +if is_xpu_available(): + try: + import mmcv + + from otx.algorithms.common.adapters.mmcv.utils.fp16_utils import custom_auto_fp16, custom_force_fp32 + + mmcv.runner.auto_fp16 = custom_auto_fp16 + mmcv.runner.force_fp32 = custom_force_fp32 + except ImportError: + pass diff --git a/src/otx/algorithms/detection/adapters/mmdet/apis/train.py b/src/otx/algorithms/detection/adapters/mmdet/apis/train.py index 4565631880d..cd2638ec1ec 100644 --- a/src/otx/algorithms/detection/adapters/mmdet/apis/train.py +++ b/src/otx/algorithms/detection/adapters/mmdet/apis/train.py @@ -142,13 +142,14 @@ def train_detector(model, dataset, cfg, distributed=False, validate=False, times optimizer = build_optimizer(model, cfg.optimizer) if cfg.device == "xpu": - if fp16_cfg is not None: - dtype = torch.bfloat16 - else: - dtype = torch.float32 + dtype = torch.bfloat16 if cfg.optimizer_config.get("bf16_training", False) else torch.float32 model.train() model, optimizer = torch.xpu.optimize(model, optimizer=optimizer, dtype=dtype) + if "bf16_training" in cfg.optimizer_config: + # Remove unused parameters in runner + cfg.optimizer_config.pop("bf16_training") + runner = build_runner( cfg.runner, default_args=dict(model=model, optimizer=optimizer, work_dir=cfg.work_dir, logger=logger, meta=meta) ) diff --git a/src/otx/algorithms/detection/adapters/mmdet/models/detectors/custom_yolox_detector.py b/src/otx/algorithms/detection/adapters/mmdet/models/detectors/custom_yolox_detector.py index b53cf2777db..c8658c489f6 100644 --- a/src/otx/algorithms/detection/adapters/mmdet/models/detectors/custom_yolox_detector.py +++ b/src/otx/algorithms/detection/adapters/mmdet/models/detectors/custom_yolox_detector.py @@ -58,9 +58,6 @@ def forward_train(self, img, img_metas, gt_bboxes, gt_labels, gt_bboxes_ignore=N def extract_feat(self, img): """Directly extract features from the backbone+neck.""" - # workaround for xpu device, since the input converted to fp16 by mmcv - if "xpu" in str(img.device) and img.dtype == torch.float16: - img = img.to(torch.bfloat16) x = self.backbone(img) if self.with_neck: x = self.neck(x) diff --git a/src/otx/algorithms/detection/configs/detection/cspdarknet_yolox_l/model.py b/src/otx/algorithms/detection/configs/detection/cspdarknet_yolox_l/model.py index dd300926d08..2d20a9ede43 100644 --- a/src/otx/algorithms/detection/configs/detection/cspdarknet_yolox_l/model.py +++ b/src/otx/algorithms/detection/configs/detection/cspdarknet_yolox_l/model.py @@ -20,5 +20,5 @@ load_from = "https://download.openmmlab.com/mmdetection/v2.0/yolox/\ yolox_l_8x8_300e_coco/yolox_l_8x8_300e_coco_20211126_140236-d3bd2b23.pth" -fp16 = dict(loss_scale=512.0) +fp16 = dict(loss_scale=512.0, bf16_training=False) ignore = False diff --git 
a/src/otx/algorithms/detection/configs/detection/cspdarknet_yolox_s/model.py b/src/otx/algorithms/detection/configs/detection/cspdarknet_yolox_s/model.py index 1a0356f69c5..58114e4f173 100644 --- a/src/otx/algorithms/detection/configs/detection/cspdarknet_yolox_s/model.py +++ b/src/otx/algorithms/detection/configs/detection/cspdarknet_yolox_s/model.py @@ -20,5 +20,5 @@ load_from = "https://download.openmmlab.com/mmdetection/v2.0/yolox/\ yolox_s_8x8_300e_coco/yolox_s_8x8_300e_coco_20211121_095711-4592a793.pth" -fp16 = dict(loss_scale=512.0) +fp16 = dict(loss_scale=512.0, bf16_training=False) ignore = False diff --git a/src/otx/algorithms/detection/configs/detection/cspdarknet_yolox_tiny/model.py b/src/otx/algorithms/detection/configs/detection/cspdarknet_yolox_tiny/model.py index d8c806e3c57..6d88d486b7e 100644 --- a/src/otx/algorithms/detection/configs/detection/cspdarknet_yolox_tiny/model.py +++ b/src/otx/algorithms/detection/configs/detection/cspdarknet_yolox_tiny/model.py @@ -31,5 +31,5 @@ load_from = "https://storage.openvinotoolkit.org/repositories/openvino_training_extensions\ /models/object_detection/v2/yolox_tiny_8x8.pth" -fp16 = dict(loss_scale=512.0) +fp16 = dict(loss_scale=512.0, bf16_training=False) ignore = False diff --git a/src/otx/algorithms/detection/configs/detection/cspdarknet_yolox_x/model.py b/src/otx/algorithms/detection/configs/detection/cspdarknet_yolox_x/model.py index 857021810d1..e8c197507bc 100644 --- a/src/otx/algorithms/detection/configs/detection/cspdarknet_yolox_x/model.py +++ b/src/otx/algorithms/detection/configs/detection/cspdarknet_yolox_x/model.py @@ -20,5 +20,5 @@ load_from = "https://download.openmmlab.com/mmdetection/v2.0/yolox\ /yolox_x_8x8_300e_coco/yolox_x_8x8_300e_coco_20211126_140254-1ef88d67.pth" -fp16 = dict(loss_scale=512.0) +fp16 = dict(loss_scale=512.0, bf16_training=False) ignore = False diff --git a/src/otx/algorithms/detection/configs/detection/mobilenetv2_atss/model.py b/src/otx/algorithms/detection/configs/detection/mobilenetv2_atss/model.py index 2b0e1d9d71c..fccdd79317b 100644 --- a/src/otx/algorithms/detection/configs/detection/mobilenetv2_atss/model.py +++ b/src/otx/algorithms/detection/configs/detection/mobilenetv2_atss/model.py @@ -88,4 +88,4 @@ load_from = "https://storage.openvinotoolkit.org/repositories/openvino_training_extensions\ /models/object_detection/v2/mobilenet_v2-atss.pth" -fp16 = dict(loss_scale=512.0) +fp16 = dict(loss_scale=512.0, bf16_training=False) diff --git a/src/otx/algorithms/detection/configs/detection/mobilenetv2_ssd/model.py b/src/otx/algorithms/detection/configs/detection/mobilenetv2_ssd/model.py index 45847b0b80c..4c88b23d60a 100644 --- a/src/otx/algorithms/detection/configs/detection/mobilenetv2_ssd/model.py +++ b/src/otx/algorithms/detection/configs/detection/mobilenetv2_ssd/model.py @@ -95,5 +95,5 @@ load_from = "https://storage.openvinotoolkit.org/repositories/openvino_training_extensions\ /models/object_detection/v2/mobilenet_v2-2s_ssd-992x736.pth" -fp16 = dict(loss_scale=512.0) +fp16 = dict(loss_scale=512.0, bf16_training=False) ignore = False diff --git a/src/otx/algorithms/detection/configs/detection/resnext101_atss/model.py b/src/otx/algorithms/detection/configs/detection/resnext101_atss/model.py index 579e382adb7..53f592fb229 100644 --- a/src/otx/algorithms/detection/configs/detection/resnext101_atss/model.py +++ b/src/otx/algorithms/detection/configs/detection/resnext101_atss/model.py @@ -80,4 +80,4 @@ load_from = 
"https://storage.openvinotoolkit.org/repositories/openvino_training_extensions/\ models/object_detection/v2/resnext101_atss_070623.pth" -fp16 = dict(loss_scale=512.0) +fp16 = dict(loss_scale=512.0, bf16_training=False) diff --git a/src/otx/algorithms/detection/configs/instance_segmentation/efficientnetb2b_maskrcnn/model.py b/src/otx/algorithms/detection/configs/instance_segmentation/efficientnetb2b_maskrcnn/model.py index 03cb21733dc..65e751b21dc 100644 --- a/src/otx/algorithms/detection/configs/instance_segmentation/efficientnetb2b_maskrcnn/model.py +++ b/src/otx/algorithms/detection/configs/instance_segmentation/efficientnetb2b_maskrcnn/model.py @@ -127,5 +127,5 @@ v2/efficientnet_b2b-mask_rcnn-576x576.pth" evaluation = dict(interval=1, metric="mAP", save_best="mAP", iou_thr=[0.5]) -fp16 = dict(loss_scale=512.0) +fp16 = dict(loss_scale=512.0, bf16_training=False) ignore = True diff --git a/src/otx/algorithms/detection/configs/instance_segmentation/maskrcnn_swin_t/model.py b/src/otx/algorithms/detection/configs/instance_segmentation/maskrcnn_swin_t/model.py index 203470d2fac..9c41dd65052 100644 --- a/src/otx/algorithms/detection/configs/instance_segmentation/maskrcnn_swin_t/model.py +++ b/src/otx/algorithms/detection/configs/instance_segmentation/maskrcnn_swin_t/model.py @@ -153,7 +153,7 @@ optimizer_config = dict(_delete_=True, grad_clip=None) -fp16 = dict(loss_scale=dict(init_scale=512)) +fp16 = dict(loss_scale=dict(init_scale=512), bf16_training=False) load_from = ( "https://download.openmmlab.com/mmdetection/v2.0/swin/" diff --git a/src/otx/algorithms/segmentation/adapters/mmseg/apis/train.py b/src/otx/algorithms/segmentation/adapters/mmseg/apis/train.py index 6ed2ec50dc8..77aba2c8066 100644 --- a/src/otx/algorithms/segmentation/adapters/mmseg/apis/train.py +++ b/src/otx/algorithms/segmentation/adapters/mmseg/apis/train.py @@ -92,14 +92,14 @@ def train_segmentor(model, dataset, cfg, distributed=False, validate=False, time optimizer = build_optimizer(model, cfg.optimizer) if cfg.device == "xpu": - fp16_cfg = cfg.get("fp16_", None) - if fp16_cfg is not None: - dtype = torch.bfloat16 - else: - dtype = torch.float32 + dtype = torch.bfloat16 if cfg.optimizer_config.get("bf16_training", False) else torch.float32 model.train() model, optimizer = torch.xpu.optimize(model, optimizer=optimizer, dtype=dtype) + if "bf16_training" in cfg.optimizer_config: + # Remove unused parameters in runner + cfg.optimizer_config.pop("bf16_training") + if cfg.get("runner") is None: cfg.runner = {"type": "IterBasedRunner", "max_iters": cfg.total_iters} warnings.warn( From c1beb0549e58f18c3e9f17097d2a02e52bd81231 Mon Sep 17 00:00:00 2001 From: Vladislav Sovrasov Date: Thu, 30 Nov 2023 11:32:25 +0100 Subject: [PATCH 10/39] Add XPU support to anomaly task (#2677) * Update base.txt updated dependency version of datumaro * Update __init__.py update version string * Update requirements.txt * Temporarily skip visual prompting openvino integration test (#2323) * Fix import dm.DatasetSubset (#2324) Signed-off-by: Kim, Vinnam * Fix semantic segmentation soft prediction dtype (#2322) * Fix semantic segmentation soft prediction dtype * relax ref sal vals check --------- Co-authored-by: Songki Choi * Contrain yapf verison lesser than 0.40.0 (#2328) contrain_yapf_version * Fix detection e2e tests (#2327) Fix for detection * Mergeback: Label addtion/deletion 1.2.4 --> 1.4.0 (#2326) * Make black happy * Fix conflicts * Merge-back: add test datasets and edit the test code * Make black happy * Fix mis-merge * Make balck happy * 
Fix typo * Fix typoi --------- Co-authored-by: Songki Choi * Bump datumaro up to 1.4.0rc2 (#2332) bump datumaro up to 1.4.0rc2 * Tiling Doc for releases 1.4.0 (#2333) * Add tiling documentation * Bump otx version to 1.4.0rc2 (#2341) * OTX deploy for visual prompting task (#2311) * Enable `otx deploy` * (WIP) integration test * Docstring * Update args for create_model * Manually set image embedding layout * Enable to use model api for preprocessing - `fit_to_window` doesn't work expectedly, so newly implemented `VisualPromptingOpenvinoAdapter` to use new resize function * Remove skipped test * Updated * Update unit tests on model wrappers * Update * Update configuration * Fix not to patch pretrained path * pylint & update model api version in docstring --------- Co-authored-by: Wonju Lee * Bump albumentations version in anomaly requirements (#2350) increment albumentations version * Update action detection (#2346) * Remove skip mark for PTQ test of action detection * Update action detection documentation * Fix e2e (#2348) * Change classification dataset from dummy to toy * Revert test changes * Change label name for multilabel dataset * Revert e2e test changes * Change ov test cases' threshold * Add parent's label * Update ModelAPI in 1.4 release (#2347) * Upgrade model API * Update otx in exportable code * Fix unit tests * Fix black * Fix detection inference * Fix det tiling * Fix mypy * Fix demo * Fix visualizer in demo * Fix black * Add OTX optimize for visual prompting task (#2318) * Initial commit * Update block * (WIP) otx optimize * Fix * WIP * Update configs & exported outputs * Remove unused modules for torch * Add unit tests * pre-commit * Update CHANGELOG * Update detection docs (#2335) * Update detection docs * Revert template id changes * Fix wrong template id * Update docs/source/guide/explanation/algorithms/object_detection/object_detection.rst Co-authored-by: Eunwoo Shin * Update docs/source/guide/explanation/algorithms/object_detection/object_detection.rst Co-authored-by: Eunwoo Shin --------- Co-authored-by: Eunwoo Shin * Add visual prompting documentation (#2354) * (WIP) write docs * Add visual prompting documentation * Update CHANGELOG --------- Co-authored-by: sungchul.kim * Remove custom modelapi patch in visual prompting (#2359) * Remove custom modelapi patch * Update test * Fix graph metric order and label issues (#2356) * Fix graph metric going backward issue * Add license notice * Fix pre-commit issue * Add rename items & logic for metric --------- Signed-off-by: Songki Choi * Update multi-label document and conversion script (#2358) Update docs, label convert script * Update third party programs (#2365) * Make anomaly task compatible with older albumentations versions (#2363) * fix transforms export in metadata * wrap transform dict * add todo for updating to_dict call * Fixing detection saliency map for one class case (#2368) * fix softmax * fix validity tests * Add e2e test for visual prompting (#2360) * (WIP) otx optimize * pre-commit * (WIP) set e2e * Remove nncf config * Add visual prompting requirement * Add visual prompting in tox * Add visual prompting in setup.py * Fix typo * Delete unused configuration.yaml * Edit test_name * Add to limit activation range * Update from `vp` to `visprompt` * Fix about no returning the first label * pre-commit * (WIP) otx optimize * pre-commit * (WIP) set e2e * Remove nncf config * Add visual prompting requirement * Add visual prompting in tox * Add visual prompting in setup.py * Fix typo * pre-commit * Add actions * 
Update tests/e2e/cli/visual_prompting/test_visual_prompting.py Co-authored-by: Jaeguk Hyun * Skip PTQ e2e test * Change task name * Remove skipped tc --------- Co-authored-by: Jaeguk Hyun * Fix e2e (#2366) * Change e2e reference name * Update openvino eval threshold for multiclass classification * Change comment message * Fix tiling e2e tests --------- Co-authored-by: GalyaZalesskaya * Add Dino head unit tests (#2344) Recover DINO head unit tests * Update for release 1.4.0rc2 (#2370) * update for release 1.4.0rc2 * Add skip mark for unstable unit tests --------- Co-authored-by: jaegukhyun * Fix NNCF training on CPU (#2373) * Align label order between Geti and OTX (#2369) * align label order * align with pre-commit * update CHANGELOG.md * deal with edge case * update type hint * Remove CenterCrop from Classification test pipeline and editing missing docs link (#2375) * Fix missing link for docs and removing centercrop for classification data pipeline * Revert the test threshold * Fix H-label classification (#2377) * Fix h-labelissue * Update unit tests * Make black happy * Fix unittests * Make black happy * Fix update heades information func * Update the logic: consider the loss per batch * Update for release 1.4 (#2380) * updated for 1.4.0rc3 * update changelog & release note * bump datumaro version up --------- Co-authored-by: Songki Choi * Switch to PTQ for sseg (#2374) * Switch to PTQ for sseg * Update log messages * Fix invalid import structures in otx.api (#2383) Update tiler.py * Update for 1.4.0rc4 (#2385) update for release 1.4.0rc4 * [release 1.4.0] XAI: Return saliency maps for Mask RCNN IR async infer (#2395) * Return saliency maps for openvino async infer * add workaround to fix yapf importing error --------- Co-authored-by: eunwoosh * Update for release 1.4.0 (#2399) update version string Co-authored-by: Sungman Cho * Fix broken links in documentation (#2405) * fix docs links to datumaro's docs * fix docs links to otx's docs * bump version to 1.4.1 * Update exportable code README (#2411) * Updated for release 1.4.1 (#2412) updated for release 1.4.1 * Add workaround for the incorrect meta info M-RCNN (used for XAI) (#2437) Add workaround for the incorrect mata info * Add model category attributes to model template (#2439) Add model category attributes to model template * Add model category & status fields in model template * Add is_default_for_task attr to model template * Update model templates with category attrs * Add integration tests for model templates consistency * Fix license & doc string * Fix typo * Refactor test cases * Refactor common tests by generator --------- Signed-off-by: Songki Choi * Update for 1.4.2rc1 (#2441) update for release 1.4.2rc1 * Fix label list order for h-label classification (#2440) * Fix label list for h-label cls * Fix unit tests * Modified fq numbers for lite HRNET (#2445) modified fq numbers for lite HRNET * Update PTQ ignored scope for hrnet 18 mod2 (#2449) Update ptq ignored scope for hrnet 18 mod2 * Fix OpenVINO inference for legacy models (#2450) * bug fix for legacy openvino models * Add tests * Specific exceptions --------- * Update for 1.4.2rc2 (#2455) update for release 1.4.2rc2 * Prevent zero-sized saliency map in tiling if tile size is too big (#2452) * Prevent zero-sized saliency map in tiling if tile size is too big * Prevent zero-sized saliency in tiling (PyTorch) * Add unit tests for Tiler merge features methods --------- Co-authored-by: Galina * Update pot fq reference number (#2456) update pot fq reference number to 15 * Bump 
datumaro version to 1.5.0rc0 (#2470) bump datumaro version to 1.5.0rc0 * Set tox version constraint (#2472) set tox version constraint - https://github.com/tox-dev/tox/issues/3110 * Bug fix for albumentations (#2467) * bug fix for legacy openvino models * Address albumentation issue --------- Co-authored-by: Ashwin Vaidya * update for release 1.4.2rc3 * Add a dummy hierarchical config required by MAPI (#2483) * bump version to 1.4.2rc4 * Bump datumaro version (#2502) * bump datumaro version * remove deprecated/reomved attribute usage of the datumaro * Upgrade nncf version for 1.4 release (#2459) * Upgrade nncf version * Fix nncf interface warning * Set the exact nncf version * Update FQ refs after NNCF upgrade * Use NNCF from pypi * Update version for release 1.4.2rc5 (#2507) update version for release 1.4.2rc5 * Update for 1.4.2 (#2514) update for release 1.4.2 * create branch release/1.5.0 * Delete mem cache handler after training is done (#2535) release mem cache handler after training is done * Fix bug that auto batch size doesn't consider distributed training (#2533) * consider distributed training while searching batch size * update unit test * reveret gpu memory upper bound * fix typo * change allocated to reserved * add unit test for distributed training * align with pre-commit * Apply fix progress hook to release 1.5.0 (#2539) * Fix hook's ordering issue. AdaptiveRepeatHook changes the runner.max_iters before the ProgressHook * Change the expression * Fix typo * Fix multi-label, h-label issue * Fix auto_bs issue * Apply suggestions from code review Co-authored-by: Eunwoo Shin * Reflecting reviews * Refactor the name of get_data_cfg * Revert adaptive hook sampler init * Refactor the function name: get_data_cfg -> get_subset_data_cfg * Fix unit test errors * Remove adding AdaptiveRepeatDataHook for autobs * Remove unused import * Fix detection and segmentation case in Geti scenario --------- Co-authored-by: Eunwoo Shin * Re introduce adaptive scheduling for training (#2541) * Re-introduce adaptive patience for training * Revert unit tests * Update for release 1.4.3rc1 (#2542) * Mirror Anomaly ModelAPI changes (#2531) * Migrate anomaly exportable code to modelAPI (#2432) * Fix license in PR template * Migrate to modelAPI * Remove color conversion in streamer * Remove reverse_input_channels * Add float * Remove test as metadata is no longer used * Remove metadata from load method * remove anomalib openvino inferencer * fix signature * Support logacy OpenVINO model * Transform image * add configs * Re-introduce adaptive training (#2543) * Re-introduce adaptive patience for training * Revert unit tests * Fix auto input size mismatch in eval & export (#2530) * Fix auto input size mismatch in eval & export * Re-enable E2E tests for Issue#2518 * Add input size check in export testing * Format float numbers in log * Fix NNCF export shape mismatch * Fix saliency map issue * Disable auto input size if tiling enabled --------- Signed-off-by: Songki Choi * Update ref. 
fq number for anomaly e2e2 (#2547) * Skip e2e det tests by issue2548 (#2550) * Add skip to chained TC for issue #2548 (#2552) * Update for release 1.4.3 (#2551) * Update MAPI for 1.5 release (#2555) Upgrade MAPI to v 0.1.6 (#2529) * Upgrade MAPI * Update exp code demo commit * Fix MAPI imports * Update ModelAPI configuration (#2564) * Update MAPI rt infor for detection * Upadte export info for cls, det and seg * Update unit tests * Disable QAT for SegNexts (#2565) * Disable NNCF QAT for SegNext * Del obsolete pot configs * Move NNCF skip marks to test commands to avoid duplication * Add Anomaly modelAPI changes to releases/1.4.0 (#2563) * bug fix for legacy openvino models * Apply otx anomaly 1.5 changes * Fix tests * Fix compression config * fix modelAPI imports * update integration tests * Edit config types * Update keys in deployed model --------- Co-authored-by: Ashwin Vaidya Co-authored-by: Kim, Sungchul * Fix the CustomNonLinearClsHead when the batch_size is set to 1 (#2571) Fix bn1d issue Co-authored-by: sungmanc * Update ModelAPI configuration (#2564 from 1.4) (#2568) Update ModelAPI configuration (#2564) * Update MAPI rt infor for detection * Upadte export info for cls, det and seg * Update unit tests * Update for 1.4.4rc1 (#2572) * Hotfix DatasetEntity.get_combined_subset function loop (#2577) Fix get_combined_subset function * Revert default input size to `Default` due to YOLOX perf regression (#2580) Signed-off-by: Songki Choi * Fix for the degradation issue of the classification task (#2585) * Revert to sync with 1.4.0 * Remove repeat data * Convert to the RGB value * Fix color conversion logic * Fix precommit * Bump datumaro version to 1.5.1rc3 (#2587) * Add label ids to anomaly OpenVINO model xml (#2590) * Add label ids to model xml --------- * Fix DeiT-Tiny model regression during class incremental training (#2594) * enable IBloss for DeiT-Tiny * update changelog * add docstring * Add label ids to model xml in release 1.5 (#2591) Add label ids to model xml * Fix DeiT-Tiny regression test for release/1.4.0 (#2595) * Fix DeiT regression test * update changelog * temp * Fix mmcls bug not wrapping model in DataParallel on CPUs (#2601) Wrap multi-label and h-label classification models by MMDataParallel in case of CPU training. 
--------- Signed-off-by: Songki Choi * Fix h-label loss normalization issue w/ exclusive label group of singe label (#2604) * Fix h-label loss normalization issue w/ exclusive label group with signle label * Fix non-linear version --------- Signed-off-by: Songki Choi * Boost up Image numpy accessing speed through PIL (#2586) * boost up numpy accessing speed through PIL * update CHANGELOG * resolve precommit error * resolve precommit error * add fallback logic with PIL open * use convert instead of draft * Add missing import pathlib for cls e2e testing (#2610) * Fix division by zero in class incremental learning for classification (#2606) * Add empty label to reproduce zero-division error Signed-off-by: Songki Choi * Fix minor typo Signed-off-by: Songki Choi * Fix empty label 4 -> 3 Signed-off-by: Songki Choi * Prevent division by zero Signed-off-by: Songki Choi * Update license Signed-off-by: Songki Choi * Update CHANGELOG.md Signed-off-by: Songki Choi * Fix inefficient sampling Signed-off-by: Songki Choi * Revert indexing Signed-off-by: Songki Choi * Fix minor typo Signed-off-by: Songki Choi --------- Signed-off-by: Songki Choi * Unify logger usage (#2612) * unify logger * align with pre-commit * unify anomaly logger to otx * change logger file path * align with pre-commit * change logger file path in missing file * configure logger after ConfigManager is initialized * configure logger when ConfigManager instance is initialized * update unit test code * move config_logger to each cli file * align with pre-commit * change part still using mmcv logger * Fix XAI algorithm for Detection (#2609) * Impove saliency maps algorithm for Detection * Remove extra changes * Update unit tests * Changes for 1 class * Fix pre-commit * Update CHANGELOG * Tighten dependency constraint only adapting latest patches (#2607) * tighten dependency constratint only adapting latest patches * adjust scikit-image version w.r.t python version * adjust tensorboard version w.r.t python version * remove version specifier for scikit-image * Add metadata to optimized model (#2618) * bug fix for legacy openvino models * Add metadata to optimized model * Revert formatting changes --------- Co-authored-by: Ashwin Vaidya * modify omegaconf version constraint * [release 1.5.0] Fix XAI algorithm for Detection (#2617) Update detection XAI algorithm * Update dependency constraint (#2622) * Update tpp (#2621) * Fix h-label bug of missing parent labels in output (#2626) * Fix h-label bug of missing parent labels in output * Fix h-label test data label schema * Update CHANGELOG.md --------- Signed-off-by: Songki Choi * Update publish workflow (#2625) update publish workflow to push whl to internal pypi * bump datumaro version to ~=1.5.0 * fixed mistake while mergeing back 1.4.4 * modifiy readme * remove openvino model wrapper class * remove openvino model wrapper tests * [release 1.5.0] DeiT: enable tests + add ViTFeatureVectorHook (#2630) Add ViT feature vector hook * Fix docs broken link to datatumaro_h-label Signed-off-by: Songki Choi * Fix wrong label settings for non-anomaly task ModelAPIs Signed-off-by: Songki Choi * Update publish workflow for tag checking (#2632) * Update e2e tests for XAI Detection (#2634) Fix e2e XAI ref value * Disable QAT for newly added models (#2636) * Update release note and readme (#2637) * update release note and readme * remove package upload step on internal publish wf * update release note and, changelog, and readme * update version string to 1.6.0dev * fix datumaro version to 1.6.0rc0 * 
Mergeback 1.5.0 to develop (#2642) * Update publish workflow for tag checking (#2632) * Update e2e tests for XAI Detection (#2634) * Disable QAT for newly added models (#2636) * Update release note and readme (#2637) * remove package upload step on internal publish wf * update release note and, changelog, and readme * update version string to 1.6.0dev --------- Co-authored-by: Galina Zalesskaya Co-authored-by: Jaeguk Hyun * Revert "Mergeback 1.5.0 to develop" (#2645) Revert "Mergeback 1.5.0 to develop (#2642)" This reverts commit 2f67686103df873d020681f6d504f9595ce4a963. * Add a tool to help conduct experiments (#2651) * implement run and experiment * implement experiment result aggregator * refactor experiment.py * refactor run.py * get export model speed * add var collumn * refactor experiment.py * refine a way to update argument in cmd * refine resource tracker * support anomaly on research framework * refine code aggregating exp result * bugfix * make other task available * eval task save avg_time_per_images as result * Add new argument to track CPU&GPU utilization and memory usage (#2500) * add argument to track resource usage * fix bug * fix a bug in a multi gpu case * use total cpu usage * add unit test * add mark to unit test * cover edge case * add pynvml in requirement * align with pre-commit * add license comment * update changelog * refine argument help * align with pre-commit * add version to requirement and raise an error if not supported values are given * apply new resource tracker format * refactor run.py * support optimize in research framework * cover edge case * Handle a case where fail cases exist * make argparse raise error rather than exit if problem exist * revert tensorboard aggregator * bugfix * save failed cases as yaml file * deal with integer in variables * add epoch to metric * use latest log.json file * align with otx logging method * move experiment.py from cli to tools * refactor experiment.py * merge otx run feature into experiment.py * move set_arguments_to_cmd definition into experiment.py * refactor experiment.py * bugfix * minor bugfix * use otx.cli instead of each otx entry * add feature to parse single workspace * add comments * fix bugs * align with pre-commit * revert parser argument * align with pre-commit * Revert inference batch size to 1 for instance segmentation (#2648) Signed-off-by: Songki Choi * Remove unnecessary log while building a model (#2658) * revert logger in otx/algorithms/detection/adapters/mmdet/utils/builder.py * revert logger in otx/algorithms/classification/adapters/mmcls/utils/builder.py * make change more readable * Fix a minor bug of experiment.py (#2662) fix bug * Not check avg_time_per_image during test (#2665) * ignore avg_time_per_image during test * do not call stdev when length of array is less than 2 * ignore avg_time_per_image during regerssion test * Update device selection logic in classificaiton * Add xpu accelerator * Tmp patch for anomaly trainer * Use XPU callback for anomaly training * Update xpu accelerator * Fix for anomaly xpu callback * Fix validation batch logic * Cleanup, add docstrings * Refine xpu callback --------- Signed-off-by: Kim, Vinnam Signed-off-by: Songki Choi Co-authored-by: Yunchu Lee Co-authored-by: Kim, Sungchul Co-authored-by: Vinnam Kim Co-authored-by: Evgeny Tsykunov Co-authored-by: Songki Choi Co-authored-by: Eunwoo Shin Co-authored-by: Jaeguk Hyun Co-authored-by: Sungman Cho Co-authored-by: Eugene Liu Co-authored-by: Wonju Lee Co-authored-by: Dick Ameln Co-authored-by: sungchul.kim 
Co-authored-by: GalyaZalesskaya Co-authored-by: Harim Kang Co-authored-by: Ashwin Vaidya Co-authored-by: Ashwin Vaidya Co-authored-by: sungmanc --- .../adapters/anomalib/callbacks/__init__.py | 3 +- .../adapters/anomalib/callbacks/xpu.py | 36 +++++++++++++++++++ src/otx/algorithms/anomaly/tasks/train.py | 5 +++ 3 files changed, 43 insertions(+), 1 deletion(-) create mode 100644 src/otx/algorithms/anomaly/adapters/anomalib/callbacks/xpu.py diff --git a/src/otx/algorithms/anomaly/adapters/anomalib/callbacks/__init__.py b/src/otx/algorithms/anomaly/adapters/anomalib/callbacks/__init__.py index 95822fd7712..85054363f31 100644 --- a/src/otx/algorithms/anomaly/adapters/anomalib/callbacks/__init__.py +++ b/src/otx/algorithms/anomaly/adapters/anomalib/callbacks/__init__.py @@ -16,5 +16,6 @@ from .inference import AnomalyInferenceCallback from .progress import ProgressCallback +from .xpu import XPUCallback -__all__ = ["AnomalyInferenceCallback", "ProgressCallback"] +__all__ = ["AnomalyInferenceCallback", "ProgressCallback", "XPUCallback"] diff --git a/src/otx/algorithms/anomaly/adapters/anomalib/callbacks/xpu.py b/src/otx/algorithms/anomaly/adapters/anomalib/callbacks/xpu.py new file mode 100644 index 00000000000..461696a1528 --- /dev/null +++ b/src/otx/algorithms/anomaly/adapters/anomalib/callbacks/xpu.py @@ -0,0 +1,36 @@ +"""Anomaly XPU device callback.""" +# Copyright (C) 2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 +# + +import torch +from pytorch_lightning import Callback + + +class XPUCallback(Callback): + """XPU device callback. + + Applies IPEX optimization before training, moves data to XPU. + """ + + def __init__(self, device_idx=0): + self.device = torch.device(f"xpu:{device_idx}") + + def on_fit_start(self, trainer, pl_module): + """Applies IPEX optimization before training.""" + pl_module.to(self.device) + model, optimizer = torch.xpu.optimize(trainer.model, optimizer=trainer.optimizers[0]) + trainer.optimizers = [optimizer] + trainer.model = model + + def on_train_batch_start(self, trainer, pl_module, batch, batch_idx): + """Moves train batch tensors to XPU.""" + for k in batch: + if not isinstance(batch[k], list): + batch[k] = batch[k].to(self.device) + + def on_validation_batch_start(self, trainer, pl_module, batch, batch_idx, dataloader_idx): + """Moves validation batch tensors to XPU.""" + for k in batch: + if not isinstance(batch[k], list): + batch[k] = batch[k].to(self.device) diff --git a/src/otx/algorithms/anomaly/tasks/train.py b/src/otx/algorithms/anomaly/tasks/train.py index 9e2f57f249f..8016157e2a6 100644 --- a/src/otx/algorithms/anomaly/tasks/train.py +++ b/src/otx/algorithms/anomaly/tasks/train.py @@ -28,7 +28,9 @@ from pytorch_lightning import Trainer, seed_everything from otx.algorithms.anomaly.adapters.anomalib.callbacks import ProgressCallback +from otx.algorithms.anomaly.adapters.anomalib.callbacks.xpu import XPUCallback from otx.algorithms.anomaly.adapters.anomalib.data import OTXAnomalyDataModule +from otx.algorithms.common.utils.utils import is_xpu_available from otx.api.entities.datasets import DatasetEntity from otx.api.entities.model import ModelEntity from otx.api.entities.train_parameters import TrainParameters @@ -88,6 +90,9 @@ def train( ), ] + if is_xpu_available(): + callbacks.append(XPUCallback()) + self.trainer = Trainer(**config.trainer, logger=False, callbacks=callbacks) self.trainer.fit(model=self.model, datamodule=datamodule) From ae090bad5135f67874f5e38352ea0b6f0997592f Mon Sep 17 00:00:00 2001 From: Eunwoo Shin Date: Wed, 6 Dec 
2023 16:34:01 +0900 Subject: [PATCH 11/39] Disable mixed precision training on XPU (#2683) * disable mixed precision training on XPU * fix optimize error * refactor XPUDataParallel * align with pre-commit * align with pre-commit * fix bug --- .../adapters/mmcls/apis/train.py | 6 ++++-- .../common/adapters/mmcv/configurer.py | 9 +-------- .../common/adapters/mmcv/nncf/utils.py | 2 ++ .../mmcv/utils/_builder_build_data_parallel.py | 18 ------------------ .../detection/adapters/mmdet/apis/train.py | 10 ++++++---- .../segmentation/adapters/mmseg/apis/train.py | 15 ++++++++------- 6 files changed, 21 insertions(+), 39 deletions(-) diff --git a/src/otx/algorithms/classification/adapters/mmcls/apis/train.py b/src/otx/algorithms/classification/adapters/mmcls/apis/train.py index 5527e19a847..50658c86d35 100644 --- a/src/otx/algorithms/classification/adapters/mmcls/apis/train.py +++ b/src/otx/algorithms/classification/adapters/mmcls/apis/train.py @@ -78,7 +78,7 @@ def train_model(model, dataset, cfg, distributed=False, validate=False, timestam elif cfg.device == "xpu": assert len(cfg.gpu_ids) == 1 model.to(f"xpu:{cfg.gpu_ids[0]}") - model = XPUDataParallel(model, dim=0, device_ids=cfg.gpu_ids, enable_autocast=bool(fp16_cfg)) + model = XPUDataParallel(model, dim=0, device_ids=cfg.gpu_ids) elif cfg.device == "hpu": assert len(cfg.gpu_ids) == 1 model = HPUDataParallel(model.cuda(), dim=0, device_ids=cfg.gpu_ids, enable_autocast=bool(fp16_cfg)) @@ -93,7 +93,9 @@ def train_model(model, dataset, cfg, distributed=False, validate=False, timestam optimizer = build_optimizer(model, cfg.optimizer) if cfg.device == "xpu": - dtype = torch.bfloat16 if cfg.optimizer_config.get("bf16_training", False) else torch.float32 + if cfg.optimizer_config.get("bf16_training", False): + logger.warning("XPU supports fp32 training only currently.") + dtype = torch.float32 model.train() model, optimizer = torch.xpu.optimize(model, optimizer=optimizer, dtype=dtype) diff --git a/src/otx/algorithms/common/adapters/mmcv/configurer.py b/src/otx/algorithms/common/adapters/mmcv/configurer.py index 7342735f43d..0d7b427e114 100644 --- a/src/otx/algorithms/common/adapters/mmcv/configurer.py +++ b/src/otx/algorithms/common/adapters/mmcv/configurer.py @@ -264,18 +264,11 @@ def configure_fp16(cfg: Config): opts["type"] = "HPUOptimizerHook" cfg.optimizer_config.update(opts) elif is_xpu_available(): - opts.update({"distributed": distributed, **fp16_config}) if optim_type == "SAMOptimizerHook": logger.warning("SAMOptimizerHook is not supported on XPU yet, changed to OptimizerHook.") opts["type"] = "OptimizerHook" - if optim_type == "OptimizerHook": - opts["type"] = "BFp16XPUOptimizerHook" - else: - # does not support optimizerhook type - # let mm library handle it - cfg.fp16 = fp16_config - opts = dict() cfg.optimizer_config.update(opts) + logger.warning("XPU doesn't support mixed precision training currently.") elif torch.cuda.is_available(): opts.update({"distributed": distributed, **fp16_config}) if optim_type == "SAMOptimizerHook": diff --git a/src/otx/algorithms/common/adapters/mmcv/nncf/utils.py b/src/otx/algorithms/common/adapters/mmcv/nncf/utils.py index 08bd33a97fa..072b5ca7cf9 100644 --- a/src/otx/algorithms/common/adapters/mmcv/nncf/utils.py +++ b/src/otx/algorithms/common/adapters/mmcv/nncf/utils.py @@ -56,6 +56,8 @@ def get_fake_input( data = scatter(collate([data], samples_per_gpu=1), [-1])[0] elif device.type == "cuda": data = scatter(collate([data], samples_per_gpu=1), [device.index])[0] + elif device.type == "xpu": + data = 
scatter(collate([data], samples_per_gpu=1), [-1])[0] else: raise NotImplementedError() return data diff --git a/src/otx/algorithms/common/adapters/mmcv/utils/_builder_build_data_parallel.py b/src/otx/algorithms/common/adapters/mmcv/utils/_builder_build_data_parallel.py index 226e5e8cc25..f0dd316b0db 100644 --- a/src/otx/algorithms/common/adapters/mmcv/utils/_builder_build_data_parallel.py +++ b/src/otx/algorithms/common/adapters/mmcv/utils/_builder_build_data_parallel.py @@ -92,10 +92,6 @@ def build_data_parallel( class XPUDataParallel(MMDataParallel): - def __init__(self, *args, enable_autocast: bool = False, **kwargs): - super().__init__(*args, **kwargs) - self.enable_autocast = enable_autocast - def scatter(self, inputs, kwargs, device_ids): inputs, kwargs = super().scatter(inputs, kwargs, [-1]) target_device = torch.device(f"xpu:{device_ids[0]}") @@ -124,20 +120,6 @@ def scatter(self, inputs, kwargs, device_ids): return inputs, kwargs - def forward(self, *inputs, **kwargs): - # we have to apply autocast here, because the original mmcv's fp16 decorator is hard to override. - # Perhaps, one global autocast is not as accurate as original mmcv's approach - with torch.autocast(device_type="xpu", dtype=torch.bfloat16, enabled=self.enable_autocast): - return super().forward(*inputs, **kwargs) - - def train_step(self, *inputs, **kwargs): - with torch.autocast(device_type="xpu", dtype=torch.bfloat16, enabled=self.enable_autocast): - return super().train_step(*inputs, **kwargs) - - def val_step(self, *inputs, **kwargs): - with torch.autocast(device_type="xpu", dtype=torch.bfloat16, enabled=self.enable_autocast): - return super().val_step(*inputs, **kwargs) - class HPUDataParallel(MMDataParallel): def __init__(self, *args, enable_autocast: bool = False, put_gt_on_device=True, **kwargs): diff --git a/src/otx/algorithms/detection/adapters/mmdet/apis/train.py b/src/otx/algorithms/detection/adapters/mmdet/apis/train.py index cd2638ec1ec..c29977998c7 100644 --- a/src/otx/algorithms/detection/adapters/mmdet/apis/train.py +++ b/src/otx/algorithms/detection/adapters/mmdet/apis/train.py @@ -107,6 +107,9 @@ def train_detector(model, dataset, cfg, distributed=False, validate=False, times fp16_cfg = cfg.get("fp16_", None) # put model on gpus + if cfg.device == "xpu": + model.to(f"xpu:{cfg.gpu_ids[0]}") + if distributed: find_unused_parameters = cfg.get("find_unused_parameters", False) # Sets the `find_unused_parameters` parameter in @@ -118,9 +121,6 @@ def train_detector(model, dataset, cfg, distributed=False, validate=False, times broadcast_buffers=False, find_unused_parameters=find_unused_parameters, ) - elif cfg.device == "xpu": - model = build_dp(model, cfg.device, device_ids=cfg.gpu_ids, enable_autocast=bool(fp16_cfg)) - model.to(f"xpu:{cfg.gpu_ids[0]}") elif cfg.device == "hpu": model = build_dp( model, cfg.device, device_ids=cfg.gpu_ids, dim=0, enable_autocast=bool(fp16_cfg), put_gt_on_device=False @@ -142,7 +142,9 @@ def train_detector(model, dataset, cfg, distributed=False, validate=False, times optimizer = build_optimizer(model, cfg.optimizer) if cfg.device == "xpu": - dtype = torch.bfloat16 if cfg.optimizer_config.get("bf16_training", False) else torch.float32 + if cfg.optimizer_config.get("bf16_training", False): + logger.warning("XPU supports fp32 training only currently.") + dtype = torch.float32 model.train() model, optimizer = torch.xpu.optimize(model, optimizer=optimizer, dtype=dtype) diff --git a/src/otx/algorithms/segmentation/adapters/mmseg/apis/train.py 
b/src/otx/algorithms/segmentation/adapters/mmseg/apis/train.py index 77aba2c8066..f0e4975939d 100644 --- a/src/otx/algorithms/segmentation/adapters/mmseg/apis/train.py +++ b/src/otx/algorithms/segmentation/adapters/mmseg/apis/train.py @@ -51,6 +51,9 @@ def train_segmentor(model, dataset, cfg, distributed=False, validate=False, time train_loader_cfg = {**loader_cfg, **cfg.data.get("train_dataloader", {})} data_loaders = [build_dataloader(ds, **train_loader_cfg) for ds in dataset] + if cfg.device == "xpu": + model.to(f"xpu:{cfg.gpu_ids[0]}") + # put model on devices if distributed: find_unused_parameters = cfg.get("find_unused_parameters", False) @@ -64,16 +67,12 @@ def train_segmentor(model, dataset, cfg, distributed=False, validate=False, time find_unused_parameters=find_unused_parameters, ) else: - if not torch.cuda.is_available(): + if not torch.cuda.is_available(): # noqa assert digit_version(mmcv.__version__) >= digit_version( "1.4.4" ), "Please use MMCV >= 1.4.4 for CPU training!" - if cfg.device == "xpu": - use_autocast = bool(cfg.get("fp16_", False)) - model = build_dp(model, cfg.device, device_ids=cfg.gpu_ids, enable_autocast=use_autocast) - model.to(f"xpu:{cfg.gpu_ids[0]}") - elif cfg.device == "hpu": + if cfg.device == "hpu": use_autocast = bool(cfg.get("fp16_", False)) model = build_dp(model, cfg.device, device_ids=cfg.gpu_ids, enable_autocast=use_autocast) model.to(model.src_device_obj) @@ -92,7 +91,9 @@ def train_segmentor(model, dataset, cfg, distributed=False, validate=False, time optimizer = build_optimizer(model, cfg.optimizer) if cfg.device == "xpu": - dtype = torch.bfloat16 if cfg.optimizer_config.get("bf16_training", False) else torch.float32 + if cfg.optimizer_config.get("bf16_training", False): + logger.warning("XPU supports fp32 training only currently.") + dtype = torch.float32 model.train() model, optimizer = torch.xpu.optimize(model, optimizer=optimizer, dtype=dtype) From 0062179aa1a58f36d4536c6382908de94855a708 Mon Sep 17 00:00:00 2001 From: Vladislav Sovrasov Date: Fri, 8 Dec 2023 08:01:06 +0100 Subject: [PATCH 12/39] Update anomaly XPU integration (#2697) * Update anomaly XPU integration * Update strategy and accelerator * Cleanup in strategy * Fix mypy * remove XPU callback --- .../algorithms/anomaly/adapters/__init__.py | 4 ++ .../anomalib/accelerators/__init__.py | 8 +++ .../adapters/anomalib/accelerators/xpu.py | 60 +++++++++++++++++++ .../adapters/anomalib/callbacks/__init__.py | 3 +- .../adapters/anomalib/callbacks/xpu.py | 36 ----------- .../adapters/anomalib/strategies/__init__.py | 8 +++ .../anomalib/strategies/xpu_single.py | 60 +++++++++++++++++++ src/otx/algorithms/anomaly/tasks/train.py | 4 +- 8 files changed, 143 insertions(+), 40 deletions(-) create mode 100644 src/otx/algorithms/anomaly/adapters/anomalib/accelerators/__init__.py create mode 100644 src/otx/algorithms/anomaly/adapters/anomalib/accelerators/xpu.py delete mode 100644 src/otx/algorithms/anomaly/adapters/anomalib/callbacks/xpu.py create mode 100644 src/otx/algorithms/anomaly/adapters/anomalib/strategies/__init__.py create mode 100644 src/otx/algorithms/anomaly/adapters/anomalib/strategies/xpu_single.py diff --git a/src/otx/algorithms/anomaly/adapters/__init__.py b/src/otx/algorithms/anomaly/adapters/__init__.py index cdc9654bbb4..9c785467cb8 100644 --- a/src/otx/algorithms/anomaly/adapters/__init__.py +++ b/src/otx/algorithms/anomaly/adapters/__init__.py @@ -13,3 +13,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions # and limitations under the License. + + +from .anomalib.accelerators.xpu import XPUAccelerator # noqa: F401 +from .anomalib.strategies import SingleXPUStrategy # noqa: F401 diff --git a/src/otx/algorithms/anomaly/adapters/anomalib/accelerators/__init__.py b/src/otx/algorithms/anomaly/adapters/anomalib/accelerators/__init__.py new file mode 100644 index 00000000000..b6c9661d650 --- /dev/null +++ b/src/otx/algorithms/anomaly/adapters/anomalib/accelerators/__init__.py @@ -0,0 +1,8 @@ +"""Lightning accelerator for XPU device.""" +# Copyright (C) 2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 +# + +from .xpu import XPUAccelerator + +__all__ = ["XPUAccelerator"] diff --git a/src/otx/algorithms/anomaly/adapters/anomalib/accelerators/xpu.py b/src/otx/algorithms/anomaly/adapters/anomalib/accelerators/xpu.py new file mode 100644 index 00000000000..624a0ec5308 --- /dev/null +++ b/src/otx/algorithms/anomaly/adapters/anomalib/accelerators/xpu.py @@ -0,0 +1,60 @@ +"""Lightning accelerator for XPU device.""" +# Copyright (C) 2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 +# + +from typing import Any, Dict, Union + +import torch +from pytorch_lightning.accelerators import AcceleratorRegistry +from pytorch_lightning.accelerators.accelerator import Accelerator + +from otx.algorithms.common.utils.utils import is_xpu_available + + +class XPUAccelerator(Accelerator): + """Support for an XPU, optimized for large-scale machine learning.""" + + accelerator_name = "xpu" + + def setup_device(self, device: torch.device) -> None: + """Sets up the specified device.""" + if device.type != "xpu": + raise RuntimeError(f"Device should be xpu, got {device} instead") + + torch.xpu.set_device(device) + + @staticmethod + def parse_devices(devices: Any) -> Any: + """Parses devices for multi-XPU training.""" + if isinstance(devices, list): + return devices + return [devices] + + @staticmethod + def get_parallel_devices(devices: Any) -> Any: + """Generates a list of parallel devices.""" + return [torch.device("xpu", idx) for idx in devices] + + @staticmethod + def auto_device_count() -> int: + """Returns number of XPU devices available.""" + return torch.xpu.device_count() + + @staticmethod + def is_available() -> bool: + """Checks if XPU is available.""" + return is_xpu_available() + + def get_device_stats(self, device: Union[str, torch.device]) -> Dict[str, Any]: + """Returns XPU device stats.""" + return {} + + def teardown(self) -> None: + """Cleans up XPU-related resources.""" + pass + + +AcceleratorRegistry.register( + XPUAccelerator.accelerator_name, XPUAccelerator, description="Accelerator supports XPU devices" +) diff --git a/src/otx/algorithms/anomaly/adapters/anomalib/callbacks/__init__.py b/src/otx/algorithms/anomaly/adapters/anomalib/callbacks/__init__.py index 85054363f31..95822fd7712 100644 --- a/src/otx/algorithms/anomaly/adapters/anomalib/callbacks/__init__.py +++ b/src/otx/algorithms/anomaly/adapters/anomalib/callbacks/__init__.py @@ -16,6 +16,5 @@ from .inference import AnomalyInferenceCallback from .progress import ProgressCallback -from .xpu import XPUCallback -__all__ = ["AnomalyInferenceCallback", "ProgressCallback", "XPUCallback"] +__all__ = ["AnomalyInferenceCallback", "ProgressCallback"] diff --git a/src/otx/algorithms/anomaly/adapters/anomalib/callbacks/xpu.py b/src/otx/algorithms/anomaly/adapters/anomalib/callbacks/xpu.py deleted file mode 100644 index 461696a1528..00000000000 --- 
a/src/otx/algorithms/anomaly/adapters/anomalib/callbacks/xpu.py +++ /dev/null @@ -1,36 +0,0 @@ -"""Anomaly XPU device callback.""" -# Copyright (C) 2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 -# - -import torch -from pytorch_lightning import Callback - - -class XPUCallback(Callback): - """XPU device callback. - - Applies IPEX optimization before training, moves data to XPU. - """ - - def __init__(self, device_idx=0): - self.device = torch.device(f"xpu:{device_idx}") - - def on_fit_start(self, trainer, pl_module): - """Applies IPEX optimization before training.""" - pl_module.to(self.device) - model, optimizer = torch.xpu.optimize(trainer.model, optimizer=trainer.optimizers[0]) - trainer.optimizers = [optimizer] - trainer.model = model - - def on_train_batch_start(self, trainer, pl_module, batch, batch_idx): - """Moves train batch tensors to XPU.""" - for k in batch: - if not isinstance(batch[k], list): - batch[k] = batch[k].to(self.device) - - def on_validation_batch_start(self, trainer, pl_module, batch, batch_idx, dataloader_idx): - """Moves validation batch tensors to XPU.""" - for k in batch: - if not isinstance(batch[k], list): - batch[k] = batch[k].to(self.device) diff --git a/src/otx/algorithms/anomaly/adapters/anomalib/strategies/__init__.py b/src/otx/algorithms/anomaly/adapters/anomalib/strategies/__init__.py new file mode 100644 index 00000000000..ff3508b3f1c --- /dev/null +++ b/src/otx/algorithms/anomaly/adapters/anomalib/strategies/__init__.py @@ -0,0 +1,8 @@ +"""Lightning strategy for single XPU device.""" +# Copyright (C) 2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 +# + +from .xpu_single import SingleXPUStrategy + +__all__ = ["SingleXPUStrategy"] diff --git a/src/otx/algorithms/anomaly/adapters/anomalib/strategies/xpu_single.py b/src/otx/algorithms/anomaly/adapters/anomalib/strategies/xpu_single.py new file mode 100644 index 00000000000..e211d3d2f42 --- /dev/null +++ b/src/otx/algorithms/anomaly/adapters/anomalib/strategies/xpu_single.py @@ -0,0 +1,60 @@ +"""Lightning strategy for single XPU device.""" +# Copyright (C) 2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 +# + +from typing import Optional + +import pytorch_lightning as pl +import torch +from lightning_fabric.plugins import CheckpointIO +from lightning_fabric.utilities.types import _DEVICE +from pytorch_lightning.plugins.precision import PrecisionPlugin +from pytorch_lightning.strategies import StrategyRegistry +from pytorch_lightning.strategies.single_device import SingleDeviceStrategy +from pytorch_lightning.utilities.exceptions import MisconfigurationException + +from otx.algorithms.common.utils.utils import is_xpu_available + + +class SingleXPUStrategy(SingleDeviceStrategy): + """Strategy for training on single XPU device.""" + + strategy_name = "xpu_single" + + def __init__( + self, + device: _DEVICE = "xpu:0", + accelerator: Optional["pl.accelerators.Accelerator"] = None, + checkpoint_io: Optional[CheckpointIO] = None, + precision_plugin: Optional[PrecisionPlugin] = None, + ): + + if not is_xpu_available(): + raise MisconfigurationException("`SingleXPUStrategy` requires XPU devices to run") + + super().__init__( + accelerator=accelerator, + device=device, + checkpoint_io=checkpoint_io, + precision_plugin=precision_plugin, + ) + + @property + def is_distributed(self) -> bool: + """Returns true if the strategy supports distributed training.""" + return False + + def setup_optimizers(self, trainer: "pl.Trainer") -> None: + """Sets up optimizers.""" + 
super().setup_optimizers(trainer) + if len(self.optimizers) != 1: # type: ignore + raise RuntimeError("XPU strategy doesn't support multiple optimizers") + model, optimizer = torch.xpu.optimize(trainer.model, optimizer=self.optimizers[0]) # type: ignore + self.optimizers = [optimizer] + trainer.model = model + + +StrategyRegistry.register( + SingleXPUStrategy.strategy_name, SingleXPUStrategy, description="Strategy that enables training on single XPU" +) diff --git a/src/otx/algorithms/anomaly/tasks/train.py b/src/otx/algorithms/anomaly/tasks/train.py index 8016157e2a6..34d5af57a34 100644 --- a/src/otx/algorithms/anomaly/tasks/train.py +++ b/src/otx/algorithms/anomaly/tasks/train.py @@ -28,7 +28,6 @@ from pytorch_lightning import Trainer, seed_everything from otx.algorithms.anomaly.adapters.anomalib.callbacks import ProgressCallback -from otx.algorithms.anomaly.adapters.anomalib.callbacks.xpu import XPUCallback from otx.algorithms.anomaly.adapters.anomalib.data import OTXAnomalyDataModule from otx.algorithms.common.utils.utils import is_xpu_available from otx.api.entities.datasets import DatasetEntity @@ -91,7 +90,8 @@ def train( ] if is_xpu_available(): - callbacks.append(XPUCallback()) + config.trainer.strategy = "xpu_single" + config.trainer.accelerator = "xpu" self.trainer = Trainer(**config.trainer, logger=False, callbacks=callbacks) self.trainer.fit(model=self.model, datamodule=datamodule) From 6da38a30fad08ad301f2f58c079eedb8a82f07ed Mon Sep 17 00:00:00 2001 From: Vladislav Sovrasov Date: Wed, 13 Dec 2023 11:54:52 +0100 Subject: [PATCH 13/39] Add XPU mixed precision plugin for lightning (#2714) * Update anomaly XPU integration * Update strategy and accelerator * Cleanup in strategy * Fix mypy * remove XPU callback * Add XPU mixed precision lightning training * Fix linters * Handle default plugins value --- .../adapters/anomalib/plugins/__init__.py | 7 ++ .../anomalib/plugins/xpu_precision.py | 109 ++++++++++++++++++ src/otx/algorithms/anomaly/tasks/train.py | 10 +- 3 files changed, 125 insertions(+), 1 deletion(-) create mode 100644 src/otx/algorithms/anomaly/adapters/anomalib/plugins/__init__.py create mode 100644 src/otx/algorithms/anomaly/adapters/anomalib/plugins/xpu_precision.py diff --git a/src/otx/algorithms/anomaly/adapters/anomalib/plugins/__init__.py b/src/otx/algorithms/anomaly/adapters/anomalib/plugins/__init__.py new file mode 100644 index 00000000000..df24d838d85 --- /dev/null +++ b/src/otx/algorithms/anomaly/adapters/anomalib/plugins/__init__.py @@ -0,0 +1,7 @@ +"""Plugin for mixed-precision training on XPU.""" +# Copyright (C) 2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +from .xpu_precision import MixedPrecisionXPUPlugin + +__all__ = ["MixedPrecisionXPUPlugin"] diff --git a/src/otx/algorithms/anomaly/adapters/anomalib/plugins/xpu_precision.py b/src/otx/algorithms/anomaly/adapters/anomalib/plugins/xpu_precision.py new file mode 100644 index 00000000000..bfd9f5d3b93 --- /dev/null +++ b/src/otx/algorithms/anomaly/adapters/anomalib/plugins/xpu_precision.py @@ -0,0 +1,109 @@ +"""Plugin for mixed-precision training on XPU.""" +# Copyright (C) 2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + + +from contextlib import contextmanager +from typing import Any, Callable, Dict, Generator, Optional, Union + +import pytorch_lightning as pl +import torch +from lightning_fabric.utilities.types import Optimizable +from pytorch_lightning.plugins.precision.precision_plugin import PrecisionPlugin +from pytorch_lightning.utilities import 
GradClipAlgorithmType +from pytorch_lightning.utilities.exceptions import MisconfigurationException +from torch import Tensor +from torch.optim import LBFGS, Optimizer + + +class MixedPrecisionXPUPlugin(PrecisionPlugin): + """Plugin for Automatic Mixed Precision (AMP) training with ``torch.xpu.autocast``. + + Args: + scaler: An optional :class:`torch.cuda.amp.GradScaler` to use. + """ + + def __init__(self, scaler: Optional[Any] = None) -> None: + self.scaler = scaler + + def pre_backward(self, tensor: Tensor, module: "pl.LightningModule") -> Tensor: + """Apply grad scaler before backward.""" + if self.scaler is not None: + tensor = self.scaler.scale(tensor) + return super().pre_backward(tensor, module) + + def optimizer_step( # type: ignore[override] + self, + optimizer: Optimizable, + model: "pl.LightningModule", + optimizer_idx: int, + closure: Callable[[], Any], + **kwargs: Any, + ) -> Any: + """Make an optimizer step using scaler if it was passed.""" + if self.scaler is None: + # skip scaler logic, as bfloat16 does not require scaler + return super().optimizer_step( + optimizer, model=model, optimizer_idx=optimizer_idx, closure=closure, **kwargs + ) + if isinstance(optimizer, LBFGS): + raise MisconfigurationException( + f"Native AMP and the LBFGS optimizer are not compatible (optimizer {optimizer_idx})." + ) + closure_result = closure() + + if not _optimizer_handles_unscaling(optimizer): + # Unscaling needs to be performed here in case we are going to apply gradient clipping. + # Optimizers that perform unscaling in their `.step()` method are not supported (e.g., fused Adam). + # Note: `unscale` happens after the closure is executed, but before the `on_before_optimizer_step` hook. + self.scaler.unscale_(optimizer) + + self._after_closure(model, optimizer, optimizer_idx) + skipped_backward = closure_result is None + # in manual optimization, the closure does not return a value + if not model.automatic_optimization or not skipped_backward: + # note: the scaler will skip the `optimizer.step` if nonfinite gradients are found + step_output = self.scaler.step(optimizer, **kwargs) + self.scaler.update() + return step_output + return closure_result + + def clip_gradients( + self, + optimizer: Optimizer, + clip_val: Union[int, float] = 0.0, + gradient_clip_algorithm: GradClipAlgorithmType = GradClipAlgorithmType.NORM, + ) -> None: + """Handle grad clipping with scaler.""" + if clip_val > 0 and _optimizer_handles_unscaling(optimizer): + raise RuntimeError( + f"The current optimizer, {type(optimizer).__qualname__}, does not allow for gradient clipping" + " because it performs unscaling of gradients internally. HINT: Are you using a 'fused' optimizer?" + ) + super().clip_gradients(optimizer=optimizer, clip_val=clip_val, gradient_clip_algorithm=gradient_clip_algorithm) + + @contextmanager + def forward_context(self) -> Generator[None, None, None]: + """Enable autocast context.""" + with torch.xpu.autocast(True): + yield + + def state_dict(self) -> Dict[str, Any]: + """Returns state dict of the plugin.""" + if self.scaler is not None: + return self.scaler.state_dict() + return {} + + def load_state_dict(self, state_dict: Dict[str, Any]) -> None: + """Loads state dict to the plugin.""" + if self.scaler is not None: + self.scaler.load_state_dict(state_dict) + + +def _optimizer_handles_unscaling(optimizer: Any) -> bool: + """Determines if a PyTorch optimizer handles unscaling gradients in the step method rather than through the scaler. 
+ + Since the current implementation of this function checks a PyTorch internal variable on the optimizer, the return + value will only be reliable for built-in PyTorch optimizers. + """ + return getattr(optimizer, "_step_supports_amp_scaling", False) diff --git a/src/otx/algorithms/anomaly/tasks/train.py b/src/otx/algorithms/anomaly/tasks/train.py index 34d5af57a34..67af58a944e 100644 --- a/src/otx/algorithms/anomaly/tasks/train.py +++ b/src/otx/algorithms/anomaly/tasks/train.py @@ -29,6 +29,7 @@ from otx.algorithms.anomaly.adapters.anomalib.callbacks import ProgressCallback from otx.algorithms.anomaly.adapters.anomalib.data import OTXAnomalyDataModule +from otx.algorithms.anomaly.adapters.anomalib.plugins.xpu_precision import MixedPrecisionXPUPlugin from otx.algorithms.common.utils.utils import is_xpu_available from otx.api.entities.datasets import DatasetEntity from otx.api.entities.model import ModelEntity @@ -89,11 +90,18 @@ def train( ), ] + plugins = [] + if config.trainer.plugins is not None: + plugins.extend(config.trainer.plugins) + config.trainer.pop("plugins") + if is_xpu_available(): config.trainer.strategy = "xpu_single" config.trainer.accelerator = "xpu" + if config.trainer.precision == 16: + plugins.append(MixedPrecisionXPUPlugin()) - self.trainer = Trainer(**config.trainer, logger=False, callbacks=callbacks) + self.trainer = Trainer(**config.trainer, logger=False, callbacks=callbacks, plugins=plugins) self.trainer.fit(model=self.model, datamodule=datamodule) self.save_model(output_model) From 71e9adc308bdadcad58fe9a0af791101dad6ec68 Mon Sep 17 00:00:00 2001 From: Eunwoo Shin Date: Thu, 14 Dec 2023 00:01:57 +0900 Subject: [PATCH 14/39] Update code to support other features than 'train' on XPU (#2704) * move where patching code for xpu in detection * implement xpu resource manager in HPO * consider xpu during adaptive_num_workers * consider xpu while deciding asynchronous_sha * align with pre-commit * change variable name to proper one * change patching code into configure_device * update unit test code * align with pre-commit --- .../adapters/mmcv/utils/config_utils.py | 10 +- .../detection/adapters/mmdet/apis/train.py | 58 -------- .../detection/adapters/mmdet/configurer.py | 11 ++ .../detection/adapters/mmdet/task.py | 12 +- .../adapters/mmdet/utils/__init__.py | 4 + .../adapters/mmdet/utils/config_utils.py | 52 +++++++ src/otx/cli/utils/hpo.py | 15 +- src/otx/hpo/hpo_runner.py | 42 +++--- src/otx/hpo/resource_manager.py | 132 ++++++++++++------ .../adapters/mmcv/utils/test_config_utils.py | 17 +++ .../adapters/mmdet/test_configurer.py | 83 ++++++----- tests/unit/hpo/test_resource_manager.py | 97 ++++++++++--- 12 files changed, 343 insertions(+), 190 deletions(-) diff --git a/src/otx/algorithms/common/adapters/mmcv/utils/config_utils.py b/src/otx/algorithms/common/adapters/mmcv/utils/config_utils.py index 3930fc22bb6..baca58788a3 100644 --- a/src/otx/algorithms/common/adapters/mmcv/utils/config_utils.py +++ b/src/otx/algorithms/common/adapters/mmcv/utils/config_utils.py @@ -25,6 +25,7 @@ from mmcv.utils.path import check_file_exist from otx.algorithms.common.configs.configuration_enums import InputSizePreset +from otx.algorithms.common.utils import is_xpu_available from otx.api.entities.datasets import DatasetEntity from otx.utils.logger import get_logger @@ -505,11 +506,14 @@ def patch_persistent_workers(config: Config): def get_adaptive_num_workers(num_dataloader: int = 1) -> Union[int, None]: """Measure appropriate num_workers value and return it.""" - num_gpus = 
torch.cuda.device_count() - if num_gpus == 0: + if is_xpu_available(): + num_devices = torch.xpu.device_count() + else: + num_devices = torch.cuda.device_count() + if num_devices == 0: logger.warning("There is no GPUs. Use existing num_worker value.") return None - return min(multiprocessing.cpu_count() // (num_dataloader * num_gpus), 8) # max available num_workers is 8 + return min(multiprocessing.cpu_count() // (num_dataloader * num_devices), 8) # max available num_workers is 8 def patch_from_hyperparams(config: Config, hyperparams, **kwargs): diff --git a/src/otx/algorithms/detection/adapters/mmdet/apis/train.py b/src/otx/algorithms/detection/adapters/mmdet/apis/train.py index c29977998c7..3a731513f59 100644 --- a/src/otx/algorithms/detection/adapters/mmdet/apis/train.py +++ b/src/otx/algorithms/detection/adapters/mmdet/apis/train.py @@ -7,8 +7,6 @@ import os import torch -from mmcv.ops.nms import NMSop -from mmcv.ops.roi_align import RoIAlign from mmcv.runner import ( DistSamplerSeedHook, EpochBasedRunner, @@ -16,18 +14,14 @@ build_runner, get_dist_info, ) -from mmcv.utils import ext_loader from mmdet.core import DistEvalHook, EvalHook, build_optimizer from mmdet.datasets import build_dataloader, build_dataset, replace_ImageToTensor from mmdet.utils import build_ddp, compat_cfg, find_latest_checkpoint, get_root_logger from mmdet.utils.util_distribution import build_dp, dp_factory -from torchvision.ops import nms as tv_nms -from torchvision.ops import roi_align as tv_roi_align from otx.algorithms.common.adapters.mmcv.utils import HPUDataParallel, XPUDataParallel from otx.algorithms.common.adapters.mmcv.utils.hpu_optimizers import HABANA_OPTIMIZERS -ext_module = ext_loader.load_ext("_ext", ["nms", "softnms", "nms_match", "nms_rotated", "nms_quadri"]) dp_factory["xpu"] = XPUDataParallel dp_factory["hpu"] = HPUDataParallel @@ -134,11 +128,6 @@ def train_detector(model, dataset, cfg, distributed=False, validate=False, times # build optimizer auto_scale_lr(cfg, distributed, logger) - if cfg.device in ["hpu", "xpu"]: - # dynamic patch for nms and roi_align - NMSop.forward = monkey_patched_nms - RoIAlign.forward = monkey_patched_roi_align - optimizer = build_optimizer(model, cfg.optimizer) if cfg.device == "xpu": @@ -211,50 +200,3 @@ def train_detector(model, dataset, cfg, distributed=False, validate=False, times elif cfg.load_from: runner.load_checkpoint(cfg.load_from) runner.run(data_loaders, cfg.workflow) - - -def monkey_patched_nms(ctx, bboxes, scores, iou_threshold, offset, score_threshold, max_num): - """Runs MMCVs NMS with torchvision.nms, or forces NMS from MMCV to run on CPU.""" - is_filtering_by_score = score_threshold > 0 - if is_filtering_by_score: - valid_mask = scores > score_threshold - bboxes, scores = bboxes[valid_mask], scores[valid_mask] - valid_inds = torch.nonzero(valid_mask, as_tuple=False).squeeze(dim=1) - - if bboxes.dtype == torch.bfloat16: - bboxes = bboxes.to(torch.float32) - if scores.dtype == torch.bfloat16: - scores = scores.to(torch.float32) - - if offset == 0: - inds = tv_nms(bboxes, scores, float(iou_threshold)) - else: - device = bboxes.device - bboxes = bboxes.to("cpu") - scores = scores.to("cpu") - inds = ext_module.nms(bboxes, scores, iou_threshold=float(iou_threshold), offset=offset) - bboxes = bboxes.to(device) - scores = scores.to(device) - - if max_num > 0: - inds = inds[:max_num] - if is_filtering_by_score: - inds = valid_inds[inds] - return inds - - -def monkey_patched_roi_align(self, input, rois): - """Replaces MMCVs roi align with the one from 
torchvision. - - Args: - self: patched instance - input: NCHW images - rois: Bx5 boxes. First column is the index into N. The other 4 columns are xyxy. - """ - - if "aligned" in tv_roi_align.__code__.co_varnames: - return tv_roi_align(input, rois, self.output_size, self.spatial_scale, self.sampling_ratio, self.aligned) - else: - if self.aligned: - rois -= rois.new_tensor([0.0] + [0.5 / self.spatial_scale] * 4) - return tv_roi_align(input, rois, self.output_size, self.spatial_scale, self.sampling_ratio) diff --git a/src/otx/algorithms/detection/adapters/mmdet/configurer.py b/src/otx/algorithms/detection/adapters/mmdet/configurer.py index e21947ea19c..4a7097d3ff5 100644 --- a/src/otx/algorithms/detection/adapters/mmdet/configurer.py +++ b/src/otx/algorithms/detection/adapters/mmdet/configurer.py @@ -5,6 +5,8 @@ from typing import Optional, Tuple +from mmcv.ops.nms import NMSop +from mmcv.ops.roi_align import RoIAlign from mmcv.utils import ConfigDict from otx.algorithms.common.adapters.mmcv.clsincr_mixin import IncrConfigurerMixin @@ -15,6 +17,8 @@ ) from otx.algorithms.detection.adapters.mmdet.utils import ( cluster_anchors, + monkey_patched_nms, + monkey_patched_roi_align, patch_tiling, should_cluster_anchors, ) @@ -72,6 +76,13 @@ def configure_task(self, cfg, **kwargs): if self.task_adapt_type == "default_task_adapt": self.configure_bbox_head(cfg) + def configure_device(self, cfg): + """Setting device for training and inference.""" + super().configure_device(cfg) + if cfg.device in ["xpu", "hpu"]: + NMSop.forward = monkey_patched_nms + RoIAlign.forward = monkey_patched_roi_align + def configure_classes(self, cfg): """Patch classes for model and dataset.""" super().configure_classes(cfg) diff --git a/src/otx/algorithms/detection/adapters/mmdet/task.py b/src/otx/algorithms/detection/adapters/mmdet/task.py index 247775c6de4..92d5e212e62 100644 --- a/src/otx/algorithms/detection/adapters/mmdet/task.py +++ b/src/otx/algorithms/detection/adapters/mmdet/task.py @@ -13,8 +13,6 @@ from typing import Any, Dict, Optional, Union import torch -from mmcv.ops.nms import NMSop -from mmcv.ops.roi_align import RoIAlign from mmcv.runner import wrap_fp16_model from mmcv.utils import Config, ConfigDict, get_git_hash from mmdet import __version__ @@ -42,11 +40,7 @@ from otx.algorithms.common.configs.training_base import TrainType from otx.algorithms.common.tasks.nncf_task import NNCFBaseTask from otx.algorithms.common.utils.data import get_dataset -from otx.algorithms.detection.adapters.mmdet.apis.train import ( - monkey_patched_nms, - monkey_patched_roi_align, - train_detector, -) +from otx.algorithms.detection.adapters.mmdet.apis.train import train_detector from otx.algorithms.detection.adapters.mmdet.configurer import ( DetectionConfigurer, IncrDetectionConfigurer, @@ -348,10 +342,6 @@ def _infer_model( else: target_classes = mm_dataset.CLASSES - if cfg.device in ["xpu", "hpu"]: - NMSop.forward = monkey_patched_nms - RoIAlign.forward = monkey_patched_roi_align - # Model model = self.build_model(cfg, fp16=cfg.get("fp16", False)) model.CLASSES = target_classes diff --git a/src/otx/algorithms/detection/adapters/mmdet/utils/__init__.py b/src/otx/algorithms/detection/adapters/mmdet/utils/__init__.py index 516fb750a2f..f7e7bfc289e 100644 --- a/src/otx/algorithms/detection/adapters/mmdet/utils/__init__.py +++ b/src/otx/algorithms/detection/adapters/mmdet/utils/__init__.py @@ -6,6 +6,8 @@ from .builder import build_detector from .config_utils import ( cluster_anchors, + monkey_patched_nms, + 
monkey_patched_roi_align, patch_input_preprocessing, patch_input_shape, patch_ir_scale_factor, @@ -21,4 +23,6 @@ "patch_input_shape", "patch_ir_scale_factor", "should_cluster_anchors", + "monkey_patched_nms", + "monkey_patched_roi_align", ] diff --git a/src/otx/algorithms/detection/adapters/mmdet/utils/config_utils.py b/src/otx/algorithms/detection/adapters/mmdet/utils/config_utils.py index 3c2431adae3..f0f7245fda2 100644 --- a/src/otx/algorithms/detection/adapters/mmdet/utils/config_utils.py +++ b/src/otx/algorithms/detection/adapters/mmdet/utils/config_utils.py @@ -2,7 +2,11 @@ # Copyright (C) 2022-2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 +import torch from mmcv import Config, ConfigDict +from mmcv.utils import ext_loader +from torchvision.ops import nms as tv_nms +from torchvision.ops import roi_align as tv_roi_align from otx.algorithms.common.adapters.mmcv.utils import ( InputSizeManager, @@ -30,6 +34,7 @@ logger = get_logger() +ext_module = ext_loader.load_ext("_ext", ["nms", "softnms", "nms_match", "nms_rotated", "nms_quadri"]) def should_cluster_anchors(model_cfg: Config): @@ -243,3 +248,50 @@ def patch_ir_scale_factor(deploy_cfg: ConfigDict, hyper_parameters: DetectionCon ConfigDict(opt_shapes=ConfigDict(input=[1, 3, ir_input_shape[2], ir_input_shape[3]])) ] print(f"-----------------> x {tile_ir_scale_factor} = {ir_input_shape}") + + +def monkey_patched_nms(ctx, bboxes, scores, iou_threshold, offset, score_threshold, max_num): + """Runs MMCV's NMS with torchvision.nms, or forces NMS from MMCV to run on CPU.""" + is_filtering_by_score = score_threshold > 0 + if is_filtering_by_score: + valid_mask = scores > score_threshold + bboxes, scores = bboxes[valid_mask], scores[valid_mask] + valid_inds = torch.nonzero(valid_mask, as_tuple=False).squeeze(dim=1) + + if bboxes.dtype == torch.bfloat16: + bboxes = bboxes.to(torch.float32) + if scores.dtype == torch.bfloat16: + scores = scores.to(torch.float32) + + if offset == 0: + inds = tv_nms(bboxes, scores, float(iou_threshold)) + else: + device = bboxes.device + bboxes = bboxes.to("cpu") + scores = scores.to("cpu") + inds = ext_module.nms(bboxes, scores, iou_threshold=float(iou_threshold), offset=offset) + bboxes = bboxes.to(device) + scores = scores.to(device) + + if max_num > 0: + inds = inds[:max_num] + if is_filtering_by_score: + inds = valid_inds[inds] + return inds + + +def monkey_patched_roi_align(self, input, rois): + """Replaces MMCV's roi align with the one from torchvision. + + Args: + self: patched instance + input: NCHW images + rois: Bx5 boxes. First column is the index into N. The other 4 columns are xyxy. 
+ """ + + if "aligned" in tv_roi_align.__code__.co_varnames: + return tv_roi_align(input, rois, self.output_size, self.spatial_scale, self.sampling_ratio, self.aligned) + else: + if self.aligned: + rois -= rois.new_tensor([0.0] + [0.5 / self.spatial_scale] * 4) + return tv_roi_align(input, rois, self.output_size, self.spatial_scale, self.sampling_ratio) diff --git a/src/otx/cli/utils/hpo.py b/src/otx/cli/utils/hpo.py index 5a0a82d50af..3fbfc103d64 100644 --- a/src/otx/cli/utils/hpo.py +++ b/src/otx/cli/utils/hpo.py @@ -19,6 +19,7 @@ import torch import yaml +from otx.algorithms.common.utils import is_xpu_available from otx.api.configuration.helper import create from otx.api.entities.datasets import DatasetEntity from otx.api.entities.model import ModelEntity @@ -459,7 +460,12 @@ def run_hpo(self, train_func: Callable, data_roots: Dict[str, Dict]) -> Union[Di """ self._environment.save_initial_weight(self._get_initial_model_weight_path()) hpo_algo = self._get_hpo_algo() - resource_type = "gpu" if torch.cuda.is_available() else "cpu" + if torch.cuda.is_available(): + resource_type = "gpu" + elif is_xpu_available(): + resource_type = "xpu" + else: + resource_type = "cpu" run_hpo_loop( hpo_algo, partial( @@ -497,6 +503,11 @@ def _get_hpo_algo(self): return hpo_algo def _prepare_asha(self): + if is_xpu_available(): + asynchronous_sha = torch.xpu.device_count() != 1 + else: + asynchronous_sha = torch.cuda.device_count() != 1 + args = { "search_space": self._hpo_config["hp_space"], "save_path": str(self._hpo_workdir), @@ -511,7 +522,7 @@ def _prepare_asha(self): "expected_time_ratio": self._hpo_time_ratio, "prior_hyper_parameters": self._get_default_hyper_parameters(), "asynchronous_bracket": True, - "asynchronous_sha": torch.cuda.device_count() != 1, + "asynchronous_sha": asynchronous_sha, } logger.debug(f"ASHA args = {args}") diff --git a/src/otx/hpo/hpo_runner.py b/src/otx/hpo/hpo_runner.py index 27ecc0a84a9..3736221989c 100644 --- a/src/otx/hpo/hpo_runner.py +++ b/src/otx/hpo/hpo_runner.py @@ -47,24 +47,26 @@ class HpoLoop: Args: hpo_algo (HpoBase): HPO algorithms. train_func (Callable): Function to train a model. - resource_type (Literal['gpu', 'cpu'], optional): Which type of resource to use. + resource_type (Literal['gpu', 'cpu', 'xpu'], optional): Which type of resource to use. If can be changed depending on environment. Defaults to "gpu". num_parallel_trial (Optional[int], optional): How many trials to run in parallel. It's used for CPUResourceManager. Defaults to None. - num_gpu_for_single_trial (Optional[int], optional): How many GPUs are used for a single trial. - It's used for GPUResourceManager. Defaults to None. - available_gpu (Optional[str], optional): How many GPUs are available. It's used for GPUResourceManager. - Defaults to None. + num_devices_per_trial (Optional[int], optional): Number of devices used for a single trial. + It's used for GPUResourceManager and XPUResourceManager. + Defaults to None. + available_devices (Optional[str], optional): Number of devices available. + It's used for GPUResourceManager and XPUResourceManager. + Defaults to None. 
""" def __init__( self, hpo_algo: HpoBase, train_func: Callable, - resource_type: Literal["gpu", "cpu"] = "gpu", + resource_type: Literal["gpu", "cpu", "xpu"] = "gpu", num_parallel_trial: Optional[int] = None, - num_gpu_for_single_trial: Optional[int] = None, - available_gpu: Optional[str] = None, + num_devices_per_trial: Optional[int] = None, + available_devices: Optional[str] = None, ): self._hpo_algo = hpo_algo self._train_func = train_func @@ -74,7 +76,7 @@ def __init__( self._uid_index = 0 self._trial_fault_count = 0 self._resource_manager = get_resource_manager( - resource_type, num_parallel_trial, num_gpu_for_single_trial, available_gpu + resource_type, num_parallel_trial, num_devices_per_trial, available_devices ) self._main_pid = os.getpid() @@ -228,24 +230,28 @@ def _report_score( def run_hpo_loop( hpo_algo: HpoBase, train_func: Callable, - resource_type: Literal["gpu", "cpu"] = "gpu", + resource_type: Literal["gpu", "cpu", "xpu"] = "gpu", num_parallel_trial: Optional[int] = None, - num_gpu_for_single_trial: Optional[int] = None, - available_gpu: Optional[str] = None, + num_devices_per_trial: Optional[int] = None, + available_devices: Optional[str] = None, ): """Run the HPO loop. Args: hpo_algo (HpoBase): HPO algorithms. train_func (Callable): Function to train a model. - resource_type (Literal['gpu', 'cpu'], optional): Which type of resource to use. + resource_type (Literal['gpu', 'cpu', 'xpu'], optional): Which type of resource to use. If can be changed depending on environment. Defaults to "gpu". num_parallel_trial (Optional[int], optional): How many trials to run in parallel. It's used for CPUResourceManager. Defaults to None. - num_gpu_for_single_trial (Optional[int], optional): How many GPUs are used for a single trial. - It's used for GPUResourceManager. Defaults to None. - available_gpu (Optional[str], optional): How many GPUs are available. It's used for GPUResourceManager. - Defaults to None. + num_devices_per_trial (Optional[int], optional): Number of devices used for a single trial. + It's used for GPUResourceManager and XPUResourceManager. + Defaults to None. + available_devices (Optional[str], optional): Number of devices available. + It's used for GPUResourceManager and XPUResourceManager. + Defaults to None. """ - hpo_loop = HpoLoop(hpo_algo, train_func, resource_type, num_parallel_trial, num_gpu_for_single_trial, available_gpu) + hpo_loop = HpoLoop( + hpo_algo, train_func, resource_type, num_parallel_trial, num_devices_per_trial, available_devices + ) hpo_loop.run() diff --git a/src/otx/hpo/resource_manager.py b/src/otx/hpo/resource_manager.py index a6df2d9930a..3971613d170 100644 --- a/src/otx/hpo/resource_manager.py +++ b/src/otx/hpo/resource_manager.py @@ -20,6 +20,7 @@ import torch +from otx.algorithms.common.utils import is_xpu_available from otx.hpo.utils import check_positive from otx.utils.logger import get_logger @@ -93,41 +94,24 @@ def have_available_resource(self): return len(self._usage_status) < self._num_parallel_trial -class GPUResourceManager(BaseResourceManager): - """Resource manager class for GPU. +class AcceleratorManager(BaseResourceManager): + """Abstract Resource manager class for accelerators. Args: - num_gpu_for_single_trial (int, optional): How many GPUs is used for a single trial. Defaults to 1. - available_gpu (Optional[str], optional): How many GPUs are available. Defaults to None. + num_devices_per_trial (int, optional): Number of devices used for a single trial. Defaults to 1. 
+ available_devices (Optional[str], optional): Number of devices available. Defaults to None. """ - def __init__(self, num_gpu_for_single_trial: int = 1, available_gpu: Optional[str] = None): - check_positive(num_gpu_for_single_trial, "num_gpu_for_single_trial") + def __init__(self, num_devices_per_trial: int = 1, available_devices: Optional[str] = None): + check_positive(num_devices_per_trial, "num_devices_per_trial") - self._num_gpu_for_single_trial = num_gpu_for_single_trial - self._available_gpu = self._set_available_gpu(available_gpu) + self._num_devices_per_trial = num_devices_per_trial + self._available_devices = self._set_available_devices(available_devices) self._usage_status: Dict[Any, List] = {} - def _set_available_gpu(self, available_gpu: Optional[str] = None): - if available_gpu is None: - cuda_visible_devices = os.getenv("CUDA_VISIBLE_DEVICES") - if cuda_visible_devices is not None: - available_gpu_arr = self._transform_gpu_format_from_string_to_arr(cuda_visible_devices) - else: - num_gpus = torch.cuda.device_count() - available_gpu_arr = list(range(num_gpus)) - else: - available_gpu_arr = self._transform_gpu_format_from_string_to_arr(available_gpu) - - return available_gpu_arr - - def _transform_gpu_format_from_string_to_arr(self, gpu: str): - for val in gpu.split(","): - if not val.isnumeric(): - raise ValueError( - "gpu format is wrong. " "gpu should only have numbers delimited by ','.\n" f"your value is {gpu}" - ) - return [int(val) for val in gpu.split(",")] + @abstractmethod + def _set_available_devices(self, available_devices: Optional[str] = None) -> List[int]: + raise NotImplementedError def reserve_resource(self, trial_id: Any) -> Optional[Dict]: """Reserve a resource under 'trial_id'. @@ -146,11 +130,15 @@ def reserve_resource(self, trial_id: Any) -> Optional[Dict]: if trial_id in self._usage_status: raise RuntimeError(f"{trial_id} already has reserved resource.") - resource = list(self._available_gpu[: self._num_gpu_for_single_trial]) - self._available_gpu = self._available_gpu[self._num_gpu_for_single_trial :] + resource = list(self._available_devices[: self._num_devices_per_trial]) + self._available_devices = self._available_devices[self._num_devices_per_trial :] self._usage_status[trial_id] = resource - return {"CUDA_VISIBLE_DEVICES": ",".join([str(val) for val in resource])} + return self._make_env_var_for_train(resource) + + @abstractmethod + def _make_env_var_for_train(self, device_arr: List[int]) -> Dict[str, str]: + raise NotImplementedError def release_resource(self, trial_id: Any): """Release a resource under 'trial_id'. 
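For context, a minimal sketch (not part of the diff) of how a reserved resource reaches a trial once the managers below are in place, assuming a machine where XPUs are actually visible; the trial id "trial_0" is a hypothetical example value:

from otx.hpo.resource_manager import get_resource_manager

manager = get_resource_manager("xpu", num_devices_per_trial=1, available_devices="0,1")
env = manager.reserve_resource(trial_id="trial_0")
# env == {"ONEAPI_DEVICE_SELECTOR": "level_zero:0"}, i.e. the trial is pinned to
# XPU 0 through its environment rather than through torch device ids
manager.release_resource(trial_id="trial_0")  # XPU 0 returns to the pool

Note that when no XPU is available, get_resource_manager falls back to the CPU manager, as the hunk below shows.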
@@ -161,39 +149,82 @@ if trial_id not in self._usage_status: logger.warning(f"{trial_id} trial don't use resource now.") else: - self._available_gpu.extend(self._usage_status[trial_id]) + self._available_devices.extend(self._usage_status[trial_id]) del self._usage_status[trial_id] def have_available_resource(self): """Check that there is available resource.""" - return len(self._available_gpu) >= self._num_gpu_for_single_trial + return len(self._available_devices) >= self._num_devices_per_trial + + +class GPUResourceManager(AcceleratorManager): + """Resource manager class for GPU.""" + + def _set_available_devices(self, available_devices: Optional[str] = None) -> List[int]: + if available_devices is None: + cuda_visible_devices = os.getenv("CUDA_VISIBLE_DEVICES") + if cuda_visible_devices is not None: + available_devices_arr = _cvt_comma_delimited_str_to_list(cuda_visible_devices) + else: + num_gpus = torch.cuda.device_count() + available_devices_arr = list(range(num_gpus)) + else: + available_devices_arr = _cvt_comma_delimited_str_to_list(available_devices) + + return available_devices_arr + + def _make_env_var_for_train(self, device_arr: List[int]) -> Dict[str, str]: + return {"CUDA_VISIBLE_DEVICES": ",".join([str(val) for val in device_arr])} + + +class XPUResourceManager(AcceleratorManager): + """Resource manager class for XPU.""" + + def _set_available_devices(self, available_devices: Optional[str] = None) -> List[int]: + if available_devices is None: + visible_devices = os.getenv("ONEAPI_DEVICE_SELECTOR", "").split(":") + if len(visible_devices) > 1: + available_devices_arr = _cvt_comma_delimited_str_to_list(visible_devices[1]) + else: + num_gpus = torch.xpu.device_count() + available_devices_arr = list(range(num_gpus)) + else: + available_devices_arr = _cvt_comma_delimited_str_to_list(available_devices) + + return available_devices_arr + + def _make_env_var_for_train(self, device_arr: List[int]) -> Dict[str, str]: + return {"ONEAPI_DEVICE_SELECTOR": "level_zero:" + ",".join([str(val) for val in device_arr])} def get_resource_manager( - resource_type: Literal["gpu", "cpu"], + resource_type: Literal["gpu", "cpu", "xpu"], num_parallel_trial: Optional[int] = None, - num_gpu_for_single_trial: Optional[int] = None, - available_gpu: Optional[str] = None, + num_devices_per_trial: Optional[int] = None, + available_devices: Optional[str] = None, ) -> BaseResourceManager: """Get an appropriate resource manager depending on current environment. Args: - resource_type (Literal["gpu", "cpu"]): Which type of resource to use. + resource_type (Literal["gpu", "cpu", "xpu"]): Which type of resource to use. If can be changed depending on environment. num_parallel_trial (Optional[int]): How many trials to run in parallel. It's used for CPUResourceManager. Defaults to None. - num_gpu_for_single_trial (Optional[int]): How many GPUs is used for a single trial. + num_devices_per_trial (Optional[int]): Number of devices used for a single trial. It's used for GPUResourceManager. Defaults to None. - available_gpu (Optional[str]): How many GPUs are available. It's used for GPUResourceManager. Defaults to None. + available_devices (Optional[str]): Number of devices available. It's used for GPUResourceManager and XPUResourceManager. + Defaults to None. Raises: - ValueError: If resource_type is neither 'gpu' nor 'cpu', then raise an error. + ValueError: If resource_type is neither 'gpu', 'cpu', nor 'xpu', then raise an error. Returns: BaseResourceManager: Resource manager to use. 
""" - if resource_type == "gpu" and not torch.cuda.is_available(): - logger.warning("GPU can't be used now. resource type is modified to cpu.") + if (resource_type == "gpu" and not torch.cuda.is_available()) or ( + resource_type == "xpu" and not is_xpu_available() + ): + logger.warning("{} can't be used now. resource type is modified to cpu.".format(resource_type)) resource_type = "cpu" if resource_type == "cpu": @@ -201,9 +232,13 @@ def get_resource_manager( args = _remove_none_from_dict(args) return CPUResourceManager(**args) # type: ignore if resource_type == "gpu": - args = {"num_gpu_for_single_trial": num_gpu_for_single_trial, "available_gpu": available_gpu} # type: ignore + args = {"num_devices_per_trial": num_devices_per_trial, "available_devices": available_devices} # type: ignore args = _remove_none_from_dict(args) return GPUResourceManager(**args) # type: ignore + if resource_type == "xpu": + args = {"num_devices_per_trial": num_devices_per_trial, "available_devices": available_devices} # type: ignore + args = _remove_none_from_dict(args) + return XPUResourceManager(**args) # type: ignore raise ValueError(f"Available resource type is cpu, gpu. Your value is {resource_type}.") @@ -212,3 +247,14 @@ def _remove_none_from_dict(dict_val: Dict): for key in key_to_remove: del dict_val[key] return dict_val + + +def _cvt_comma_delimited_str_to_list(string: str): + for val in string.split(","): + if not val.isnumeric(): + raise ValueError( + "string format is wrong. " + "string should only have numbers delimited by ','.\n" + f"your value is {string}" + ) + return [int(val) for val in string.split(",")] diff --git a/tests/unit/algorithms/common/adapters/mmcv/utils/test_config_utils.py b/tests/unit/algorithms/common/adapters/mmcv/utils/test_config_utils.py index 1f250eee3ff..9e1a43528dc 100644 --- a/tests/unit/algorithms/common/adapters/mmcv/utils/test_config_utils.py +++ b/tests/unit/algorithms/common/adapters/mmcv/utils/test_config_utils.py @@ -65,6 +65,7 @@ def test_patch_persistent_workers_dist_semisl(mocker): def test_get_adaptive_num_workers(mocker, num_dataloader): num_gpu = 5 mock_torch = mocker.patch.object(config_utils, "torch") + mocker.patch.object(config_utils, "is_xpu_available", return_value=False) mock_torch.cuda.device_count.return_value = num_gpu num_cpu = 20 @@ -78,6 +79,7 @@ def test_get_adaptive_num_workers(mocker, num_dataloader): def test_get_adaptive_num_workers_no_gpu(mocker): num_gpu = 0 mock_torch = mocker.patch.object(config_utils, "torch") + mocker.patch.object(config_utils, "is_xpu_available", return_value=False) mock_torch.cuda.device_count.return_value = num_gpu num_cpu = 20 @@ -87,6 +89,21 @@ def test_get_adaptive_num_workers_no_gpu(mocker): assert get_adaptive_num_workers() is None +@e2e_pytest_unit +@pytest.mark.parametrize("num_dataloader", [1, 2, 4]) +def test_get_adaptive_num_workers_xpu(mocker, num_dataloader): + num_gpu = 5 + mock_torch = mocker.patch.object(config_utils, "torch") + mocker.patch.object(config_utils, "is_xpu_available", return_value=True) + mock_torch.xpu.device_count.return_value = num_gpu + + num_cpu = 20 + mock_multiprocessing = mocker.patch.object(config_utils, "multiprocessing") + mock_multiprocessing.cpu_count.return_value = num_cpu + + assert get_adaptive_num_workers(num_dataloader) == num_cpu // (num_gpu * num_dataloader) + + @pytest.fixture def mock_data_pipeline(): image_size = (400, 400) diff --git a/tests/unit/algorithms/detection/adapters/mmdet/test_configurer.py 
b/tests/unit/algorithms/detection/adapters/mmdet/test_configurer.py index 9df0626976d..4ae43b9be6f 100644 --- a/tests/unit/algorithms/detection/adapters/mmdet/test_configurer.py +++ b/tests/unit/algorithms/detection/adapters/mmdet/test_configurer.py @@ -27,6 +27,15 @@ ) +@pytest.fixture +def device_availability_func(mocker): + return { + "cuda": mocker.patch("torch.cuda.is_available"), + "xpu": mocker.patch("otx.algorithms.common.adapters.mmcv.configurer.is_xpu_available"), + "hpu": mocker.patch("otx.algorithms.common.adapters.mmcv.configurer.is_hpu_available"), + } + + class TestDetectionConfigurer: @pytest.fixture(autouse=True) def setup(self) -> None: @@ -115,45 +124,40 @@ def test_configure_env(self): self.configurer.configure_env(self.model_cfg) @e2e_pytest_unit - def test_configure_device(self, mocker): - mocker.patch( - "torch.distributed.is_initialized", - return_value=True, - ) - mocker.patch("torch.distributed.get_world_size", return_value=2) - world_size = 2 - mocker.patch("os.environ", return_value={"LOCAL_RANK": 2}) - config = copy.deepcopy(self.model_cfg) - origin_lr = config.optimizer.lr - self.configurer.configure_device(config) - assert config.distributed is True - assert config.optimizer.lr == pytest.approx(origin_lr * world_size) + @pytest.mark.parametrize("current_device", ["cpu", "cuda", "xpu", "hpu"]) + def test_configure_device(self, mocker, device_availability_func, current_device): + for key, mock_func in device_availability_func.items(): + if current_device == key: + mock_func.return_value = True + else: + mock_func.return_value = False mocker.patch( "torch.distributed.is_initialized", return_value=False, ) - mocker.patch( - "torch.cuda.is_available", - return_value=False, - ) + config = copy.deepcopy(self.model_cfg) self.configurer.configure_device(config) assert config.distributed is False - assert config.device == "cpu" + assert config.device == current_device + @e2e_pytest_unit + def test_configure_dist_device(self, mocker): mocker.patch( "torch.distributed.is_initialized", - return_value=False, - ) - mocker.patch( - "torch.cuda.is_available", return_value=True, ) + config = copy.deepcopy(self.model_cfg) + mocker.patch("torch.distributed.get_world_size", return_value=2) + world_size = 2 + mocker.patch("os.environ", return_value={"LOCAL_RANK": 2}) + origin_lr = config.optimizer.lr + self.configurer.configure_device(config) - assert config.distributed is False - assert config.device == "cuda" + assert config.distributed is True + assert config.optimizer.lr == pytest.approx(origin_lr * world_size) @e2e_pytest_unit def test_configure_samples_per_gpu(self): @@ -220,21 +224,36 @@ def test_configure_input_size_yolox(self, mocker, is_yolox_tiny): mock_input_manager_cls.assert_called_once_with(mock_cfg, base_input_size) @e2e_pytest_unit - def test_configure_fp16(self): + @pytest.mark.parametrize("optimizer_hook", ["OptimizerHook", "SAMOptimizerHook", "DummyOptimizerHook"]) + def test_configure_fp16_cpu(self, device_availability_func, optimizer_hook): + for func in device_availability_func.values(): + func.return_value = False + model_cfg = copy.deepcopy(self.model_cfg) model_cfg.fp16 = {} + model_cfg.optimizer_config.type = optimizer_hook self.configurer.configure_fp16(model_cfg) - assert model_cfg.optimizer_config.type == "Fp16OptimizerHook" + assert model_cfg.optimizer_config.type == optimizer_hook - model_cfg.fp16 = {} - model_cfg.optimizer_config.type = "SAMOptimizerHook" - self.configurer.configure_fp16(model_cfg) - assert model_cfg.optimizer_config.type == 
"Fp16SAMOptimizerHook" + @e2e_pytest_unit + @pytest.mark.parametrize("optimizer_hook", ["OptimizerHook", "SAMOptimizerHook", "DummyOptimizerHook"]) + def test_configure_fp16_cuda(self, device_availability_func, optimizer_hook): + for key, func in device_availability_func.items(): + if key == "cuda": + func.return_value = True + else: + func.return_value = False + if "Dummy" in optimizer_hook: + expected_optimizer_hook = optimizer_hook + else: + expected_optimizer_hook = f"Fp16{optimizer_hook}" + + model_cfg = copy.deepcopy(self.model_cfg) model_cfg.fp16 = {} - model_cfg.optimizer_config.type = "DummyOptimizerHook" + model_cfg.optimizer_config.type = optimizer_hook self.configurer.configure_fp16(model_cfg) - assert model_cfg.optimizer_config.type == "DummyOptimizerHook" + assert model_cfg.optimizer_config.type == expected_optimizer_hook @e2e_pytest_unit def test_configure_model(self): diff --git a/tests/unit/hpo/test_resource_manager.py b/tests/unit/hpo/test_resource_manager.py index 46beb4ac066..05df5cfe39e 100644 --- a/tests/unit/hpo/test_resource_manager.py +++ b/tests/unit/hpo/test_resource_manager.py @@ -1,9 +1,12 @@ import pytest +from otx.hpo import resource_manager as target_file from otx.hpo.resource_manager import ( CPUResourceManager, GPUResourceManager, + XPUResourceManager, _remove_none_from_dict, + _cvt_comma_delimited_str_to_list, get_resource_manager, ) from tests.test_suite.e2e_test_system import e2e_pytest_component @@ -16,7 +19,7 @@ def cpu_resource_manager(): @pytest.fixture def gpu_resource_manager(): - return GPUResourceManager(num_gpu_for_single_trial=1, available_gpu="0,1,2,3") + return GPUResourceManager(num_devices_per_trial=1, available_devices="0,1,2,3") class TestCPUResourceManager: @@ -70,36 +73,36 @@ def test_have_available_resource(self, cpu_resource_manager): class TestGPUResourceManager: @e2e_pytest_component def test_init(self): - GPUResourceManager(num_gpu_for_single_trial=1, available_gpu="0,1,2") + GPUResourceManager(num_devices_per_trial=1, available_devices="0,1,2") @e2e_pytest_component - @pytest.mark.parametrize("num_gpu_for_single_trial", [-1, 0]) - def test_init_not_positive_num_gpu(self, num_gpu_for_single_trial): + @pytest.mark.parametrize("num_devices_per_trial", [-1, 0]) + def test_init_not_positive_num_gpu(self, num_devices_per_trial): with pytest.raises(ValueError): - GPUResourceManager(num_gpu_for_single_trial=num_gpu_for_single_trial) + GPUResourceManager(num_devices_per_trial=num_devices_per_trial) @e2e_pytest_component - @pytest.mark.parametrize("available_gpu", [",", "a,b", "0,a", ""]) - def test_init_wrong_available_gpu_value(self, available_gpu): + @pytest.mark.parametrize("available_devices", [",", "a,b", "0,a", ""]) + def test_init_wrong_available_devices_value(self, available_devices): with pytest.raises(ValueError): - GPUResourceManager(available_gpu=available_gpu) + GPUResourceManager(available_devices=available_devices) @e2e_pytest_component def test_reserve_resource(self): - num_gpu_for_single_trial = 2 + num_devices_per_trial = 2 num_gpus = 8 - max_parallel = num_gpus // num_gpu_for_single_trial + max_parallel = num_gpus // num_devices_per_trial gpu_resource_manager = GPUResourceManager( - num_gpu_for_single_trial=num_gpu_for_single_trial, - available_gpu=",".join([str(val) for val in range(num_gpus)]), + num_devices_per_trial=num_devices_per_trial, + available_devices=",".join([str(val) for val in range(num_gpus)]), ) - num_gpus = len(gpu_resource_manager._available_gpu) + num_gpus = 
len(gpu_resource_manager._available_devices) for i in range(max_parallel): env = gpu_resource_manager.reserve_resource(i) assert env is not None assert "CUDA_VISIBLE_DEVICES" in env - assert len(env["CUDA_VISIBLE_DEVICES"].split(",")) == num_gpu_for_single_trial + assert len(env["CUDA_VISIBLE_DEVICES"].split(",")) == num_devices_per_trial for i in range(max_parallel, max_parallel + 10): assert gpu_resource_manager.reserve_resource(i) is None @@ -121,14 +124,14 @@ def test_release_unreserved_resource(self, gpu_resource_manager): @e2e_pytest_component def test_have_available_resource(self): - num_gpu_for_single_trial = 2 + num_devices_per_trial = 2 num_gpus = 8 - max_parallel = num_gpus // num_gpu_for_single_trial + max_parallel = num_gpus // num_devices_per_trial gpu_resource_manager = GPUResourceManager( - num_gpu_for_single_trial=num_gpu_for_single_trial, - available_gpu=",".join([str(val) for val in range(num_gpus)]), + num_devices_per_trial=num_devices_per_trial, + available_devices=",".join([str(val) for val in range(num_gpus)]), ) - num_gpus = len(gpu_resource_manager._available_gpu) + num_gpus = len(gpu_resource_manager._available_devices) for i in range(max_parallel): assert gpu_resource_manager.have_available_resource() @@ -138,6 +141,41 @@ def test_have_available_resource(self): assert not gpu_resource_manager.have_available_resource() +class TestXPUResourceManager: + @e2e_pytest_component + @pytest.fixture(autouse=True) + def setup(self, mocker): + self.mock_os = mocker.patch.object(target_file, "os") + self.mock_torch = mocker.patch.object(target_file, "torch") + + def test_init_env_var_exist(self): + self.mock_os.getenv.return_value = "level_zero:1,2" + resource_manager = XPUResourceManager(num_devices_per_trial=1) + for i in range(2): + resource_manager.reserve_resource(i) + assert resource_manager.reserve_resource(3) is None + + def test_init_no_env_var(self): + self.mock_torch.xpu.device_count.return_value = 4 + resource_manager = XPUResourceManager(num_devices_per_trial=1) + for i in range(4): + resource_manager.reserve_resource(i) + assert resource_manager.reserve_resource(3) is None + + def test_reserve_resource(self): + self.mock_torch.xpu.device_count.return_value = 4 + resource_manager = XPUResourceManager(num_devices_per_trial=1) + + for i in range(4): + env = resource_manager.reserve_resource(i) + assert env is not None + assert "ONEAPI_DEVICE_SELECTOR" in env + assert env["ONEAPI_DEVICE_SELECTOR"] == f"level_zero:{i}" + + for i in range(4, 10): + assert resource_manager.reserve_resource(i) is None + + @e2e_pytest_component def test_get_resource_manager_cpu(): manager = get_resource_manager(resource_type="cpu", num_parallel_trial=4) @@ -145,11 +183,13 @@ def test_get_resource_manager_cpu(): @e2e_pytest_component -def test_get_resource_manager_gpu(): - num_gpu_for_single_trial = 1 - available_gpu = "0,1,2,3" +def test_get_resource_manager_gpu(mocker): + mock_torch = mocker.patch.object(target_file, "torch") + mock_torch.cuda.is_available.return_value = True + num_devices_per_trial = 1 + available_devices = "0,1,2,3" manager = get_resource_manager( - resource_type="gpu", num_gpu_for_single_trial=num_gpu_for_single_trial, available_gpu=available_gpu + resource_type="gpu", num_devices_per_trial=num_devices_per_trial, available_devices=available_devices ) assert isinstance(manager, GPUResourceManager) @@ -174,3 +214,14 @@ def test_remove_none_from_dict(): some_dict = {"a": 1, "b": None} ret = _remove_none_from_dict(some_dict) assert ret == {"a": 1} + + 
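To tie the pieces together, a hedged usage sketch (not part of the diff; my_hpo_algo and my_train_func are hypothetical placeholders) of driving the HPO loop on XPUs with the renamed arguments:

from otx.hpo.hpo_runner import run_hpo_loop

run_hpo_loop(
    my_hpo_algo,    # an HpoBase instance
    my_train_func,  # callable that trains and reports a single trial
    resource_type="xpu",          # falls back to "cpu" when no XPU is available
    num_devices_per_trial=1,      # one XPU per trial
    available_devices="0,1,2,3",  # else discovered via ONEAPI_DEVICE_SELECTOR or torch.xpu.device_count()
)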
+@e2e_pytest_component +def test_cvt_comma_delimited_str_to_list(): + assert _cvt_comma_delimited_str_to_list("1,3,5") == [1, 3, 5] + + +@e2e_pytest_component +def test_cvt_comma_delimited_str_to_list_wrong_format(): + with pytest.raises(ValueError): + _cvt_comma_delimited_str_to_list("a,3,5") From 927fab61906147c49f58bf1defea72572b9c5fa7 Mon Sep 17 00:00:00 2001 From: Eunwoo Shin Date: Thu, 14 Dec 2023 23:15:22 +0900 Subject: [PATCH 15/39] Merge develop to develop-idev (#2727) * Update base.txt updated dependency version of datumaro * Update __init__.py update version string * Update requirements.txt * Temporarily skip visual prompting openvino integration test (#2323) * Fix import dm.DatasetSubset (#2324) Signed-off-by: Kim, Vinnam * Fix semantic segmentation soft prediction dtype (#2322) * Fix semantic segmentation soft prediction dtype * relax ref sal vals check --------- Co-authored-by: Songki Choi * Constrain yapf version to less than 0.40.0 (#2328) constrain_yapf_version * Fix detection e2e tests (#2327) Fix for detection * Mergeback: Label addition/deletion 1.2.4 --> 1.4.0 (#2326) * Make black happy * Fix conflicts * Merge-back: add test datasets and edit the test code * Make black happy * Fix mis-merge * Make black happy * Fix typo * Fix typo --------- Co-authored-by: Songki Choi * Bump datumaro up to 1.4.0rc2 (#2332) bump datumaro up to 1.4.0rc2 * Tiling Doc for releases 1.4.0 (#2333) * Add tiling documentation * Bump otx version to 1.4.0rc2 (#2341) * OTX deploy for visual prompting task (#2311) * Enable `otx deploy` * (WIP) integration test * Docstring * Update args for create_model * Manually set image embedding layout * Enable to use model api for preprocessing - `fit_to_window` doesn't work expectedly, so newly implemented `VisualPromptingOpenvinoAdapter` to use new resize function * Remove skipped test * Updated * Update unit tests on model wrappers * Update * Update configuration * Fix not to patch pretrained path * pylint & update model api version in docstring --------- Co-authored-by: Wonju Lee * Bump albumentations version in anomaly requirements (#2350) increment albumentations version * Update action detection (#2346) * Remove skip mark for PTQ test of action detection * Update action detection documentation * Fix e2e (#2348) * Change classification dataset from dummy to toy * Revert test changes * Change label name for multilabel dataset * Revert e2e test changes * Change ov test cases' threshold * Add parent's label * Update ModelAPI in 1.4 release (#2347) * Upgrade model API * Update otx in exportable code * Fix unit tests * Fix black * Fix detection inference * Fix det tiling * Fix mypy * Fix demo * Fix visualizer in demo * Fix black * Add OTX optimize for visual prompting task (#2318) * Initial commit * Update block * (WIP) otx optimize * Fix * WIP * Update configs & exported outputs * Remove unused modules for torch * Add unit tests * pre-commit * Update CHANGELOG * Update detection docs (#2335) * Update detection docs * Revert template id changes * Fix wrong template id * Update docs/source/guide/explanation/algorithms/object_detection/object_detection.rst Co-authored-by: Eunwoo Shin * Update docs/source/guide/explanation/algorithms/object_detection/object_detection.rst Co-authored-by: Eunwoo Shin --------- Co-authored-by: Eunwoo Shin * Add visual prompting documentation (#2354) * (WIP) write docs * Add visual prompting documentation * Update CHANGELOG --------- Co-authored-by: sungchul.kim * Remove custom modelapi patch in visual prompting (#2359) * Remove custom
modelapi patch * Update test * Fix graph metric order and label issues (#2356) * Fix graph metric going backward issue * Add license notice * Fix pre-commit issue * Add rename items & logic for metric --------- Signed-off-by: Songki Choi * Update multi-label document and conversion script (#2358) Update docs, label convert script * Update third party programs (#2365) * Make anomaly task compatible with older albumentations versions (#2363) * fix transforms export in metadata * wrap transform dict * add todo for updating to_dict call * Fixing detection saliency map for one class case (#2368) * fix softmax * fix validity tests * Add e2e test for visual prompting (#2360) * (WIP) otx optimize * pre-commit * (WIP) set e2e * Remove nncf config * Add visual prompting requirement * Add visual prompting in tox * Add visual prompting in setup.py * Fix typo * Delete unused configuration.yaml * Edit test_name * Add to limit activation range * Update from `vp` to `visprompt` * Fix about no returning the first label * pre-commit * (WIP) otx optimize * pre-commit * (WIP) set e2e * Remove nncf config * Add visual prompting requirement * Add visual prompting in tox * Add visual prompting in setup.py * Fix typo * pre-commit * Add actions * Update tests/e2e/cli/visual_prompting/test_visual_prompting.py Co-authored-by: Jaeguk Hyun * Skip PTQ e2e test * Change task name * Remove skipped tc --------- Co-authored-by: Jaeguk Hyun * Fix e2e (#2366) * Change e2e reference name * Update openvino eval threshold for multiclass classification * Change comment message * Fix tiling e2e tests --------- Co-authored-by: GalyaZalesskaya * Add Dino head unit tests (#2344) Recover DINO head unit tests * Update for release 1.4.0rc2 (#2370) * update for release 1.4.0rc2 * Add skip mark for unstable unit tests --------- Co-authored-by: jaegukhyun * Fix NNCF training on CPU (#2373) * Align label order between Geti and OTX (#2369) * align label order * align with pre-commit * update CHANGELOG.md * deal with edge case * update type hint * Remove CenterCrop from Classification test pipeline and editing missing docs link (#2375) * Fix missing link for docs and removing centercrop for classification data pipeline * Revert the test threshold * Fix H-label classification (#2377) * Fix h-label issue * Update unit tests * Make black happy * Fix unittests * Make black happy * Fix update headers information func * Update the logic: consider the loss per batch * Update for release 1.4 (#2380) * updated for 1.4.0rc3 * update changelog & release note * bump datumaro version up --------- Co-authored-by: Songki Choi * Switch to PTQ for sseg (#2374) * Switch to PTQ for sseg * Update log messages * Fix invalid import structures in otx.api (#2383) Update tiler.py * Update for 1.4.0rc4 (#2385) update for release 1.4.0rc4 * [release 1.4.0] XAI: Return saliency maps for Mask RCNN IR async infer (#2395) * Return saliency maps for openvino async infer * add workaround to fix yapf importing error --------- Co-authored-by: eunwoosh * Update for release 1.4.0 (#2399) update version string Co-authored-by: Sungman Cho * Fix broken links in documentation (#2405) * fix docs links to datumaro's docs * fix docs links to otx's docs * bump version to 1.4.1 * Update exportable code README (#2411) * Updated for release 1.4.1 (#2412) updated for release 1.4.1 * Add workaround for the incorrect meta info M-RCNN (used for XAI) (#2437) Add workaround for the incorrect meta info * Add model category attributes to model template (#2439) Add model category attributes to model
template * Add model category & status fields in model template * Add is_default_for_task attr to model template * Update model templates with category attrs * Add integration tests for model templates consistency * Fix license & doc string * Fix typo * Refactor test cases * Refactor common tests by generator --------- Signed-off-by: Songki Choi * Update for 1.4.2rc1 (#2441) update for release 1.4.2rc1 * Fix label list order for h-label classification (#2440) * Fix label list for h-label cls * Fix unit tests * Modified fq numbers for lite HRNET (#2445) modified fq numbers for lite HRNET * Update PTQ ignored scope for hrnet 18 mod2 (#2449) Update ptq ignored scope for hrnet 18 mod2 * Fix OpenVINO inference for legacy models (#2450) * bug fix for legacy openvino models * Add tests * Specific exceptions --------- * Update for 1.4.2rc2 (#2455) update for release 1.4.2rc2 * Prevent zero-sized saliency map in tiling if tile size is too big (#2452) * Prevent zero-sized saliency map in tiling if tile size is too big * Prevent zero-sized saliency in tiling (PyTorch) * Add unit tests for Tiler merge features methods --------- Co-authored-by: Galina * Update pot fq reference number (#2456) update pot fq reference number to 15 * Bump datumaro version to 1.5.0rc0 (#2470) bump datumaro version to 1.5.0rc0 * Set tox version constraint (#2472) set tox version constraint - https://github.com/tox-dev/tox/issues/3110 * Bug fix for albumentations (#2467) * bug fix for legacy openvino models * Address albumentation issue --------- Co-authored-by: Ashwin Vaidya * update for release 1.4.2rc3 * Add a dummy hierarchical config required by MAPI (#2483) * bump version to 1.4.2rc4 * Bump datumaro version (#2502) * bump datumaro version * remove deprecated/removed attribute usage of the datumaro * Upgrade nncf version for 1.4 release (#2459) * Upgrade nncf version * Fix nncf interface warning * Set the exact nncf version * Update FQ refs after NNCF upgrade * Use NNCF from pypi * Update version for release 1.4.2rc5 (#2507) update version for release 1.4.2rc5 * Update for 1.4.2 (#2514) update for release 1.4.2 * create branch release/1.5.0 * Delete mem cache handler after training is done (#2535) release mem cache handler after training is done * Fix bug that auto batch size doesn't consider distributed training (#2533) * consider distributed training while searching batch size * update unit test * revert gpu memory upper bound * fix typo * change allocated to reserved * add unit test for distributed training * align with pre-commit * Apply fix progress hook to release 1.5.0 (#2539) * Fix hook's ordering issue.
AdaptiveRepeatHook changes the runner.max_iters before the ProgressHook * Change the expression * Fix typo * Fix multi-label, h-label issue * Fix auto_bs issue * Apply suggestions from code review Co-authored-by: Eunwoo Shin * Reflecting reviews * Refactor the name of get_data_cfg * Revert adaptive hook sampler init * Refactor the function name: get_data_cfg -> get_subset_data_cfg * Fix unit test errors * Remove adding AdaptiveRepeatDataHook for autobs * Remove unused import * Fix detection and segmentation case in Geti scenario --------- Co-authored-by: Eunwoo Shin * Re-introduce adaptive scheduling for training (#2541) * Re-introduce adaptive patience for training * Revert unit tests * Update for release 1.4.3rc1 (#2542) * Mirror Anomaly ModelAPI changes (#2531) * Migrate anomaly exportable code to modelAPI (#2432) * Fix license in PR template * Migrate to modelAPI * Remove color conversion in streamer * Remove reverse_input_channels * Add float * Remove test as metadata is no longer used * Remove metadata from load method * remove anomalib openvino inferencer * fix signature * Support legacy OpenVINO model * Transform image * add configs * Re-introduce adaptive training (#2543) * Re-introduce adaptive patience for training * Revert unit tests * Fix auto input size mismatch in eval & export (#2530) * Fix auto input size mismatch in eval & export * Re-enable E2E tests for Issue#2518 * Add input size check in export testing * Format float numbers in log * Fix NNCF export shape mismatch * Fix saliency map issue * Disable auto input size if tiling enabled --------- Signed-off-by: Songki Choi * Update ref. fq number for anomaly e2e2 (#2547) * Skip e2e det tests by issue2548 (#2550) * Add skip to chained TC for issue #2548 (#2552) * Update for release 1.4.3 (#2551) * Update MAPI for 1.5 release (#2555) Upgrade MAPI to v 0.1.6 (#2529) * Upgrade MAPI * Update exp code demo commit * Fix MAPI imports * Update ModelAPI configuration (#2564) * Update MAPI rt info for detection * Update export info for cls, det and seg * Update unit tests * Disable QAT for SegNexts (#2565) * Disable NNCF QAT for SegNext * Del obsolete pot configs * Move NNCF skip marks to test commands to avoid duplication * Add Anomaly modelAPI changes to releases/1.4.0 (#2563) * bug fix for legacy openvino models * Apply otx anomaly 1.5 changes * Fix tests * Fix compression config * fix modelAPI imports * update integration tests * Edit config types * Update keys in deployed model --------- Co-authored-by: Ashwin Vaidya Co-authored-by: Kim, Sungchul * Fix the CustomNonLinearClsHead when the batch_size is set to 1 (#2571) Fix bn1d issue Co-authored-by: sungmanc * Update ModelAPI configuration (#2564 from 1.4) (#2568) Update ModelAPI configuration (#2564) * Update MAPI rt info for detection * Update export info for cls, det and seg * Update unit tests * Update for 1.4.4rc1 (#2572) * Hotfix DatasetEntity.get_combined_subset function loop (#2577) Fix get_combined_subset function * Revert default input size to `Default` due to YOLOX perf regression (#2580) Signed-off-by: Songki Choi * Fix for the degradation issue of the classification task (#2585) * Revert to sync with 1.4.0 * Remove repeat data * Convert to the RGB value * Fix color conversion logic * Fix precommit * Bump datumaro version to 1.5.1rc3 (#2587) * Add label ids to anomaly OpenVINO model xml (#2590) * Add label ids to model xml --------- * Fix DeiT-Tiny model regression during class incremental training (#2594) * enable IBloss for DeiT-Tiny * update changelog * add
docstring * Add label ids to model xml in release 1.5 (#2591) Add label ids to model xml * Fix DeiT-Tiny regression test for release/1.4.0 (#2595) * Fix DeiT regression test * update changelog * temp * Fix mmcls bug not wrapping model in DataParallel on CPUs (#2601) Wrap multi-label and h-label classification models by MMDataParallel in case of CPU training. --------- Signed-off-by: Songki Choi * Fix h-label loss normalization issue w/ exclusive label group of single label (#2604) * Fix h-label loss normalization issue w/ exclusive label group with single label * Fix non-linear version --------- Signed-off-by: Songki Choi * Boost up Image numpy accessing speed through PIL (#2586) * boost up numpy accessing speed through PIL * update CHANGELOG * resolve precommit error * resolve precommit error * add fallback logic with PIL open * use convert instead of draft * Add missing import pathlib for cls e2e testing (#2610) * Fix division by zero in class incremental learning for classification (#2606) * Add empty label to reproduce zero-division error Signed-off-by: Songki Choi * Fix minor typo Signed-off-by: Songki Choi * Fix empty label 4 -> 3 Signed-off-by: Songki Choi * Prevent division by zero Signed-off-by: Songki Choi * Update license Signed-off-by: Songki Choi * Update CHANGELOG.md Signed-off-by: Songki Choi * Fix inefficient sampling Signed-off-by: Songki Choi * Revert indexing Signed-off-by: Songki Choi * Fix minor typo Signed-off-by: Songki Choi --------- Signed-off-by: Songki Choi * Unify logger usage (#2612) * unify logger * align with pre-commit * unify anomaly logger to otx * change logger file path * align with pre-commit * change logger file path in missing file * configure logger after ConfigManager is initialized * configure logger when ConfigManager instance is initialized * update unit test code * move config_logger to each cli file * align with pre-commit * change part still using mmcv logger * Fix XAI algorithm for Detection (#2609) * Improve saliency maps algorithm for Detection * Remove extra changes * Update unit tests * Changes for 1 class * Fix pre-commit * Update CHANGELOG * Tighten dependency constraint only adapting latest patches (#2607) * tighten dependency constraint only adapting latest patches * adjust scikit-image version w.r.t python version * adjust tensorboard version w.r.t python version * remove version specifier for scikit-image * Add metadata to optimized model (#2618) * bug fix for legacy openvino models * Add metadata to optimized model * Revert formatting changes --------- Co-authored-by: Ashwin Vaidya * modify omegaconf version constraint * [release 1.5.0] Fix XAI algorithm for Detection (#2617) Update detection XAI algorithm * Update dependency constraint (#2622) * Update tpp (#2621) * Fix h-label bug of missing parent labels in output (#2626) * Fix h-label bug of missing parent labels in output * Fix h-label test data label schema * Update CHANGELOG.md --------- Signed-off-by: Songki Choi * Update publish workflow (#2625) update publish workflow to push whl to internal pypi * bump datumaro version to ~=1.5.0 * fixed mistake while merging back 1.4.4 * modify readme * remove openvino model wrapper class * remove openvino model wrapper tests * [release 1.5.0] DeiT: enable tests + add ViTFeatureVectorHook (#2630) Add ViT feature vector hook * Fix docs broken link to datumaro_h-label Signed-off-by: Songki Choi * Fix wrong label settings for non-anomaly task ModelAPIs Signed-off-by: Songki Choi * Update publish workflow for tag checking (#2632) *
Update e2e tests for XAI Detection (#2634) Fix e2e XAI ref value * Disable QAT for newly added models (#2636) * Update release note and readme (#2637) * update release note and readme * remove package upload step on internal publish wf * update release note, changelog, and readme * update version string to 1.6.0dev * fix datumaro version to 1.6.0rc0 * Mergeback 1.5.0 to develop (#2642) * Update publish workflow for tag checking (#2632) * Update e2e tests for XAI Detection (#2634) * Disable QAT for newly added models (#2636) * Update release note and readme (#2637) * remove package upload step on internal publish wf * update release note, changelog, and readme * update version string to 1.6.0dev --------- Co-authored-by: Galina Zalesskaya Co-authored-by: Jaeguk Hyun * Revert "Mergeback 1.5.0 to develop" (#2645) Revert "Mergeback 1.5.0 to develop (#2642)" This reverts commit 2f67686103df873d020681f6d504f9595ce4a963. * Add a tool to help conduct experiments (#2651) * implement run and experiment * implement experiment result aggregator * refactor experiment.py * refactor run.py * get export model speed * add var column * refactor experiment.py * refine a way to update argument in cmd * refine resource tracker * support anomaly on research framework * refine code aggregating exp result * bugfix * make other task available * eval task save avg_time_per_images as result * Add new argument to track CPU&GPU utilization and memory usage (#2500) * add argument to track resource usage * fix bug * fix a bug in a multi gpu case * use total cpu usage * add unit test * add mark to unit test * cover edge case * add pynvml in requirement * align with pre-commit * add license comment * update changelog * refine argument help * align with pre-commit * add version to requirement and raise an error if not supported values are given * apply new resource tracker format * refactor run.py * support optimize in research framework * cover edge case * Handle a case where fail cases exist * make argparse raise error rather than exit if problem exist * revert tensorboard aggregator * bugfix * save failed cases as yaml file * deal with integer in variables * add epoch to metric * use latest log.json file * align with otx logging method * move experiment.py from cli to tools * refactor experiment.py * merge otx run feature into experiment.py * move set_arguments_to_cmd definition into experiment.py * refactor experiment.py * bugfix * minor bugfix * use otx.cli instead of each otx entry * add feature to parse single workspace * add comments * fix bugs * align with pre-commit * revert parser argument * align with pre-commit * Make `max_num_detections` configurable (#2647) * Make max_num_detections configurable * Fix RCNN case with integration test * Apply max_num_detections to train_cfg, too --------- Signed-off-by: Songki Choi * Revert inference batch size to 1 for instance segmentation (#2648) Signed-off-by: Songki Choi * Fix CPU training issue on non-CUDA system (#2655) Fix bug that auto adaptive batch size raises an error if CUDA isn't available (#2410) --------- Co-authored-by: Sungman Cho Co-authored-by: Eunwoo Shin * Remove unnecessary log while building a model (#2658) * revert logger in otx/algorithms/detection/adapters/mmdet/utils/builder.py * revert logger in otx/algorithms/classification/adapters/mmcls/utils/builder.py * make change more readable * Fix a minor bug of experiment.py (#2662) fix bug * Not check avg_time_per_image during test (#2665) * ignore avg_time_per_image during test * do not call
stdev when length of array is less than 2 * ignore avg_time_per_image during regression test * Update docs for enabling sphinx.ext.autosummary (#2654) * fix some errors/warnings on docs source * enable sphinx-autosummary for API reference documentation * Update Makefile * update sphinx configuration * Update PTQ docs (#2672) * Replace POT -> PTQ * Fixes from comments * Update regression tests for develop (#2652) * Update regression tests (#2556) * update reg tests * update test suite * update regression criteria --------- Co-authored-by: Eunwoo Shin * Exclude py37 target config for cibuildwheel (#2673) * Add `--dryrun` option to tools/experiment.py (#2674) * Fix variable override bug * Add --dryrun option to see experiment list --------- Signed-off-by: Songki Choi * Update OTX explain CLI arguments (#2671) * Change int8 to uint8 to XAI tests * Add probabilities for CLI demo * Rename arguments for explain * Fix pre-commit * Remove extra changes * Fix integration tests * Fix integration "explain_all_classes" test for OV * Fix e2e tests for explain (#2681) * Add README.md for experiment.py (#2688) * write draft readme * refine readme * align with pre-commit * Fix typo in reg test cmd (#2691) * Select more proper model weight file according to commands run just before (#2696) * consider more complex case when prepare eval and optimize * update readme * align with pre-commit * add comment * Add visual prompting zero-shot learning (`learn` & `infer`) (#2616) * Add algobackend & temp configs * Update config * WIP * Fix to enable `algo_backend` * (WIP) Update dataset * (WIP) Update configs * (WIP) Update tasks * (WIP) Update models * Enable `learn` task through otx.train * (WIP) enable `infer` (TODO : normalize points) * Fix when `state_dict` is None * Enable `ZeroShotInferenceCallback` * Enable otx infer * Enable to independently use processor * Revert max_steps * Change `postprocess_masks` to `staticmethod` * Add `PromptGetter` & Enable `learn` and `infer` * precommit * Fix args * Fix typo * Change `id` to `id_` * Fix import * Fix args * precommit * (WIP) Add unit tests * Fix * Add unit tests * Fix * Add integration tests * precommit * Update CHANGELOG.md * Update docstring and type annotations * Fix * precommit * Fix unused args * precommit * Fix * Fix unsupported dtype in ov graph constant converter (#2676) * Fix unsupported dtype in ov graph constant converter * Fix more ov-graph related unit tests * Skip failure TC with adding issue number ref.
(#2717) * Fix visual prompting e2e test (#2719) Skip zero-shot e2e * Remove duplicated variable combination in experiment.py (#2713) * Enhance detection & instance segmentation experiment (#2710) * Compute precision and recall along with f-measure * Log performance * Accept ellipse annotation from datumaro format * Fix dataset adapter condition for det/iset * Insert garbage collection btw experiments --------- Signed-off-by: Kim, Vinnam Signed-off-by: Songki Choi Co-authored-by: Yunchu Lee Co-authored-by: Kim, Sungchul Co-authored-by: Vinnam Kim Co-authored-by: Evgeny Tsykunov Co-authored-by: Songki Choi Co-authored-by: Jaeguk Hyun Co-authored-by: Sungman Cho Co-authored-by: Eugene Liu Co-authored-by: Wonju Lee Co-authored-by: Dick Ameln Co-authored-by: Vladislav Sovrasov Co-authored-by: sungchul.kim Co-authored-by: GalyaZalesskaya Co-authored-by: Harim Kang Co-authored-by: Ashwin Vaidya Co-authored-by: Ashwin Vaidya Co-authored-by: sungmanc --- .github/workflows/run_tests_in_tox.yml | 3 +- .github/workflows/weekly.yml | 9 +- .gitignore | 4 + CHANGELOG.md | 5 + docs/Makefile | 6 + docs/source/conf.py | 38 +- .../models_optimization.rst | 14 +- .../action/action_classification.rst | 2 +- .../explanation/algorithms/anomaly/index.rst | 2 +- .../multi_class_classification.rst | 3 +- .../multi_label_classification.rst | 1 + .../guide/explanation/algorithms/index.rst | 2 + .../object_detection/object_detection.rst | 2 + .../segmentation/instance_segmentation.rst | 6 +- .../source/guide/get_started/cli_commands.rst | 30 +- .../source/guide/get_started/installation.rst | 6 +- .../source/guide/get_started/introduction.rst | 4 +- docs/source/guide/index.rst | 6 +- .../algorithm/action/adapters/index.rst | 8 - .../algorithm/action/adapters/mmaction.rst | 18 - .../algorithm/action/adapters/openvino.rst | 14 - .../reference/algorithm/action/configs.rst | 22 - .../reference/algorithm/action/index.rst | 9 - .../reference/algorithm/action/tasks.rst | 10 - .../algorithm/anomaly/adapters/callbacks.rst | 6 - .../algorithm/anomaly/adapters/config.rst | 6 - .../algorithm/anomaly/adapters/data.rst | 6 - .../anomaly/adapters/exportable_code.rst | 6 - .../algorithm/anomaly/adapters/index.rst | 15 - .../algorithm/anomaly/adapters/logger.rst | 6 - .../reference/algorithm/anomaly/configs.rst | 26 - .../reference/algorithm/anomaly/index.rst | 10 - .../reference/algorithm/anomaly/tasks.rst | 10 - .../reference/algorithm/anomaly/tools.rst | 10 - .../classification/adapters/index.rst | 8 - .../classification/adapters/mmcls.rst | 62 - .../classification/adapters/openvino.rst | 10 - .../algorithm/classification/configs.rst | 10 - .../algorithm/classification/index.rst | 10 - .../algorithm/classification/tasks.rst | 10 - .../algorithm/classification/utils.rst | 10 - .../algorithm/detection/adapters/index.rst | 8 - .../algorithm/detection/adapters/mmdet.rst | 62 - .../algorithm/detection/adapters/openvino.rst | 10 - .../reference/algorithm/detection/configs.rst | 26 - .../reference/algorithm/detection/index.rst | 10 - .../reference/algorithm/detection/tasks.rst | 10 - .../reference/algorithm/detection/utils.rst | 10 - .../guide/reference/algorithm/index.rst | 79 - .../algorithm/segmentation/adapters/index.rst | 8 - .../algorithm/segmentation/adapters/mmseg.rst | 62 - .../segmentation/adapters/openvino.rst | 10 - .../algorithm/segmentation/configs.rst | 14 - .../algorithm/segmentation/index.rst | 9 - .../algorithm/segmentation/tasks.rst | 10 - .../reference/api/configuration/index.rst | 7 - 
.../guide/reference/api/entities/general.rst | 102 - .../guide/reference/api/entities/index.rst | 11 - .../reference/api/entities/interfaces.rst | 6 - .../guide/reference/api/entities/shapes.rst | 18 - docs/source/guide/reference/api/index.rst | 9 - .../guide/reference/api/usecases/adapters.rst | 6 - .../reference/api/usecases/evaluation.rst | 38 - .../api/usecases/exportable_code.rst | 2 - .../guide/reference/api/usecases/index.rst | 15 - .../reference/api/usecases/reporting.rst | 6 - .../guide/reference/api/usecases/tasks.rst | 6 - docs/source/guide/reference/core/data.rst | 18 - docs/source/guide/reference/core/index.rst | 8 - docs/source/guide/reference/core/ov/graph.rst | 22 - docs/source/guide/reference/core/ov/index.rst | 25 - .../source/guide/reference/core/ov/models.rst | 22 - docs/source/guide/reference/core/ov/ops.rst | 82 - docs/source/guide/reference/hpo/hpo.rst | 10 - docs/source/guide/reference/index.rst | 11 + docs/source/guide/reference/mpa/index.rst | 8 - .../guide/tutorials/advanced/semi_sl.rst | 2 +- docs/source/guide/tutorials/base/deploy.rst | 2 +- docs/source/guide/tutorials/base/explain.rst | 8 +- .../how_to_train/action_classification.rst | 14 +- .../base/how_to_train/action_detection.rst | 4 +- .../base/how_to_train/anomaly_detection.rst | 11 +- .../base/how_to_train/classification.rst | 10 +- .../tutorials/base/how_to_train/detection.rst | 18 +- .../how_to_train/instance_segmentation.rst | 8 +- .../how_to_train/semantic_segmentation.rst | 11 +- pyproject.toml | 2 +- .../utils/convert_public_data_to_cvat.py | 22 +- .../anomaly/adapters/anomalib/data/dataset.py | 1 + .../tools/classification_sample.py | 23 +- .../common/configs/training_base.py | 26 +- .../detection/adapters/mmdet/configurer.py | 19 + .../detection/adapters/mmdet/task.py | 7 + .../detection/adapters/openvino/task.py | 16 +- .../detection/configs/base/configuration.py | 15 +- .../configs/detection/configuration.yaml | 19 + .../instance_segmentation/configuration.yaml | 19 + .../convnext_maskrcnn/model.py | 6 +- .../efficientnetb2b_maskrcnn/model.py | 2 +- .../efficientnetb2b_maskrcnn/semisl/model.py | 2 +- .../maskrcnn_swin_t/model.py | 2 +- .../resnet50_maskrcnn/model.py | 19 +- .../resnet50_maskrcnn/semisl/model.py | 2 +- .../rotated_detection/configuration.yaml | 19 + .../efficientnetb2b_maskrcnn/model.py | 2 +- .../resnet50_maskrcnn/model.py | 4 +- src/otx/algorithms/detection/task.py | 23 +- .../mmseg/models/utils/channel_shuffle.py | 3 +- .../pytorch_lightning/callbacks/__init__.py | 2 +- .../pytorch_lightning/callbacks/inference.py | 38 + .../config/visual_prompting_config.py | 8 +- .../pytorch_lightning/datasets/dataset.py | 93 +- .../datasets/pipelines/sam_transforms.py | 19 +- .../pytorch_lightning/models/__init__.py | 2 +- .../pytorch_lightning/models/backbones/vit.py | 2 +- .../models/visual_prompters/__init__.py | 1 + .../visual_prompters/segment_anything.py | 2 +- .../zero_shot_segment_anything.py | 611 ++++ .../configs/base/configuration.py | 6 + .../configs/configuration.yaml | 62 + .../zero_shot_sam_tiny_vit/__init__.py | 6 + .../zero_shot_sam_tiny_vit/config.yaml | 78 + .../zero_shot_sam_tiny_vit/configuration.py | 14 + .../zero_shot_sam_tiny_vit/configuration.yaml | 210 ++ .../ptq_optimization_config.py | 22 + .../template_experimental.yaml | 38 + .../visual_prompting/tasks/__init__.py | 2 +- .../visual_prompting/tasks/inference.py | 125 +- .../visual_prompting/tasks/train.py | 4 +- src/otx/api/configuration/__init__.py | 4 - src/otx/api/configuration/helper/convert.py | 4 +- 
.../api/configuration/helper/substitute.py | 3 +- src/otx/api/usecases/__init__.py | 8 +- src/otx/api/usecases/evaluation/f_measure.py | 19 +- src/otx/api/usecases/reporting/__init__.py | 13 +- src/otx/cli/tools/eval.py | 8 +- src/otx/cli/tools/explain.py | 19 +- src/otx/cli/tools/utils/demo/visualization.py | 6 +- .../core/data/adapter/base_dataset_adapter.py | 16 + .../data/adapter/detection_dataset_adapter.py | 15 +- src/otx/core/ov/ops/infrastructures.py | 2 + src/otx/core/ov/ops/type_conversions.py | 1 + .../__init__.py | 0 .../incremental.py | 0 .../semisl.py | 0 .../train.py | 0 tests/e2e/cli/detection/test_detection.py | 2 + .../visual_prompting/test_visual_prompting.py | 8 +- .../cli/detection/test_detection.py | 2 + .../cli/detection/test_tiling_detection.py | 4 +- .../test_instance_segmentation.py | 6 + .../test_tiling_instseg.py | 4 +- .../visual_prompting/test_visual_prompting.py | 10 +- .../cli/visual_prompting/test_zero_shot.py | 53 + .../action/test_action_classification.py | 27 +- .../action/test_action_detection.py | 14 +- .../anomaly/test_anomaly_classificaiton.py | 55 +- .../anomaly/test_anomaly_detection.py | 56 +- .../anomaly/test_anomaly_segmentation.py | 55 +- .../classification/test_classification.py | 161 +- tests/regression/conftest.py | 9 +- tests/regression/detection/test_detection.py | 52 +- .../detection/test_tiling_detection.py | 39 +- .../test_instance_segmentation.py | 43 +- .../test_tiling_instance_segmentation.py | 40 +- tests/regression/regression_command.py | 133 +- tests/regression/regression_config.json | 2963 +++++++---------- tests/regression/regression_test_helpers.py | 106 +- .../test_segmentation.py | 69 +- tests/regression/summarize_test_results.py | 142 +- tests/test_suite/run_test_command.py | 250 +- .../adapters/mmdet/test_configurer.py | 25 +- .../unit/algorithms/detection/test_helpers.py | 2 +- .../callbacks/test_inference_callback.py | 61 + .../config/test_visual_prompting_config.py | 8 +- .../datasets/pipelines/test_sam_transforms.py | 2 +- .../datasets/test_dataset.py | 84 +- .../visual_prompters/test_segment_anything.py | 37 +- .../test_zero_shot_segment_anything.py | 321 ++ .../visual_prompting/tasks/test_inference.py | 95 +- .../visual_prompting/test_helpers.py | 74 +- .../unit/core/ov/graph/test_ov_graph_utils.py | 2 + tools/README.md | 94 + tools/experiment.py | 68 +- tox.ini | 4 + 185 files changed, 4427 insertions(+), 3762 deletions(-) delete mode 100644 docs/source/guide/reference/algorithm/action/adapters/index.rst delete mode 100644 docs/source/guide/reference/algorithm/action/adapters/mmaction.rst delete mode 100644 docs/source/guide/reference/algorithm/action/adapters/openvino.rst delete mode 100644 docs/source/guide/reference/algorithm/action/configs.rst delete mode 100644 docs/source/guide/reference/algorithm/action/index.rst delete mode 100644 docs/source/guide/reference/algorithm/action/tasks.rst delete mode 100644 docs/source/guide/reference/algorithm/anomaly/adapters/callbacks.rst delete mode 100644 docs/source/guide/reference/algorithm/anomaly/adapters/config.rst delete mode 100644 docs/source/guide/reference/algorithm/anomaly/adapters/data.rst delete mode 100644 docs/source/guide/reference/algorithm/anomaly/adapters/exportable_code.rst delete mode 100644 docs/source/guide/reference/algorithm/anomaly/adapters/index.rst delete mode 100644 docs/source/guide/reference/algorithm/anomaly/adapters/logger.rst delete mode 100644 docs/source/guide/reference/algorithm/anomaly/configs.rst delete mode 100644 
docs/source/guide/reference/algorithm/anomaly/index.rst delete mode 100644 docs/source/guide/reference/algorithm/anomaly/tasks.rst delete mode 100644 docs/source/guide/reference/algorithm/anomaly/tools.rst delete mode 100644 docs/source/guide/reference/algorithm/classification/adapters/index.rst delete mode 100644 docs/source/guide/reference/algorithm/classification/adapters/mmcls.rst delete mode 100644 docs/source/guide/reference/algorithm/classification/adapters/openvino.rst delete mode 100644 docs/source/guide/reference/algorithm/classification/configs.rst delete mode 100644 docs/source/guide/reference/algorithm/classification/index.rst delete mode 100644 docs/source/guide/reference/algorithm/classification/tasks.rst delete mode 100644 docs/source/guide/reference/algorithm/classification/utils.rst delete mode 100644 docs/source/guide/reference/algorithm/detection/adapters/index.rst delete mode 100644 docs/source/guide/reference/algorithm/detection/adapters/mmdet.rst delete mode 100644 docs/source/guide/reference/algorithm/detection/adapters/openvino.rst delete mode 100644 docs/source/guide/reference/algorithm/detection/configs.rst delete mode 100644 docs/source/guide/reference/algorithm/detection/index.rst delete mode 100644 docs/source/guide/reference/algorithm/detection/tasks.rst delete mode 100644 docs/source/guide/reference/algorithm/detection/utils.rst delete mode 100644 docs/source/guide/reference/algorithm/index.rst delete mode 100644 docs/source/guide/reference/algorithm/segmentation/adapters/index.rst delete mode 100644 docs/source/guide/reference/algorithm/segmentation/adapters/mmseg.rst delete mode 100644 docs/source/guide/reference/algorithm/segmentation/adapters/openvino.rst delete mode 100644 docs/source/guide/reference/algorithm/segmentation/configs.rst delete mode 100644 docs/source/guide/reference/algorithm/segmentation/index.rst delete mode 100644 docs/source/guide/reference/algorithm/segmentation/tasks.rst delete mode 100644 docs/source/guide/reference/api/configuration/index.rst delete mode 100644 docs/source/guide/reference/api/entities/general.rst delete mode 100644 docs/source/guide/reference/api/entities/index.rst delete mode 100644 docs/source/guide/reference/api/entities/interfaces.rst delete mode 100644 docs/source/guide/reference/api/entities/shapes.rst delete mode 100644 docs/source/guide/reference/api/index.rst delete mode 100644 docs/source/guide/reference/api/usecases/adapters.rst delete mode 100644 docs/source/guide/reference/api/usecases/evaluation.rst delete mode 100644 docs/source/guide/reference/api/usecases/exportable_code.rst delete mode 100644 docs/source/guide/reference/api/usecases/index.rst delete mode 100644 docs/source/guide/reference/api/usecases/reporting.rst delete mode 100644 docs/source/guide/reference/api/usecases/tasks.rst delete mode 100644 docs/source/guide/reference/core/data.rst delete mode 100644 docs/source/guide/reference/core/index.rst delete mode 100644 docs/source/guide/reference/core/ov/graph.rst delete mode 100644 docs/source/guide/reference/core/ov/index.rst delete mode 100644 docs/source/guide/reference/core/ov/models.rst delete mode 100644 docs/source/guide/reference/core/ov/ops.rst delete mode 100644 docs/source/guide/reference/hpo/hpo.rst create mode 100644 docs/source/guide/reference/index.rst delete mode 100644 docs/source/guide/reference/mpa/index.rst create mode 100644 src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/zero_shot_segment_anything.py create mode 100644 
src/otx/algorithms/visual_prompting/configs/zero_shot_sam_tiny_vit/__init__.py create mode 100644 src/otx/algorithms/visual_prompting/configs/zero_shot_sam_tiny_vit/config.yaml create mode 100644 src/otx/algorithms/visual_prompting/configs/zero_shot_sam_tiny_vit/configuration.py create mode 100644 src/otx/algorithms/visual_prompting/configs/zero_shot_sam_tiny_vit/configuration.yaml create mode 100644 src/otx/algorithms/visual_prompting/configs/zero_shot_sam_tiny_vit/ptq_optimization_config.py create mode 100644 src/otx/algorithms/visual_prompting/configs/zero_shot_sam_tiny_vit/template_experimental.yaml rename src/otx/recipes/stages/{instance-segmentation => instance_segmentation}/__init__.py (100%) rename src/otx/recipes/stages/{instance-segmentation => instance_segmentation}/incremental.py (100%) rename src/otx/recipes/stages/{instance-segmentation => instance_segmentation}/semisl.py (100%) rename src/otx/recipes/stages/{instance-segmentation => instance_segmentation}/train.py (100%) create mode 100644 tests/integration/cli/visual_prompting/test_zero_shot.py create mode 100644 tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/test_zero_shot_segment_anything.py create mode 100644 tools/README.md diff --git a/.github/workflows/run_tests_in_tox.yml b/.github/workflows/run_tests_in_tox.yml index aaa00970b74..fac265aede3 100644 --- a/.github/workflows/run_tests_in_tox.yml +++ b/.github/workflows/run_tests_in_tox.yml @@ -56,7 +56,6 @@ jobs: name: ${{ inputs.artifact-prefix }}-${{ inputs.toxenv-task }}-${{ inputs.toxenv-pyver }}-${{ inputs.toxenv-ptver }} path: | .tox/tests-${{ inputs.toxenv-task }}-${{ inputs.toxenv-pyver }}-${{ inputs.toxenv-ptver }}.csv - .tox/tests-reg_${{ inputs.task }}_*.csv - .tox/tests-reg_tiling_${{ inputs.task }}_*.csv + .tox/tests-reg_${{ inputs.task }}*.csv # Use always() to always run this step to publish test results when there are test failures if: ${{ inputs.upload-artifact && always() }} diff --git a/.github/workflows/weekly.yml b/.github/workflows/weekly.yml index 875dfc1e0f0..e492737431b 100644 --- a/.github/workflows/weekly.yml +++ b/.github/workflows/weekly.yml @@ -14,31 +14,24 @@ jobs: include: - toxenv_task: "iseg" test_dir: "tests/regression/instance_segmentation/test_instance_segmentation.py" - runs_on: "['self-hosted', 'Linux', 'X64', 'dmount']" task: "instance_segmentation" - toxenv_task: "iseg_t" test_dir: "tests/regression/instance_segmentation/test_tiling_instance_segmentation.py" - runs_on: "['self-hosted', 'Linux', 'X64', 'dmount']" task: "instance_segmentation" - toxenv_task: "seg" test_dir: "tests/regression/semantic_segmentation" - runs_on: "['self-hosted', 'Linux', 'X64', 'dmount']" task: "segmentation" - toxenv_task: "det" test_dir: "tests/regression/detection" - runs_on: "['self-hosted', 'Linux', 'X64', 'dmount']" task: "detection" - toxenv_task: "ano" test_dir: "tests/regression/anomaly" - runs_on: "['self-hosted', 'Linux', 'X64', 'dmount']" task: "anomaly" - toxenv_task: "act" test_dir: "tests/regression/action" - runs_on: "['self-hosted', 'Linux', 'X64', 'dmount']" task: "action" - toxenv_task: "cls" test_dir: "tests/regression/classification" - runs_on: "['self-hosted', 'Linux', 'X64', 'dmount']" task: "classification" name: Regression-Test-py310-${{ matrix.toxenv_task }} uses: ./.github/workflows/run_tests_in_tox.yml @@ -47,7 +40,7 @@ jobs: toxenv-pyver: "py310" toxenv-task: ${{ matrix.toxenv_task }} tests-dir: ${{ matrix.test_dir }} - runs-on: ${{ matrix.runs_on }} + runs-on: "['self-hosted', 
'Linux', 'X64', 'dmount']" task: ${{ matrix.task }} timeout-minutes: 8640 upload-artifact: true diff --git a/.gitignore b/.gitignore index 934218f75c1..d7ed0a6bbd6 100644 --- a/.gitignore +++ b/.gitignore @@ -18,6 +18,7 @@ results/ build/ dist/ !src/otx/recipes/** +src/otx/recipes/**/__pycache__ *egg-info *.pth @@ -45,3 +46,6 @@ src/**/*.so # Dataset made by unit-test tests/**/detcon_mask/* + +# sphinx-autosummary generated files +docs/**/_autosummary/ diff --git a/CHANGELOG.md b/CHANGELOG.md index 9953ee7d95b..d63284cd749 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,6 +4,10 @@ All notable changes to this project will be documented in this file. ## \[unreleased\] +### New features + +- Add zero-shot visual prompting (https://github.com/openvinotoolkit/training_extensions/pull/2616) + ## \[v1.5.0\] ### New features @@ -46,6 +50,7 @@ All notable changes to this project will be documented in this file. - Update ModelAPI configuration() - Add Anomaly modelAPI changes () - Update Image numpy access () +- Make max_num_detections configurable () ### Bug fixes diff --git a/docs/Makefile b/docs/Makefile index 290eb859f4e..2750a53f965 100644 --- a/docs/Makefile +++ b/docs/Makefile @@ -23,3 +23,9 @@ html: # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). %: Makefile @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) + +# Custom clean target that also removes autosummary generated files. Can +# be removed when https://github.com/sphinx-doc/sphinx/issues/1999 is fixed. +clean: + rm -rf "$(SOURCEDIR)/guide/reference/_autosummary" + $(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) diff --git a/docs/source/conf.py b/docs/source/conf.py index 61741e262b6..02e674cf0cf 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -33,8 +33,23 @@ # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ + "sphinx.ext.napoleon", # Support for NumPy and Google style docstrings 'sphinx.ext.autodoc', 'sphinx_copybutton', + "sphinx.ext.autosummary", # Create neat summary tables + "sphinx.ext.viewcode", # Find the source files + "sphinx.ext.autosectionlabel", # Refer sections its title + "sphinx.ext.intersphinx", # Generate links to the documentation +] + +source_suffix = { + ".rst": "restructuredtext", + ".md": "markdown", +} + +suppress_warnings = [ + "ref.python", + "autosectionlabel.*", ] # Add any paths that contain templates here, relative to this directory. @@ -45,7 +60,6 @@ # This pattern also affects html_static_path and html_extra_path. exclude_patterns = [] - # -- Options for HTML output ------------------------------------------------- # # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. 
@@ -74,3 +88,25 @@ html_css_files = [ 'css/custom.css', ] + +# -- Extension configuration ------------------------------------------------- +autodoc_docstring_signature = True +autodoc_member_order = "bysource" +intersphinx_mapping = { + "python": ("https://docs.python.org/3", None), + "numpy": ("https://numpy.org/doc/stable/", None), +} +autodoc_member_order = "groupwise" +autodoc_default_options = { + "members": True, + "methods": True, + "special-members": "__call__", + "exclude-members": "_abc_impl", + "show-inheritance": True, +} + +autoclass_content = "both" + +autosummary_generate = True # Turn on sphinx.ext.autosummary +autosummary_ignore_module_all = False # Summary list in __all__ no others +# autosummary_imported_members = True # document classes and functions imported in modules diff --git a/docs/source/guide/explanation/additional_features/models_optimization.rst b/docs/source/guide/explanation/additional_features/models_optimization.rst index 4b3c9ec4787..08715fb3165 100644 --- a/docs/source/guide/explanation/additional_features/models_optimization.rst +++ b/docs/source/guide/explanation/additional_features/models_optimization.rst @@ -1,17 +1,17 @@ Models Optimization =================== -OpenVINO™ Training Extensions provides two types of optimization algorithms: `Post-training Optimization Tool (POT) `_ and `Neural Network Compression Framework (NNCF) `_. +OpenVINO™ Training Extensions provides two types of optimization algorithms: `Post-Training Quantization tool (PTQ) `_ and `Neural Network Compression Framework (NNCF) `_. ******************************* -Post-training Optimization Tool +Post-Training Quantization Tool ******************************* -POT is designed to optimize the inference of models by applying post-training methods that do not require model retraining or fine-tuning. If you want to know more details about how POT works and to be more familiar with model optimization methods, please refer to `documentation `_. +PTQ is designed to optimize the inference of models by applying post-training methods that do not require model retraining or fine-tuning. If you want to know more details about how PTQ works and to be more familiar with model optimization methods, please refer to `documentation `_. -To run Post-training optimization it is required to convert the model to OpenVINO™ intermediate representation (IR) first. To perform fast and accurate quantization we use ``DefaultQuantization Algorithm`` for each task. Please, see the `DefaultQuantization Parameters `_ for further information about configuring the optimization. +To run Post-training quantization it is required to convert the model to OpenVINO™ intermediate representation (IR) first. To perform fast and accurate quantization we use ``DefaultQuantization Algorithm`` for each task. Please, refer to the `Tune quantization Parameters `_ for further information about configuring the optimization. -POT parameters can be found and configured in ``template.yaml`` and ``configuration.yaml`` for each task. For Anomaly and Semantic Segmentation tasks, we have separate configuration files for POT, that can be found in the same directory with ``template.yaml``, for example for `PaDiM `_, `OCR-Lite-HRNe-18-mod2 `_ model. +PTQ parameters can be found and configured in ``template.yaml`` and ``configuration.yaml`` for each task. 
For Anomaly and Semantic Segmentation tasks, we have separate configuration files for PTQ, that can be found in the same directory with ``template.yaml``, for example for `PaDiM `_, `OCR-Lite-HRNe-18-mod2 `_ model. ************************************ Neural Network Compression Framework @@ -25,9 +25,9 @@ You can refer to configuration files for default templates for each task accordi NNCF tends to provide better quality in terms of preserving accuracy as it uses training compression approaches. Compression results achievable with the NNCF can be found `here `_ . -Meanwhile, the POT is faster but can degrade accuracy more than the training-enabled approach. +Meanwhile, the PTQ is faster but can degrade accuracy more than the training-enabled approach. .. note:: The main recommendation is to start with post-training compression and use NNCF compression during training if you are not satisfied with the results. -Please, refer to our :doc:`dedicated tutorials <../../tutorials/base/how_to_train/index>` on how to optimize your model using POT or NNCF. \ No newline at end of file +Please, refer to our :doc:`dedicated tutorials <../../tutorials/base/how_to_train/index>` on how to optimize your model using PTQ or NNCF. \ No newline at end of file diff --git a/docs/source/guide/explanation/algorithms/action/action_classification.rst b/docs/source/guide/explanation/algorithms/action/action_classification.rst index 52fef81707c..be04c27e2b7 100644 --- a/docs/source/guide/explanation/algorithms/action/action_classification.rst +++ b/docs/source/guide/explanation/algorithms/action/action_classification.rst @@ -1,5 +1,5 @@ Action Classification -================== +===================== Action classification is a problem of identifying the action that is being performed in a video. The input to the algorithm is a sequence of video frames, and the output is a label indicating the action that is being performed. diff --git a/docs/source/guide/explanation/algorithms/anomaly/index.rst b/docs/source/guide/explanation/algorithms/anomaly/index.rst index cc2f1ba1ca5..806b235a668 100644 --- a/docs/source/guide/explanation/algorithms/anomaly/index.rst +++ b/docs/source/guide/explanation/algorithms/anomaly/index.rst @@ -143,7 +143,7 @@ Since STFPM trains the student network, we use the following parameters for its - ``Aditional Techniques``: - ``Early Stopping``: Early stopping is used to stop the training process when the validation loss stops improving. The default value of the early stopping patience is ``10``. -For more information on STFPM's training. We invite you to read Anomalib's `STFPM documentation`_. +For more information on STFPM's training. We invite you to read Anomalib's `STFPM documentation `_. 
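Returning briefly to the PTQ workflow described in models_optimization.rst above: outside of the `otx optimize` entry point, the same post-training quantization of an exported IR model can be sketched directly with NNCF's Python API. This is a hand-written illustration under assumptions (placeholder model path, dummy calibration data, a dataset-specific transform function), not the code path OTX itself runs:

import numpy as np
import nncf
from openvino.runtime import Core, serialize

ir_model = Core().read_model("model.xml")  # IR produced by `otx export` (placeholder path)

# Calibration samples; in practice this iterates over real validation data.
calibration_items = [{"image": np.zeros((1, 3, 224, 224), dtype=np.float32)}]

def transform_fn(data_item):
    # Map one dataset item to the model's input; dataset-specific.
    return data_item["image"]

calibration_dataset = nncf.Dataset(calibration_items, transform_fn)
int8_model = nncf.quantize(ir_model, calibration_dataset)  # DefaultQuantization-style PTQ
serialize(int8_model, "model_int8.xml")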
Reconstruction-based Models --------------------------- diff --git a/docs/source/guide/explanation/algorithms/classification/multi_class_classification.rst b/docs/source/guide/explanation/algorithms/classification/multi_class_classification.rst index dd1930c0e97..76fe3683d6e 100644 --- a/docs/source/guide/explanation/algorithms/classification/multi_class_classification.rst +++ b/docs/source/guide/explanation/algorithms/classification/multi_class_classification.rst @@ -100,7 +100,7 @@ In the table below the top-1 accuracy on some academic datasets using our :ref:` +-----------------------+-----------------+-----------+-----------+-----------+ | EfficientNet-V2-S | 96.13 | 90.36 | 97.68 | 86.74 | +-----------------------+-----------------+-----------+-----------+-----------+ -*These datasets were splitted with auto-split (80% train, 20% test). +\* These datasets were split with auto-split (80% train, 20% test). ************************ Semi-supervised Learning ************************ @@ -145,7 +145,6 @@ In the table below the top-1 accuracy on some academic datasets using our pipeli | EfficientNet-V2-S | 36.03 | 39.66 | 16.81 | 20.28 | 65.99 | 69.61 | +-----------------------+---------+---------+-------+---------+--------+---------+ -| - 10 labeled images per class including unlabeled dataset for Semi-SL diff --git a/docs/source/guide/explanation/algorithms/classification/multi_label_classification.rst b/docs/source/guide/explanation/algorithms/classification/multi_label_classification.rst index 46840d0c955..eed5090426e 100644 --- a/docs/source/guide/explanation/algorithms/classification/multi_label_classification.rst +++ b/docs/source/guide/explanation/algorithms/classification/multi_label_classification.rst @@ -28,6 +28,7 @@ Specifically, this format should be converted in our `internal representation --data_root_dir --output .. note:: diff --git a/docs/source/guide/explanation/algorithms/index.rst b/docs/source/guide/explanation/algorithms/index.rst index 8202085affc..03351d37705 100644 --- a/docs/source/guide/explanation/algorithms/index.rst +++ b/docs/source/guide/explanation/algorithms/index.rst @@ -11,7 +11,9 @@ To this end, we support: - **Supervised training**. This is the most common approach for computer vision tasks such as object detection and image classification. Supervised learning involves training a model on a labeled dataset of images. The model learns to associate specific features in the images with the corresponding labels. - **Incremental learning**. This learning approach lets the model train on new data as it becomes available, rather than retraining the entire model on the whole dataset every time new data is added. OpenVINO™ Training Extensions supports also the class incremental approach for all tasks. In this approach, the model is first trained on a set of classes, and then incrementally updated with new classes of data, while keeping the previously learned classes' knowledge. The class incremental approach is particularly useful in situations where the number of classes is not fixed and new classes may be added over time. + .. _semi_sl_explanation: + - **Semi-supervised learning**. This is a type of machine learning in which the model is trained on a dataset that contains a combination of labeled and unlabeled examples. The labeled examples are used to train the model, while the unlabeled examples are used to improve the model's performance by providing additional information about the underlying distribution of the data.
This approach is often used when there is a limited amount of labeled data available, but a large amount of unlabeled data. This can make it more cost-effective and efficient to train models compared to traditional supervised learning, where the model is trained only on labeled data. - **Self-supervised learning**. This is a type of machine learning where the model is trained on a dataset that contains only unlabeled examples. The model is trained to learn useful representations of the data by solving a task that can be inferred from the input itself, without human-provided labels. The objective is to learn good representations of the input data that can then be used for downstream tasks such as classification, detection, generation or clustering. diff --git a/docs/source/guide/explanation/algorithms/object_detection/object_detection.rst b/docs/source/guide/explanation/algorithms/object_detection/object_detection.rst index 7edc3065a41..e8022cc9ea1 100644 --- a/docs/source/guide/explanation/algorithms/object_detection/object_detection.rst +++ b/docs/source/guide/explanation/algorithms/object_detection/object_detection.rst @@ -92,6 +92,7 @@ We support the following ready-to-use model templates: Above table can be found using the following command .. code-block:: + $ otx find --task detection `MobileNetV2-ATSS `_ is a good medium-range model that works well and fast in most cases. @@ -147,6 +148,7 @@ Please, refer to the :doc:`tutorial <../../../tutorials/advanced/backbones>` how To see which public backbones are available for the task, the following command can be executed: .. code-block:: + $ otx find --backbone {torchvision, pytorchcv, mmcls, omz.mmcls} In the table below the test mAP on some academic datasets using our :ref:`supervised pipeline ` is presented. 
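As an aside on the semi-supervised learning bullet added to algorithms/index.rst above, the core pseudo-labeling idea is compact enough to sketch in a few lines of PyTorch. This toy fragment is purely illustrative: OTX's actual Semi-SL recipes are considerably more elaborate (separate augmentation branches, schedulers, task-specific heads), and the 0.95 confidence threshold is an arbitrary assumption:

import torch
import torch.nn.functional as F

def semi_sl_step(model, labeled_x, labeled_y, unlabeled_x, threshold=0.95):
    """One toy training step combining supervised and pseudo-label losses."""
    sup_loss = F.cross_entropy(model(labeled_x), labeled_y)
    with torch.no_grad():
        probs = F.softmax(model(unlabeled_x), dim=1)
        confidence, pseudo_y = probs.max(dim=1)
        mask = confidence.ge(threshold).float()  # keep only confident pseudo-labels
    per_sample = F.cross_entropy(model(unlabeled_x), pseudo_y, reduction="none")
    unsup_loss = (per_sample * mask).mean()
    return sup_loss + unsup_loss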
diff --git a/docs/source/guide/explanation/algorithms/segmentation/instance_segmentation.rst b/docs/source/guide/explanation/algorithms/segmentation/instance_segmentation.rst index c50fbe259ff..0c7b0c6191c 100644 --- a/docs/source/guide/explanation/algorithms/segmentation/instance_segmentation.rst +++ b/docs/source/guide/explanation/algorithms/segmentation/instance_segmentation.rst @@ -61,11 +61,11 @@ We support the following ready-to-use model templates: +--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------------------------+---------------------+-----------------+ | Template ID | Name | Complexity (GFLOPs) | Model size (MB) | +============================================================================================================================================================================================================================================+============================+=====================+=================+ -| `Custom_Counting_Instance_Segmentation_MaskRCNN_EfficientNetB2B `_ | MaskRCNN-EfficientNetB2B | 68.48 | 13.27 | +| `Custom_Counting_Instance_Segmentation_MaskRCNN_EfficientNetB2B `_ | MaskRCNN-EfficientNetB2B | 68.48 | 13.27 | +--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------------------------+---------------------+-----------------+ -| `Custom_Counting_Instance_Segmentation_MaskRCNN_ResNet50 `_ | MaskRCNN-ResNet50 | 533.80 | 177.90 | +| `Custom_Counting_Instance_Segmentation_MaskRCNN_ResNet50 `_ | MaskRCNN-ResNet50 | 533.80 | 177.90 | +--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------------------------+---------------------+-----------------+ -| `Custom_Counting_Instance_Segmentation_MaskRCNN_ConvNeXt `_ | MaskRCNN-ConvNeXt | 266.78 | 192.4 | +| `Custom_Counting_Instance_Segmentation_MaskRCNN_ConvNeXt `_ | MaskRCNN-ConvNeXt | 266.78 | 192.4 | +--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------------------------+---------------------+-----------------+ MaskRCNN-ResNet50 utilizes the `ResNet-50 `_ architecture as the backbone network for extracting image features. This choice of backbone network results in a higher number of parameters and FLOPs, which consequently requires more training time. However, the model offers superior performance in terms of accuracy. diff --git a/docs/source/guide/get_started/cli_commands.rst b/docs/source/guide/get_started/cli_commands.rst index 74d1e975729..9ef1e7b3ebe 100644 --- a/docs/source/guide/get_started/cli_commands.rst +++ b/docs/source/guide/get_started/cli_commands.rst @@ -342,10 +342,10 @@ To use the exported model as an input for ``otx explain``, please dump additiona Optimization ************ -``otx optimize`` optimizes a model using `NNCF `_ or `POT `_ depending on the model format. 
+``otx optimize`` optimizes a model using `NNCF `_ or `PTQ `_ depending on the model and transforms it to ``INT8`` format. - NNCF optimization used for trained snapshots in a framework-specific format such as checkpoint (.pth) file from Pytorch -- POT optimization used for models exported in the OpenVINO™ IR format +- PTQ optimization used for models exported in the OpenVINO™ IR format With the ``--help`` command, you can list additional information: @@ -383,16 +383,16 @@ Command example for optimizing a PyTorch model (.pth) with OpenVINO™ NNCF: --output outputs/nncf -Command example for optimizing OpenVINO™ model (.xml) with OpenVINO™ POT: +Command example for optimizing OpenVINO™ model (.xml) with OpenVINO™ PTQ: .. code-block:: (otx) ...$ otx optimize SSD --load-weights \ --val-data-roots \ - --output outputs/pot + --output outputs/ptq -Thus, to use POT pass the path to exported IR (.xml) model, to use NNCF pass the path to the PyTorch (.pth) weights. +Thus, to use PTQ pass the path to exported IR (.xml) model, to use NNCF pass the path to the PyTorch (.pth) weights. *********** @@ -419,7 +419,7 @@ With the ``--help`` command, you can list additional information, such as its pa --test-data-roots TEST_DATA_ROOTS Comma-separated paths to test data folders. --load-weights LOAD_WEIGHTS - Load model weights from previously saved checkpoint.It could be a trained/optimized model (POT only) or exported model. + Load model weights from previously saved checkpoint. It could be a trained/optimized model (with PTQ only) or exported model. -o OUTPUT, --output OUTPUT Location where the intermediate output of the task will be stored. --workspace WORKSPACE Path to the workspace where the command will run. @@ -449,7 +449,7 @@ With the ``--help`` command, you can list additional information, such as its pa .. code-block:: (otx) ...$ otx explain --help - usage: otx explain [-h] --explain-data-roots EXPLAIN_DATA_ROOTS [--save-explanation-to SAVE_EXPLANATION] --load-weights LOAD_WEIGHTS [--explain-algorithm EXPLAIN_ALGORITHM] [--overlay-weight OVERLAY_WEIGHT] [template] {params} ... + usage: otx explain [-h] --input INPUT [--output OUTPUT] --load-weights LOAD_WEIGHTS [--explain-algorithm EXPLAIN_ALGORITHM] [--overlay-weight OVERLAY_WEIGHT] [template] {params} ... positional arguments: template Enter the path or ID or name of the template file. @@ -459,9 +459,9 @@ With the ``--help`` command, you can list additional information, such as its pa optional arguments: -h, --help show this help message and exit - --explain-data-roots EXPLAIN_DATA_ROOTS + -i INPUT, --input INPUT Comma-separated paths to explain data folders. - --save-explanation-to SAVE_EXPLANATION_TO + -o OUTPUT, --output OUTPUT Output path for explanation images. --load-weights LOAD_WEIGHTS Load model weights from previously saved checkpoint. @@ -475,13 +475,13 @@ With the ``--help`` command, you can list additional information, such as its pa Weight of the saliency map when overlaying the input image with saliency map. -The command below will generate saliency maps (heatmaps with red colored areas of focus) of the trained model on the provided dataset and save the resulting images to ``save-explanation-to`` path: +The command below will generate saliency maps (heatmaps with red colored areas of focus) of the trained model on the provided dataset and save the resulting images to ``output`` path: .. 
code-block:: - (otx) ...$ otx explain SSD --explain-data-roots \ + (otx) ...$ otx explain SSD --input \ --load-weights \ - --save-explanation-to \ + --output \ --explain-algorithm classwisesaliencymap \ --overlay-weight 0.5 @@ -496,9 +496,9 @@ By default, the model is exported to the OpenVINO™ IR format without extra fea (otx) ...$ otx export SSD --load-weights \ --output outputs/openvino/with_features \ --dump-features - (otx) ...$ otx explain SSD --explain-data-roots \ + (otx) ...$ otx explain SSD --input \ --load-weights outputs/openvino/with_features \ - --save-explanation-to \ + --output \ --explain-algorithm classwisesaliencymap \ --overlay-weight 0.5 @@ -532,7 +532,7 @@ Demonstration -i INPUT, --input INPUT Source of input data: images folder, image, webcam and video. --load-weights LOAD_WEIGHTS - Load model weights from previously saved checkpoint.It could be a trained/optimized model (POT only) or exported model. + Load model weights from previously saved checkpoint. It could be a trained/optimized model (with PTQ only) or exported model. --fit-to-size FIT_TO_SIZE FIT_TO_SIZE Width and Height space-separated values. Fits displayed images to window with specified Width and Height. This option applies to result visualisation only. --loop Enable reading the input in a loop. diff --git a/docs/source/guide/get_started/installation.rst b/docs/source/guide/get_started/installation.rst index 1a2cb39d0a4..339992cf8d3 100644 --- a/docs/source/guide/get_started/installation.rst +++ b/docs/source/guide/get_started/installation.rst @@ -1,5 +1,5 @@ Installation -============= +============ ************** Prerequisites ************** @@ -88,9 +88,9 @@ Install ``tox`` and create a development environment: Then you may change code, and all fixes will be directly applied to the editable package. -**************************************************** +***************************************************** Install OpenVINO™ Training Extensions by using Docker -**************************************************** +***************************************************** .. code-block:: diff --git a/docs/source/guide/get_started/introduction.rst b/docs/source/guide/get_started/introduction.rst index d7a635c874e..048eb99f80d 100644 --- a/docs/source/guide/get_started/introduction.rst +++ b/docs/source/guide/get_started/introduction.rst @@ -42,9 +42,9 @@ OpenVINO™ Training Extensions will provide the :doc:`following features <../ex - OpenVINO™ Training Extensions uses `Datumaro `_ as the backend to handle datasets. On account of that, OpenVINO™ Training Extensions supports the most common academic field dataset formats for each task. In the future there will be more supported formats available to give more freedom of datasets format choice. - Improved :doc:`auto-configuration functionality <../explanation/additional_features/auto_configuration>`. OpenVINO™ Training Extensions analyzes provided dataset and selects the proper task and model template to provide the best accuracy/speed trade-off. It will also make a random auto-split of your dataset if there is no validation set provided. -************ +********************* Documentation content -************ +********************* 1.
**Quick start guide**: diff --git a/docs/source/guide/index.rst b/docs/source/guide/index.rst index 9311c189d9d..504684ce8e0 100644 --- a/docs/source/guide/index.rst +++ b/docs/source/guide/index.rst @@ -32,11 +32,7 @@ Guide :hidden: :caption: Reference - reference/api/index - reference/algorithm/index - reference/core/index - reference/hpo/hpo - reference/mpa/index + reference/index .. toctree:: diff --git a/docs/source/guide/reference/algorithm/action/adapters/index.rst b/docs/source/guide/reference/algorithm/action/adapters/index.rst deleted file mode 100644 index 0327ca90094..00000000000 --- a/docs/source/guide/reference/algorithm/action/adapters/index.rst +++ /dev/null @@ -1,8 +0,0 @@ -Adapters --------- - -.. toctree:: - :maxdepth: 3 - - mmaction - openvino diff --git a/docs/source/guide/reference/algorithm/action/adapters/mmaction.rst b/docs/source/guide/reference/algorithm/action/adapters/mmaction.rst deleted file mode 100644 index af4f59b9940..00000000000 --- a/docs/source/guide/reference/algorithm/action/adapters/mmaction.rst +++ /dev/null @@ -1,18 +0,0 @@ -mmaction -^^^^^^^^ - -.. toctree:: - :maxdepth: 3 - :caption: Contents: - -.. automodule:: otx.algorithms.action.adapters.mmaction.data - :members: - :undoc-members: - -.. automodule:: otx.algorithms.action.adapters.mmaction.models - :members: - :undoc-members: - -.. automodule:: otx.algorithms.action.adapters.mmaction.utils - :members: - :undoc-members: diff --git a/docs/source/guide/reference/algorithm/action/adapters/openvino.rst b/docs/source/guide/reference/algorithm/action/adapters/openvino.rst deleted file mode 100644 index 1ea1a3970de..00000000000 --- a/docs/source/guide/reference/algorithm/action/adapters/openvino.rst +++ /dev/null @@ -1,14 +0,0 @@ -openvino -^^^^^^^^ - -.. toctree:: - :maxdepth: 3 - :caption: Contents: - -.. automodule:: otx.algorithms.action.adapters.openvino.model_wrappers - :members: - :undoc-members: - -.. automodule:: otx.algorithms.action.adapters.openvino.dataloader - :members: - :undoc-members: diff --git a/docs/source/guide/reference/algorithm/action/configs.rst b/docs/source/guide/reference/algorithm/action/configs.rst deleted file mode 100644 index 0955b6a9e74..00000000000 --- a/docs/source/guide/reference/algorithm/action/configs.rst +++ /dev/null @@ -1,22 +0,0 @@ -Configs -^^^^^^^ - -.. toctree:: - :maxdepth: 3 - :caption: Contents: - -.. automodule:: otx.algorithms.action.configs - :members: - :undoc-members: - -.. automodule:: otx.algorithms.action.configs.base - :members: - :undoc-members: - -.. automodule:: otx.algorithms.action.configs.classification - :members: - :undoc-members: - -.. automodule:: otx.algorithms.action.configs.detection - :members: - :undoc-members: \ No newline at end of file diff --git a/docs/source/guide/reference/algorithm/action/index.rst b/docs/source/guide/reference/algorithm/action/index.rst deleted file mode 100644 index f61adddc4ea..00000000000 --- a/docs/source/guide/reference/algorithm/action/index.rst +++ /dev/null @@ -1,9 +0,0 @@ -Action -========= - -.. toctree:: - :maxdepth: 3 - - adapters/index - configs - tasks \ No newline at end of file diff --git a/docs/source/guide/reference/algorithm/action/tasks.rst b/docs/source/guide/reference/algorithm/action/tasks.rst deleted file mode 100644 index d4bb129c5cb..00000000000 --- a/docs/source/guide/reference/algorithm/action/tasks.rst +++ /dev/null @@ -1,10 +0,0 @@ -Tasks -^^^^^ - -.. toctree:: - :maxdepth: 3 - :caption: Contents: - -.. 
automodule:: otx.algorithms.action.tasks - :members: - :undoc-members: \ No newline at end of file diff --git a/docs/source/guide/reference/algorithm/anomaly/adapters/callbacks.rst b/docs/source/guide/reference/algorithm/anomaly/adapters/callbacks.rst deleted file mode 100644 index 5ae0f5c42d4..00000000000 --- a/docs/source/guide/reference/algorithm/anomaly/adapters/callbacks.rst +++ /dev/null @@ -1,6 +0,0 @@ -Callbacks -^^^^^^^^^ - -.. automodule:: otx.algorithms.anomaly.adapters.anomalib.callbacks - :members: - :undoc-members: \ No newline at end of file diff --git a/docs/source/guide/reference/algorithm/anomaly/adapters/config.rst b/docs/source/guide/reference/algorithm/anomaly/adapters/config.rst deleted file mode 100644 index 6520e37f1ac..00000000000 --- a/docs/source/guide/reference/algorithm/anomaly/adapters/config.rst +++ /dev/null @@ -1,6 +0,0 @@ -Config -^^^^^^ - -.. automodule:: otx.algorithms.anomaly.adapters.anomalib.config - :members: - :undoc-members: \ No newline at end of file diff --git a/docs/source/guide/reference/algorithm/anomaly/adapters/data.rst b/docs/source/guide/reference/algorithm/anomaly/adapters/data.rst deleted file mode 100644 index 5a588ac1818..00000000000 --- a/docs/source/guide/reference/algorithm/anomaly/adapters/data.rst +++ /dev/null @@ -1,6 +0,0 @@ -Data -^^^^ - -.. automodule:: otx.algorithms.anomaly.adapters.anomalib.data - :members: - :undoc-members: \ No newline at end of file diff --git a/docs/source/guide/reference/algorithm/anomaly/adapters/exportable_code.rst b/docs/source/guide/reference/algorithm/anomaly/adapters/exportable_code.rst deleted file mode 100644 index 3291aae1b10..00000000000 --- a/docs/source/guide/reference/algorithm/anomaly/adapters/exportable_code.rst +++ /dev/null @@ -1,6 +0,0 @@ -Exportable Code -^^^^^^^^^^^^^^^ - -.. automodule:: otx.algorithms.anomaly.adapters.anomalib.exportable_code - :members: - :undoc-members: \ No newline at end of file diff --git a/docs/source/guide/reference/algorithm/anomaly/adapters/index.rst b/docs/source/guide/reference/algorithm/anomaly/adapters/index.rst deleted file mode 100644 index 5b9463cc709..00000000000 --- a/docs/source/guide/reference/algorithm/anomaly/adapters/index.rst +++ /dev/null @@ -1,15 +0,0 @@ -Adapters --------- - -This section contains adapters that wrap ``anomalib`` to be used with OpenVINO™ Training Extensions. -Overall, these adapters could be categorized into ``config``, ``data``, -``callbacks``, ``logger`` and ``exportable_code``. - -.. toctree:: - :maxdepth: 3 - - config - data - callbacks - logger - exportable_code diff --git a/docs/source/guide/reference/algorithm/anomaly/adapters/logger.rst b/docs/source/guide/reference/algorithm/anomaly/adapters/logger.rst deleted file mode 100644 index 21c5f1bc91f..00000000000 --- a/docs/source/guide/reference/algorithm/anomaly/adapters/logger.rst +++ /dev/null @@ -1,6 +0,0 @@ -Logger -^^^^^^ - -.. automodule:: otx.algorithms.anomaly.adapters.anomalib.logger - :members: - :undoc-members: \ No newline at end of file diff --git a/docs/source/guide/reference/algorithm/anomaly/configs.rst b/docs/source/guide/reference/algorithm/anomaly/configs.rst deleted file mode 100644 index 3e4eb194d1b..00000000000 --- a/docs/source/guide/reference/algorithm/anomaly/configs.rst +++ /dev/null @@ -1,26 +0,0 @@ -Configs -^^^^^^^ - -.. toctree:: - :maxdepth: 3 - :caption: Contents: - -.. automodule:: otx.algorithms.anomaly.configs - :members: - :undoc-members: - -.. automodule:: otx.algorithms.anomaly.configs.base - :members: - :undoc-members: - -.. 
automodule:: otx.algorithms.anomaly.configs.classification - :members: - :undoc-members: - -.. automodule:: otx.algorithms.anomaly.configs.detection - :members: - :undoc-members: - -.. automodule:: otx.algorithms.anomaly.configs.segmentation - :members: - :undoc-members: \ No newline at end of file diff --git a/docs/source/guide/reference/algorithm/anomaly/index.rst b/docs/source/guide/reference/algorithm/anomaly/index.rst deleted file mode 100644 index 49511129691..00000000000 --- a/docs/source/guide/reference/algorithm/anomaly/index.rst +++ /dev/null @@ -1,10 +0,0 @@ -Anomaly -======= - -.. toctree:: - :maxdepth: 3 - - adapters/index - configs - tasks - tools \ No newline at end of file diff --git a/docs/source/guide/reference/algorithm/anomaly/tasks.rst b/docs/source/guide/reference/algorithm/anomaly/tasks.rst deleted file mode 100644 index d076967f0c5..00000000000 --- a/docs/source/guide/reference/algorithm/anomaly/tasks.rst +++ /dev/null @@ -1,10 +0,0 @@ -Tasks -^^^^^ - -.. toctree:: - :maxdepth: 3 - :caption: Contents: - -.. automodule:: otx.algorithms.anomaly.tasks - :members: - :undoc-members: \ No newline at end of file diff --git a/docs/source/guide/reference/algorithm/anomaly/tools.rst b/docs/source/guide/reference/algorithm/anomaly/tools.rst deleted file mode 100644 index 8e51743457a..00000000000 --- a/docs/source/guide/reference/algorithm/anomaly/tools.rst +++ /dev/null @@ -1,10 +0,0 @@ -Tools -^^^^^ - -.. toctree:: - :maxdepth: 3 - :caption: Contents: - -.. automodule:: otx.algorithms.anomaly.tools.sample - :members: - :undoc-members: \ No newline at end of file diff --git a/docs/source/guide/reference/algorithm/classification/adapters/index.rst b/docs/source/guide/reference/algorithm/classification/adapters/index.rst deleted file mode 100644 index 36b5cab324b..00000000000 --- a/docs/source/guide/reference/algorithm/classification/adapters/index.rst +++ /dev/null @@ -1,8 +0,0 @@ -Adapters --------- - -.. toctree:: - :maxdepth: 3 - - mmcls - openvino diff --git a/docs/source/guide/reference/algorithm/classification/adapters/mmcls.rst b/docs/source/guide/reference/algorithm/classification/adapters/mmcls.rst deleted file mode 100644 index 540b104e094..00000000000 --- a/docs/source/guide/reference/algorithm/classification/adapters/mmcls.rst +++ /dev/null @@ -1,62 +0,0 @@ -mmclassification -^^^^^^^^^^^^^^^^^ - -.. toctree:: - :maxdepth: 3 - :caption: Contents: - -.. automodule:: otx.algorithms.classification.adapters.mmcls - :members: - :undoc-members: - -.. automodule:: otx.algorithms.classification.adapters.mmcls.task - :members: - :undoc-members: - -.. automodule:: otx.algorithms.classification.adapters.mmcls.task.exporter - :members: - :undoc-members: - -.. automodule:: otx.algorithms.classification.adapters.mmcls.task.inferrer - :members: - :undoc-members: - -.. automodule:: otx.algorithms.classification.adapters.mmcls.task.stage - :members: - :undoc-members: - -.. automodule:: otx.algorithms.classification.adapters.mmcls.task.trainer - :members: - :undoc-members: - -.. automodule:: otx.algorithms.classification.adapters.mmcls.task.incremental - :members: - :undoc-members: - -.. automodule:: otx.algorithms.classification.adapters.mmcls.task.incremental.inferrer - :members: - :undoc-members: - -.. automodule:: otx.algorithms.classification.adapters.mmcls.task.incremental.stage - :members: - :undoc-members: - -.. automodule:: otx.algorithms.classification.adapters.mmcls.task.incremental.trainer - :members: - :undoc-members: - -.. 
automodule:: otx.algorithms.classification.adapters.mmcls.task.semisl - :members: - :undoc-members: - -.. automodule:: otx.algorithms.classification.adapters.mmcls.task.semisl.inferrer - :members: - :undoc-members: - -.. automodule:: otx.algorithms.classification.adapters.mmcls.task.semisl.stage - :members: - :undoc-members: - -.. automodule:: otx.algorithms.classification.adapters.mmcls.task.semisl.trainer - :members: - :undoc-members: diff --git a/docs/source/guide/reference/algorithm/classification/adapters/openvino.rst b/docs/source/guide/reference/algorithm/classification/adapters/openvino.rst deleted file mode 100644 index 53d7f438522..00000000000 --- a/docs/source/guide/reference/algorithm/classification/adapters/openvino.rst +++ /dev/null @@ -1,10 +0,0 @@ -openvino -^^^^^^^^ - -.. toctree:: - :maxdepth: 3 - :caption: Contents: - -.. automodule:: otx.algorithms.classification.adapters.openvino.model_wrappers - :members: - :undoc-members: \ No newline at end of file diff --git a/docs/source/guide/reference/algorithm/classification/configs.rst b/docs/source/guide/reference/algorithm/classification/configs.rst deleted file mode 100644 index 8533edf73d8..00000000000 --- a/docs/source/guide/reference/algorithm/classification/configs.rst +++ /dev/null @@ -1,10 +0,0 @@ -Configs -^^^^^^^ - -.. toctree:: - :maxdepth: 3 - :caption: Contents: - -.. automodule:: otx.algorithms.classification.configs - :members: - :undoc-members: \ No newline at end of file diff --git a/docs/source/guide/reference/algorithm/classification/index.rst b/docs/source/guide/reference/algorithm/classification/index.rst deleted file mode 100644 index 418f8bd89ad..00000000000 --- a/docs/source/guide/reference/algorithm/classification/index.rst +++ /dev/null @@ -1,10 +0,0 @@ -Classification -============== - -.. toctree:: - :maxdepth: 3 - - adapters/index - configs - tasks - utils \ No newline at end of file diff --git a/docs/source/guide/reference/algorithm/classification/tasks.rst b/docs/source/guide/reference/algorithm/classification/tasks.rst deleted file mode 100644 index f9c5da8af0f..00000000000 --- a/docs/source/guide/reference/algorithm/classification/tasks.rst +++ /dev/null @@ -1,10 +0,0 @@ -Tasks -^^^^^ - -.. toctree:: - :maxdepth: 3 - :caption: Contents: - -.. automodule:: otx.algorithms.classification.task - :members: - :undoc-members: \ No newline at end of file diff --git a/docs/source/guide/reference/algorithm/classification/utils.rst b/docs/source/guide/reference/algorithm/classification/utils.rst deleted file mode 100644 index 4c71c29c26b..00000000000 --- a/docs/source/guide/reference/algorithm/classification/utils.rst +++ /dev/null @@ -1,10 +0,0 @@ -Utils -^^^^^ - -.. toctree:: - :maxdepth: 3 - :caption: Contents: - -.. automodule:: otx.algorithms.classification.utils - :members: - :undoc-members: \ No newline at end of file diff --git a/docs/source/guide/reference/algorithm/detection/adapters/index.rst b/docs/source/guide/reference/algorithm/detection/adapters/index.rst deleted file mode 100644 index 3a313287abc..00000000000 --- a/docs/source/guide/reference/algorithm/detection/adapters/index.rst +++ /dev/null @@ -1,8 +0,0 @@ -Adapters --------- - -.. 
toctree:: - :maxdepth: 3 - - mmdet - openvino diff --git a/docs/source/guide/reference/algorithm/detection/adapters/mmdet.rst b/docs/source/guide/reference/algorithm/detection/adapters/mmdet.rst deleted file mode 100644 index 67b2d413d82..00000000000 --- a/docs/source/guide/reference/algorithm/detection/adapters/mmdet.rst +++ /dev/null @@ -1,62 +0,0 @@ -mmdetection -^^^^^^^^^^^ - -.. toctree:: - :maxdepth: 3 - :caption: Contents: - -.. automodule:: otx.algorithms.detection.adapters.mmdet - :members: - :undoc-members: - -.. automodule:: otx.algorithms.detection.adapters.mmdet.tasks - :members: - :undoc-members: - -.. automodule:: otx.algorithms.detection.adapters.mmdet.tasks.exporter - :members: - :undoc-members: - -.. automodule:: otx.algorithms.detection.adapters.mmdet.tasks.inferrer - :members: - :undoc-members: - -.. automodule:: otx.algorithms.detection.adapters.mmdet.tasks.stage - :members: - :undoc-members: - -.. automodule:: otx.algorithms.detection.adapters.mmdet.tasks.trainer - :members: - :undoc-members: - -.. automodule:: otx.algorithms.detection.adapters.mmdet.tasks.incremental - :members: - :undoc-members: - -.. automodule:: otx.algorithms.detection.adapters.mmdet.tasks.incremental.inferrer - :members: - :undoc-members: - -.. automodule:: otx.algorithms.detection.adapters.mmdet.tasks.incremental.stage - :members: - :undoc-members: - -.. automodule:: otx.algorithms.detection.adapters.mmdet.tasks.incremental.trainer - :members: - :undoc-members: - -.. automodule:: otx.algorithms.detection.adapters.mmdet.tasks.semisl - :members: - :undoc-members: - -.. automodule:: otx.algorithms.detection.adapters.mmdet.tasks.semisl.inferrer - :members: - :undoc-members: - -.. automodule:: otx.algorithms.detection.adapters.mmdet.tasks.semisl.stage - :members: - :undoc-members: - -.. automodule:: otx.algorithms.detection.adapters.mmdet.tasks.semisl.trainer - :members: - :undoc-members: diff --git a/docs/source/guide/reference/algorithm/detection/adapters/openvino.rst b/docs/source/guide/reference/algorithm/detection/adapters/openvino.rst deleted file mode 100644 index 68dbc7cd54e..00000000000 --- a/docs/source/guide/reference/algorithm/detection/adapters/openvino.rst +++ /dev/null @@ -1,10 +0,0 @@ -openvino -^^^^^^^^^^^^^^^^^ - -.. toctree:: - :maxdepth: 3 - :caption: Contents: - -.. automodule:: otx.algorithms.detection.adapters.openvino.model_wrappers - :members: - :undoc-members: \ No newline at end of file diff --git a/docs/source/guide/reference/algorithm/detection/configs.rst b/docs/source/guide/reference/algorithm/detection/configs.rst deleted file mode 100644 index 2a3ba546d1c..00000000000 --- a/docs/source/guide/reference/algorithm/detection/configs.rst +++ /dev/null @@ -1,26 +0,0 @@ -Configs -^^^^^^^ - -.. toctree:: - :maxdepth: 3 - :caption: Contents: - -.. automodule:: otx.algorithms.detection.configs - :members: - :undoc-members: - -.. automodule:: otx.algorithms.detection.configs.base - :members: - :undoc-members: - -.. automodule:: otx.algorithms.detection.configs.detection - :members: - :undoc-members: - -.. automodule:: otx.algorithms.detection.configs.instance_segmentation - :members: - :undoc-members: - -.. 
automodule:: otx.algorithms.detection.configs.rotated_detection - :members: - :undoc-members: \ No newline at end of file diff --git a/docs/source/guide/reference/algorithm/detection/index.rst b/docs/source/guide/reference/algorithm/detection/index.rst deleted file mode 100644 index 388b39f21dd..00000000000 --- a/docs/source/guide/reference/algorithm/detection/index.rst +++ /dev/null @@ -1,10 +0,0 @@ -Detection -========= - -.. toctree:: - :maxdepth: 3 - - adapters/index - configs - tasks - utils \ No newline at end of file diff --git a/docs/source/guide/reference/algorithm/detection/tasks.rst b/docs/source/guide/reference/algorithm/detection/tasks.rst deleted file mode 100644 index 6edd44fde3a..00000000000 --- a/docs/source/guide/reference/algorithm/detection/tasks.rst +++ /dev/null @@ -1,10 +0,0 @@ -Tasks -^^^^^ - -.. toctree:: - :maxdepth: 3 - :caption: Contents: - -.. automodule:: otx.algorithms.detection.tasks - :members: - :undoc-members: \ No newline at end of file diff --git a/docs/source/guide/reference/algorithm/detection/utils.rst b/docs/source/guide/reference/algorithm/detection/utils.rst deleted file mode 100644 index 131d5c27148..00000000000 --- a/docs/source/guide/reference/algorithm/detection/utils.rst +++ /dev/null @@ -1,10 +0,0 @@ -Utils -^^^^^ - -.. toctree:: - :maxdepth: 3 - :caption: Contents: - -.. automodule:: otx.algorithms.detection.utils - :members: - :undoc-members: \ No newline at end of file diff --git a/docs/source/guide/reference/algorithm/index.rst b/docs/source/guide/reference/algorithm/index.rst deleted file mode 100644 index 74c57a88281..00000000000 --- a/docs/source/guide/reference/algorithm/index.rst +++ /dev/null @@ -1,79 +0,0 @@ -Algorithm -=================== - -Introduction ------------- -This section contains algorithmic implementations. OpenVINO™ Training Extensions provides number of -different algorithms such as classification, detection, -segmentation and anomaly with various learning types such as supervised, -semi and self-supervised learning. - -.. toctree:: - :maxdepth: 1 - - action/index - anomaly/index - classification/index - detection/index - segmentation/index - - -Organizational Structure ------------------------- -Algorithms have the following organizational structure: - -.. code-block:: bash - - - ├── adapters - │ └── - │ ├── config - │ ├── data - │ └── ... - ├── configs - │ └── - │ ├── template.yaml - │ ├── configuration.py - │ ├── configuration.yaml - │ ├── compression_config.json - │ └── hpo_config.yaml - ├── tasks - │ ├── train.py - │ ├── inference.py - │ ├── nncf.py - │ └── openvino.py - └── tools - ├── README.md - └── sample.py - -where each algorithm has ``adapters``, ``configs``, ``tasks`` and ``tools``. - -Adapters -^^^^^^^^ -``adapters`` contain modules to wrap the original library used to perform the -task. For instance, detection task uses -`mmdetection `_ library, meaning that -``adapters`` comprises adapters to wrap ``mmdetection`` to use with OpenVINO™ Training Extensions. - -Configs -^^^^^^^ -``configs`` contain configuration related files including training, inference, -`NNCF `_ and -`HPO `_. - -Tasks -^^^^^ -.. _tasks: - -Tasks contain implementations that correspond to each phase in the workflow from -training to OpenVINO inference. Each algorithm expects ``train``, ``inference``, -``nncf`` and ``openvino`` python modules that implement the -`task interfaces `_. - -Tools -^^^^^ -Tools contain python implementations that performs :ref:`tasks ` in -end-to-end workflow. 
For example, current anomaly implementation has ``sample.py`` -file that reads an input dataset, trains a model and exports the model to -OpenVINO IR via either `POT `_ -or `NNCF `_. \ No newline at end of file diff --git a/docs/source/guide/reference/algorithm/segmentation/adapters/index.rst b/docs/source/guide/reference/algorithm/segmentation/adapters/index.rst deleted file mode 100644 index b304aac6cf1..00000000000 --- a/docs/source/guide/reference/algorithm/segmentation/adapters/index.rst +++ /dev/null @@ -1,8 +0,0 @@ -Adapters --------- - -.. toctree:: - :maxdepth: 3 - - mmseg - openvino diff --git a/docs/source/guide/reference/algorithm/segmentation/adapters/mmseg.rst b/docs/source/guide/reference/algorithm/segmentation/adapters/mmseg.rst deleted file mode 100644 index c03e42319c6..00000000000 --- a/docs/source/guide/reference/algorithm/segmentation/adapters/mmseg.rst +++ /dev/null @@ -1,62 +0,0 @@ -mmsegmentation -^^^^^^^^^^^^^^ - -.. toctree:: - :maxdepth: 3 - :caption: Contents: - -.. automodule:: otx.algorithms.segmentation.adapters.mmseg - :members: - :undoc-members: - -.. automodule:: otx.algorithms.segmentation.adapters.mmseg.tasks - :members: - :undoc-members: - -.. automodule:: otx.algorithms.segmentation.adapters.mmseg.tasks.exporter - :members: - :undoc-members: - -.. automodule:: otx.algorithms.segmentation.adapters.mmseg.tasks.inferrer - :members: - :undoc-members: - -.. automodule:: otx.algorithms.segmentation.adapters.mmseg.tasks.stage - :members: - :undoc-members: - -.. automodule:: otx.algorithms.segmentation.adapters.mmseg.tasks.trainer - :members: - :undoc-members: - -.. automodule:: otx.algorithms.segmentation.adapters.mmseg.tasks.incremental - :members: - :undoc-members: - -.. automodule:: otx.algorithms.segmentation.adapters.mmseg.tasks.incremental.inferrer - :members: - :undoc-members: - -.. automodule:: otx.algorithms.segmentation.adapters.mmseg.tasks.incremental.stage - :members: - :undoc-members: - -.. automodule:: otx.algorithms.segmentation.adapters.mmseg.tasks.incremental.trainer - :members: - :undoc-members: - -.. automodule:: otx.algorithms.segmentation.adapters.mmseg.tasks.semisl - :members: - :undoc-members: - -.. automodule:: otx.algorithms.segmentation.adapters.mmseg.tasks.semisl.inferrer - :members: - :undoc-members: - -.. automodule:: otx.algorithms.segmentation.adapters.mmseg.tasks.semisl.stage - :members: - :undoc-members: - -.. automodule:: otx.algorithms.segmentation.adapters.mmseg.tasks.semisl.trainer - :members: - :undoc-members: diff --git a/docs/source/guide/reference/algorithm/segmentation/adapters/openvino.rst b/docs/source/guide/reference/algorithm/segmentation/adapters/openvino.rst deleted file mode 100644 index 72452bfe4f4..00000000000 --- a/docs/source/guide/reference/algorithm/segmentation/adapters/openvino.rst +++ /dev/null @@ -1,10 +0,0 @@ -openvino -^^^^^^^^ - -.. toctree:: - :maxdepth: 3 - :caption: Contents: - -.. automodule:: otx.algorithms.segmentation.adapters.openvino.model_wrappers - :members: - :undoc-members: \ No newline at end of file diff --git a/docs/source/guide/reference/algorithm/segmentation/configs.rst b/docs/source/guide/reference/algorithm/segmentation/configs.rst deleted file mode 100644 index ede35f526f1..00000000000 --- a/docs/source/guide/reference/algorithm/segmentation/configs.rst +++ /dev/null @@ -1,14 +0,0 @@ -Configs -^^^^^^^ - -.. toctree:: - :maxdepth: 3 - :caption: Contents: - -.. automodule:: otx.algorithms.segmentation.configs - :members: - :undoc-members: - -.. 
automodule:: otx.algorithms.segmentation.configs.base - :members: - :undoc-members: \ No newline at end of file diff --git a/docs/source/guide/reference/algorithm/segmentation/index.rst b/docs/source/guide/reference/algorithm/segmentation/index.rst deleted file mode 100644 index 20141192ba3..00000000000 --- a/docs/source/guide/reference/algorithm/segmentation/index.rst +++ /dev/null @@ -1,9 +0,0 @@ -Segmentation -============ - -.. toctree:: - :maxdepth: 3 - - adapters/index - configs - tasks \ No newline at end of file diff --git a/docs/source/guide/reference/algorithm/segmentation/tasks.rst b/docs/source/guide/reference/algorithm/segmentation/tasks.rst deleted file mode 100644 index 15c516fbe56..00000000000 --- a/docs/source/guide/reference/algorithm/segmentation/tasks.rst +++ /dev/null @@ -1,10 +0,0 @@ -Tasks -^^^^^ - -.. toctree:: - :maxdepth: 3 - :caption: Contents: - -.. automodule:: otx.algorithms.segmentation.task - :members: - :undoc-members: \ No newline at end of file diff --git a/docs/source/guide/reference/api/configuration/index.rst b/docs/source/guide/reference/api/configuration/index.rst deleted file mode 100644 index 106ea5e9e05..00000000000 --- a/docs/source/guide/reference/api/configuration/index.rst +++ /dev/null @@ -1,7 +0,0 @@ -Configuration -============= - -.. automodule:: otx.api.configuration - :members: - :undoc-members: - :show-inheritance: \ No newline at end of file diff --git a/docs/source/guide/reference/api/entities/general.rst b/docs/source/guide/reference/api/entities/general.rst deleted file mode 100644 index c2219795e4e..00000000000 --- a/docs/source/guide/reference/api/entities/general.rst +++ /dev/null @@ -1,102 +0,0 @@ -General -------- - -.. automodule:: otx.api.entities.annotation - :members: - :undoc-members: - -.. automodule:: otx.api.entities.color - :members: - :undoc-members: - -.. automodule:: otx.api.entities.coordinate - :members: - :undoc-members: - -.. automodule:: otx.api.entities.dataset_item - :members: - :undoc-members: - -.. automodule:: otx.api.entities.datasets - :members: - :undoc-members: - -.. automodule:: otx.api.entities.graph - :members: - :undoc-members: - -.. automodule:: otx.api.entities.id - :members: - :undoc-members: - -.. automodule:: otx.api.entities.image - :members: - :undoc-members: - -.. automodule:: otx.api.entities.inference_parameters - :members: - :undoc-members: - -.. automodule:: otx.api.entities.label_schema - :members: - :undoc-members: - -.. automodule:: otx.api.entities.label - :members: - :undoc-members: - -.. automodule:: otx.api.entities.media - :members: - :undoc-members: - -.. automodule:: otx.api.entities.metadata - :members: - :undoc-members: - -.. automodule:: otx.api.entities.metrics - :members: - :undoc-members: - -.. automodule:: otx.api.entities.model_template - :members: - :undoc-members: - -.. automodule:: otx.api.entities.model - :members: - :undoc-members: - -.. automodule:: otx.api.entities.optimization_parameters - :members: - :undoc-members: - -.. automodule:: otx.api.entities.result_media - :members: - :undoc-members: - -.. automodule:: otx.api.entities.resultset - :members: - :undoc-members: - -.. automodule:: otx.api.entities.scored_label - :members: - :undoc-members: - -.. automodule:: otx.api.entities.subset - :members: - :undoc-members: - -.. automodule:: otx.api.entities.task_environment - :members: - :undoc-members: - -.. automodule:: otx.api.entities.tensor - :members: - :undoc-members: - -.. automodule:: otx.api.entities.train_parameters - :members: - :undoc-members: - -.. 
automodule:: otx.api.entities.url - :members: - :undoc-members: \ No newline at end of file diff --git a/docs/source/guide/reference/api/entities/index.rst b/docs/source/guide/reference/api/entities/index.rst deleted file mode 100644 index d1901ffbf19..00000000000 --- a/docs/source/guide/reference/api/entities/index.rst +++ /dev/null @@ -1,11 +0,0 @@ -Entities -======== - -This page contains a reference description for the Entities. - -.. toctree:: - :maxdepth: 3 - - general - shapes - interfaces \ No newline at end of file diff --git a/docs/source/guide/reference/api/entities/interfaces.rst b/docs/source/guide/reference/api/entities/interfaces.rst deleted file mode 100644 index c266a107608..00000000000 --- a/docs/source/guide/reference/api/entities/interfaces.rst +++ /dev/null @@ -1,6 +0,0 @@ -Interfaces ----------- - -.. automodule:: otx.api.entities.interfaces.graph_interface - :members: - :undoc-members: diff --git a/docs/source/guide/reference/api/entities/shapes.rst b/docs/source/guide/reference/api/entities/shapes.rst deleted file mode 100644 index 5bf152948a9..00000000000 --- a/docs/source/guide/reference/api/entities/shapes.rst +++ /dev/null @@ -1,18 +0,0 @@ -Shapes ------- - -.. automodule:: otx.api.entities.shapes.ellipse - :members: - :undoc-members: - -.. automodule:: otx.api.entities.shapes.polygon - :members: - :undoc-members: - -.. automodule:: otx.api.entities.shapes.rectangle - :members: - :undoc-members: - -.. automodule:: otx.api.entities.shapes.shape - :members: - :undoc-members: \ No newline at end of file diff --git a/docs/source/guide/reference/api/index.rst b/docs/source/guide/reference/api/index.rst deleted file mode 100644 index 57d7916a447..00000000000 --- a/docs/source/guide/reference/api/index.rst +++ /dev/null @@ -1,9 +0,0 @@ -API -============= - -.. toctree:: - :maxdepth: 2 - - configuration/index - entities/index - usecases/index \ No newline at end of file diff --git a/docs/source/guide/reference/api/usecases/adapters.rst b/docs/source/guide/reference/api/usecases/adapters.rst deleted file mode 100644 index 8368053095a..00000000000 --- a/docs/source/guide/reference/api/usecases/adapters.rst +++ /dev/null @@ -1,6 +0,0 @@ -Adapters --------- - -.. automodule:: otx.api.usecases.adapters.model_adapter - :members: - :undoc-members: diff --git a/docs/source/guide/reference/api/usecases/evaluation.rst b/docs/source/guide/reference/api/usecases/evaluation.rst deleted file mode 100644 index a397f1eed2f..00000000000 --- a/docs/source/guide/reference/api/usecases/evaluation.rst +++ /dev/null @@ -1,38 +0,0 @@ -Evaluation ----------- - -.. toctree:: - :maxdepth: 3 - :caption: Contents: - -.. automodule:: otx.api.usecases.evaluation.accuracy - :members: - :undoc-members: - -.. automodule:: otx.api.usecases.evaluation.anomaly_metrics - :members: - :undoc-members: - -.. automodule:: otx.api.usecases.evaluation.averaging - :members: - :undoc-members: - -.. automodule:: otx.api.usecases.evaluation.basic_operations - :members: - :undoc-members: - -.. automodule:: otx.api.usecases.evaluation.dice - :members: - :undoc-members: - -.. automodule:: otx.api.usecases.evaluation.f_measure - :members: - :undoc-members: - -.. automodule:: otx.api.usecases.evaluation.metrics_helper - :members: - :undoc-members: - -.. 
automodule:: otx.api.usecases.evaluation.performance_provider_interface - :members: - :undoc-members: \ No newline at end of file diff --git a/docs/source/guide/reference/api/usecases/exportable_code.rst b/docs/source/guide/reference/api/usecases/exportable_code.rst deleted file mode 100644 index f726b02cfad..00000000000 --- a/docs/source/guide/reference/api/usecases/exportable_code.rst +++ /dev/null @@ -1,2 +0,0 @@ -Exportable Code ---------------- \ No newline at end of file diff --git a/docs/source/guide/reference/api/usecases/index.rst b/docs/source/guide/reference/api/usecases/index.rst deleted file mode 100644 index ef4bd632477..00000000000 --- a/docs/source/guide/reference/api/usecases/index.rst +++ /dev/null @@ -1,15 +0,0 @@ -Usecases -======== - -.. automodule:: otx.api.usecases - :members: - :undoc-members: - -.. toctree:: - :maxdepth: 3 - - adapters - evaluation - exportable_code - reporting - tasks diff --git a/docs/source/guide/reference/api/usecases/reporting.rst b/docs/source/guide/reference/api/usecases/reporting.rst deleted file mode 100644 index ee85548324c..00000000000 --- a/docs/source/guide/reference/api/usecases/reporting.rst +++ /dev/null @@ -1,6 +0,0 @@ -Reporting ---------- - -.. automodule:: otx.api.usecases.reporting - :members: - :undoc-members: diff --git a/docs/source/guide/reference/api/usecases/tasks.rst b/docs/source/guide/reference/api/usecases/tasks.rst deleted file mode 100644 index 93a4da176a4..00000000000 --- a/docs/source/guide/reference/api/usecases/tasks.rst +++ /dev/null @@ -1,6 +0,0 @@ -Task interfaces ---------------- - -.. automodule:: otx.api.usecases.tasks - :members: - :undoc-members: diff --git a/docs/source/guide/reference/core/data.rst b/docs/source/guide/reference/core/data.rst deleted file mode 100644 index 86860aa1186..00000000000 --- a/docs/source/guide/reference/core/data.rst +++ /dev/null @@ -1,18 +0,0 @@ -Data -^^^^^^^ - -.. toctree:: - :maxdepth: 3 - :caption: Contents: - -.. automodule:: otx.core.data - :members: - :undoc-members: - -.. automodule:: otx.core.data.adapter - :members: - :undoc-members: - -.. automodule:: otx.core.data.manager - :members: - :undoc-members: diff --git a/docs/source/guide/reference/core/index.rst b/docs/source/guide/reference/core/index.rst deleted file mode 100644 index b55754cce07..00000000000 --- a/docs/source/guide/reference/core/index.rst +++ /dev/null @@ -1,8 +0,0 @@ -Core -==== - -.. toctree:: - :maxdepth: 1 - - data - ov/index diff --git a/docs/source/guide/reference/core/ov/graph.rst b/docs/source/guide/reference/core/ov/graph.rst deleted file mode 100644 index 6f47f9e154b..00000000000 --- a/docs/source/guide/reference/core/ov/graph.rst +++ /dev/null @@ -1,22 +0,0 @@ -Graph -^^^^^^^^ - -.. toctree:: - :maxdepth: 3 - :caption: Contents: - -.. automodule:: otx.core.ov.graph - :members: - :undoc-members: - -.. automodule:: otx.core.ov.graph.graph - :members: - :undoc-members: - -.. automodule:: otx.core.ov.graph.utils - :members: - :undoc-members: - -.. automodule:: otx.core.ov.graph.parsers - :members: - :undoc-members: \ No newline at end of file diff --git a/docs/source/guide/reference/core/ov/index.rst b/docs/source/guide/reference/core/ov/index.rst deleted file mode 100644 index 07ce1abd4cf..00000000000 --- a/docs/source/guide/reference/core/ov/index.rst +++ /dev/null @@ -1,25 +0,0 @@ -OpenVINO -=================== - -.. toctree:: - :maxdepth: 1 - - graph - models - ops - -.. automodule:: otx.core.ov - :members: - :undoc-members: - -.. 
automodule:: otx.core.ov.omz_wrapper - :members: - :undoc-members: - -.. automodule:: otx.core.ov.registry - :members: - :undoc-members: - -.. automodule:: otx.core.ov.utils - :members: - :undoc-members: \ No newline at end of file diff --git a/docs/source/guide/reference/core/ov/models.rst b/docs/source/guide/reference/core/ov/models.rst deleted file mode 100644 index c3f535e82ab..00000000000 --- a/docs/source/guide/reference/core/ov/models.rst +++ /dev/null @@ -1,22 +0,0 @@ -Models -^^^^^^^^ - -.. toctree:: - :maxdepth: 3 - :caption: Contents: - -.. automodule:: otx.core.ov.models - :members: - :undoc-members: - -.. automodule:: otx.core.ov.models.mmov_model - :members: - :undoc-members: - -.. automodule:: otx.core.ov.models.ov_model - :members: - :undoc-members: - -.. automodule:: otx.core.ov.models.parser_mixin - :members: - :undoc-members: diff --git a/docs/source/guide/reference/core/ov/ops.rst b/docs/source/guide/reference/core/ov/ops.rst deleted file mode 100644 index 7b249e02702..00000000000 --- a/docs/source/guide/reference/core/ov/ops.rst +++ /dev/null @@ -1,82 +0,0 @@ -OPS -^^^ - -.. toctree:: - :maxdepth: 3 - :caption: Contents: - -.. automodule:: otx.core.ov.ops - :members: - :undoc-members: - -.. automodule:: otx.core.ov.ops.activations - :members: - :undoc-members: - -.. automodule:: otx.core.ov.ops.arithmetics - :members: - :undoc-members: - -.. automodule:: otx.core.ov.ops.builder - :members: - :undoc-members: - -.. automodule:: otx.core.ov.ops.convolutions - :members: - :undoc-members: - -.. automodule:: otx.core.ov.ops.generation - :members: - :undoc-members: - -.. automodule:: otx.core.ov.ops.image_processings - :members: - :undoc-members: - -.. automodule:: otx.core.ov.ops.infrastructures - :members: - :undoc-members: - -.. automodule:: otx.core.ov.ops.matmuls - :members: - :undoc-members: - -.. automodule:: otx.core.ov.ops.movements - :members: - :undoc-members: - -.. automodule:: otx.core.ov.ops.normalizations - :members: - :undoc-members: - -.. automodule:: otx.core.ov.ops.object_detections - :members: - :undoc-members: - -.. automodule:: otx.core.ov.ops.op - :members: - :undoc-members: - -.. automodule:: otx.core.ov.ops.poolings - :members: - :undoc-members: - -.. automodule:: otx.core.ov.ops.reductions - :members: - :undoc-members: - -.. automodule:: otx.core.ov.ops.shape_manipulations - :members: - :undoc-members: - -.. automodule:: otx.core.ov.ops.sorting_maximization - :members: - :undoc-members: - -.. automodule:: otx.core.ov.ops.type_conversions - :members: - :undoc-members: - -.. automodule:: otx.core.ov.ops.utils - :members: - :undoc-members: \ No newline at end of file diff --git a/docs/source/guide/reference/hpo/hpo.rst b/docs/source/guide/reference/hpo/hpo.rst deleted file mode 100644 index 13ead8124b8..00000000000 --- a/docs/source/guide/reference/hpo/hpo.rst +++ /dev/null @@ -1,10 +0,0 @@ -HPO -^^^^^^^ - -.. toctree:: - :maxdepth: 3 - :caption: Contents: - -.. automodule:: otx.hpo - :members: - :undoc-members: \ No newline at end of file diff --git a/docs/source/guide/reference/index.rst b/docs/source/guide/reference/index.rst new file mode 100644 index 00000000000..6abaf03ba3d --- /dev/null +++ b/docs/source/guide/reference/index.rst @@ -0,0 +1,11 @@ +API reference +============= + +.. _api_reference: + +.. 
autosummary:: + :recursive: + :nosignatures: + :toctree: _autosummary + + otx diff --git a/docs/source/guide/reference/mpa/index.rst b/docs/source/guide/reference/mpa/index.rst deleted file mode 100644 index 2b7ebc58cf3..00000000000 --- a/docs/source/guide/reference/mpa/index.rst +++ /dev/null @@ -1,8 +0,0 @@ -Model Preparation Algorithm -=========================== - -.. toctree:: - :maxdepth: 1 - - modules/index - utils diff --git a/docs/source/guide/tutorials/advanced/semi_sl.rst b/docs/source/guide/tutorials/advanced/semi_sl.rst index 236c72f1189..b4ce1142627 100644 --- a/docs/source/guide/tutorials/advanced/semi_sl.rst +++ b/docs/source/guide/tutorials/advanced/semi_sl.rst @@ -28,7 +28,7 @@ The process has been tested on the following configuration: To learn how to export the trained model, refer to `classification export <../base/how_to_train/classification.html#export>`__. - To learn how to optimize the trained model (.xml) with OpenVINO™ POT, refer to `classification optimization <../base/how_to_train/classification.html#optimization>`__. + To learn how to optimize the trained model (.xml) with OpenVINO™ PTQ, refer to `classification optimization <../base/how_to_train/classification.html#optimization>`__. Currently, OpenVINO™ NNCF optimization doesn't support a full Semi-SL training algorithm. The accuracy-aware optimization will be executed on labeled data only. So, the performance drop may be more noticeable than after ordinary supervised training. diff --git a/docs/source/guide/tutorials/base/deploy.rst b/docs/source/guide/tutorials/base/deploy.rst index 8fbbdac7d97..367597d414c 100644 --- a/docs/source/guide/tutorials/base/deploy.rst +++ b/docs/source/guide/tutorials/base/deploy.rst @@ -52,7 +52,7 @@ using the command below: 2023-01-20 09:30:41,737 | INFO : Deploying the model 2023-01-20 09:30:41,753 | INFO : Deploying completed -You can also deploy the quantized model, that was optimized with NNCF or POT, passing the path to this model in IR format to ``--load-weights`` parameter. +You can also deploy the quantized model that was optimized with NNCF or PTQ by passing the path to this model in IR format to the ``--load-weights`` parameter. After that, you can use the resulting ``openvino.zip`` archive in other applications. diff --git a/docs/source/guide/tutorials/base/explain.rst b/docs/source/guide/tutorials/base/explain.rst index a9367f19887..fa03aea9bea 100644 --- a/docs/source/guide/tutorials/base/explain.rst +++ b/docs/source/guide/tutorials/base/explain.rst @@ -22,12 +22,12 @@ created in the previous step. . venv/otx/bin/activate 2. ``otx explain`` returns saliency maps (heatmaps with red colored areas of focus) -at the path specified by ``--save-explanation-to``. +at the path specified by ``--output``. .. code-block:: - otx explain --explain-data-roots otx-workspace-DETECTION/splitted_dataset/val/ \ - --save-explanation-to outputs/explanation \ + otx explain --input otx-workspace-DETECTION/splitted_dataset/val/ \ + --output outputs/explanation \ --load-weights outputs/weights.pth 3. To specify the algorithm of saliency map creation for classification, @@ -48,7 +48,7 @@ For detection task, we can choose between the following methods: 4.
As a result we will get a folder with a pair of generated -images for each image in ``--explain-data-roots``: +images for each image in ``--input``: - saliency map - where red color means more attention of the model - overlay - where the saliency map is combined with the original image: diff --git a/docs/source/guide/tutorials/base/how_to_train/action_classification.rst b/docs/source/guide/tutorials/base/how_to_train/action_classification.rst index 12295845c8b..7304660dc15 100644 --- a/docs/source/guide/tutorials/base/how_to_train/action_classification.rst +++ b/docs/source/guide/tutorials/base/how_to_train/action_classification.rst @@ -212,7 +212,7 @@ Export ********* 1. ``otx export`` exports a trained Pytorch `.pth` model to the OpenVINO™ Intermediate Representation (IR) format. -It allows running the model on the Intel hardware much more efficiently, especially on the CPU. Also, the resulting IR model is required to run POT optimization. IR model consists of two files: ``openvino.xml`` for weights and ``openvino.bin`` for architecture. +It allows running the model on the Intel hardware much more efficiently, especially on the CPU. Also, the resulting IR model is required to run PTQ optimization. IR model consists of two files: ``openvino.xml`` for weights and ``openvino.bin`` for architecture. 2. Run the command line below to export the trained model and save the exported model to the ``openvino`` folder. @@ -235,7 +235,7 @@ and save the exported model to the ``openvino`` folder. 2023-02-21 22:54:35,424 - mmaction - INFO - Exporting completed -3. Check the accuracy of the IR model and the consistency between the exported model and the PyTorch model, using ``otx eval`` and passing the IR model path to the ``--load-weights`` parameter. .. code-block:: @@ -254,22 +254,24 @@ Optimization ************* 1. You can further optimize the model with ``otx optimize``. -Currently, quantization jobs that include POT is supported for X3D template. MoViNet will be supported in near future. +Currently, quantization jobs that include PTQ are supported for the X3D template. MoViNet will be supported in the near future. + +The optimized model will be quantized to ``INT8`` format. Refer to :doc:`optimization explanation <../../../explanation/additional_features/models_optimization>` section for more details on model optimization. 2. Example command for optimizing -OpenVINO™ model (.xml) with OpenVINO™ POT. +OpenVINO™ model (.xml) with OpenVINO™ PTQ. .. code-block:: (otx) ...$ otx optimize --load-weights openvino/openvino.xml \ - --output pot_model + --output ptq_model ... Performance(score: 0.6252587703095486, dashboard: (3 metric groups)) -Keep in mind that POT will take some time (generally less than NNCF optimization) without logging to optimize the model. +Keep in mind that PTQ will take some time (generally less than NNCF optimization) without logging to optimize the model. 3. Now you have fully trained, optimized and exported an efficient, ready-to-use action classification model. diff --git a/docs/source/guide/tutorials/base/how_to_train/action_detection.rst b/docs/source/guide/tutorials/base/how_to_train/action_detection.rst index c1252ad011a..6103735beb0 100644 --- a/docs/source/guide/tutorials/base/how_to_train/action_detection.rst +++ b/docs/source/guide/tutorials/base/how_to_train/action_detection.rst @@ -201,6 +201,8 @@ Optimization 1.
You can further optimize the model with ``otx optimize``. Currently, only PTQ is supported for action detection. NNCF will be supported in the near future. + +The optimized model will be quantized to ``INT8`` format. Refer to :doc:`optimization explanation <../../../explanation/additional_features/models_optimization>` section for more details on model optimization. 2. Example command for optimizing OpenVINO™ model (.xml) with OpenVINO™ PTQ. .. code-block:: (otx) ...$ otx optimize --load-weights openvino/openvino.xml \ - --save-model-to pot_model + --save-model-to ptq_model ... diff --git a/docs/source/guide/tutorials/base/how_to_train/anomaly_detection.rst b/docs/source/guide/tutorials/base/how_to_train/anomaly_detection.rst index 31b51809c95..42058ffdf8a 100644 --- a/docs/source/guide/tutorials/base/how_to_train/anomaly_detection.rst +++ b/docs/source/guide/tutorials/base/how_to_train/anomaly_detection.rst @@ -156,7 +156,7 @@ Export ****** 1. ``otx export`` exports a trained Pytorch `.pth` model to the OpenVINO™ Intermediate Representation (IR) format. -It allows running the model on the Intel hardware much more efficient, especially on the CPU. Also, the resulting IR model is required to run POT optimization. IR model consists of 2 files: ``openvino.xml`` for weights and ``openvino.bin`` for architecture. +It allows running the model on the Intel hardware much more efficiently, especially on the CPU. Also, the resulting IR model is required to run PTQ optimization. IR model consists of 2 files: ``openvino.xml`` for weights and ``openvino.bin`` for architecture. 2. We can run the below command line to export the trained model and save the exported model to the ``openvino`` folder: @@ -200,10 +200,11 @@ This gives the following results: Optimization ************ -Anomaly tasks can be optimized either in POT or NNCF format. For more information refer to the :doc:`optimization explanation <../../../explanation/additional_features/models_optimization>` section. +Anomaly tasks can be optimized with either PTQ or NNCF. The model will be quantized to ``INT8`` format. +For more information refer to the :doc:`optimization explanation <../../../explanation/additional_features/models_optimization>` section. -1. Let's start with POT +1. Let's start with PTQ optimization. .. code-block:: otx optimize ote_anomaly_detection_padim \ --train-data-roots datasets/MVTec/bottle/train \ --load-weights otx-workspace-ANOMALY_DETECTION/openvino/openvino.xml \ - --output otx-workspace-ANOMALY_DETECTION/pot_model + --output otx-workspace-ANOMALY_DETECTION/ptq_model This command generates the following files that can be used to run :doc:`otx demo <../demo>`: @@ -233,7 +234,7 @@ weights to the ``optimize`` command: --load-weights otx-workspace-ANOMALY_DETECTION/models/weights.pth \ --output otx-workspace-ANOMALY_DETECTION/nncf_model -Similar to POT optimization, it generates the following files: +Similar to PTQ optimization, it generates the following files: - image_threshold - pixel_threshold diff --git a/docs/source/guide/tutorials/base/how_to_train/classification.rst b/docs/source/guide/tutorials/base/how_to_train/classification.rst index c84448f4c7d..6569194cd3a 100644 --- a/docs/source/guide/tutorials/base/how_to_train/classification.rst +++ b/docs/source/guide/tutorials/base/how_to_train/classification.rst @@ -177,7 +177,7 @@ Export ********* 1. ``otx export`` exports a trained Pytorch `.pth` model to the OpenVINO™ Intermediate Representation (IR) format.
-It allows running the model on the Intel hardware much more efficient, especially on the CPU. Also, the resulting IR model is required to run POT optimization. IR model consists of 2 files: ``openvino.xml`` for weights and ``openvino.bin`` for architecture. +It allows running the model on the Intel hardware much more efficiently, especially on the CPU. Also, the resulting IR model is required to run PTQ optimization. IR model consists of 2 files: ``openvino.xml`` for weights and ``openvino.bin`` for architecture. 2. You can run the below command line to export the trained model and save the exported model to the ``openvino_model`` folder: @@ -212,7 +212,7 @@ Optimization ************* 1. You can further optimize the model with ``otx optimize``. -It uses NNCF or POT depending on the model format. +It uses NNCF or PTQ depending on the model and transforms it to ``INT8`` format. Please refer to the :doc:`optimization explanation <../../../explanation/additional_features/models_optimization>` section for more details on model optimization. @@ -235,18 +235,18 @@ a PyTorch model (`.pth`) with OpenVINO™ NNCF. The optimization time relies on the hardware characteristics, for example on 1 NVIDIA GeForce RTX 3090 and Intel(R) Core(TM) i9-10980XE it took about 10 minutes. 3. Command example for optimizing -OpenVINO™ model (.xml) with OpenVINO™ POT. +OpenVINO™ model (.xml) with OpenVINO™ PTQ. .. code-block:: (otx) ...$ otx optimize --load-weights openvino_model/openvino.xml \ - --output pot_model + --output ptq_model ... Performance(score: 0.9577656675749319, dashboard: (3 metric groups)) -Please note, that POT will take some time (generally less than NNCF optimization) without logging to optimize the model. +Please note that PTQ will take some time (generally less than NNCF optimization) without logging to optimize the model. 4. Now you have fully trained, optimized and exported an efficient, ready-to-use classification model. diff --git a/docs/source/guide/tutorials/base/how_to_train/detection.rst b/docs/source/guide/tutorials/base/how_to_train/detection.rst index f6235e76d84..b2434830c45 100644 --- a/docs/source/guide/tutorials/base/how_to_train/detection.rst +++ b/docs/source/guide/tutorials/base/how_to_train/detection.rst @@ -330,7 +330,7 @@ Export 1. ``otx export`` exports a trained Pytorch `.pth` model to the OpenVINO™ Intermediate Representation (IR) format. It allows running it efficiently on Intel hardware, especially on CPU, using OpenVINO™ runtime. -Also, the resulting IR model is required to run POT optimization in the section below. IR model contains 2 files: ``openvino.xml`` for weights and ``openvino.bin`` for architecture. +Also, the resulting IR model is required to run PTQ optimization in the section below. IR model contains 2 files: ``openvino.xml`` for weights and ``openvino.bin`` for architecture. 2. That's how we can export the trained model ``../outputs/weights.pth`` from the previous section and save the exported model to the ``../outputs/openvino/`` folder. @@ -384,11 +384,11 @@ Optimization ************* 1. We can further optimize the model with ``otx optimize``. -It uses NNCF or POT depending on the model format. +It uses NNCF or PTQ depending on the model and transforms it to ``INT8`` format. ``NNCF`` optimization is used for trained snapshots in a framework-specific format such as checkpoint (.pth) file from Pytorch. It starts accuracy-aware quantization based on the obtained weights from the training stage.
Generally, we will see the same output as during training. -``POT`` optimization is used for models exported in the OpenVINO™ IR format. It decreases the floating-point precision to integer precision of the exported model by performing the post-training optimization. +``PTQ`` optimization is used for models exported in the OpenVINO™ IR format. It reduces the exported model from floating-point to integer precision by performing post-training quantization. The function results with the following files, which could be used to run :doc:`otx demo <../demo>` as well with PyTorch (`.pth`) and IR model (`.xml`): @@ -420,20 +420,20 @@ with OpenVINO NNCF. 3. Command example for optimizing OpenVINO™ model (.xml) -with OpenVINO™ POT. +with OpenVINO™ PTQ. .. code-block:: (otx) ...$ otx optimize --load-weights ../outputs/openvino/openvino.xml \ - --output ../outputs/pot \ - --output ../outputs/pot + --output ../outputs/ptq + --output ../outputs/ptq ... 2023-01-10 06:29:46,751 | INFO : Loading OpenVINO OTXDetectionTask 2023-01-10 06:29:47,685 | INFO : OpenVINO task initialization completed - 2023-01-10 06:29:47,685 | INFO : Start POT optimization - 2023-01-10 06:34:29,304 | INFO : POT optimization completed + 2023-01-10 06:29:47,685 | INFO : Start PTQ optimization + 2023-01-10 06:34:29,304 | INFO : PTQ optimization completed 2023-01-10 06:34:29,419 | INFO : Start OpenVINO inference 2023-01-10 06:34:33,275 | INFO : OpenVINO inference completed 2023-01-10 06:34:33,275 | INFO : Start OpenVINO metric evaluation @@ -441,7 +441,7 @@ with OpenVINO™ POT. Performance(score: 0.5389435989256938, dashboard: (1 metric groups)) The optimization time highly relies on the hardware characteristics, for example on 1 NVIDIA GeForce RTX 3090 it took about 10 minutes. -Please note, that POT will take some time without logging to optimize the model. +Please note that PTQ will take some time to optimize the model, without logging its progress. 4. Finally, we can also evaluate the optimized model by passing it to the ``otx eval`` function. diff --git a/docs/source/guide/tutorials/base/how_to_train/instance_segmentation.rst b/docs/source/guide/tutorials/base/how_to_train/instance_segmentation.rst index 925bf9a257a..3d9526d92ab 100644 --- a/docs/source/guide/tutorials/base/how_to_train/instance_segmentation.rst +++ b/docs/source/guide/tutorials/base/how_to_train/instance_segmentation.rst @@ -327,7 +327,7 @@ Export 1. ``otx export`` exports a trained Pytorch `.pth` model to the OpenVINO™ Intermediate Representation (IR) format. -It allows running the model on the Intel hardware much more efficient, especially on the CPU. Also, the resulting IR model is required to run POT optimization. IR model consists of 2 files: ``openvino.xml`` for weights and ``openvino.bin`` for architecture. +It allows running the model on the Intel hardware much more efficiently, especially on the CPU. Also, the resulting IR model is required to run PTQ optimization. The IR model consists of 2 files: ``openvino.xml`` for the model architecture and ``openvino.bin`` for the weights. 2. We can run the below command line to export the trained model and save the exported model to the ``outputs/**_export/openvino`` folder. @@ -354,7 +354,7 @@ Optimization ************* 1. We can further optimize the model with ``otx optimize``. -It uses NNCF or POT depending on the model format. +It uses NNCF or PTQ depending on the model and transforms it to ``INT8`` format.
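The NNCF-versus-PTQ split described in these hunks comes down to the format of the weights passed to ``otx optimize``. A tiny illustrative helper (not the actual otx source) that mirrors the documented rule:

.. code-block:: python

    from pathlib import Path

    def select_optimization_backend(weights: str) -> str:
        """Mirror of the documented dispatch: NNCF for checkpoints, PTQ for IR."""
        suffix = Path(weights).suffix
        if suffix == ".pth":
            return "NNCF"  # accuracy-aware quantization from a trained checkpoint
        if suffix == ".xml":
            return "PTQ"   # post-training quantization of the exported IR model
        raise ValueError(f"Unsupported weights format: {suffix}")

    assert select_optimization_backend("../outputs/openvino/openvino.xml") == "PTQ"
    assert select_optimization_backend("../outputs/weights.pth") == "NNCF"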
Please, refer to :doc:`optimization explanation <../../../explanation/additional_features/models_optimization>` section to get the intuition of what we use under the hood for optimization purposes. @@ -371,13 +371,13 @@ a PyTorch model (`.pth`) with OpenVINO™ `NNCF ` section to get the intuition of what we use under the hood for optimization purposes. 2. Command example for optimizing @@ -234,18 +235,18 @@ a PyTorch model (`.pth`) with OpenVINO™ NNCF. The optimization time relies on the hardware characteristics, for example on 1 NVIDIA GeForce RTX 3090 and Intel(R) Core(TM) i9-10980XE it took about 15 minutes. 3. Command example for optimizing -OpenVINO™ model (.xml) with OpenVINO™ POT. +OpenVINO™ model (.xml) with OpenVINO™ PTQ. .. code-block:: (otx) ...$ otx optimize --load-weights openvino_model/openvino.xml \ - --output pot_model + --output ptq_model ... Performance(score: 0.9577656675749319, dashboard: (1 metric groups)) -Please note, that POT will take some time (generally less than NNCF optimization) without logging to optimize the model. +Please note, that PTQ will take some time (generally less than NNCF optimization) without logging to optimize the model. 4. Now we have fully trained, optimized and exported an efficient model representation ready-to-use semantic segmentation model. diff --git a/pyproject.toml b/pyproject.toml index 09381c67cb1..7cc86086753 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -15,7 +15,7 @@ build-backend = "setuptools.build_meta" # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # CIBUILDWHEEL CONFIGURATION. # [tool.cibuildwheel] -build = "cp37-manylinux_x86_64 cp38-manylinux_x86_64 cp39-manylinux_x86_64 cp310-manylinux_x86_64" +build = "cp38-manylinux_x86_64 cp39-manylinux_x86_64 cp310-manylinux_x86_64" # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # diff --git a/src/otx/algorithms/action/utils/convert_public_data_to_cvat.py b/src/otx/algorithms/action/utils/convert_public_data_to_cvat.py index 1553951a2b2..26dd67dc05b 100644 --- a/src/otx/algorithms/action/utils/convert_public_data_to_cvat.py +++ b/src/otx/algorithms/action/utils/convert_public_data_to_cvat.py @@ -4,16 +4,18 @@ Current Datumaro format for video (CVAT) -root -|- video_0 - |- images - |- frames_001.png - |- frames_002.png - |- annotations.xml -|- video_1 - |- images - |- annotations.xml -|- video_2 +:: + + root + |- video_0 + | |- images + | |- frames_001.png + | |- frames_002.png + | |- annotations.xml + |- video_1 + | |- images + | |- annotations.xml + |- video_2 """ diff --git a/src/otx/algorithms/anomaly/adapters/anomalib/data/dataset.py b/src/otx/algorithms/anomaly/adapters/anomalib/data/dataset.py index fbc92b1d9b7..3db5c341aff 100644 --- a/src/otx/algorithms/anomaly/adapters/anomalib/data/dataset.py +++ b/src/otx/algorithms/anomaly/adapters/anomalib/data/dataset.py @@ -1,4 +1,5 @@ """DataLoaders for Anomaly Tasks.""" + # Copyright (C) 2021 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/otx/algorithms/classification/tools/classification_sample.py b/src/otx/algorithms/classification/tools/classification_sample.py index 45104ddc19b..de9283dfbed 100644 --- a/src/otx/algorithms/classification/tools/classification_sample.py +++ b/src/otx/algorithms/classification/tools/classification_sample.py @@ -44,16 +44,19 @@ logger = get_logger() -parser = argparse.ArgumentParser(description="Sample showcasing the new API") -parser.add_argument("template_file_path", help="path 
to template file") -parser.add_argument("--export", action="store_true") -parser.add_argument("--multilabel", action="store_true") -parser.add_argument("--hierarchical", action="store_true") -args = parser.parse_args() +def parse_args(): + """Parse function for getting model template & check export.""" + parser = argparse.ArgumentParser(description="Sample showcasing the new API") + parser.add_argument("template_file_path", help="path to template file") + parser.add_argument("--export", action="store_true") + parser.add_argument("--multilabel", action="store_true") + parser.add_argument("--hierarchical", action="store_true") + return parser.parse_args() -def load_test_dataset(data_type): + +def load_test_dataset(data_type, args): """Load test dataset.""" import PIL from PIL import ImageDraw @@ -221,11 +224,11 @@ def validate(task, validation_dataset, model): print(str(resultset.performance)) -def main(): +def main(args): """Main of Classification Sample Test.""" logger.info("Train initial model with OLD dataset") - dataset, labels_list = load_test_dataset("old") + dataset, labels_list = load_test_dataset("old", args) labels_schema = get_label_schema(labels_list, multilabel=args.multilabel, hierarchical=args.hierarchical) logger.info(f"Train dataset: {len(dataset.get_subset(Subset.TRAINING))} items") @@ -363,4 +366,4 @@ def main(): if __name__ == "__main__": - sys.exit(main() or 0) + sys.exit(main(parse_args()) or 0) diff --git a/src/otx/algorithms/common/configs/training_base.py b/src/otx/algorithms/common/configs/training_base.py index 3ebe28e7d5e..8e924899ef7 100644 --- a/src/otx/algorithms/common/configs/training_base.py +++ b/src/otx/algorithms/common/configs/training_base.py @@ -1,18 +1,7 @@ """Base Configuration of OTX Common Algorithms.""" -# Copyright (C) 2022 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions -# and limitations under the License. +# Copyright (C) 2022-2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 from sys import maxsize @@ -42,6 +31,7 @@ class TrainType(ConfigurableEnum): Semisupervised = "Semisupervised" Selfsupervised = "Selfsupervised" Incremental = "Incremental" + Zeroshot = "Zeroshot" Futurework = "Futurework" @@ -241,6 +231,16 @@ class BasePostprocessing(ParameterGroup): affects_outcome_of=ModelLifecycle.INFERENCE, ) + max_num_detections = configurable_integer( + header="Maximum number of detection per image", + description="Extra detection outputs will be discared in non-maximum suppression process. 
" + "Defaults to 0, which means per-model default value.", + default_value=0, + min_value=0, + max_value=10000, + affects_outcome_of=ModelLifecycle.INFERENCE, + ) + use_ellipse_shapes = configurable_boolean( default_value=False, header="Use ellipse shapes", diff --git a/src/otx/algorithms/detection/adapters/mmdet/configurer.py b/src/otx/algorithms/detection/adapters/mmdet/configurer.py index 4a7097d3ff5..d72cba65b67 100644 --- a/src/otx/algorithms/detection/adapters/mmdet/configurer.py +++ b/src/otx/algorithms/detection/adapters/mmdet/configurer.py @@ -41,6 +41,25 @@ def configure_model(self, cfg, data_classes, model_classes, ir_options, **kwargs """Configuration for model config.""" super().configure_model(cfg, data_classes, model_classes, ir_options, **kwargs) self.configure_regularization(cfg) + self.configure_max_num_detections(cfg, kwargs.get("max_num_detections", 0)) + + def configure_max_num_detections(self, cfg, max_num_detections): + """Patch config for maximum number of detections.""" + if max_num_detections > 0: + logger.info(f"Model max_num_detections: {max_num_detections}") + test_cfg = cfg.model.test_cfg + test_cfg.max_per_img = max_num_detections + test_cfg.nms_pre = max_num_detections * 10 + # Special cases for 2-stage detectors (e.g. MaskRCNN) + if hasattr(test_cfg, "rpn"): + test_cfg.rpn.nms_pre = max_num_detections * 20 + test_cfg.rpn.max_per_img = max_num_detections * 10 + if hasattr(test_cfg, "rcnn"): + test_cfg.rcnn.max_per_img = max_num_detections + train_cfg = cfg.model.train_cfg + if hasattr(train_cfg, "rpn_proposal"): + train_cfg.rpn_proposal.nms_pre = max_num_detections * 20 + train_cfg.rpn_proposal.max_per_img = max_num_detections * 10 def configure_regularization(self, cfg): # noqa: C901 """Patch regularization parameters.""" diff --git a/src/otx/algorithms/detection/adapters/mmdet/task.py b/src/otx/algorithms/detection/adapters/mmdet/task.py index 92d5e212e62..3a210a6d30d 100644 --- a/src/otx/algorithms/detection/adapters/mmdet/task.py +++ b/src/otx/algorithms/detection/adapters/mmdet/task.py @@ -178,6 +178,7 @@ def configure(self, training=True, ir_options=None, train_dataset=None, export=F model_classes, self._input_size, train_dataset=train_dataset, + max_num_detections=self.max_num_detections, ) if should_cluster_anchors(self._recipe_cfg): if train_dataset is not None: @@ -486,6 +487,12 @@ def _export_model( assert len(self._precision) == 1 export_options["precision"] = str(self._precision[0]) export_options["type"] = str(export_format) + if self.max_num_detections > 0: + logger.info(f"Export max_num_detections: {self.max_num_detections}") + post_proc_cfg = export_options["deploy_cfg"]["codebase_config"]["post_processing"] + post_proc_cfg["max_output_boxes_per_class"] = self.max_num_detections + post_proc_cfg["keep_top_k"] = self.max_num_detections + post_proc_cfg["pre_top_k"] = self.max_num_detections * 10 export_options["deploy_cfg"]["dump_features"] = dump_features if dump_features: diff --git a/src/otx/algorithms/detection/adapters/openvino/task.py b/src/otx/algorithms/detection/adapters/openvino/task.py index a0e7eb9998c..c91d14e6430 100644 --- a/src/otx/algorithms/detection/adapters/openvino/task.py +++ b/src/otx/algorithms/detection/adapters/openvino/task.py @@ -1,18 +1,7 @@ """Openvino Task of Detection.""" -# Copyright (C) 2021 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions -# and limitations under the License. +# Copyright (C) 2021-2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 import copy import io @@ -630,6 +619,7 @@ def evaluate( f"Requested to use {evaluation_metric} metric, but parameter is ignored. Use F-measure instead." ) output_resultset.performance = MetricsHelper.compute_f_measure(output_resultset).get_performance() + logger.info(f"F-measure after evaluation: {output_resultset.performance}") logger.info("OpenVINO metric evaluation completed") def deploy(self, output_model: ModelEntity) -> None: diff --git a/src/otx/algorithms/detection/configs/base/configuration.py b/src/otx/algorithms/detection/configs/base/configuration.py index 147cbf38f09..188d447cfab 100644 --- a/src/otx/algorithms/detection/configs/base/configuration.py +++ b/src/otx/algorithms/detection/configs/base/configuration.py @@ -1,18 +1,7 @@ """Configuration file of OTX Detection.""" -# Copyright (C) 2022 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions -# and limitations under the License. +# Copyright (C) 2022-2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 from attr import attrs diff --git a/src/otx/algorithms/detection/configs/detection/configuration.yaml b/src/otx/algorithms/detection/configs/detection/configuration.yaml index ef3a46315bc..ace8da1a37a 100644 --- a/src/otx/algorithms/detection/configs/detection/configuration.yaml +++ b/src/otx/algorithms/detection/configs/detection/configuration.yaml @@ -294,6 +294,25 @@ postprocessing: value: 0.01 visible_in_ui: true warning: null + max_num_detections: + affects_outcome_of: INFERENCE + default_value: 0 + description: + Extra detection outputs will be discarded in non-maximum suppression process. + Defaults to 0, which means per-model default values. 
+ editable: true + header: Maximum number of detections per image + max_value: 10000 + min_value: 0 + type: INTEGER + ui_rules: + action: DISABLE_EDITING + operator: AND + rules: [] + type: UI_RULES + value: 0 + visible_in_ui: true + warning: null use_ellipse_shapes: affects_outcome_of: INFERENCE default_value: false diff --git a/src/otx/algorithms/detection/configs/instance_segmentation/configuration.yaml b/src/otx/algorithms/detection/configs/instance_segmentation/configuration.yaml index d18107fbb33..7dae320a37c 100644 --- a/src/otx/algorithms/detection/configs/instance_segmentation/configuration.yaml +++ b/src/otx/algorithms/detection/configs/instance_segmentation/configuration.yaml @@ -294,6 +294,25 @@ postprocessing: value: 0.01 visible_in_ui: true warning: null + max_num_detections: + affects_outcome_of: INFERENCE + default_value: 0 + description: + Extra detection outputs will be discarded in non-maximum suppression process. + Defaults to 0, which means per-model default values. + editable: true + header: Maximum number of detections per image + max_value: 10000 + min_value: 0 + type: INTEGER + ui_rules: + action: DISABLE_EDITING + operator: AND + rules: [] + type: UI_RULES + value: 0 + visible_in_ui: true + warning: null use_ellipse_shapes: affects_outcome_of: INFERENCE default_value: false diff --git a/src/otx/algorithms/detection/configs/instance_segmentation/convnext_maskrcnn/model.py b/src/otx/algorithms/detection/configs/instance_segmentation/convnext_maskrcnn/model.py index e35799ed7e0..a0069179b4f 100644 --- a/src/otx/algorithms/detection/configs/instance_segmentation/convnext_maskrcnn/model.py +++ b/src/otx/algorithms/detection/configs/instance_segmentation/convnext_maskrcnn/model.py @@ -6,7 +6,7 @@ # pylint: disable=invalid-name _base_ = [ - "../../../../../recipes/stages/instance-segmentation/incremental.py", + "../../../../../recipes/stages/instance_segmentation/incremental.py", "../../base/models/detector.py", ] @@ -115,9 +115,7 @@ nms=dict(type="nms", iou_threshold=0.7), min_bbox_size=0, ), - rcnn=dict( - score_thr=0.05, nms=dict(type="nms", iou_threshold=0.5, max_num=100), max_per_img=100, mask_thr_binary=0.5 - ), + rcnn=dict(score_thr=0.05, nms=dict(type="nms", iou_threshold=0.5), max_per_img=100, mask_thr_binary=0.5), ), ) diff --git a/src/otx/algorithms/detection/configs/instance_segmentation/efficientnetb2b_maskrcnn/model.py b/src/otx/algorithms/detection/configs/instance_segmentation/efficientnetb2b_maskrcnn/model.py index 65e751b21dc..a7aaa5ca571 100644 --- a/src/otx/algorithms/detection/configs/instance_segmentation/efficientnetb2b_maskrcnn/model.py +++ b/src/otx/algorithms/detection/configs/instance_segmentation/efficientnetb2b_maskrcnn/model.py @@ -17,7 +17,7 @@ # pylint: disable=invalid-name _base_ = [ - "../../../../../recipes/stages/instance-segmentation/incremental.py", + "../../../../../recipes/stages/instance_segmentation/incremental.py", "../../../../common/adapters/mmcv/configs/backbones/efficientnet_b2b.yaml", "../../base/models/detector.py", ] diff --git a/src/otx/algorithms/detection/configs/instance_segmentation/efficientnetb2b_maskrcnn/semisl/model.py b/src/otx/algorithms/detection/configs/instance_segmentation/efficientnetb2b_maskrcnn/semisl/model.py index 547a17db75e..17dae156d2b 100644 --- a/src/otx/algorithms/detection/configs/instance_segmentation/efficientnetb2b_maskrcnn/semisl/model.py +++ b/src/otx/algorithms/detection/configs/instance_segmentation/efficientnetb2b_maskrcnn/semisl/model.py @@ -6,7 +6,7 @@ # pylint: disable=invalid-name
_base_ = [ - "../../../../../../recipes/stages/instance-segmentation/semisl.py", + "../../../../../../recipes/stages/instance_segmentation/semisl.py", "../../../../../common/adapters/mmcv/configs/backbones/efficientnet_b2b.yaml", "../../../base/models/detector.py", ] diff --git a/src/otx/algorithms/detection/configs/instance_segmentation/maskrcnn_swin_t/model.py b/src/otx/algorithms/detection/configs/instance_segmentation/maskrcnn_swin_t/model.py index 9c41dd65052..8c2de6032d2 100644 --- a/src/otx/algorithms/detection/configs/instance_segmentation/maskrcnn_swin_t/model.py +++ b/src/otx/algorithms/detection/configs/instance_segmentation/maskrcnn_swin_t/model.py @@ -6,7 +6,7 @@ # pylint: disable=invalid-name _base_ = [ - "../../../../../recipes/stages/instance-segmentation/incremental.py", + "../../../../../recipes/stages/instance_segmentation/incremental.py", "../../base/models/detector.py", ] diff --git a/src/otx/algorithms/detection/configs/instance_segmentation/resnet50_maskrcnn/model.py b/src/otx/algorithms/detection/configs/instance_segmentation/resnet50_maskrcnn/model.py index d8918edc33f..e67f6352be9 100644 --- a/src/otx/algorithms/detection/configs/instance_segmentation/resnet50_maskrcnn/model.py +++ b/src/otx/algorithms/detection/configs/instance_segmentation/resnet50_maskrcnn/model.py @@ -1,23 +1,12 @@ """Model configuration of Resnet50-MaskRCNN model for Instance-Seg Task.""" -# Copyright (C) 2022 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions -# and limitations under the License. 
+# Copyright (C) 2022-2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 # pylint: disable=invalid-name _base_ = [ - "../../../../../recipes/stages/instance-segmentation/incremental.py", + "../../../../../recipes/stages/instance_segmentation/incremental.py", "../../../../common/adapters/mmcv/configs/backbones/resnet50.yaml", "../../base/models/detector.py", ] @@ -149,7 +138,7 @@ ), rcnn=dict( score_thr=0.05, - nms=dict(type="nms", iou_threshold=0.5, max_num=100), + nms=dict(type="nms", iou_threshold=0.5), max_per_img=100, mask_thr_binary=0.5, ), diff --git a/src/otx/algorithms/detection/configs/instance_segmentation/resnet50_maskrcnn/semisl/model.py b/src/otx/algorithms/detection/configs/instance_segmentation/resnet50_maskrcnn/semisl/model.py index 780c6cc8f39..6f9cce1a468 100644 --- a/src/otx/algorithms/detection/configs/instance_segmentation/resnet50_maskrcnn/semisl/model.py +++ b/src/otx/algorithms/detection/configs/instance_segmentation/resnet50_maskrcnn/semisl/model.py @@ -6,7 +6,7 @@ # pylint: disable=invalid-name _base_ = [ - "../../../../../../recipes/stages/instance-segmentation/semisl.py", + "../../../../../../recipes/stages/instance_segmentation/semisl.py", "../../../../../common/adapters/mmcv/configs/backbones/resnet50.yaml", "../../../base/models/detector.py", ] diff --git a/src/otx/algorithms/detection/configs/rotated_detection/configuration.yaml b/src/otx/algorithms/detection/configs/rotated_detection/configuration.yaml index eb2cecbb289..20b4faf33f2 100644 --- a/src/otx/algorithms/detection/configs/rotated_detection/configuration.yaml +++ b/src/otx/algorithms/detection/configs/rotated_detection/configuration.yaml @@ -313,6 +313,25 @@ postprocessing: warning: null type: PARAMETER_GROUP visible_in_ui: true + max_num_detections: + affects_outcome_of: INFERENCE + default_value: 0 + description: + Extra detection outputs will be discarded in non-maximum suppression process. + Defaults to 0, which means per-model default values. 
+ editable: true + header: Maximum number of detections per image + max_value: 10000 + min_value: 0 + type: INTEGER + ui_rules: + action: DISABLE_EDITING + operator: AND + rules: [] + type: UI_RULES + value: 0 + visible_in_ui: true + warning: null algo_backend: description: parameters for algo backend header: Algo backend parameters diff --git a/src/otx/algorithms/detection/configs/rotated_detection/efficientnetb2b_maskrcnn/model.py b/src/otx/algorithms/detection/configs/rotated_detection/efficientnetb2b_maskrcnn/model.py index 10001822570..5a7a818925c 100644 --- a/src/otx/algorithms/detection/configs/rotated_detection/efficientnetb2b_maskrcnn/model.py +++ b/src/otx/algorithms/detection/configs/rotated_detection/efficientnetb2b_maskrcnn/model.py @@ -7,7 +7,7 @@ # pylint: disable=invalid-name _base_ = [ - "../../../../../recipes/stages/instance-segmentation/incremental.py", + "../../../../../recipes/stages/instance_segmentation/incremental.py", "../../../../common/adapters/mmcv/configs/backbones/efficientnet_b2b.yaml", "../../base/models/detector.py", ] diff --git a/src/otx/algorithms/detection/configs/rotated_detection/resnet50_maskrcnn/model.py b/src/otx/algorithms/detection/configs/rotated_detection/resnet50_maskrcnn/model.py index eee17a545c7..5d528fe4796 100644 --- a/src/otx/algorithms/detection/configs/rotated_detection/resnet50_maskrcnn/model.py +++ b/src/otx/algorithms/detection/configs/rotated_detection/resnet50_maskrcnn/model.py @@ -7,7 +7,7 @@ # pylint: disable=invalid-name _base_ = [ - "../../../../../recipes/stages/instance-segmentation/incremental.py", + "../../../../../recipes/stages/instance_segmentation/incremental.py", "../../../../common/adapters/mmcv/configs/backbones/resnet50.yaml", "../../base/models/detector.py", ] @@ -139,7 +139,7 @@ ), rcnn=dict( score_thr=0.05, - nms=dict(type="nms", iou_threshold=0.5, max_num=100), + nms=dict(type="nms", iou_threshold=0.5), max_per_img=100, mask_thr_binary=0.5, ), diff --git a/src/otx/algorithms/detection/task.py b/src/otx/algorithms/detection/task.py index db50fecbb1e..78af633e2a1 100644 --- a/src/otx/algorithms/detection/task.py +++ b/src/otx/algorithms/detection/task.py @@ -75,14 +75,13 @@ def __init__(self, task_environment: TaskEnvironment, output_path: Optional[str] ) self._anchors: Dict[str, int] = {} - if ( - hasattr(self._hyperparams, "postprocessing") - and not getattr(self._hyperparams.postprocessing, "result_based_confidence_threshold", False) - and hasattr(self._hyperparams.postprocessing, "confidence_threshold") - ): - self.confidence_threshold = self._hyperparams.postprocessing.confidence_threshold - else: - self.confidence_threshold = 0.0 + self.confidence_threshold = 0.0 + self.max_num_detections = 0 + if hasattr(self._hyperparams, "postprocessing"): + if hasattr(self._hyperparams.postprocessing, "confidence_threshold"): + self.confidence_threshold = self._hyperparams.postprocessing.confidence_threshold + if hasattr(self._hyperparams.postprocessing, "max_num_detections"): + self.max_num_detections = self._hyperparams.postprocessing.max_num_detections if task_environment.model is not None: self._load_model() @@ -115,6 +114,12 @@ def _load_postprocessing(self, model_data): hparams.use_ellipse_shapes = loaded_postprocessing["use_ellipse_shapes"]["value"] else: hparams.use_ellipse_shapes = false + if "max_num_detections" in loaded_postprocessing: + trained_max_num_detections = loaded_postprocessing["max_num_detections"]["value"] + # Prefer a new hparam value intentionally set by the user (>0) over the trained value + if 
self.max_num_detections == 0: + self.max_num_detections = trained_max_num_detections + # If confidence threshold is adaptive then up-to-date value should be stored in the model # and should not be changed during inference. Otherwise user-specified value should be taken. if hparams.result_based_confidence_threshold: @@ -441,8 +446,8 @@ def evaluate( f"Requested to use {evaluation_metric} metric, " "but parameter is ignored. Use F-measure instead." ) metric = MetricsHelper.compute_f_measure(output_resultset) - logger.info(f"F-measure after evaluation: {metric.f_measure.value}") output_resultset.performance = metric.get_performance() + logger.info(f"F-measure after evaluation: {output_resultset.performance}") logger.info("Evaluation completed") def _add_predictions_to_dataset( diff --git a/src/otx/algorithms/segmentation/adapters/mmseg/models/utils/channel_shuffle.py b/src/otx/algorithms/segmentation/adapters/mmseg/models/utils/channel_shuffle.py index a86218532e1..a5697b7d984 100644 --- a/src/otx/algorithms/segmentation/adapters/mmseg/models/utils/channel_shuffle.py +++ b/src/otx/algorithms/segmentation/adapters/mmseg/models/utils/channel_shuffle.py @@ -17,8 +17,7 @@ def channel_shuffle(x, groups): Args: x (Tensor): The input tensor. - groups (int): The number of groups to divide the input tensor - in the channel dimension. + groups (int): The number of groups to divide the input tensor in the channel dimension. Returns: Tensor: The output tensor after channel shuffle operation. diff --git a/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/callbacks/__init__.py b/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/callbacks/__init__.py index cefcf725417..0b8f9b0c619 100644 --- a/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/callbacks/__init__.py +++ b/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/callbacks/__init__.py @@ -14,4 +14,4 @@ # See the License for the specific language governing permissions # and limitations under the License. -from .inference import InferenceCallback # noqa: F401 +from .inference import InferenceCallback, ZeroShotInferenceCallback # noqa: F401 diff --git a/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/callbacks/inference.py b/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/callbacks/inference.py index 6d077123f68..9aec96bde56 100644 --- a/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/callbacks/inference.py +++ b/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/callbacks/inference.py @@ -17,6 +17,7 @@ from typing import Any, List import numpy as np +import torch from bson import ObjectId from pytorch_lightning import LightningModule, Trainer from pytorch_lightning.callbacks import Callback @@ -25,6 +26,7 @@ from otx.api.entities.datasets import DatasetEntity from otx.api.entities.id import ID from otx.api.entities.image import Image +from otx.api.entities.label_schema import LabelSchemaEntity from otx.api.entities.scored_label import ScoredLabel from otx.api.utils.segmentation_utils import ( create_annotation_from_segmentation_map, @@ -94,3 +96,39 @@ def on_predict_epoch_end(self, _trainer: Trainer, _pl_module: LightningModule, o dataset_item.annotation_scene.append_annotations(annotations) else: dataset_item.append_annotations(annotations) + + +class ZeroShotInferenceCallback(Callback): + """Callback that updates otx_dataset during zero-shot inference. + + Args: + otx_dataset (DatasetEntity): Dataset that predictions will be updated. 
+ label_schema (LabelSchemaEntity): Label schema information. + """ + + def __init__(self, otx_dataset: DatasetEntity, label_schema: LabelSchemaEntity): + # TODO (sungchul): consider use_mask + self.otx_dataset = otx_dataset.with_empty_annotations() + self.label_schema = {int(label.id): label for label in label_schema.get_labels(include_empty=True)} + + def on_predict_epoch_end(self, _trainer: Trainer, _pl_module: LightningModule, outputs: List[Any]) -> None: + """Call when the predict epoch ends.""" + for batch_output, dataset_item in zip(outputs[0], self.otx_dataset): + # TODO (sungchul): currently, single batch inference is only supported + output = batch_output[0] + annotations: List[Annotation] = [] + for label, masks in output.items(): + hard_prediction = torch.where(torch.stack(masks, dim=0).sum(dim=0) > 0, 1, 0) + hard_prediction = hard_prediction.numpy() + + # TODO (sungchul): consider use_mask + # generate polygon annotations + annotation = create_annotation_from_segmentation_map( + hard_prediction=hard_prediction, + soft_prediction=hard_prediction, + label_map={1: self.label_schema.get(label)}, + ) + annotations.extend(annotation) + + # TODO (sungchul): consider use_mask + dataset_item.append_annotations(annotations) diff --git a/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/config/visual_prompting_config.py b/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/config/visual_prompting_config.py index 6a212c9cbb8..3e4cbe8b574 100644 --- a/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/config/visual_prompting_config.py +++ b/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/config/visual_prompting_config.py @@ -96,7 +96,13 @@ def update_visual_prompting_config( groups = getattr(otx_config, "groups", None) if groups: for group in groups: - if group in ["learning_parameters", "nncf_optimization", "pot_parameters", "postprocessing"]: + if group in [ + "learning_parameters", + "nncf_optimization", + "pot_parameters", + "postprocessing", + "algo_backend", + ]: if group in ["nncf_optimization"]: # TODO (sungchul): Consider nncf_optimization logger.warning(f"{group} will be implemented.") diff --git a/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/datasets/dataset.py b/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/datasets/dataset.py index 9f79eeda019..51b78e56880 100644 --- a/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/datasets/dataset.py +++ b/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/datasets/dataset.py @@ -24,6 +24,7 @@ from torch.utils.data import DataLoader, Dataset from torchvision import transforms +from otx.algorithms.common.configs.training_base import TrainType from otx.algorithms.visual_prompting.adapters.pytorch_lightning.datasets.pipelines import ( MultipleInputsCompose, Pad, @@ -129,6 +130,13 @@ def generate_bbox_from_mask(gt_mask: np.ndarray, width: int, height: int) -> Lis return generate_bbox(x_min, y_min, x_max, y_max, width, height) +def generate_point_from_mask(gt_mask: np.ndarray) -> np.ndarray: + """Randomly generate point from given mask.""" + candidates = np.where(gt_mask == 1) + index = np.random.permutation(len(candidates))[0] + return candidates[index] + + class OTXVisualPromptingDataset(Dataset): """Visual Prompting Dataset Adaptor. 
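One caution on ``generate_point_from_mask`` in the hunk above: as written it likely does not sample a foreground pixel. ``np.where(gt_mask == 1)`` returns a tuple of per-axis index arrays, so ``len(candidates)`` is the number of array dimensions (2 for a 2-D mask), not the number of candidate points, and indexing the tuple yields a whole coordinate axis rather than a single point. A corrected sketch of the intended behaviour:

.. code-block:: python

    import numpy as np

    def generate_point_from_mask(gt_mask: np.ndarray) -> np.ndarray:
        """Randomly sample a single (x, y) foreground point from a binary mask."""
        ys, xs = np.where(gt_mask == 1)     # per-axis indices of foreground pixels
        index = np.random.randint(len(ys))  # assumes at least one foreground pixel
        return np.array([xs[index], ys[index]])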
@@ -236,6 +244,27 @@ def __getitem__(self, index: int) -> Dict[str, Union[int, List, Tensor]]: return item +class OTXZeroShotVisualPromptingDataset(OTXVisualPromptingDataset): + """Visual Prompting for Zero-shot learning Dataset Adaptor.""" + + def __getitem__(self, index: int) -> Dict[str, Union[int, List, Tensor]]: + """Get dataset item. + + Args: + index (int): Index of the dataset sample. + + Returns: + Dict[str, Union[int, List, Tensor]]: Dataset item. + """ + dataset_item = self.dataset[index] + item: Dict[str, Union[int, Tensor]] = {"index": index, "images": dataset_item.numpy} + + prompts = self.get_prompts(dataset_item, self.labels) # , self.generate_point, self.generate_bbox) + item.update({**prompts, "path": dataset_item.media.path}) + item = self.transform(item) + return item + + class OTXVisualPromptingDataModule(LightningDataModule): """Visual Prompting DataModule. @@ -244,10 +273,39 @@ class OTXVisualPromptingDataModule(LightningDataModule): dataset (DatasetEntity): Dataset entity. """ - def __init__(self, config: Union[DictConfig, ListConfig], dataset: DatasetEntity) -> None: + DATASETS = { + TrainType.Incremental: OTXVisualPromptingDataset, + TrainType.Zeroshot: OTXZeroShotVisualPromptingDataset, + } + + def __init__( + self, + config: Union[DictConfig, ListConfig], + dataset: DatasetEntity, + train_type: TrainType = TrainType.Incremental, + ) -> None: super().__init__() self.config = config self.dataset = dataset + self.train_type = train_type + # self.kwargs = {} + if self.train_type == TrainType.Zeroshot: + # check zero-shot configs + if self.config.get("train_batch_size", 1) != 1: + logger.warning( + ( + f"Zero-shot learning only supports single batch, " + f"update {self.config.get('train_batch_size', 1)} to 1." + ) + ) + self.config["train_batch_size"] = 1 + + # self.kwargs.update( + # { + # "generate_point": self.config.get("generate_point", False), + # "generate_bbox": self.config.get("generate_bbox", False), + # } + # ) self.train_otx_dataset: DatasetEntity self.val_otx_dataset: DatasetEntity @@ -267,21 +325,34 @@ def setup(self, stage: Optional[str] = None) -> None: mean = self.config.normalize.mean std = self.config.normalize.std if stage == "fit" or stage is None: - train_otx_dataset = self.dataset.get_subset(Subset.TRAINING) - val_otx_dataset = self.dataset.get_subset(Subset.VALIDATION) - - self.train_dataset = OTXVisualPromptingDataset( - train_otx_dataset, image_size, mean, std, offset_bbox=self.config.offset_bbox + self.train_dataset = self.DATASETS[self.train_type]( + dataset=self.dataset.get_subset(Subset.TRAINING), + image_size=image_size, + mean=mean, + std=std, + offset_bbox=self.config.offset_bbox, + # **self.kwargs, ) - self.val_dataset = OTXVisualPromptingDataset(val_otx_dataset, image_size, mean, std) + + # self.val_dataset = None + if self.train_type == TrainType.Incremental: + self.val_dataset = self.DATASETS[self.train_type]( + dataset=self.dataset.get_subset(Subset.VALIDATION), image_size=image_size, mean=mean, std=std + ) if stage == "test": - test_otx_dataset = self.dataset.get_subset(Subset.TESTING) - self.test_dataset = OTXVisualPromptingDataset(test_otx_dataset, image_size, mean, std) + self.test_dataset = self.DATASETS[self.train_type]( + dataset=self.dataset.get_subset(Subset.TESTING), image_size=image_size, mean=mean, std=std + ) if stage == "predict": - predict_otx_dataset = self.dataset - self.predict_dataset = OTXVisualPromptingDataset(predict_otx_dataset, image_size, mean, std) + self.predict_dataset = self.DATASETS[self.train_type]( 
+ dataset=self.dataset, + image_size=image_size, + mean=mean, + std=std, + # **self.kwargs + ) def summary(self): """Print size of the dataset, number of images.""" diff --git a/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/datasets/pipelines/sam_transforms.py b/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/datasets/pipelines/sam_transforms.py index aeb0cc98baf..fd9b1a3057b 100644 --- a/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/datasets/pipelines/sam_transforms.py +++ b/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/datasets/pipelines/sam_transforms.py @@ -40,7 +40,7 @@ def __call__(self, item: Dict[str, Union[List, Tensor]]) -> Dict[str, Union[List item["gt_masks"] = [torch.as_tensor(gt_mask) for gt_mask in item["gt_masks"]] item["bboxes"] = self.apply_boxes(item["bboxes"], item["original_size"]) if item["points"]: - item["points"] = self.apply_coords(item["points"], item["original_size"]) + item["points"] = self.apply_coords(item["points"], item["original_size"], self.target_length) return item @classmethod @@ -57,21 +57,28 @@ def apply_image(cls, image: np.ndarray, target_length: int) -> np.ndarray: target_size = cls.get_preprocess_shape(image.shape[0], image.shape[1], target_length) return np.array(resize(to_pil_image(image), target_size)) - def apply_coords(self, coords: np.ndarray, original_size: Union[List[Any], Tensor]) -> np.ndarray: + @classmethod + def apply_coords( + cls, coords: Union[np.ndarray, Tensor], original_size: Union[List[Any], Tensor], target_length: int + ) -> np.ndarray: """Expects a numpy array of length 2 in the final dimension. Requires the original image size in (H, W) format. Args: - coords (np.ndarray): Coordinates array. + coords (Union[np.ndarray, Tensor]): Coordinates array. original_size (Union[List[Any], Tensor]): Original size of image. + target_length (int): The length of the longest side of the image. Returns: np.ndarray: Resized coordinates. """ old_h, old_w = original_size - new_h, new_w = self.get_preprocess_shape(original_size[0], original_size[1], self.target_length) - coords = deepcopy(coords).astype(float) + new_h, new_w = cls.get_preprocess_shape(original_size[0], original_size[1], target_length) + if isinstance(coords, np.ndarray): + coords = deepcopy(coords).astype(np.float32) + else: + coords = deepcopy(coords).to(torch.float32) coords[..., 0] = coords[..., 0] * (new_w / old_w) coords[..., 1] = coords[..., 1] * (new_h / old_h) return coords @@ -86,7 +93,7 @@ def apply_boxes(self, boxes: np.ndarray, original_size: Union[List[Any], Tensor] Returns: np.ndarray: Resized boxes. 
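The ``apply_coords`` refactor above (instance method to classmethod with an explicit ``target_length``) makes the rescaling rule easy to exercise standalone. A small worked example, assuming ``get_preprocess_shape`` scales the longest side to ``target_length`` while preserving the aspect ratio, as in the original SAM transform:

.. code-block:: python

    import numpy as np

    def get_preprocess_shape(old_h: int, old_w: int, target_length: int) -> tuple:
        # Longest side -> target_length, aspect ratio preserved (SAM-style rounding).
        scale = target_length / max(old_h, old_w)
        return int(old_h * scale + 0.5), int(old_w * scale + 0.5)

    old_h, old_w = 480, 640
    new_h, new_w = get_preprocess_shape(old_h, old_w, target_length=1024)  # (768, 1024)

    coords = np.array([[320.0, 240.0]], dtype=np.float32)  # one (x, y) point
    coords[..., 0] *= new_w / old_w  # x scaled by the width ratio
    coords[..., 1] *= new_h / old_h  # y scaled by the height ratio
    # coords is now [[512., 384.]] in the resized image's frame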
""" - boxes = self.apply_coords(boxes.reshape(-1, 2, 2), original_size) + boxes = self.apply_coords(boxes.reshape(-1, 2, 2), original_size, self.target_length) return boxes.reshape(-1, 4) @staticmethod diff --git a/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/models/__init__.py b/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/models/__init__.py index 7f9dcc70d88..49caf262735 100644 --- a/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/models/__init__.py +++ b/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/models/__init__.py @@ -3,4 +3,4 @@ # Copyright (C) 2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 -from .visual_prompters import SegmentAnything # noqa: F401 +from .visual_prompters import SegmentAnything, ZeroShotSegmentAnything # noqa: F401 diff --git a/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/models/backbones/vit.py b/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/models/backbones/vit.py index cd6300fca03..6ef1d934cad 100644 --- a/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/models/backbones/vit.py +++ b/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/models/backbones/vit.py @@ -357,7 +357,7 @@ def add_decomposed_rel_pos( q_size: Tuple[int, int], k_size: Tuple[int, int], ) -> Tensor: - """Calculate decomposed Relative Positional Embeddings from :paper:`mvitv2`. + """Calculate decomposed Relative Positional Embeddings from `mvitv2`. https://github.com/facebookresearch/mvit/blob/19786631e330df9f3622e5402b4a419a263a2c80/mvit/models/attention.py diff --git a/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/__init__.py b/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/__init__.py index 9d6eec48e1f..c7493b86fa6 100644 --- a/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/__init__.py +++ b/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/__init__.py @@ -4,3 +4,4 @@ # SPDX-License-Identifier: Apache-2.0 from .segment_anything import SegmentAnything # noqa: F401 +from .zero_shot_segment_anything import ZeroShotSegmentAnything # noqa: F401 diff --git a/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/segment_anything.py b/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/segment_anything.py index 12672dd939c..3b84daa72b8 100644 --- a/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/segment_anything.py +++ b/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/segment_anything.py @@ -552,8 +552,8 @@ def predict_step(self, batch, batch_idx) -> Dict[str, Tensor]: return dict(masks=masks, iou_predictions=iou_predictions, path=batch["path"], labels=batch["labels"]) + @staticmethod def postprocess_masks( - self, masks: Tensor, input_size: Tuple[int, int], padding: Tuple[int, ...], diff --git a/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/zero_shot_segment_anything.py b/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/zero_shot_segment_anything.py new file mode 100644 index 00000000000..a915862523c --- /dev/null +++ b/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/zero_shot_segment_anything.py @@ -0,0 +1,611 @@ +"""SAM module for visual 
prompting zero-shot learning.""" + +# Copyright (C) 2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +from collections import OrderedDict, defaultdict +from copy import deepcopy +from typing import Any, DefaultDict, Dict, List, Optional, Tuple + +import torch +from omegaconf import DictConfig +from torch import nn +from torch.nn import functional as F + +from otx.algorithms.visual_prompting.adapters.pytorch_lightning.datasets.pipelines import ResizeLongestSide +from otx.api.entities.scored_label import ScoredLabel +from otx.utils.logger import get_logger + +from .segment_anything import SegmentAnything + +logger = get_logger() + + +class PromptGetter(nn.Module): + """Prompt getter for zero-shot learning.""" + + default_threshold_reference = 0.3 + default_threshold_target = 0.65 + + def __init__(self, image_size: int) -> None: + super().__init__() + self.image_size = image_size + self.initialize() + + def initialize(self) -> None: + """Initialize reference features and prompts.""" + self.reference_feats: Dict[int, torch.Tensor] = {} + self.reference_prompts: Dict[int, torch.Tensor] = {} + + def set_default_thresholds(self, default_threshold_reference: float, default_threshold_target: float) -> None: + """Set default thresholds.""" + self.default_threshold_reference = default_threshold_reference + self.default_threshold_target = default_threshold_target + + def set_reference(self, label: ScoredLabel, reference_feats: torch.Tensor, reference_prompts: torch.Tensor) -> None: + """Set reference features and prompts.""" + self.reference_feats[int(label.id_)] = reference_feats + self.reference_prompts[int(label.id_)] = reference_prompts + + def forward( + self, + image_embeddings: torch.Tensor, + padding: Tuple[int, ...], + original_size: Tuple[int, int], + ) -> Dict[int, Tuple[torch.Tensor, torch.Tensor]]: + """Get prompt candidates.""" + target_feat = image_embeddings.squeeze() + c_feat, h_feat, w_feat = target_feat.shape + target_feat = self._preprocess_target_feat(target_feat, c_feat, h_feat, w_feat) + + prompts = {} + for label, reference_feat in self.reference_feats.items(): + sim = reference_feat.to(target_feat.device) @ target_feat + sim = sim.reshape(1, 1, h_feat, w_feat) + sim = ZeroShotSegmentAnything.postprocess_masks( + sim, (self.image_size, self.image_size), padding, original_size + ).squeeze() + + # threshold = 0.85 * sim.max() if num_classes > 1 else self.default_threshold_target + threshold = self.default_threshold_target + points_scores, bg_coords = self._point_selection(sim, original_size, threshold) + if points_scores is None: + # skip if there is no point with score > threshold + continue + prompts[label] = (points_scores, bg_coords) + return prompts + + def _preprocess_target_feat(self, target_feat: torch.Tensor, c_feat: int, h_feat: int, w_feat: int) -> torch.Tensor: + target_feat = target_feat / target_feat.norm(dim=0, keepdim=True) + target_feat = target_feat.reshape(c_feat, h_feat * w_feat) + return target_feat + + def _point_selection( + self, + mask_sim: torch.Tensor, + original_size: Tuple[int, int], + threshold: float, + num_bg_points: int = 1, + downsizing: int = 16, + ) -> Tuple[torch.Tensor, torch.Tensor]: + """Select point used as point prompts.""" + _, w_sim = mask_sim.shape + + # Top-last point selection + bg_indices = mask_sim.flatten().topk(num_bg_points, largest=False)[1] + bg_x = (bg_indices // w_sim).unsqueeze(0) + bg_y = bg_indices - bg_x * w_sim + bg_coords = torch.cat((bg_y, bg_x), dim=0).permute(1, 0) + bg_coords = bg_coords + + 
point_coords = torch.where(mask_sim > threshold) + if len(point_coords[0]) == 0: + return None, None + + fg_coords_scores = torch.stack(point_coords[::-1] + (mask_sim[point_coords],), dim=0).T + + max_len = max(original_size) + ratio = self.image_size / max_len + _, width = map(lambda x: int(x * ratio), original_size) + n_w = width // downsizing + + res = (fg_coords_scores[:, 1] * ratio // downsizing * n_w + fg_coords_scores[:, 0] * ratio // downsizing).to( + torch.int32 + ) + points_scores = torch.stack([fg_coords_scores[res == r][0] for r in torch.unique(res)], dim=0) + points_scores = points_scores[torch.argsort(points_scores[:, -1], descending=True)] + + return points_scores, bg_coords + + +class ZeroShotSegmentAnything(SegmentAnything): + """Zero-shot learning module using Segment Anything.""" + + def __init__(self, config: Optional[DictConfig] = None, state_dict: Optional[OrderedDict] = None) -> None: + if config is None: + config = self.set_default_config() + + if not config.model.freeze_image_encoder: + logger.warning("config.model.freeze_image_encoder(=False) must be set to True, changed.") + config.model.freeze_image_encoder = True + + if not config.model.freeze_prompt_encoder: + logger.warning("config.model.freeze_prompt_encoder(=False) must be set to True, changed.") + config.model.freeze_prompt_encoder = True + + if not config.model.freeze_mask_decoder: + logger.warning("config.model.freeze_mask_decoder(=False) must be set to True, changed.") + config.model.freeze_mask_decoder = True + + prompt_getter_reference_feats = None + prompt_getter_reference_prompts = None + if state_dict: + if "prompt_getter.reference_feats" in state_dict: + prompt_getter_reference_feats = state_dict.pop("prompt_getter.reference_feats") + if "prompt_getter.reference_prompts" in state_dict: + prompt_getter_reference_prompts = state_dict.pop("prompt_getter.reference_prompts") + + super().__init__(config, state_dict) + + self.prompt_getter = PromptGetter(image_size=config.model.image_size) + self.prompt_getter.initialize() + self.prompt_getter.set_default_thresholds( + config.model.default_threshold_reference, config.model.default_threshold_target + ) + + if prompt_getter_reference_feats: + self.prompt_getter.reference_feats = prompt_getter_reference_feats + if prompt_getter_reference_prompts: + self.prompt_getter.reference_prompts = prompt_getter_reference_prompts + + def set_default_config(self) -> DictConfig: + """Set default config when using independently.""" + return DictConfig( + { + "model": { + "backbone": "tiny_vit", + "checkpoint": "https://github.com/ChaoningZhang/MobileSAM/raw/master/weights/mobile_sam.pt", + "default_threshold_reference": 0.3, + "default_threshold_target": 0.65, + "freeze_image_encoder": True, + "freeze_mask_decoder": True, + "freeze_prompt_encoder": True, + "image_size": 1024, + "mask_threshold": 0.0, + } + } + ) + + @torch.no_grad() + def learn( + self, + images: torch.Tensor, + processed_prompts: Dict[ScoredLabel, List[Dict[str, torch.Tensor]]], + padding: Tuple[int, ...], + original_size: Tuple[int, int], + ) -> None: + """Get reference features. + + Using given images, get reference features and save it to PromptGetter. + These reference features will be used for `infer` to get target results. + Currently, single batch is only supported. + + Args: + images (torch.Tensor): Given images for reference features. + processed_prompts (Dict[ScoredLabel, List[Dict[str, torch.Tensor]]]): The whole class-wise prompts + processed at _preprocess_prompts. 
+ padding (Tuple[int, ...]): Padding size. + original_size (Tuple[int, int]): Original image size. + """ + assert images.shape[0] == 1, "Only single batch is supported." + + self.prompt_getter.initialize() + + image_embeddings = self.image_encoder(images) + ref_feat = image_embeddings.squeeze().permute(1, 2, 0) + + for label, input_prompts in processed_prompts.items(): + if label.name.lower() == "background": + # skip background + # TODO (sungchul): how to skip background class + continue + + # generate reference mask + # TODO (sungchul): ensemble multi reference features (current : use merged masks) + reference_prompt = torch.zeros(original_size, dtype=torch.uint8, device=images.device) + for input_prompt in input_prompts: + if "annotation" in input_prompt: + # directly use annotation information as a mask + reference_prompt[input_prompt.get("annotation") == 1] += 1 + else: + merged_input_prompts = self._merge_prompts(label, input_prompt, processed_prompts) + masks, scores, logits = self._predict_mask( + image_embeddings=image_embeddings, + input_prompts=merged_input_prompts, + padding=padding, + original_size=original_size, + multimask_output=True, + ) + best_idx = torch.argmax(scores) + reference_prompt[masks[0, best_idx]] += 1 + reference_prompt = torch.clip(reference_prompt, 0, 1) + + ref_mask = torch.tensor(reference_prompt, dtype=torch.float32) + reference_feat = None + default_threshold_reference = deepcopy(self.prompt_getter.default_threshold_reference) + while reference_feat is None: + logger.info(f"[*] default_threshold_reference : {default_threshold_reference:.4f}") + reference_feat = self._generate_masked_features( + ref_feat, ref_mask, default_threshold_reference, padding=padding + ) + default_threshold_reference -= 0.05 + + self.prompt_getter.set_reference(label, reference_feat.detach().cpu(), reference_prompt.detach().cpu()) + + @torch.no_grad() + def infer( + self, images: torch.Tensor, padding: Tuple[int, ...], original_size: Tuple[int, int] + ) -> List[List[DefaultDict[int, List[torch.Tensor]]]]: + """Zero-shot inference with reference features. + + Get target results by using reference features and target images' features. + + Args: + images (torch.Tensor): Given images for target results. + padding (Tuple[int, ...]): Padding size. + original_size (Tuple[int, int]): Original image size. + + Returns: + (List[List[DefaultDict[int, List[torch.Tensor]]]]): Target results. + Lists wrapping results is following this order: + 1. Target images + 2. Tuple of predicted masks and used points gotten by point selection + """ + assert images.shape[0] == 1, "Only single batch is supported." 
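The matching step that drives ``infer`` is compact enough to exercise in isolation: reference features stored by ``learn`` are compared against per-location target features through a dot product over L2-normalized channels, i.e. cosine similarity, exactly as ``PromptGetter.forward`` and ``_preprocess_target_feat`` do above. A shape-only sketch with random tensors and illustrative dimensions:

.. code-block:: python

    import torch

    C, H, W = 256, 64, 64
    target_feat = torch.randn(C, H, W)
    target_feat = target_feat / target_feat.norm(dim=0, keepdim=True)  # normalize per location
    target_feat = target_feat.reshape(C, H * W)                        # as in _preprocess_target_feat

    reference_feat = torch.randn(1, C)
    reference_feat = reference_feat / reference_feat.norm(dim=-1, keepdim=True)

    sim = (reference_feat @ target_feat).reshape(H, W)  # cosine similarity map
    fg_candidates = torch.nonzero(sim > 0.65)           # default_threshold_target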
+ + total_results = [] + # num_classes = len(self.reference_feats.keys()) + for image in images: + if image.ndim == 3: + image = image.unsqueeze(0) + + image_embeddings = self.image_encoder(images) + + prompts = self.prompt_getter( + image_embeddings=image_embeddings, padding=padding, original_size=original_size + ) + predicted_masks: defaultdict = defaultdict(list) + used_points: defaultdict = defaultdict(list) + for label, (points_scores, bg_coords) in prompts.items(): + for points_score in points_scores: + x, y = points_score[:2] + is_done = False + for pm in predicted_masks.get(label, []): + # check if that point is already assigned + if pm[int(y), int(x)] > 0: + is_done = True + break + if is_done: + continue + + mask, used_point_score = self( + image_embeddings=image_embeddings, + points_score=points_score, + bg_coords=bg_coords, + padding=padding, + original_size=original_size, + ) + predicted_masks[label].append(mask) + used_points[label].append(used_point_score) + + total_results.append([predicted_masks, used_points]) + return total_results + + @torch.no_grad() + def forward( + self, + image_embeddings: torch.Tensor, + points_score: torch.Tensor, + bg_coords: torch.Tensor, + padding: Tuple[int, ...], + original_size: Tuple[int, int], + ) -> Tuple[torch.Tensor, torch.Tensor]: + """Predict point prompts and predicted masks. + + Args: + image_embeddings (torch.Tensor): The image embedding with a batch index of length 1. + points_score (torch.Tensor): Foreground point prompts from point selection algorithm. + bg_coords (torch.Tensor): Background point prompts from point selection algorithm. + padding (Tuple[int, ...]): Padding size. + original_size (Tuple[int, int]): Original image size. + + Returns: + (Tuple[torch.Tensor, torch.Tensor]): Predicted masks and used points with corresponding score. 
+ """ + point_coords = torch.cat((points_score[:2].unsqueeze(0), bg_coords), dim=0).unsqueeze(0) + point_coords = ResizeLongestSide.apply_coords(point_coords, original_size, self.config.model.image_size) + point_labels = torch.tensor([1] + [0] * len(bg_coords), dtype=torch.int32).unsqueeze(0) + mask = self._predict_target_mask( + image_embeddings=image_embeddings, + input_prompts={"points": (point_coords, point_labels)}, + padding=padding, + original_size=original_size, + ) + + return mask.detach().cpu().to(torch.uint8), points_score.detach().cpu() + + def training_step(self, batch, batch_idx) -> None: + """Training step for `learn`.""" + # TODO (sungchul): each prompt will be assigned with each label + bboxes = batch["bboxes"] + labels = batch["labels"] + # TODO (sungchul): support other below prompts + # points = batch["points"] + # annotations = batch["annotations"] + + # organize prompts based on label + processed_prompts = self._preprocess_prompts(bboxes=bboxes[0], labels=labels[0]) + + self.learn( + images=batch["images"], + processed_prompts=processed_prompts, + padding=batch.get("padding")[0], + original_size=batch.get("original_size")[0], + ) + + def predict_step(self, batch, batch_idx): + """Predict step for `infer`.""" + results = self.infer( + images=batch["images"], padding=batch.get("padding")[0], original_size=batch.get("original_size")[0] + ) + return [result[0] for result in results] # tmp: only mask + + def _preprocess_prompts( + self, + bboxes: Optional[torch.Tensor] = None, + points: Optional[torch.Tensor] = None, + annotations: Optional[torch.Tensor] = None, + labels: Optional[torch.Tensor] = None, + ) -> Dict[ScoredLabel, List[Dict[str, torch.Tensor]]]: + """Preprocess prompts. + + Currently, preprocessing for bounding boxes is only supported. + + Args: + bboxes (torch.Tensor, optional): Bounding box prompts to be preprocessed. + points (torch.Tensor, optional): Point prompts to be preprocessed, to be supported. + annotations (torch.Tensor, optional): annotation prompts to be preprocessed, to be supported. + labels (torch.Tensor, optional): Assigned labels according to given prompts. + Currently, it is only matched to bboxes, and it will be deprecated. + + Returns: + (defaultdict[ScoredLabel, List[Dict[str, torch.Tensor]]]): Processed and arranged each single prompt + using label information as keys. Unlike other prompts, `annotation` prompts will be aggregated + as single annotation. + """ + processed_prompts = defaultdict(list) + # TODO (sungchul): will be updated + if bboxes is not None: + for bbox, label in zip(bboxes, labels): + processed_prompts[label].append({"box": bbox.reshape(-1, 4)}) + + if points: + pass + + if annotations: + pass + + processed_prompts = dict(sorted(processed_prompts.items(), key=lambda x: x[0].id_)) # type: ignore[assignment] + return processed_prompts + + def _generate_masked_features( + self, feats: torch.Tensor, masks: torch.Tensor, threshold_mask: float, padding: Optional[Tuple[int, ...]] = None + ) -> Tuple[torch.Tensor, ...]: + """Generate masked features. + + Args: + feats (torch.Tensor): Raw reference features. It will be filtered with masks. + masks (torch.Tensor): Reference masks used to filter features. + threshold_mask (float): Threshold to control masked region. + padding (Tuple[int, ...], optional): Padding size. + + Returns: + (torch.Tensor): Masked features. 
+ """ + if padding: + resized_size = ( + self.config.model.image_size - padding[1] - padding[3], + self.config.model.image_size - padding[0] - padding[2], + ) + else: + resized_size = (self.config.model.image_size, self.config.model.image_size) + + # Post-process masks + masks = F.interpolate(masks.unsqueeze(0).unsqueeze(0), size=resized_size, mode="bilinear").squeeze() + masks = self._preprocess_mask(masks) + masks = F.interpolate(masks.unsqueeze(0).unsqueeze(0), size=feats.shape[0:2], mode="bilinear").squeeze() + + # Target feature extraction + if (masks > threshold_mask).sum() == 0: + # (for stability) there is no area to be extracted + return None, None + + masked_feat = feats[masks > threshold_mask] + masked_feat = masked_feat.mean(0).unsqueeze(0) + masked_feat = masked_feat / masked_feat.norm(dim=-1, keepdim=True) + + return masked_feat + + def _preprocess_mask(self, x: torch.Tensor) -> torch.Tensor: + """Normalize pixel values and pad to a square input. + + Args: + x (torch.Tensor): Mask to be padded. + + Returns: + (torch.Tensor): Padded mask. + """ + # Pad + h, w = x.shape[-2:] + padh = self.config.model.image_size - h + padw = self.config.model.image_size - w + x = F.pad(x, (0, padw, 0, padh)) + return x + + def _update_value(self, target: Dict[str, Any], key: str, value: torch.Tensor) -> None: + """Update tensor to target dictionary. + + Args: + target (Dict[str, Any]): Target dictionary to be updated. + key (str): Key to be used for update. + value (torch.Tensor): Value to be used for update. + """ + if key in target: + target[key] = torch.cat((target[key], value)) + else: + target[key] = value + + def _merge_prompts( + self, + label: ScoredLabel, + input_prompts: Dict[str, torch.Tensor], + processed_prompts: Dict[ScoredLabel, List[Dict[str, torch.Tensor]]], + use_only_background: bool = True, + ) -> Dict[str, torch.Tensor]: + """Merge target prompt and other prompts. + + Merge a foreground prompt and other prompts (background or prompts with other classes). + + Args: + label (ScoredLabel): Label information. Background is 0 and other foregrounds are >= 0. + input_prompts (Dict[str, torch.Tensor]): A foreground prompt to be merged with other prompts. + processed_prompts (Dict[ScoredLabel, List[Dict[str, torch.Tensor]]]): The whole class-wise prompts + processed at _preprocess_prompts. + use_only_background (bool): Whether merging only background prompt, defaults to True. + It is applied to only point_coords. + + Returns: + (Dict[str, torch.Tensor]): Merged prompts. 
+ """ + merged_input_prompts = deepcopy(input_prompts) + for other_label, other_input_prompts in processed_prompts.items(): + if other_label.id_ == label.id_: + continue + if (use_only_background and other_label.id_ == 0) or (not use_only_background): + # only add point (and scribble) prompts + # use_only_background=True -> background prompts are only added as background + # use_only_background=False -> other prompts are added as background + for other_input_prompt in other_input_prompts: + if "point_coords" in other_input_prompt: + # point, scribble + self._update_value(merged_input_prompts, "point_coords", other_input_prompt.get("point_coords")) + self._update_value( + merged_input_prompts, + "point_labels", + torch.zeros_like(other_input_prompt.get("point_labels")), + ) + return merged_input_prompts + + def _predict_target_mask( + self, + image_embeddings: torch.Tensor, + input_prompts: Dict[str, Tuple[torch.Tensor, torch.Tensor]], + padding: Tuple[int, ...], + original_size: Tuple[int, int], + ) -> torch.Tensor: + """Predict target masks. + + Args: + image_embeddings (torch.Tensor): The image embedding with a batch index of length 1. + input_prompts (Dict[str, Tuple[torch.Tensor, torch.Tensor]]): Dictionary including point, box, + and mask prompts. index=1 of tuple is point labels which indicate whether foreground or background. + padding (Tuple[int, ...]): Padding size. + original_size (Tuple[int, int]): Original image size. + + Return: + (torch.Tensor): Predicted mask. + """ + # First-step prediction + _, _, logits = self._predict_mask( + image_embeddings, input_prompts, padding, original_size, multimask_output=False + ) + best_idx = 0 + + # Cascaded Post-refinement-1 + input_prompts.update({"masks": logits[:, best_idx : best_idx + 1, :, :]}) + masks, scores, logits = self._predict_mask( + image_embeddings, input_prompts, padding, original_size, multimask_output=True + ) + best_idx = torch.argmax(scores) + + # Cascaded Post-refinement-2 + coords = torch.nonzero(masks[0, best_idx]) + y, x = coords[:, 0], coords[:, 1] + x_min = x.min() + x_max = x.max() + y_min = y.min() + y_max = y.max() + input_prompts.update( + { + "masks": logits[:, best_idx : best_idx + 1, :, :], + "box": torch.tensor([x_min, y_min, x_max, y_max], device=logits.device), + } + ) + masks, scores, _ = self._predict_mask( + image_embeddings, input_prompts, padding, original_size, multimask_output=True + ) + best_idx = torch.argmax(scores) + + return masks[0, best_idx] + + def _predict_mask( + self, + image_embeddings: torch.Tensor, + input_prompts: Dict[str, torch.Tensor], + padding: Tuple[int, ...], + original_size: Tuple[int, int], + multimask_output: bool = True, + ) -> Tuple[torch.Tensor, ...]: + """Predict target masks. + + Args: + image_embeddings (torch.Tensor): The image embedding with a batch index of length 1. + input_prompts (Dict[str, torch.Tensor]): Dictionary including point, box, and mask prompts. + padding (Tuple[int, ...]): Padding size. + original_size (Tuple[int, int]): Original image size. + multimask_output (bool): Whether getting multi mask outputs or not. Defaults to True. + + Return: + (Tuple[torch.Tensor, ...]): Predicted mask, score, and logit. 
+ """ + sparse_embeddings, dense_embeddings = self.prompt_encoder( + points=input_prompts.get("points", None), + boxes=input_prompts.get("box", None), # TODO (sungchul): change key box -> boxes to use **input_prompts + masks=input_prompts.get("masks", None), + ) + + low_res_masks, scores = self.mask_decoder( + image_embeddings=image_embeddings, + image_pe=self.prompt_encoder.get_dense_pe(), + sparse_prompt_embeddings=sparse_embeddings, + dense_prompt_embeddings=dense_embeddings, + multimask_output=multimask_output, + ) + high_res_masks = self.postprocess_masks( + low_res_masks, (self.config.model.image_size, self.config.model.image_size), padding, original_size + ) + masks = high_res_masks > self.config.model.mask_threshold + + return masks, scores, low_res_masks + + def set_metrics(self) -> None: + """Skip set_metrics unused in zero-shot learning.""" + pass + + def configure_optimizers(self) -> None: + """Skip configure_optimizers unused in zero-shot learning.""" + pass + + def training_epoch_end(self, outputs) -> None: + """Skip training_epoch_end unused in zero-shot learning.""" + pass diff --git a/src/otx/algorithms/visual_prompting/configs/base/configuration.py b/src/otx/algorithms/visual_prompting/configs/base/configuration.py index d9cdae0eaeb..44998684aec 100644 --- a/src/otx/algorithms/visual_prompting/configs/base/configuration.py +++ b/src/otx/algorithms/visual_prompting/configs/base/configuration.py @@ -43,6 +43,11 @@ class __LearningParameters(BaseConfig.BaseLearningParameters): header = string_attribute("Learning Parameters") description = header + @attrs + class __AlgoBackend(BaseConfig.BaseAlgoBackendParameters): + header = string_attribute("Parameters for the OTX algo-backend") + description = header + @attrs class __Postprocessing(ParameterGroup): header = string_attribute("Postprocessing") @@ -112,5 +117,6 @@ class __POTParameter(BaseConfig.BasePOTParameter): ) learning_parameters = add_parameter_group(__LearningParameters) + algo_backend = add_parameter_group(__AlgoBackend) postprocessing = add_parameter_group(__Postprocessing) pot_parameters = add_parameter_group(__POTParameter) diff --git a/src/otx/algorithms/visual_prompting/configs/configuration.yaml b/src/otx/algorithms/visual_prompting/configs/configuration.yaml index 3c216dc6220..86ea7154d7d 100644 --- a/src/otx/algorithms/visual_prompting/configs/configuration.yaml +++ b/src/otx/algorithms/visual_prompting/configs/configuration.yaml @@ -169,5 +169,67 @@ postprocessing: warning: null type: PARAMETER_GROUP visible_in_ui: true +algo_backend: + description: parameters for algo backend + header: Algo backend parameters + train_type: + affects_outcome_of: TRAINING + default_value: Incremental + description: Training scheme option that determines how to train the model + editable: True + enum_name: TrainType + header: Train type + options: + Incremental: "Incremental" + Zeroshot: "Zeroshot" + type: SELECTABLE + ui_rules: + action: DISABLE_EDITING + operator: AND + rules: [] + type: UI_RULES + value: Incremental + visible_in_ui: false + warning: null + mem_cache_size: + affects_outcome_of: TRAINING + default_value: 1000000000 + description: Size of memory pool for caching decoded data to load data faster (bytes). 
+ editable: true + header: Size of memory pool + max_value: 9223372036854775807 + min_value: 0 + type: INTEGER + ui_rules: + action: DISABLE_EDITING + operator: AND + rules: [] + type: UI_RULES + visible_in_ui: false + warning: null + storage_cache_scheme: + affects_outcome_of: TRAINING + default_value: NONE + description: Scheme for storage cache + editable: true + enum_name: StorageCacheScheme + header: Scheme for storage cache + options: + NONE: "NONE" + AS_IS: "AS-IS" + JPEG_75: "JPEG/75" + JPEG_95: "JPEG/95" + PNG: "PNG" + TIFF: "TIFF" + type: SELECTABLE + ui_rules: + action: DISABLE_EDITING + operator: AND + rules: [] + type: UI_RULES + visible_in_ui: false + warning: null + type: PARAMETER_GROUP + visible_in_ui: false type: CONFIGURABLE_PARAMETERS visible_in_ui: true diff --git a/src/otx/algorithms/visual_prompting/configs/zero_shot_sam_tiny_vit/__init__.py b/src/otx/algorithms/visual_prompting/configs/zero_shot_sam_tiny_vit/__init__.py new file mode 100644 index 00000000000..7703180b940 --- /dev/null +++ b/src/otx/algorithms/visual_prompting/configs/zero_shot_sam_tiny_vit/__init__.py @@ -0,0 +1,6 @@ +"""Initialization of Configurable Parameters for SAM Visual Prompting Task.""" + +# Copyright (C) 2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +from .configuration import VisualPromptingConfig # noqa: F401 diff --git a/src/otx/algorithms/visual_prompting/configs/zero_shot_sam_tiny_vit/config.yaml b/src/otx/algorithms/visual_prompting/configs/zero_shot_sam_tiny_vit/config.yaml new file mode 100644 index 00000000000..bd923e0b6b7 --- /dev/null +++ b/src/otx/algorithms/visual_prompting/configs/zero_shot_sam_tiny_vit/config.yaml @@ -0,0 +1,78 @@ +dataset: + task: visual_prompting + train_batch_size: 1 + val_batch_size: 1 + test_batch_size: 1 + num_workers: 4 + image_size: 1024 # dimensions to which images are resized (mandatory) + normalize: + mean: + - 123.675 + - 116.28 + - 103.53 + std: + - 58.395 + - 57.12 + - 57.375 + offset_bbox: 0 + +model: + name: SAM + image_size: 1024 + mask_threshold: 0. + return_logits: true + backbone: tiny_vit + freeze_image_encoder: true + freeze_prompt_encoder: true + freeze_mask_decoder: true + checkpoint: https://github.com/ChaoningZhang/MobileSAM/raw/master/weights/mobile_sam.pt + # just for inference + return_single_mask: false + use_stability_score: false + stability_score_offset: 1. + return_extra_metrics: false + # zero-shot + default_threshold_reference: 0.3 + default_threshold_target: 0.65 + +# PL Trainer Args. Don't add extra parameter here. +trainer: + enable_checkpointing: false + gradient_clip_val: 0 + gradient_clip_algorithm: norm + num_nodes: 1 + devices: 1 + enable_progress_bar: true + overfit_batches: 0.0 + track_grad_norm: -1 + check_val_every_n_epoch: 1 # Don't validate before extracting features. 
+ fast_dev_run: false + accumulate_grad_batches: 1 + max_epochs: 1 + min_epochs: null + max_steps: -1 + min_steps: null + max_time: null + limit_train_batches: 1.0 + limit_val_batches: 0 # No validation + limit_test_batches: 1.0 + limit_predict_batches: 1.0 + val_check_interval: 1.0 + log_every_n_steps: 10 + accelerator: auto # <"cpu", "gpu", "tpu", "ipu", "hpu", "auto"> + strategy: null + sync_batchnorm: false + precision: 32 + enable_model_summary: true + num_sanity_val_steps: 0 + profiler: null + benchmark: false + deterministic: false + reload_dataloaders_every_n_epochs: 0 + auto_lr_find: false + replace_sampler_ddp: true + detect_anomaly: false + auto_scale_batch_size: false + plugins: null + move_metrics_to_cpu: false + multiple_trainloader_mode: max_size_cycle diff --git a/src/otx/algorithms/visual_prompting/configs/zero_shot_sam_tiny_vit/configuration.py b/src/otx/algorithms/visual_prompting/configs/zero_shot_sam_tiny_vit/configuration.py new file mode 100644 index 00000000000..166e904997e --- /dev/null +++ b/src/otx/algorithms/visual_prompting/configs/zero_shot_sam_tiny_vit/configuration.py @@ -0,0 +1,14 @@ +"""Configuration file of OTX Visual Prompting.""" + +# Copyright (C) 2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + + +from attr import attrs + +from otx.algorithms.visual_prompting.configs.base import VisualPromptingBaseConfig + + +@attrs +class VisualPromptingConfig(VisualPromptingBaseConfig): + """Configurable parameters for Visual Prompting task.""" diff --git a/src/otx/algorithms/visual_prompting/configs/zero_shot_sam_tiny_vit/configuration.yaml b/src/otx/algorithms/visual_prompting/configs/zero_shot_sam_tiny_vit/configuration.yaml new file mode 100644 index 00000000000..e88c783c396 --- /dev/null +++ b/src/otx/algorithms/visual_prompting/configs/zero_shot_sam_tiny_vit/configuration.yaml @@ -0,0 +1,210 @@ +description: Configuration for SAM +header: Configuration for SAM +id: "" +learning_parameters: + description: Learning Parameters + header: Learning Parameters + type: PARAMETER_GROUP + visible_in_ui: true + trainer: + description: Trainer Parameters + header: Trainer Parameters + type: PARAMETER_GROUP + visible_in_ui: true + max_epochs: + affects_outcome_of: TRAINING + default_value: 1 + description: + Maximum number of epochs to train for. If not specified, the training will + run until the early stopping criteria is met. + editable: true + header: Maximum number of epochs + max_value: 1 + min_value: 1 + type: INTEGER + value: 1 + dataset: + description: Dataset Parameters + header: Dataset Parameters + type: PARAMETER_GROUP + visible_in_ui: true + use_mask: + header: Flag about using mask as label + affects_outcome_of: TRAINING + default_value: false + description: If using mask as-is (true) or converting it to polygon (false) + editable: true + value: false + type: BOOLEAN + train_batch_size: + affects_outcome_of: TRAINING + auto_hpo_state: not_possible + auto_hpo_value: null + default_value: 2 + description: + The number of training samples seen in each iteration of training. + Increasing this value improves training time and may make the training more + stable. A larger batch size has higher memory requirements. 
+ editable: true + header: Batch size + max_value: 512 + min_value: 1 + type: INTEGER + ui_rules: + action: DISABLE_EDITING + operator: AND + rules: [] + type: UI_RULES + value: 32 + visible_in_ui: true + warning: + Increasing this value may cause the system to use more memory than available, + potentially causing out of memory errors, please update with caution. +pot_parameters: + description: POT Parameters + header: POT Parameters + preset: + affects_outcome_of: NONE + auto_hpo_state: not_possible + auto_hpo_value: null + default_value: Mixed + description: Quantization preset that defines quantization scheme + editable: true + enum_name: POTQuantizationPreset + header: Preset + options: + MIXED: Mixed + PERFORMANCE: Performance + type: SELECTABLE + ui_rules: + action: DISABLE_EDITING + operator: AND + rules: [] + type: UI_RULES + value: Mixed + visible_in_ui: true + warning: null + stat_subset_size: + affects_outcome_of: NONE + auto_hpo_state: not_possible + auto_hpo_value: null + default_value: 300 + description: Number of data samples used for post-training optimization + editable: true + header: Number of data samples + max_value: 100000 + min_value: 1 + type: INTEGER + ui_rules: + action: DISABLE_EDITING + operator: AND + rules: [] + type: UI_RULES + value: 300 + visible_in_ui: true + warning: null + type: PARAMETER_GROUP + visible_in_ui: false +postprocessing: + confidence_threshold: + affects_outcome_of: INFERENCE + default_value: 0.5 + description: + This threshold only takes effect if the threshold is not set based + on the result. + editable: true + header: Confidence threshold + max_value: 1 + min_value: 0 + type: FLOAT + ui_rules: + action: DISABLE_EDITING + operator: AND + rules: [] + type: UI_RULES + value: 0.5 + visible_in_ui: true + warning: null + description: Postprocessing + header: Postprocessing + result_based_confidence_threshold: + affects_outcome_of: INFERENCE + default_value: false + description: Confidence threshold is derived from the results + editable: true + header: Result based confidence threshold + type: BOOLEAN + ui_rules: + action: DISABLE_EDITING + operator: AND + rules: [] + type: UI_RULES + value: false + visible_in_ui: true + warning: null + type: PARAMETER_GROUP + visible_in_ui: true +algo_backend: + description: parameters for algo backend + header: Algo backend parameters + train_type: + affects_outcome_of: TRAINING + default_value: Incremental + description: Training scheme option that determines how to train the model + editable: True + enum_name: TrainType + header: Train type + options: + Incremental: "Incremental" + Zeroshot: "Zeroshot" + type: SELECTABLE + ui_rules: + action: DISABLE_EDITING + operator: AND + rules: [] + type: UI_RULES + value: Incremental + visible_in_ui: false + warning: null + mem_cache_size: + affects_outcome_of: TRAINING + default_value: 1000000000 + description: Size of memory pool for caching decoded data to load data faster (bytes). 
+    editable: true
+    header: Size of memory pool
+    max_value: 9223372036854775807
+    min_value: 0
+    type: INTEGER
+    ui_rules:
+      action: DISABLE_EDITING
+      operator: AND
+      rules: []
+      type: UI_RULES
+    visible_in_ui: false
+    warning: null
+  storage_cache_scheme:
+    affects_outcome_of: TRAINING
+    default_value: NONE
+    description: Scheme for storage cache
+    editable: true
+    enum_name: StorageCacheScheme
+    header: Scheme for storage cache
+    options:
+      NONE: "NONE"
+      AS_IS: "AS-IS"
+      JPEG_75: "JPEG/75"
+      JPEG_95: "JPEG/95"
+      PNG: "PNG"
+      TIFF: "TIFF"
+    type: SELECTABLE
+    ui_rules:
+      action: DISABLE_EDITING
+      operator: AND
+      rules: []
+      type: UI_RULES
+    visible_in_ui: false
+    warning: null
+  type: PARAMETER_GROUP
+  visible_in_ui: false
+type: CONFIGURABLE_PARAMETERS
+visible_in_ui: true
diff --git a/src/otx/algorithms/visual_prompting/configs/zero_shot_sam_tiny_vit/ptq_optimization_config.py b/src/otx/algorithms/visual_prompting/configs/zero_shot_sam_tiny_vit/ptq_optimization_config.py
new file mode 100644
index 00000000000..9496ea6e22b
--- /dev/null
+++ b/src/otx/algorithms/visual_prompting/configs/zero_shot_sam_tiny_vit/ptq_optimization_config.py
@@ -0,0 +1,22 @@
+"""PTQ config file."""
+from nncf.parameters import ModelType
+from nncf.quantization.advanced_parameters import AdvancedQuantizationParameters
+from nncf.quantization.range_estimator import (
+    AggregatorType,
+    RangeEstimatorParameters,
+    StatisticsCollectorParameters,
+    StatisticsType,
+)
+
+advanced_parameters = AdvancedQuantizationParameters(
+    activations_range_estimator_params=RangeEstimatorParameters(
+        min=StatisticsCollectorParameters(
+            statistics_type=StatisticsType.QUANTILE, aggregator_type=AggregatorType.MIN, quantile_outlier_prob=1e-4
+        ),
+        max=StatisticsCollectorParameters(
+            statistics_type=StatisticsType.QUANTILE, aggregator_type=AggregatorType.MAX, quantile_outlier_prob=1e-4
+        ),
+    ),
+)
+
+model_type = ModelType.TRANSFORMER
diff --git a/src/otx/algorithms/visual_prompting/configs/zero_shot_sam_tiny_vit/template_experimental.yaml b/src/otx/algorithms/visual_prompting/configs/zero_shot_sam_tiny_vit/template_experimental.yaml
new file mode 100644
index 00000000000..63ff5d3d9d4
--- /dev/null
+++ b/src/otx/algorithms/visual_prompting/configs/zero_shot_sam_tiny_vit/template_experimental.yaml
@@ -0,0 +1,38 @@
+# Description.
+model_template_id: Zero_Shot_SAM_Tiny_ViT
+name: Zero_Shot_SAM_Tiny_ViT
+task_type: VISUAL_PROMPTING
+task_family: VISION
+instantiation: "CLASS"
+summary: Zero-Shot Visual Prompting with TinyViT for accurate predictions
+application: ~
+
+# Algo backend.
+framework: OTXVisualPrompting v0.1.0
+
+# Task implementations.
+entrypoints:
+  base: otx.algorithms.visual_prompting.tasks.ZeroShotTask
+  openvino: otx.algorithms.visual_prompting.tasks.openvino.OpenVINOVisualPromptingTask
+
+# Hyper Parameters
+hyper_parameters:
+  base_path: ./configuration.yaml
+  parameter_overrides:
+    learning_parameters:
+      dataset:
+        train_batch_size:
+          default_value: 1
+    algo_backend:
+      train_type:
+        default_value: Zeroshot
+
+# Training resources.
+max_nodes: 1 +training_targets: + - GPU + - CPU + +# Computational Complexity +gigaflops: 38.95 +size: 47 diff --git a/src/otx/algorithms/visual_prompting/tasks/__init__.py b/src/otx/algorithms/visual_prompting/tasks/__init__.py index 9efa2d6ddf9..a4c0a3e0366 100644 --- a/src/otx/algorithms/visual_prompting/tasks/__init__.py +++ b/src/otx/algorithms/visual_prompting/tasks/__init__.py @@ -3,6 +3,6 @@ # Copyright (C) 2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 -from .inference import InferenceTask # noqa: F401 +from .inference import InferenceTask, ZeroShotTask # noqa: F401 from .openvino import OpenVINOVisualPromptingTask # noqa: F401 from .train import TrainingTask # noqa: F401 diff --git a/src/otx/algorithms/visual_prompting/tasks/inference.py b/src/otx/algorithms/visual_prompting/tasks/inference.py index 9358bd93242..ea8a1fbf869 100644 --- a/src/otx/algorithms/visual_prompting/tasks/inference.py +++ b/src/otx/algorithms/visual_prompting/tasks/inference.py @@ -30,10 +30,13 @@ from omegaconf import DictConfig, ListConfig from pytorch_lightning import LightningModule, Trainer from pytorch_lightning.callbacks import TQDMProgressBar +from pytorch_lightning.loggers import CSVLogger +from otx.algorithms.common.configs.training_base import TrainType from otx.algorithms.common.utils import set_random_seed from otx.algorithms.visual_prompting.adapters.pytorch_lightning.callbacks import ( InferenceCallback, + ZeroShotInferenceCallback, ) from otx.algorithms.visual_prompting.adapters.pytorch_lightning.config import ( get_visual_promtping_config, @@ -55,6 +58,7 @@ ) from otx.api.entities.resultset import ResultSetEntity from otx.api.entities.task_environment import TaskEnvironment +from otx.api.entities.train_parameters import TrainParameters from otx.api.serialization.label_mapper import label_schema_to_bytes from otx.api.usecases.evaluation.metrics_helper import MetricsHelper from otx.api.usecases.tasks.interfaces.evaluate_interface import IEvaluationTask @@ -84,6 +88,8 @@ def __init__(self, task_environment: TaskEnvironment, output_path: Optional[str] self.task_type = task_environment.model_template.task_type self.model_name = task_environment.model_template.name self.labels = task_environment.get_labels() + self.hyper_parameters: VisualPromptingBaseConfig = self.task_environment.get_hyper_parameters() + self.train_type = self.hyper_parameters.algo_backend.train_type # type: ignore[attr-defined] template_file_path = task_environment.model_template.model_template_path self.base_dir = os.path.abspath(os.path.dirname(template_file_path)) @@ -128,8 +134,6 @@ def get_config(self) -> Union[DictConfig, ListConfig]: Returns: Union[DictConfig, ListConfig]: Visual Prompting config. """ - self.hyper_parameters: VisualPromptingBaseConfig = self.task_environment.get_hyper_parameters() - # set checkpoints model_checkpoint: Optional[str] = None resume_from_checkpoint: Optional[str] = None @@ -167,13 +171,18 @@ def load_model(self, otx_model: Optional[ModelEntity] = None) -> LightningModule LightningModule: Visual prompting model with/without weights. 
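+
+        Example:
+            A sketch of the dispatch performed below; `task` denotes this task instance and
+            the call arguments are illustrative:
+
+            >>> # train_type == TrainType.Incremental -> SegmentAnything
+            >>> # train_type == TrainType.Zeroshot -> ZeroShotSegmentAnything
+            >>> model = task.load_model(otx_model=None)  # class chosen from hyper_parameters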
""" - def get_model(config: DictConfig, state_dict: Optional[OrderedDict] = None): + def get_model(config: DictConfig, train_type: TrainType, state_dict: Optional[OrderedDict] = None): if config.model.name == "SAM": - from otx.algorithms.visual_prompting.adapters.pytorch_lightning.models import ( - SegmentAnything, - ) + if train_type == TrainType.Incremental: + from otx.algorithms.visual_prompting.adapters.pytorch_lightning.models import ( + SegmentAnything as VisualPrompter, + ) + elif train_type == TrainType.Zeroshot: + from otx.algorithms.visual_prompting.adapters.pytorch_lightning.models import ( + ZeroShotSegmentAnything as VisualPrompter, + ) - model = SegmentAnything(config=config, state_dict=state_dict) + model = VisualPrompter(config=config, state_dict=state_dict) else: raise NotImplementedError( (f"Current selected model {config.model.name} is not implemented. " f"Use SAM instead.") @@ -216,7 +225,7 @@ def get_model(config: DictConfig, state_dict: Optional[OrderedDict] = None): state_dict = model_data try: - model = get_model(config=self.config, state_dict=state_dict) + model = get_model(config=self.config, train_type=self.train_type, state_dict=state_dict) logger.info("Complete to load model.") except BaseException as exception: raise ValueError("Could not load the saved model. The model file structure is invalid.") from exception @@ -238,7 +247,9 @@ def infer(self, dataset: DatasetEntity, inference_parameters: InferenceParameter """ logger.info("Performing inference on the validation set using the base torch model.") self.model = self.load_model(otx_model=self.task_environment.model) - datamodule = OTXVisualPromptingDataModule(config=self.config.dataset, dataset=dataset) + datamodule = OTXVisualPromptingDataModule( + config=self.config.dataset, dataset=dataset, train_type=self.train_type + ) logger.info("Inference Configs '%s'", self.config) @@ -464,3 +475,99 @@ def _delete_scratch_space(self) -> None: """Remove model checkpoints and otx logs.""" if os.path.exists(self.output_path): shutil.rmtree(self.output_path, ignore_errors=False) + + +class ZeroShotTask(InferenceTask): + """Learn task for Zero-shot learning. + + **There are two ways to be decided: + 1. use it independently <-- temporarily current setting + 2. use it depending on template + + The objective of this task is to get reference features and export it with decoder modules. + """ + + def train( # noqa: D102 + self, + dataset: DatasetEntity, + output_model: ModelEntity, + train_parameters: TrainParameters, + seed: Optional[int] = None, + deterministic: bool = False, + ) -> None: + logger.info("Training the model.") + + self.seed = seed + self.deterministic = deterministic + self.set_seed() + self.config.trainer.deterministic = "warn" if deterministic else deterministic + + logger.info(f"Training Configs {self.config}") + + self.model = self.load_model(otx_model=self.task_environment.model) + + datamodule = OTXVisualPromptingDataModule( + config=self.config.dataset, dataset=dataset, train_type=self.train_type + ) + + self.trainer = Trainer( + logger=CSVLogger(save_dir=self.output_path, name=".", version=self.timestamp), **self.config.trainer + ) + self.trainer.fit(model=self.model, datamodule=datamodule) + + # save resulting model + self.save_model(output_model) + + def infer(self, dataset: DatasetEntity, inference_parameters: InferenceParameters) -> DatasetEntity: + """Perform inference on a dataset. + + Args: + dataset (DatasetEntity): Dataset to infer. 
+ inference_parameters (InferenceParameters): Inference parameters. + + Returns: + DatasetEntity: Output dataset with predictions. + """ + logger.info("Performing inference on the validation set using the base torch model.") + self.model = self.load_model(otx_model=self.task_environment.model) + datamodule = OTXVisualPromptingDataModule( + config=self.config.dataset, dataset=dataset, train_type=self.train_type + ) + + logger.info("Inference Configs '%s'", self.config) + + # Callbacks + inference_callback = ZeroShotInferenceCallback( + otx_dataset=dataset, label_schema=self.task_environment.label_schema + ) + callbacks = [TQDMProgressBar(), inference_callback] + + self.trainer = Trainer(**self.config.trainer, logger=False, callbacks=callbacks) + self.trainer.predict(model=self.model, datamodule=datamodule) + + return inference_callback.otx_dataset + + def save_model(self, output_model: ModelEntity) -> None: + """Save the model after training is completed. + + Args: + output_model (ModelEntity): Output model onto which the weights are saved. + """ + logger.info("Saving the model weights and reference features.") + + model_info = self.model.state_dict() + # TODO (sungchul): is there more efficient way not to manually add properties? + model_info.update( + { + "prompt_getter.reference_feats": self.model.prompt_getter.reference_feats, + "prompt_getter.reference_prompts": self.model.prompt_getter.reference_prompts, + } + ) + + buffer = io.BytesIO() + torch.save(model_info, buffer) + output_model.set_data("weights.pth", buffer.getvalue()) + output_model.set_data("label_schema.json", label_schema_to_bytes(self.task_environment.label_schema)) + + output_model.precision = self.precision + output_model.optimization_methods = self.optimization_methods diff --git a/src/otx/algorithms/visual_prompting/tasks/train.py b/src/otx/algorithms/visual_prompting/tasks/train.py index 344601b7b01..fc2b5311d2d 100644 --- a/src/otx/algorithms/visual_prompting/tasks/train.py +++ b/src/otx/algorithms/visual_prompting/tasks/train.py @@ -71,7 +71,9 @@ def train( # noqa: D102 self.model = self.load_model(otx_model=self.task_environment.model) - datamodule = OTXVisualPromptingDataModule(config=self.config.dataset, dataset=dataset) + datamodule = OTXVisualPromptingDataModule( + config=self.config.dataset, dataset=dataset, train_type=self.train_type + ) loggers = CSVLogger(save_dir=self.output_path, name=".", version=self.timestamp) callbacks = [ TQDMProgressBar(), diff --git a/src/otx/api/configuration/__init__.py b/src/otx/api/configuration/__init__.py index 5f765627168..e706fd27c4a 100644 --- a/src/otx/api/configuration/__init__.py +++ b/src/otx/api/configuration/__init__.py @@ -4,10 +4,6 @@ functions to interact with them. The configuration helper module can be imported as `otx_config_helper` and implements the following: - -.. automodule:: otx.api.configuration.helper - :members: - """ # Copyright (C) 2021-2022 Intel Corporation # SPDX-License-Identifier: Apache-2.0 diff --git a/src/otx/api/configuration/helper/convert.py b/src/otx/api/configuration/helper/convert.py index 3d9a2a2b0ec..350293655f7 100644 --- a/src/otx/api/configuration/helper/convert.py +++ b/src/otx/api/configuration/helper/convert.py @@ -105,8 +105,8 @@ def convert( config (ConfigurableParameters): ConfigurableParameters object to convert target (Type[ConvertTypeVar]): target type to convert to. Options are [str, dict, DictConfig] enum_to_str (bool) : Boolean specifying whether to convert enums within the config - to their string representation. 
For conversion to yaml, enums - are automatically converted and this option is disregarded. + to their string representation. For conversion to yaml, enums are automatically converted and this option + is disregarded. id_to_str (bool): True to convert the id of the configurable parameters to a string representation, False to leave it as an ID object values_only (bool): True to keep only the parameter values, and remove all meta diff --git a/src/otx/api/configuration/helper/substitute.py b/src/otx/api/configuration/helper/substitute.py index bf24e884a3b..d0a1926b76f 100644 --- a/src/otx/api/configuration/helper/substitute.py +++ b/src/otx/api/configuration/helper/substitute.py @@ -172,8 +172,7 @@ def substitute_values_for_lifecycle( Args: config (ConfigurableParameters): ConfigurableParameter object to substitute values into - value_input (ConfigurableParameters): ConfigurableParameters to take the values to be substituted - from. + value_input (ConfigurableParameters): ConfigurableParameters to take the values to be substituted from. model_lifecycle (Union[ModelLifecycle, Sequence[ModelLifecycle]]): Phase or list of phases in the model lifecycle to carry out the substitution for. For example, if `model_lifecycle = ModelLifecycle.INFERENCE` is passed, only parameters that diff --git a/src/otx/api/usecases/__init__.py b/src/otx/api/usecases/__init__.py index 933c7db5f8b..3bea124f765 100644 --- a/src/otx/api/usecases/__init__.py +++ b/src/otx/api/usecases/__init__.py @@ -1,10 +1,4 @@ -"""Utilities and use cases built on top of OTX API. - -.. automodule:: otx.api.usecases.adapters - :members: - :undoc-members: - -""" +"""Utilities and use cases built on top of OTX API.""" # Copyright (C) 2022 Intel Corporation # SPDX-License-Identifier: Apache-2.0 diff --git a/src/otx/api/usecases/evaluation/f_measure.py b/src/otx/api/usecases/evaluation/f_measure.py index cc845ef7609..4837fcf193a 100644 --- a/src/otx/api/usecases/evaluation/f_measure.py +++ b/src/otx/api/usecases/evaluation/f_measure.py @@ -19,7 +19,7 @@ LineChartInfo, LineMetricsGroup, MetricsGroup, - Performance, + MultiScorePerformance, ScoreMetric, TextChartInfo, TextMetricsGroup, @@ -205,6 +205,7 @@ class _AggregatedResults: - all_classes_f_measure_curve - best_f_measure - best_threshold + - best_f_measure_metrics Args: classes (List[str]): List of classes. 
@@ -217,6 +218,7 @@ def __init__(self, classes: List[str]): self.all_classes_f_measure_curve: List[float] = [] self.best_f_measure: float = 0.0 self.best_threshold: float = 0.0 + self.best_f_measure_metrics: _Metrics = _Metrics(0.0, 0.0, 0.0) class _OverallResults: @@ -364,6 +366,7 @@ def get_results_per_confidence( if all_classes_f_measure > 0.0 and all_classes_f_measure >= result.best_f_measure: result.best_f_measure = all_classes_f_measure result.best_threshold = confidence_threshold + result.best_f_measure_metrics = result_point[ALL_CLASSES_NAME] return result def get_results_per_nms( @@ -418,6 +421,7 @@ def get_results_per_nms( if all_classes_f_measure > 0.0 and all_classes_f_measure >= result.best_f_measure: result.best_f_measure = all_classes_f_measure result.best_threshold = nms_threshold + result.best_f_measure_metrics = result_point[ALL_CLASSES_NAME] return result def evaluate_classes( @@ -693,6 +697,8 @@ def __init__( self.f_measure_per_label[label] = ScoreMetric( name=label.name, value=result.best_f_measure_per_class[label.name] ) + self._precision = ScoreMetric(name="Precision", value=result.per_confidence.best_f_measure_metrics.precision) + self._recall = ScoreMetric(name="Recall", value=result.per_confidence.best_f_measure_metrics.recall) self._f_measure_per_confidence: Optional[CurveMetric] = None self._best_confidence_threshold: Optional[ScoreMetric] = None @@ -752,13 +758,12 @@ def best_nms_threshold(self) -> Optional[ScoreMetric]: """Returns the best NMS threshold as ScoreMetric if exists.""" return self._best_nms_threshold - def get_performance(self) -> Performance: + def get_performance(self) -> MultiScorePerformance: """Returns the performance which consists of the F-Measure score and the dashboard metrics. Returns: - Performance: Performance object containing the F-Measure score and the dashboard metrics. + MultiScorePerformance: MultiScorePerformance object containing the F-Measure scores and the dashboard metrics. """ - score = self.f_measure dashboard_metrics: List[MetricsGroup] = [] dashboard_metrics.append( BarMetricsGroup( @@ -813,7 +818,11 @@ def get_performance(self) -> Performance: ), ) ) - return Performance(score=score, dashboard_metrics=dashboard_metrics) + return MultiScorePerformance( + primary_score=self.f_measure, + additional_scores=[self._precision, self._recall], + dashboard_metrics=dashboard_metrics, + ) @staticmethod def __get_boxes_from_dataset_as_list( diff --git a/src/otx/api/usecases/reporting/__init__.py b/src/otx/api/usecases/reporting/__init__.py index 8d27e8621f8..2b3bb382475 100644 --- a/src/otx/api/usecases/reporting/__init__.py +++ b/src/otx/api/usecases/reporting/__init__.py @@ -1,15 +1,4 @@ -"""Training reporting. - -.. automodule:: otx.api.usecases.reporting.callback - :members: - :undoc-members: - -.. automodule:: otx.api.usecases.reporting.time_monitor_callback - :members: - :undoc-members: - -""" - +"""Training reporting.""" # Copyright (C) 2021-2022 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # diff --git a/src/otx/cli/tools/eval.py b/src/otx/cli/tools/eval.py index 2ed3b22a477..f609f5d3ded 100644 --- a/src/otx/cli/tools/eval.py +++ b/src/otx/cli/tools/eval.py @@ -49,8 +49,8 @@ def get_args(): ) parser.add_argument( "--load-weights", - help="Load model weights from previously saved checkpoint." - "It could be a trained/optimized model (POT only) or exported model.", + help="Load model weights from previously saved checkpoint. 
" + "It could be a trained/optimized model (with PTQ only) or exported model.", ) parser.add_argument( "-o", @@ -153,10 +153,12 @@ def main(): ) task.evaluate(resultset) assert resultset.performance is not None - print(resultset.performance) output_path = Path(args.output) if args.output else config_manager.output_path performance = {resultset.performance.score.name: resultset.performance.score.value} + if hasattr(resultset.performance, "additional_scores"): + for metric in resultset.performance.additional_scores: + performance[metric.name] = metric.value if hasattr(task, "avg_time_per_image"): performance["avg_time_per_image"] = task.avg_time_per_image with open(output_path / "performance.json", "w", encoding="UTF-8") as write_file: diff --git a/src/otx/cli/tools/explain.py b/src/otx/cli/tools/explain.py index b6e70cb9dc7..7cb276eed2f 100644 --- a/src/otx/cli/tools/explain.py +++ b/src/otx/cli/tools/explain.py @@ -50,12 +50,14 @@ def get_args(): parser, hyper_parameters, params = get_parser_and_hprams_data() parser.add_argument( - "--explain-data-roots", + "-i", + "--input", required=True, help="Comma-separated paths to explain data folders.", ) parser.add_argument( - "--save-explanation-to", + "-o", + "--output", default="saliency_dump", help="Output path for explanation images.", ) @@ -123,10 +125,7 @@ def _log_after_saving(explain_predicted_classes, explained_image_counter, args, "Please adjust training pipeline or use different model-data pair." ) if explained_image_counter > 0: - logger.info( - f"Saliency maps saved to {args.save_explanation_to} for {explained_image_counter} " - f"out of {num_images} images." - ) + logger.info(f"Saliency maps saved to {args.output} for {explained_image_counter} out of {num_images} images.") def main(): @@ -169,10 +168,10 @@ def main(): f"{args.explain_algorithm} currently not supported. 
\ Currently only support {SUPPORTED_EXPLAIN_ALGORITHMS}" ) - if not Path(args.save_explanation_to).exists(): - Path(args.save_explanation_to).mkdir(parents=True) + if not Path(args.output).exists(): + Path(args.output).mkdir(parents=True) - image_files = get_image_files(args.explain_data_roots) + image_files = get_image_files(args.input) dataset_to_explain = get_explain_dataset_from_filelist(image_files) explain_predicted_classes = not args.explain_all_classes explain_parameters = ExplainParameters( @@ -201,7 +200,7 @@ def main(): process_saliency_maps=explain_parameters.process_saliency_maps, img=explained_data.numpy, saliency_map=saliency_data.numpy, - save_dir=args.save_explanation_to, + save_dir=args.output, fname=fname, weight=args.overlay_weight, ) diff --git a/src/otx/cli/tools/utils/demo/visualization.py b/src/otx/cli/tools/utils/demo/visualization.py index 09edd4abfe2..e405d61ff7c 100644 --- a/src/otx/cli/tools/utils/demo/visualization.py +++ b/src/otx/cli/tools/utils/demo/visualization.py @@ -71,7 +71,7 @@ def draw_masks(frame: Mat, predictions, put_object_count: bool = False): cv2.drawContours(frame, contours, -1, color, 1) rect = cv2.boundingRect(contours[0]) cv2.rectangle(frame, (rect[0], rect[1]), (rect[0] + rect[2], rect[1] + rect[3]), color, 1) - put_text_on_rect_bg(frame, label.name, (rect[0], rect[1]), color=color) + put_text_on_rect_bg(frame, f"{label.name} {label.probability*100:.1f}%", (rect[0], rect[1]), color=color) cv2.bitwise_or(aggregated_mask, mask, dst=aggregated_mask) cv2.bitwise_or( aggregated_colored_mask, @@ -110,7 +110,7 @@ def put_labels(frame: Mat, predictions: List[Annotation]): assert len(predictions[0].get_labels()) == 1 label = predictions[0].get_labels()[0] color = tuple(getattr(label.color, x) for x in ("blue", "green", "red")) - put_text_on_rect_bg(frame, label.name, (0, 0), color=color) + put_text_on_rect_bg(frame, f"{label.name} {label.probability*100:.1f}%", (0, 0), color=color) return frame @@ -129,7 +129,7 @@ def draw_bounding_boxes(frame: Mat, predictions: List[Annotation], put_object_co label = prediction.get_labels()[0] color = tuple(getattr(label.color, x) for x in ("blue", "green", "red")) cv2.rectangle(frame, (x1, y1), (x2, y2), color, thickness=2) - put_text_on_rect_bg(frame, label.name, (x1, y1), color=color) + put_text_on_rect_bg(frame, f"{label.name} {label.probability*100:.1f}%", (x1, y1), color=color) else: warn( f"Predictions called on Annotations with shape {type(prediction.shape)}." 
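
The three visualization helpers above now share one pattern: the overlay caption carries the label's confidence next to its name. A standalone sketch of just that formatting, with made-up values:

```python
# Compose the overlay caption the same way the patched draw helpers do.
label_name, probability = "car", 0.873  # hypothetical prediction
caption = f"{label_name} {probability*100:.1f}%"
assert caption == "car 87.3%"
```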
diff --git a/src/otx/core/data/adapter/base_dataset_adapter.py b/src/otx/core/data/adapter/base_dataset_adapter.py index 51b62a0cede..52195de4d0f 100644 --- a/src/otx/core/data/adapter/base_dataset_adapter.py +++ b/src/otx/core/data/adapter/base_dataset_adapter.py @@ -39,6 +39,7 @@ from otx.api.entities.media import IMediaEntity from otx.api.entities.model_template import TaskType from otx.api.entities.scored_label import ScoredLabel +from otx.api.entities.shapes.ellipse import Ellipse from otx.api.entities.shapes.polygon import Point, Polygon from otx.api.entities.shapes.rectangle import Rectangle from otx.api.entities.subset import Subset @@ -350,6 +351,21 @@ def _get_polygon_entity( labels=[ScoredLabel(label=self.label_entities[annotation.label])], ) + def _get_ellipse_entity( + self, annotation: DatumAnnotation, width: int, height: int, num_polygons: int = -1 + ) -> Annotation: + """Get ellipse entity.""" + ellipse = Ellipse( + annotation.x1 / (width - 1), + annotation.y1 / (height - 1), + annotation.x2 / (width - 1), + annotation.y2 / (height - 1), + ) + return Annotation( + ellipse, + labels=[ScoredLabel(label=self.label_entities[annotation.label])], + ) + def _get_mask_entity(self, annotation: DatumAnnotation) -> Annotation: """Get mask entity.""" mask = Image(data=annotation.image, size=annotation.image.shape) diff --git a/src/otx/core/data/adapter/detection_dataset_adapter.py b/src/otx/core/data/adapter/detection_dataset_adapter.py index a6ce1b2bce5..612ab303f23 100644 --- a/src/otx/core/data/adapter/detection_dataset_adapter.py +++ b/src/otx/core/data/adapter/detection_dataset_adapter.py @@ -37,14 +37,15 @@ def get_otx_dataset(self) -> DatasetEntity: assert isinstance(image, Image) shapes = [] for ann in datumaro_item.annotations: - if ( - self.task_type in (TaskType.INSTANCE_SEGMENTATION, TaskType.ROTATED_DETECTION) - and ann.type == DatumAnnotationType.polygon - ): - if self._is_normal_polygon(ann): + if self.task_type in (TaskType.INSTANCE_SEGMENTATION, TaskType.ROTATED_DETECTION): + if ann.type == DatumAnnotationType.polygon and self._is_normal_polygon(ann): shapes.append(self._get_polygon_entity(ann, image.width, image.height)) - if self.task_type is TaskType.DETECTION and ann.type == DatumAnnotationType.bbox: - if self._is_normal_bbox(ann.points[0], ann.points[1], ann.points[2], ann.points[3]): + elif ann.type == DatumAnnotationType.ellipse: + shapes.append(self._get_ellipse_entity(ann, image.width, image.height)) + elif self.task_type is TaskType.DETECTION: + if ann.type == DatumAnnotationType.bbox and self._is_normal_bbox( + ann.points[0], ann.points[1], ann.points[2], ann.points[3] + ): shapes.append(self._get_normalized_bbox_entity(ann, image.width, image.height)) if ann.label not in used_labels: diff --git a/src/otx/core/ov/ops/infrastructures.py b/src/otx/core/ov/ops/infrastructures.py index 44b39b9d120..f0c9509d417 100644 --- a/src/otx/core/ov/ops/infrastructures.py +++ b/src/otx/core/ov/ops/infrastructures.py @@ -233,6 +233,8 @@ def from_ov(cls, ov_op): if not np.array_equal(data, data_): logger.warning(f"Overflow detected in {op_name}") data = torch.from_numpy(data_) + elif data.dtype == np.uint16: + data = torch.from_numpy(data.astype(np.int32)) else: data = torch.from_numpy(data) diff --git a/src/otx/core/ov/ops/type_conversions.py b/src/otx/core/ov/ops/type_conversions.py index 25454053c22..267ae7ea37d 100644 --- a/src/otx/core/ov/ops/type_conversions.py +++ b/src/otx/core/ov/ops/type_conversions.py @@ -25,6 +25,7 @@ "u1": torch.uint8, # no type in torch "u4": 
torch.uint8, # no type in torch "u8": torch.uint8, + "u16": torch.int32, # no type in torch "u32": torch.int32, # no type in torch "u64": torch.int64, # no type in torch "i4": torch.int8, # no type in torch diff --git a/src/otx/recipes/stages/instance-segmentation/__init__.py b/src/otx/recipes/stages/instance_segmentation/__init__.py similarity index 100% rename from src/otx/recipes/stages/instance-segmentation/__init__.py rename to src/otx/recipes/stages/instance_segmentation/__init__.py diff --git a/src/otx/recipes/stages/instance-segmentation/incremental.py b/src/otx/recipes/stages/instance_segmentation/incremental.py similarity index 100% rename from src/otx/recipes/stages/instance-segmentation/incremental.py rename to src/otx/recipes/stages/instance_segmentation/incremental.py diff --git a/src/otx/recipes/stages/instance-segmentation/semisl.py b/src/otx/recipes/stages/instance_segmentation/semisl.py similarity index 100% rename from src/otx/recipes/stages/instance-segmentation/semisl.py rename to src/otx/recipes/stages/instance_segmentation/semisl.py diff --git a/src/otx/recipes/stages/instance-segmentation/train.py b/src/otx/recipes/stages/instance_segmentation/train.py similarity index 100% rename from src/otx/recipes/stages/instance-segmentation/train.py rename to src/otx/recipes/stages/instance_segmentation/train.py diff --git a/tests/e2e/cli/detection/test_detection.py b/tests/e2e/cli/detection/test_detection.py index dd86fcf46d2..cc2efcd0b89 100644 --- a/tests/e2e/cli/detection/test_detection.py +++ b/tests/e2e/cli/detection/test_detection.py @@ -348,6 +348,8 @@ def test_otx_eval(self, template, tmp_dir_path): def test_otx_multi_gpu_train_semisl(self, template, tmp_dir_path): if not (Path(template.model_template_path).parent / "semisl").is_dir(): pytest.skip(f"Semi-SL training type isn't available for {template.name}") + if template.name == "ResNeXt101-ATSS": + pytest.skip(f"Issue#2705: multi-gpu training e2e test failure for {template.name}") tmp_dir_path = tmp_dir_path / "detection/test_multi_gpu_semisl" args_semisl_multigpu = copy.deepcopy(args_semisl) args_semisl_multigpu["--gpus"] = "0,1" diff --git a/tests/e2e/cli/visual_prompting/test_visual_prompting.py b/tests/e2e/cli/visual_prompting/test_visual_prompting.py index 43fa07a9f1b..2749a09f347 100644 --- a/tests/e2e/cli/visual_prompting/test_visual_prompting.py +++ b/tests/e2e/cli/visual_prompting/test_visual_prompting.py @@ -60,11 +60,13 @@ templates_ids = [template.model_template_id + f"-{i+1}" for i, template in enumerate(templates)] else: - templates = ( - Registry("src/otx/algorithms/visual_prompting", experimental=True) + templates = [ + template + for template in Registry("src/otx/algorithms/visual_prompting", experimental=True) .filter(task_type="VISUAL_PROMPTING") .templates - ) + if "Zero_Shot" not in template.name + ] templates_ids = [template.model_template_id for template in templates] diff --git a/tests/integration/cli/detection/test_detection.py b/tests/integration/cli/detection/test_detection.py index 218cc9454f1..1ae0ab1e825 100644 --- a/tests/integration/cli/detection/test_detection.py +++ b/tests/integration/cli/detection/test_detection.py @@ -42,6 +42,8 @@ "1", "--learning_parameters.batch_size", "4", + "--postprocessing.max_num_detections", + "200", ], } diff --git a/tests/integration/cli/detection/test_tiling_detection.py b/tests/integration/cli/detection/test_tiling_detection.py index 525ef7532aa..d87600ed66c 100644 --- a/tests/integration/cli/detection/test_tiling_detection.py +++ 
b/tests/integration/cli/detection/test_tiling_detection.py
@@ -1,5 +1,5 @@
 """Tests for OTX Class-Incremental Learning for object detection with OTX CLI"""
-# Copyright (C) 2022 Intel Corporation
+# Copyright (C) 2022-2023 Intel Corporation
 # SPDX-License-Identifier: Apache-2.0
 #
 import os
@@ -36,6 +36,8 @@
         "1",
         "--tiling_parameters.enable_adaptive_params",
         "1",
+        "--postprocessing.max_num_detections",
+        "200",
     ],
 }

diff --git a/tests/integration/cli/instance_segmentation/test_instance_segmentation.py b/tests/integration/cli/instance_segmentation/test_instance_segmentation.py
index d07943b099f..69ec8b386b7 100644
--- a/tests/integration/cli/instance_segmentation/test_instance_segmentation.py
+++ b/tests/integration/cli/instance_segmentation/test_instance_segmentation.py
@@ -39,6 +39,8 @@
         "1",
         "--learning_parameters.batch_size",
         "2",
+        "--postprocessing.max_num_detections",
+        "200",
     ],
 }

@@ -54,6 +56,8 @@
         "1",
         "--learning_parameters.batch_size",
         "2",
+        "--postprocessing.max_num_detections",
+        "200",
     ],
 }

@@ -64,6 +68,8 @@
     "2",
     "--learning_parameters.batch_size",
     "2",
+    "--postprocessing.max_num_detections",
+    "200",
 ]

 otx_dir = os.getcwd()
diff --git a/tests/integration/cli/instance_segmentation/test_tiling_instseg.py b/tests/integration/cli/instance_segmentation/test_tiling_instseg.py
index ccf195cbef4..320c6002a14 100644
--- a/tests/integration/cli/instance_segmentation/test_tiling_instseg.py
+++ b/tests/integration/cli/instance_segmentation/test_tiling_instseg.py
@@ -1,5 +1,5 @@
 """Tests for OTX Class-Incremental Learning for instance segmentation with OTX CLI"""
-# Copyright (C) 2022-2023 Intel Corporation
+# Copyright (C) 2022-2023 Intel Corporation
 # SPDX-License-Identifier: Apache-2.0
 #
 import copy
@@ -42,6 +42,8 @@
         "1",
         "--tiling_parameters.enable_adaptive_params",
         "1",
+        "--postprocessing.max_num_detections",
+        "200",
     ],
 }

diff --git a/tests/integration/cli/visual_prompting/test_visual_prompting.py b/tests/integration/cli/visual_prompting/test_visual_prompting.py
index 9e78e92c531..18d220376a1 100644
--- a/tests/integration/cli/visual_prompting/test_visual_prompting.py
+++ b/tests/integration/cli/visual_prompting/test_visual_prompting.py
@@ -50,9 +50,13 @@
 otx_dir = os.getcwd()

-templates = (
-    Registry("src/otx/algorithms/visual_prompting", experimental=True).filter(task_type="VISUAL_PROMPTING").templates
-)
+templates = [
+    template
+    for template in Registry("src/otx/algorithms/visual_prompting", experimental=True)
+    .filter(task_type="VISUAL_PROMPTING")
+    .templates
+    if "Zero_Shot" not in template.name
+]
 templates_ids = [template.model_template_id for template in templates]

diff --git a/tests/integration/cli/visual_prompting/test_zero_shot.py b/tests/integration/cli/visual_prompting/test_zero_shot.py
new file mode 100644
index 00000000000..8d403f27999
--- /dev/null
+++ b/tests/integration/cli/visual_prompting/test_zero_shot.py
@@ -0,0 +1,53 @@
+"""Tests for Zero-shot visual prompting with OTX CLI"""
+
+# Copyright (C) 2023 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+import os
+
+import pytest
+
+from otx.cli.registry import Registry
+from tests.test_suite.e2e_test_system import e2e_pytest_component
+from tests.test_suite.run_test_command import (
+    otx_eval_testing,
+    otx_train_testing,
+)
+
+args = {
+    "--train-data-roots": "tests/assets/car_tree_bug",
+    "--val-data-roots": "tests/assets/car_tree_bug",
+    "--test-data-roots": "tests/assets/car_tree_bug",
+    "--input": "tests/assets/car_tree_bug/images/train",
+    "train_params": [
+        "params",
"--learning_parameters.trainer.max_epochs", + "1", + ], +} + +otx_dir = os.getcwd() + + +templates = [ + template + for template in Registry("src/otx/algorithms/visual_prompting", experimental=True) + .filter(task_type="VISUAL_PROMPTING") + .templates + if "Zero_Shot" in template.name +] +templates_ids = [template.model_template_id for template in templates] + + +class TestVisualPromptingCLI: + @e2e_pytest_component + @pytest.mark.parametrize("template", templates, ids=templates_ids) + def test_otx_train(self, template, tmp_dir_path): + tmp_dir_path = tmp_dir_path / "zero_shot_visual_prompting" + otx_train_testing(template, tmp_dir_path, otx_dir, args, deterministic=True) + + @e2e_pytest_component + @pytest.mark.parametrize("template", templates, ids=templates_ids) + def test_otx_eval(self, template, tmp_dir_path): + tmp_dir_path = tmp_dir_path / "zero_shot_visual_prompting" + otx_eval_testing(template, tmp_dir_path, otx_dir, args) diff --git a/tests/regression/action/test_action_classification.py b/tests/regression/action/test_action_classification.py index c953ad9a9cc..6f2595700ba 100644 --- a/tests/regression/action/test_action_classification.py +++ b/tests/regression/action/test_action_classification.py @@ -45,20 +45,19 @@ class TestRegressionActionClassification: @classmethod @pytest.fixture(scope="class") def reg_cfg(cls, tmp_dir_path): + results_root = os.environ.get("REG_RESULTS_ROOT", tmp_dir_path) cls.reg_cfg = RegressionTestConfig( cls.TASK_TYPE, cls.TRAIN_TYPE, cls.LABEL_TYPE, os.getcwd(), train_params=cls.TRAIN_PARAMS, - tmp_results_root=tmp_dir_path, + results_root=results_root, ) yield cls.reg_cfg - print(f"writting regression result to {cls.reg_cfg.result_dir}/result_{cls.TRAIN_TYPE}_{cls.LABEL_TYPE}.json") - with open(f"{cls.reg_cfg.result_dir}/result_{cls.TRAIN_TYPE}_{cls.LABEL_TYPE}.json", "w") as result_file: - json.dump(cls.reg_cfg.result_dict, result_file, indent=4) + cls.reg_cfg.dump_result_dict() def setup_method(self): self.performance = {} @@ -66,6 +65,7 @@ def setup_method(self): @e2e_pytest_component @pytest.mark.parametrize("template", templates, ids=templates_ids) def test_otx_train(self, reg_cfg, template, tmp_dir_path): + test_type = "train" self.performance[template.name] = {} tmp_dir_path = tmp_dir_path / reg_cfg.task_type @@ -79,14 +79,14 @@ def test_otx_train(self, reg_cfg, template, tmp_dir_path): tmp_dir_path, reg_cfg.otx_dir, reg_cfg.args, - reg_cfg.config_dict["regression_criteria"]["train"], + reg_cfg.config_dict["regression_criteria"][test_type], self.performance[template.name], ) infer_elapsed_time = timer() - infer_start_time self.performance[template.name][TIME_LOG["train_time"]] = round(train_elapsed_time, 3) self.performance[template.name][TIME_LOG["infer_time"]] = round(infer_elapsed_time, 3) - reg_cfg.result_dict[reg_cfg.task_type][reg_cfg.label_type][reg_cfg.train_type]["train"].append(self.performance) + reg_cfg.update_result(test_type, self.performance) assert test_result["passed"] is True, test_result["log"] @@ -94,6 +94,8 @@ def test_otx_train(self, reg_cfg, template, tmp_dir_path): @pytest.mark.parametrize("template", templates, ids=templates_ids) def test_otx_train_kpi_test(self, reg_cfg, template): performance = reg_cfg.get_template_performance(template) + if performance is None: + pytest.skip(reason="Cannot find performance data from results.") kpi_train_result = regression_train_time_testing( train_time_criteria=reg_cfg.config_dict["kpi_e2e_train_time_criteria"]["train"], @@ -115,6 +117,7 @@ def test_otx_train_kpi_test(self, 
reg_cfg, template): def test_otx_export_eval_openvino(self, reg_cfg, template, tmp_dir_path): if template.name == "MoViNet": pytest.skip(reason="Issue#2058: MoViNet fails with OpenVINO inference occasionally") + test_type = "export" self.performance[template.name] = {} tmp_dir_path = tmp_dir_path / reg_cfg.task_type @@ -129,7 +132,7 @@ def test_otx_export_eval_openvino(self, reg_cfg, template, tmp_dir_path): reg_cfg.otx_dir, reg_cfg.args, threshold=0.05, - criteria=reg_cfg.config_dict["regression_criteria"]["export"], + criteria=reg_cfg.config_dict["regression_criteria"][test_type], reg_threshold=0.10, result_dict=self.performance[template.name], ) @@ -137,10 +140,7 @@ def test_otx_export_eval_openvino(self, reg_cfg, template, tmp_dir_path): self.performance[template.name][TIME_LOG["export_time"]] = round(export_elapsed_time, 3) self.performance[template.name][TIME_LOG["export_eval_time"]] = round(export_eval_elapsed_time, 3) - reg_cfg.result_dict[reg_cfg.task_type][reg_cfg.label_type][reg_cfg.train_type]["export"].append( - self.performance - ) - + reg_cfg.update_result(test_type, self.performance) assert test_result["passed"] is True, test_result["log"] @e2e_pytest_component @@ -148,6 +148,7 @@ def test_otx_export_eval_openvino(self, reg_cfg, template, tmp_dir_path): def test_ptq_optimize_eval(self, reg_cfg, template, tmp_dir_path): if template.name == "MoViNet": pytest.skip(reason="Issue#2058: MoViNet fails with OpenVINO inference occasionally") + test_type = "ptq" self.performance[template.name] = {} tmp_dir_path = tmp_dir_path / reg_cfg.task_type @@ -161,7 +162,7 @@ def test_ptq_optimize_eval(self, reg_cfg, template, tmp_dir_path): tmp_dir_path, reg_cfg.otx_dir, reg_cfg.args, - criteria=reg_cfg.config_dict["regression_criteria"]["ptq"], + criteria=reg_cfg.config_dict["regression_criteria"][test_type], reg_threshold=0.10, result_dict=self.performance[template.name], ) @@ -169,6 +170,6 @@ def test_ptq_optimize_eval(self, reg_cfg, template, tmp_dir_path): self.performance[template.name][TIME_LOG["ptq_time"]] = round(ptq_elapsed_time, 3) self.performance[template.name][TIME_LOG["ptq_eval_time"]] = round(ptq_eval_elapsed_time, 3) - reg_cfg.result_dict[reg_cfg.task_type][reg_cfg.label_type][reg_cfg.train_type]["ptq"].append(self.performance) + reg_cfg.update_result(test_type, self.performance) assert test_result["passed"] is True, test_result["log"] diff --git a/tests/regression/action/test_action_detection.py b/tests/regression/action/test_action_detection.py index 14196f8e2fe..f7dd494dcf8 100644 --- a/tests/regression/action/test_action_detection.py +++ b/tests/regression/action/test_action_detection.py @@ -47,20 +47,19 @@ class TestRegressionActionDetection: @classmethod @pytest.fixture(scope="class") def reg_cfg(cls, tmp_dir_path): + results_root = os.environ.get("REG_RESULTS_ROOT", tmp_dir_path) cls.reg_cfg = RegressionTestConfig( cls.TASK_TYPE, cls.TRAIN_TYPE, cls.LABEL_TYPE, os.getcwd(), train_params=cls.TRAIN_PARAMS, - tmp_results_root=tmp_dir_path, + results_root=results_root, ) yield cls.reg_cfg - print(f"writting regression result to {cls.reg_cfg.result_dir}/result_{cls.TRAIN_TYPE}_{cls.LABEL_TYPE}.json") - with open(f"{cls.reg_cfg.result_dir}/result_{cls.TRAIN_TYPE}_{cls.LABEL_TYPE}.json", "w") as result_file: - json.dump(cls.reg_cfg.result_dict, result_file, indent=4) + cls.reg_cfg.dump_result_dict() def setup_method(self): self.performance = {} @@ -68,6 +67,7 @@ def setup_method(self): @e2e_pytest_component @pytest.mark.parametrize("template", templates, ids=templates_ids) def 
test_otx_train(self, reg_cfg, template, tmp_dir_path): + test_type = "train" self.performance[template.name] = {} tmp_dir_path = tmp_dir_path / reg_cfg.task_type @@ -81,14 +81,14 @@ def test_otx_train(self, reg_cfg, template, tmp_dir_path): tmp_dir_path, reg_cfg.otx_dir, reg_cfg.args, - reg_cfg.config_dict["regression_criteria"]["train"], + reg_cfg.config_dict["regression_criteria"][test_type], self.performance[template.name], ) infer_elapsed_time = timer() - infer_start_time self.performance[template.name][TIME_LOG["train_time"]] = round(train_elapsed_time, 3) self.performance[template.name][TIME_LOG["infer_time"]] = round(infer_elapsed_time, 3) - reg_cfg.result_dict[reg_cfg.task_type][reg_cfg.label_type][reg_cfg.train_type]["train"].append(self.performance) + reg_cfg.update_result(test_type, self.performance) assert test_result["passed"] is True, test_result["log"] @@ -96,6 +96,8 @@ def test_otx_train(self, reg_cfg, template, tmp_dir_path): @pytest.mark.parametrize("template", templates, ids=templates_ids) def test_otx_train_kpi_test(self, reg_cfg, template): performance = reg_cfg.get_template_performance(template) + if performance is None: + pytest.skip(reason="Cannot find performance data from results.") kpi_train_result = regression_train_time_testing( train_time_criteria=reg_cfg.config_dict["kpi_e2e_train_time_criteria"]["train"], diff --git a/tests/regression/anomaly/test_anomaly_classificaiton.py b/tests/regression/anomaly/test_anomaly_classificaiton.py index 1cff676fa91..ae928e7997b 100644 --- a/tests/regression/anomaly/test_anomaly_classificaiton.py +++ b/tests/regression/anomaly/test_anomaly_classificaiton.py @@ -11,6 +11,15 @@ import pytest from otx.cli.registry import Registry +from tests.regression.regression_command import ( + regression_deployment_testing, + regression_eval_testing, + regression_eval_time_testing, + regression_nncf_eval_testing, + regression_openvino_testing, + regression_ptq_eval_testing, + regression_train_time_testing, +) from tests.regression.regression_test_helpers import ( ANOMALY_DATASET_CATEGORIES, TIME_LOG, @@ -25,16 +34,6 @@ ptq_optimize_testing, ) -from tests.regression.regression_command import ( - regression_eval_testing, - regression_openvino_testing, - regression_deployment_testing, - regression_nncf_eval_testing, - regression_ptq_eval_testing, - regression_train_time_testing, - regression_eval_time_testing, -) - class TestRegressionAnomalyClassification: # Configurations for regression test. 
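For context: the change repeated throughout these hunks replaces hand-rolled indexing such as reg_cfg.result_dict[task_type][label_type][train_type]["train"].append(...) with a single reg_cfg.update_result(...) call, and replaces the inline json.dump with reg_cfg.dump_result_dict(). A minimal sketch of what those helpers on RegressionTestConfig would look like, reconstructed from the call sites in this patch (the method bodies are illustrative assumptions, not the shipped implementation):

    import json
    import os

    class RegressionTestConfig:
        # Attributes assumed from the fixtures above: result_dict, result_dir,
        # task_type, label_type, train_type.

        def update_result(self, test_type, performance, is_anomaly=False, category=None, train_type=None):
            """Append one performance record under the matching result_dict branch."""
            train_type = train_type or self.train_type
            if is_anomaly:
                # Anomaly results are grouped per dataset category rather than
                # per label/train type (see the anomaly hunks in this patch).
                self.result_dict[self.task_type][test_type][category].append(performance)
            else:
                self.result_dict[self.task_type][self.label_type][train_type][test_type].append(performance)

        def dump_result_dict(self, dump_path=None):
            """Write accumulated results to JSON; the default mirrors the old inline dump."""
            if dump_path is None:
                dump_path = os.path.join(self.result_dir, f"result_{self.train_type}_{self.label_type}.json")
            print(f"writing regression result to {dump_path}")
            with open(dump_path, "w") as result_file:
                json.dump(self.result_dict, result_file, indent=4)

Centralizing the branching in update_result keeps each test body to one call and lets the anomaly suites reuse the same entry point through the is_anomaly/category keywords.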
@@ -44,7 +43,7 @@ class TestRegressionAnomalyClassification: LABEL_TYPE = None TRAIN_PARAMS = None - SAMPLED_ANOMALY_DATASET_CATEGORIES = random.sample(ANOMALY_DATASET_CATEGORIES, 3) + SAMPLED_ANOMALY_DATASET_CATEGORIES = ANOMALY_DATASET_CATEGORIES templates = Registry(f"src/otx/algorithms/{REG_CATEGORY}").filter(task_type=TASK_TYPE.upper()).templates templates_ids = [template.model_template_id for template in templates] @@ -54,20 +53,19 @@ class TestRegressionAnomalyClassification: @classmethod @pytest.fixture(scope="class") def reg_cfg(cls, tmp_dir_path): + results_root = os.environ.get("REG_RESULTS_ROOT", tmp_dir_path) cls.reg_cfg = RegressionTestConfig( cls.TASK_TYPE, cls.TRAIN_TYPE, cls.LABEL_TYPE, os.getcwd(), enable_auto_num_worker=False, - tmp_results_root=tmp_dir_path, + results_root=results_root, ) yield cls.reg_cfg - print(f"writting regression result to {cls.reg_cfg.result_dir}/result_{cls.TRAIN_TYPE}_{cls.LABEL_TYPE}.json") - with open(f"{cls.reg_cfg.result_dir}/result_{cls.TRAIN_TYPE}_{cls.LABEL_TYPE}.json", "w") as result_file: - json.dump(cls.reg_cfg.result_dict, result_file, indent=4) + cls.reg_cfg.dump_result_dict(dump_path=os.path.join(cls.reg_cfg.result_dir, f"result_{cls.TASK_TYPE}.json")) def setup_method(self): self.performance = {} @@ -87,6 +85,7 @@ def _apply_category(self, data_dict, category): @pytest.mark.parametrize("template", templates, ids=templates_ids) @pytest.mark.parametrize("category", SAMPLED_ANOMALY_DATASET_CATEGORIES) def test_otx_train(self, reg_cfg, template, tmp_dir_path, category): + test_type = "train" self.performance[template.name] = {} category_data_args = self._apply_category(reg_cfg.args, category) @@ -101,14 +100,14 @@ def test_otx_train(self, reg_cfg, template, tmp_dir_path, category): tmp_dir_path, reg_cfg.otx_dir, category_data_args, - reg_cfg.config_dict["regression_criteria"]["train"][category], + reg_cfg.config_dict["regression_criteria"][test_type][category], self.performance[template.name], ) infer_elapsed_time = timer() - infer_start_time self.performance[template.name][TIME_LOG["train_time"]] = round(train_elapsed_time, 3) self.performance[template.name][TIME_LOG["infer_time"]] = round(infer_elapsed_time, 3) - reg_cfg.result_dict[reg_cfg.task_type]["train"][category].append(self.performance) + reg_cfg.update_result(test_type, self.performance, is_anomaly=True, category=category) assert test_result["passed"] is True, test_result["log"] @@ -118,6 +117,8 @@ def test_otx_train(self, reg_cfg, template, tmp_dir_path, category): def test_otx_train_kpi_test(self, reg_cfg, template, category): """KPI tests: measure the train+val time and evaluation time and compare with criteria.""" performance = reg_cfg.get_template_performance(template, category=category) + if performance is None: + pytest.skip(reason="Cannot find performance data from results.") # Compare train+val time with the KPI criteria. 
kpi_train_result = regression_train_time_testing( @@ -142,6 +143,7 @@ def test_otx_train_kpi_test(self, reg_cfg, template, category): def test_otx_export_eval_openvino(self, reg_cfg, template, tmp_dir_path, category): if category in ["transistor", "cable"]: pytest.skip("Issue#2189: Anomaly task sometimes shows performance drop") + test_type = "export" self.performance[template.name] = {} category_data_args = self._apply_category(reg_cfg.args, category) @@ -157,7 +159,7 @@ def test_otx_export_eval_openvino(self, reg_cfg, template, tmp_dir_path, categor reg_cfg.otx_dir, category_data_args, threshold=0.05, - criteria=reg_cfg.config_dict["regression_criteria"]["export"][category], + criteria=reg_cfg.config_dict["regression_criteria"][test_type][category], reg_threshold=0.10, result_dict=self.performance[template.name], ) @@ -165,7 +167,7 @@ def test_otx_export_eval_openvino(self, reg_cfg, template, tmp_dir_path, categor self.performance[template.name][TIME_LOG["export_time"]] = round(export_elapsed_time, 3) self.performance[template.name][TIME_LOG["export_eval_time"]] = round(export_eval_elapsed_time, 3) - reg_cfg.result_dict[reg_cfg.task_type]["export"][category].append(self.performance) + reg_cfg.update_result(test_type, self.performance, is_anomaly=True, category=category) assert test_result["passed"] is True, test_result["log"] @@ -175,6 +177,7 @@ def test_otx_export_eval_openvino(self, reg_cfg, template, tmp_dir_path, categor def test_otx_deploy_eval_deployment(self, reg_cfg, template, tmp_dir_path, category): if category in ["transistor", "cable"]: pytest.skip("Issue#2189: Anomaly task sometimes shows performance drop") + test_type = "deploy" self.performance[template.name] = {} category_data_args = self._apply_category(reg_cfg.args, category) @@ -190,7 +193,7 @@ def test_otx_deploy_eval_deployment(self, reg_cfg, template, tmp_dir_path, categ reg_cfg.otx_dir, category_data_args, threshold=0.0, - criteria=reg_cfg.config_dict["regression_criteria"]["deploy"][category], + criteria=reg_cfg.config_dict["regression_criteria"][test_type][category], reg_threshold=0.10, result_dict=self.performance[template.name], ) @@ -198,7 +201,7 @@ def test_otx_deploy_eval_deployment(self, reg_cfg, template, tmp_dir_path, categ self.performance[template.name][TIME_LOG["deploy_time"]] = round(deploy_elapsed_time, 3) self.performance[template.name][TIME_LOG["deploy_eval_time"]] = round(deploy_eval_elapsed_time, 3) - reg_cfg.result_dict[reg_cfg.task_type]["deploy"][category].append(self.performance) + reg_cfg.update_result(test_type, self.performance, is_anomaly=True, category=category) assert test_result["passed"] is True, test_result["log"] @@ -208,6 +211,7 @@ def test_otx_deploy_eval_deployment(self, reg_cfg, template, tmp_dir_path, categ def test_nncf_optimize_eval(self, reg_cfg, template, tmp_dir_path, category): if category in ["transistor", "cable", "bottle"]: pytest.skip("Issue#2189: Anomaly task sometimes shows performance drop") + test_type = "nncf" self.performance[template.name] = {} category_data_args = self._apply_category(reg_cfg.args, category) @@ -226,7 +230,7 @@ def test_nncf_optimize_eval(self, reg_cfg, template, tmp_dir_path, category): reg_cfg.otx_dir, category_data_args, threshold=0.01, - criteria=reg_cfg.config_dict["regression_criteria"]["nncf"][category], + criteria=reg_cfg.config_dict["regression_criteria"][test_type][category], reg_threshold=0.10, result_dict=self.performance[template.name], ) @@ -234,7 +238,7 @@ def test_nncf_optimize_eval(self, reg_cfg, template, tmp_dir_path, 
category): self.performance[template.name][TIME_LOG["nncf_time"]] = round(nncf_elapsed_time, 3) self.performance[template.name][TIME_LOG["nncf_eval_time"]] = round(nncf_eval_elapsed_time, 3) - reg_cfg.result_dict[reg_cfg.task_type]["nncf"][category].append(self.performance) + reg_cfg.update_result(test_type, self.performance, is_anomaly=True, category=category) assert test_result["passed"] is True, test_result["log"] @@ -242,6 +246,7 @@ def test_nncf_optimize_eval(self, reg_cfg, template, tmp_dir_path, category): @pytest.mark.parametrize("template", templates, ids=templates_ids) @pytest.mark.parametrize("category", SAMPLED_ANOMALY_DATASET_CATEGORIES) def test_ptq_optimize_eval(self, reg_cfg, template, tmp_dir_path, category): + test_type = "ptq" self.performance[template.name] = {} category_data_args = self._apply_category(reg_cfg.args, category) @@ -256,7 +261,7 @@ def test_ptq_optimize_eval(self, reg_cfg, template, tmp_dir_path, category): tmp_dir_path, reg_cfg.otx_dir, category_data_args, - criteria=reg_cfg.config_dict["regression_criteria"]["ptq"][category], + criteria=reg_cfg.config_dict["regression_criteria"][test_type][category], reg_threshold=0.10, result_dict=self.performance[template.name], ) @@ -264,6 +269,6 @@ def test_ptq_optimize_eval(self, reg_cfg, template, tmp_dir_path, category): self.performance[template.name][TIME_LOG["ptq_time"]] = round(ptq_elapsed_time, 3) self.performance[template.name][TIME_LOG["ptq_eval_time"]] = round(ptq_eval_elapsed_time, 3) - reg_cfg.result_dict[reg_cfg.task_type]["ptq"][category].append(self.performance) + reg_cfg.update_result(test_type, self.performance, is_anomaly=True, category=category) assert test_result["passed"] is True, test_result["log"] diff --git a/tests/regression/anomaly/test_anomaly_detection.py b/tests/regression/anomaly/test_anomaly_detection.py index 688f140e3dd..e638a88ad28 100644 --- a/tests/regression/anomaly/test_anomaly_detection.py +++ b/tests/regression/anomaly/test_anomaly_detection.py @@ -11,6 +11,15 @@ import pytest from otx.cli.registry import Registry +from tests.regression.regression_command import ( + regression_deployment_testing, + regression_eval_testing, + regression_eval_time_testing, + regression_nncf_eval_testing, + regression_openvino_testing, + regression_ptq_eval_testing, + regression_train_time_testing, +) from tests.regression.regression_test_helpers import ( ANOMALY_DATASET_CATEGORIES, TIME_LOG, @@ -25,16 +34,6 @@ ptq_optimize_testing, ) -from tests.regression.regression_command import ( - regression_eval_testing, - regression_openvino_testing, - regression_deployment_testing, - regression_nncf_eval_testing, - regression_ptq_eval_testing, - regression_train_time_testing, - regression_eval_time_testing, -) - class TestRegressionAnomalyDetection: # Configurations for regression test. 
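For the KPI tests, each hunk adds the same guard: when reg_cfg.get_template_performance(...) returns None, the test skips instead of failing with a KeyError on missing data. Continuing the RegressionTestConfig sketch above, a plausible shape for that lookup, inferred from its call sites (template plus optional category or train_type; the body is an assumption):

    def get_template_performance(self, template, category=None, train_type=None):
        """Return the most recent result record containing this template, or None."""
        train_type = train_type or self.train_type
        if category is not None:
            # Anomaly tasks accumulate per-category records under the "train" key.
            records = self.result_dict[self.task_type]["train"][category]
        else:
            records = self.result_dict[self.task_type][self.label_type][train_type]["train"]
        for record in reversed(records):
            if template.name in record:
                return record
        return None

Returning None rather than raising is what allows the added "if performance is None: pytest.skip(...)" guards to degrade gracefully when the corresponding train test was itself skipped or ran in a separate session.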
@@ -44,7 +43,7 @@ class TestRegressionAnomalyDetection: LABEL_TYPE = None TRAIN_PARAMS = None - SAMPLED_ANOMALY_DATASET_CATEGORIES = random.sample(ANOMALY_DATASET_CATEGORIES, 3) + SAMPLED_ANOMALY_DATASET_CATEGORIES = ANOMALY_DATASET_CATEGORIES templates = Registry(f"src/otx/algorithms/{REG_CATEGORY}").filter(task_type=TASK_TYPE.upper()).templates templates_ids = [template.model_template_id for template in templates] @@ -54,20 +53,19 @@ class TestRegressionAnomalyDetection: @classmethod @pytest.fixture(scope="class") def reg_cfg(cls, tmp_dir_path): + results_root = os.environ.get("REG_RESULTS_ROOT", tmp_dir_path) cls.reg_cfg = RegressionTestConfig( cls.TASK_TYPE, cls.TRAIN_TYPE, cls.LABEL_TYPE, os.getcwd(), enable_auto_num_worker=False, - tmp_results_root=tmp_dir_path, + results_root=results_root, ) yield cls.reg_cfg - print(f"writting regression result to {cls.reg_cfg.result_dir}/result_{cls.TRAIN_TYPE}_{cls.LABEL_TYPE}.json") - with open(f"{cls.reg_cfg.result_dir}/result_{cls.TRAIN_TYPE}_{cls.LABEL_TYPE}.json", "w") as result_file: - json.dump(cls.reg_cfg.result_dict, result_file, indent=4) + cls.reg_cfg.dump_result_dict(dump_path=os.path.join(cls.reg_cfg.result_dir, f"result_{cls.TASK_TYPE}.json")) def setup_method(self): self.performance = {} @@ -87,6 +85,7 @@ def _apply_category(self, data_dict, category): @pytest.mark.parametrize("template", templates, ids=templates_ids) @pytest.mark.parametrize("category", SAMPLED_ANOMALY_DATASET_CATEGORIES) def test_otx_train(self, reg_cfg, template, tmp_dir_path, category): + test_type = "train" self.performance[template.name] = {} category_data_args = self._apply_category(reg_cfg.args, category) @@ -101,14 +100,14 @@ def test_otx_train(self, reg_cfg, template, tmp_dir_path, category): tmp_dir_path, reg_cfg.otx_dir, category_data_args, - reg_cfg.config_dict["regression_criteria"]["train"][category], + reg_cfg.config_dict["regression_criteria"][test_type][category], self.performance[template.name], ) infer_elapsed_time = timer() - infer_start_time self.performance[template.name][TIME_LOG["train_time"]] = round(train_elapsed_time, 3) self.performance[template.name][TIME_LOG["infer_time"]] = round(infer_elapsed_time, 3) - reg_cfg.result_dict[reg_cfg.task_type]["train"][category].append(self.performance) + reg_cfg.update_result(test_type, self.performance, is_anomaly=True, category=category) assert test_result["passed"] is True, test_result["log"] @@ -118,6 +117,9 @@ def test_otx_train(self, reg_cfg, template, tmp_dir_path, category): def test_otx_train_kpi_test(self, reg_cfg, template, category): """KPI tests: measure the train+val time and evaluation time and compare with criteria.""" performance = reg_cfg.get_template_performance(template, category=category) + if performance is None: + pytest.skip(reason="Cannot find performance data from results.") + # Compare train+val time with the KPI criteria. 
kpi_train_result = regression_train_time_testing( train_time_criteria=reg_cfg.config_dict["kpi_e2e_train_time_criteria"]["train"][category], @@ -141,6 +143,7 @@ def test_otx_train_kpi_test(self, reg_cfg, template, category): def test_otx_export_eval_openvino(self, reg_cfg, template, tmp_dir_path, category): if category in ["tile", "grid"]: pytest.skip("Issue#2189: Anomaly task sometimes shows performance drop") + test_type = "export" self.performance[template.name] = {} category_data_args = self._apply_category(reg_cfg.args, category) @@ -156,7 +159,7 @@ def test_otx_export_eval_openvino(self, reg_cfg, template, tmp_dir_path, categor reg_cfg.otx_dir, category_data_args, threshold=0.05, - criteria=reg_cfg.config_dict["regression_criteria"]["export"][category], + criteria=reg_cfg.config_dict["regression_criteria"][test_type][category], reg_threshold=0.10, result_dict=self.performance[template.name], ) @@ -164,7 +167,7 @@ def test_otx_export_eval_openvino(self, reg_cfg, template, tmp_dir_path, categor self.performance[template.name][TIME_LOG["export_time"]] = round(export_elapsed_time, 3) self.performance[template.name][TIME_LOG["export_eval_time"]] = round(export_eval_elapsed_time, 3) - reg_cfg.result_dict[reg_cfg.task_type]["export"][category].append(self.performance) + reg_cfg.update_result(test_type, self.performance, is_anomaly=True, category=category) assert test_result["passed"] is True, test_result["log"] @@ -174,6 +177,7 @@ def test_otx_export_eval_openvino(self, reg_cfg, template, tmp_dir_path, categor def test_otx_deploy_eval_deployment(self, reg_cfg, template, tmp_dir_path, category): if category in ["tile", "cable"]: pytest.skip("Issue#2189: Anomaly task sometimes shows performance drop") + test_type = "deploy" self.performance[template.name] = {} category_data_args = self._apply_category(reg_cfg.args, category) @@ -189,7 +193,7 @@ def test_otx_deploy_eval_deployment(self, reg_cfg, template, tmp_dir_path, categ reg_cfg.otx_dir, category_data_args, threshold=0.0, - criteria=reg_cfg.config_dict["regression_criteria"]["deploy"][category], + criteria=reg_cfg.config_dict["regression_criteria"][test_type][category], reg_threshold=0.10, result_dict=self.performance[template.name], ) @@ -197,7 +201,7 @@ def test_otx_deploy_eval_deployment(self, reg_cfg, template, tmp_dir_path, categ self.performance[template.name][TIME_LOG["deploy_time"]] = round(deploy_elapsed_time, 3) self.performance[template.name][TIME_LOG["deploy_eval_time"]] = round(deploy_eval_elapsed_time, 3) - reg_cfg.result_dict[reg_cfg.task_type]["deploy"][category].append(self.performance) + reg_cfg.update_result(test_type, self.performance, is_anomaly=True, category=category) assert test_result["passed"] is True, test_result["log"] @@ -207,6 +211,7 @@ def test_otx_deploy_eval_deployment(self, reg_cfg, template, tmp_dir_path, categ def test_nncf_optimize_eval(self, reg_cfg, template, tmp_dir_path, category): if category in ["tile", "cable", "grid"]: pytest.skip("Issue#2189: Anomaly task sometimes shows performance drop") + test_type = "nncf" self.performance[template.name] = {} category_data_args = self._apply_category(reg_cfg.args, category) @@ -225,7 +230,7 @@ def test_nncf_optimize_eval(self, reg_cfg, template, tmp_dir_path, category): reg_cfg.otx_dir, category_data_args, threshold=0.01, - criteria=reg_cfg.config_dict["regression_criteria"]["nncf"][category], + criteria=reg_cfg.config_dict["regression_criteria"][test_type][category], reg_threshold=0.10, result_dict=self.performance[template.name], ) @@ -233,7 +238,7 @@ def 
test_nncf_optimize_eval(self, reg_cfg, template, tmp_dir_path, category): self.performance[template.name][TIME_LOG["nncf_time"]] = round(nncf_elapsed_time, 3) self.performance[template.name][TIME_LOG["nncf_eval_time"]] = round(nncf_eval_elapsed_time, 3) - reg_cfg.result_dict[reg_cfg.task_type]["nncf"][category].append(self.performance) + reg_cfg.update_result(test_type, self.performance, is_anomaly=True, category=category) assert test_result["passed"] is True, test_result["log"] @@ -243,6 +248,7 @@ def test_nncf_optimize_eval(self, reg_cfg, template, tmp_dir_path, category): def test_ptq_optimize_eval(self, reg_cfg, template, tmp_dir_path, category): if category in ["tile", "grid"]: pytest.skip("Issue#2189: Anomaly task sometimes shows performance drop") + test_type = "ptq" self.performance[template.name] = {} category_data_args = self._apply_category(reg_cfg.args, category) @@ -257,7 +263,7 @@ def test_ptq_optimize_eval(self, reg_cfg, template, tmp_dir_path, category): tmp_dir_path, reg_cfg.otx_dir, category_data_args, - criteria=reg_cfg.config_dict["regression_criteria"]["ptq"][category], + criteria=reg_cfg.config_dict["regression_criteria"][test_type][category], reg_threshold=0.10, result_dict=self.performance[template.name], ) @@ -265,6 +271,6 @@ def test_ptq_optimize_eval(self, reg_cfg, template, tmp_dir_path, category): self.performance[template.name][TIME_LOG["ptq_time"]] = round(ptq_elapsed_time, 3) self.performance[template.name][TIME_LOG["ptq_eval_time"]] = round(ptq_eval_elapsed_time, 3) - reg_cfg.result_dict[reg_cfg.task_type]["ptq"][category].append(self.performance) + reg_cfg.update_result(test_type, self.performance, is_anomaly=True, category=category) assert test_result["passed"] is True, test_result["log"] diff --git a/tests/regression/anomaly/test_anomaly_segmentation.py b/tests/regression/anomaly/test_anomaly_segmentation.py index 83383569898..13f90320aab 100644 --- a/tests/regression/anomaly/test_anomaly_segmentation.py +++ b/tests/regression/anomaly/test_anomaly_segmentation.py @@ -11,6 +11,15 @@ import pytest from otx.cli.registry import Registry +from tests.regression.regression_command import ( + regression_deployment_testing, + regression_eval_testing, + regression_eval_time_testing, + regression_nncf_eval_testing, + regression_openvino_testing, + regression_ptq_eval_testing, + regression_train_time_testing, +) from tests.regression.regression_test_helpers import ( ANOMALY_DATASET_CATEGORIES, TIME_LOG, @@ -25,16 +34,6 @@ ptq_optimize_testing, ) -from tests.regression.regression_command import ( - regression_eval_testing, - regression_openvino_testing, - regression_deployment_testing, - regression_nncf_eval_testing, - regression_ptq_eval_testing, - regression_train_time_testing, - regression_eval_time_testing, -) - class TestRegressionAnomalySegmentation: # Configurations for regression test. 
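The fixture edits repeated across these files swap the old tmp_results_root keyword for results_root, resolved as os.environ.get("REG_RESULTS_ROOT", tmp_dir_path). Results therefore default to pytest's ephemeral tmp dir but can be redirected to a persistent location so the session-level summary (see the conftest.py hunk later in this patch) can aggregate the per-task result_*.json files. A short usage sketch; the target directory is purely illustrative:

    import os
    import subprocess

    # Persist all regression result JSONs outside the pytest tmp dir
    # (hypothetical directory; any writable path works).
    env = dict(os.environ, REG_RESULTS_ROOT="/data/otx_reg_results")
    subprocess.run(["pytest", "tests/regression/anomaly"], env=env, check=False)

With REG_RESULTS_ROOT unset, os.environ.get falls back to tmp_dir_path, preserving the previous per-run behavior.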
@@ -44,7 +43,7 @@ class TestRegressionAnomalySegmentation: LABEL_TYPE = None TRAIN_PARAMS = None - SAMPLED_ANOMALY_DATASET_CATEGORIES = random.sample(ANOMALY_DATASET_CATEGORIES, 3) + SAMPLED_ANOMALY_DATASET_CATEGORIES = ANOMALY_DATASET_CATEGORIES templates = Registry(f"src/otx/algorithms/{REG_CATEGORY}").filter(task_type=TASK_TYPE.upper()).templates templates_ids = [template.model_template_id for template in templates] @@ -54,20 +53,19 @@ class TestRegressionAnomalySegmentation: @classmethod @pytest.fixture(scope="class") def reg_cfg(cls, tmp_dir_path): + results_root = os.environ.get("REG_RESULTS_ROOT", tmp_dir_path) cls.reg_cfg = RegressionTestConfig( cls.TASK_TYPE, cls.TRAIN_TYPE, cls.LABEL_TYPE, os.getcwd(), enable_auto_num_worker=False, - tmp_results_root=tmp_dir_path, + results_root=results_root, ) yield cls.reg_cfg - print(f"writting regression result to {cls.reg_cfg.result_dir}/result_{cls.TRAIN_TYPE}_{cls.LABEL_TYPE}.json") - with open(f"{cls.reg_cfg.result_dir}/result_{cls.TRAIN_TYPE}_{cls.LABEL_TYPE}.json", "w") as result_file: - json.dump(cls.reg_cfg.result_dict, result_file, indent=4) + cls.reg_cfg.dump_result_dict(dump_path=os.path.join(cls.reg_cfg.result_dir, f"result_{cls.TASK_TYPE}.json")) def setup_method(self): self.performance = {} @@ -87,6 +85,7 @@ def _apply_category(self, data_dict, category): @pytest.mark.parametrize("template", templates, ids=templates_ids) @pytest.mark.parametrize("category", SAMPLED_ANOMALY_DATASET_CATEGORIES) def test_otx_train(self, reg_cfg, template, tmp_dir_path, category): + test_type = "train" self.performance[template.name] = {} category_data_args = self._apply_category(reg_cfg.args, category) @@ -101,14 +100,14 @@ def test_otx_train(self, reg_cfg, template, tmp_dir_path, category): tmp_dir_path, reg_cfg.otx_dir, category_data_args, - reg_cfg.config_dict["regression_criteria"]["train"][category], + reg_cfg.config_dict["regression_criteria"][test_type][category], self.performance[template.name], ) infer_elapsed_time = timer() - infer_start_time self.performance[template.name][TIME_LOG["train_time"]] = round(train_elapsed_time, 3) self.performance[template.name][TIME_LOG["infer_time"]] = round(infer_elapsed_time, 3) - reg_cfg.result_dict[reg_cfg.task_type]["train"][category].append(self.performance) + reg_cfg.update_result(test_type, self.performance, is_anomaly=True, category=category) assert test_result["passed"] is True, test_result["log"] @@ -118,6 +117,8 @@ def test_otx_train(self, reg_cfg, template, tmp_dir_path, category): def test_otx_train_kpi_test(self, reg_cfg, template, category): """KPI tests: measure the train+val time and evaluation time and compare with criteria.""" performance = reg_cfg.get_template_performance(template, category=category) + if performance is None: + pytest.skip(reason="Cannot find performance data from results.") # Compare train+val time with the KPI criteria. 
kpi_train_result = regression_train_time_testing( @@ -142,6 +143,7 @@ def test_otx_train_kpi_test(self, reg_cfg, template, category): def test_otx_export_eval_openvino(self, reg_cfg, template, tmp_dir_path, category): if category in ["metal_nut", "screw"]: pytest.skip("Issue#2189: Anomaly task sometimes shows performance drop") + test_type = "export" self.performance[template.name] = {} category_data_args = self._apply_category(reg_cfg.args, category) @@ -157,7 +159,7 @@ def test_otx_export_eval_openvino(self, reg_cfg, template, tmp_dir_path, categor reg_cfg.otx_dir, category_data_args, threshold=0.05, - criteria=reg_cfg.config_dict["regression_criteria"]["export"][category], + criteria=reg_cfg.config_dict["regression_criteria"][test_type][category], reg_threshold=0.10, result_dict=self.performance[template.name], ) @@ -165,7 +167,7 @@ def test_otx_export_eval_openvino(self, reg_cfg, template, tmp_dir_path, categor self.performance[template.name][TIME_LOG["export_time"]] = round(export_elapsed_time, 3) self.performance[template.name][TIME_LOG["export_eval_time"]] = round(export_eval_elapsed_time, 3) - reg_cfg.result_dict[reg_cfg.task_type]["export"][category].append(self.performance) + reg_cfg.update_result(test_type, self.performance, is_anomaly=True, category=category) assert test_result["passed"] is True, test_result["log"] @@ -175,6 +177,7 @@ def test_otx_export_eval_openvino(self, reg_cfg, template, tmp_dir_path, categor def test_otx_deploy_eval_deployment(self, reg_cfg, template, tmp_dir_path, category): if category in ["metal_nut", "screw"]: pytest.skip("Issue#2189: Anomaly task sometimes shows performance drop") + test_type = "deploy" self.performance[template.name] = {} category_data_args = self._apply_category(reg_cfg.args, category) @@ -190,7 +193,7 @@ def test_otx_deploy_eval_deployment(self, reg_cfg, template, tmp_dir_path, categ reg_cfg.otx_dir, category_data_args, threshold=0.0, - criteria=reg_cfg.config_dict["regression_criteria"]["deploy"][category], + criteria=reg_cfg.config_dict["regression_criteria"][test_type][category], reg_threshold=0.10, result_dict=self.performance[template.name], ) @@ -198,7 +201,7 @@ def test_otx_deploy_eval_deployment(self, reg_cfg, template, tmp_dir_path, categ self.performance[template.name][TIME_LOG["deploy_time"]] = round(deploy_elapsed_time, 3) self.performance[template.name][TIME_LOG["deploy_eval_time"]] = round(deploy_eval_elapsed_time, 3) - reg_cfg.result_dict[reg_cfg.task_type]["deploy"][category].append(self.performance) + reg_cfg.update_result(test_type, self.performance, is_anomaly=True, category=category) assert test_result["passed"] is True, test_result["log"] @@ -208,6 +211,7 @@ def test_otx_deploy_eval_deployment(self, reg_cfg, template, tmp_dir_path, categ def test_nncf_optimize_eval(self, reg_cfg, template, tmp_dir_path, category): if category in ["screw"]: pytest.skip("Issue#2189: Anomaly task sometimes shows performance drop") + test_type = "nncf" self.performance[template.name] = {} category_data_args = self._apply_category(reg_cfg.args, category) @@ -226,7 +230,7 @@ def test_nncf_optimize_eval(self, reg_cfg, template, tmp_dir_path, category): reg_cfg.otx_dir, category_data_args, threshold=0.01, - criteria=reg_cfg.config_dict["regression_criteria"]["nncf"][category], + criteria=reg_cfg.config_dict["regression_criteria"][test_type][category], reg_threshold=0.10, result_dict=self.performance[template.name], ) @@ -234,7 +238,7 @@ def test_nncf_optimize_eval(self, reg_cfg, template, tmp_dir_path, category): 
self.performance[template.name][TIME_LOG["nncf_time"]] = round(nncf_elapsed_time, 3) self.performance[template.name][TIME_LOG["nncf_eval_time"]] = round(nncf_eval_elapsed_time, 3) - reg_cfg.result_dict[reg_cfg.task_type]["nncf"][category].append(self.performance) + reg_cfg.update_result(test_type, self.performance, is_anomaly=True, category=category) assert test_result["passed"] is True, test_result["log"] @@ -244,6 +248,7 @@ def test_nncf_optimize_eval(self, reg_cfg, template, tmp_dir_path, category): def test_ptq_optimize_eval(self, reg_cfg, template, tmp_dir_path, category): if category in ["metal_nut", "screw"]: pytest.skip("Issue#2189: Anomaly task sometimes shows performance drop") + test_type = "ptq" self.performance[template.name] = {} category_data_args = self._apply_category(reg_cfg.args, category) @@ -258,7 +263,7 @@ def test_ptq_optimize_eval(self, reg_cfg, template, tmp_dir_path, category): tmp_dir_path, reg_cfg.otx_dir, category_data_args, - criteria=reg_cfg.config_dict["regression_criteria"]["ptq"][category], + criteria=reg_cfg.config_dict["regression_criteria"][test_type][category], reg_threshold=0.10, result_dict=self.performance[template.name], ) @@ -266,6 +271,6 @@ def test_ptq_optimize_eval(self, reg_cfg, template, tmp_dir_path, category): self.performance[template.name][TIME_LOG["ptq_time"]] = round(ptq_elapsed_time, 3) self.performance[template.name][TIME_LOG["ptq_eval_time"]] = round(ptq_eval_elapsed_time, 3) - reg_cfg.result_dict[reg_cfg.task_type]["ptq"][category].append(self.performance) + reg_cfg.update_result(test_type, self.performance, is_anomaly=True, category=category) assert test_result["passed"] is True, test_result["log"] diff --git a/tests/regression/classification/test_classification.py b/tests/regression/classification/test_classification.py index b03c91d2f70..83a7c26627f 100644 --- a/tests/regression/classification/test_classification.py +++ b/tests/regression/classification/test_classification.py @@ -53,20 +53,19 @@ class TestRegressionMultiClassClassification: @classmethod @pytest.fixture(scope="class") def reg_cfg(cls, tmp_dir_path): + results_root = os.environ.get("REG_RESULTS_ROOT", tmp_dir_path) cls.reg_cfg = RegressionTestConfig( cls.TASK_TYPE, cls.TRAIN_TYPE, cls.LABEL_TYPE, os.getcwd(), train_params=cls.TRAIN_PARAMS, - tmp_results_root=tmp_dir_path, + results_root=results_root, ) yield cls.reg_cfg - print(f"writting regression result to {cls.reg_cfg.result_dir}/result_{cls.TRAIN_TYPE}_{cls.LABEL_TYPE}.json") - with open(f"{cls.reg_cfg.result_dir}/result_{cls.TRAIN_TYPE}_{cls.LABEL_TYPE}.json", "w") as result_file: - json.dump(cls.reg_cfg.result_dict, result_file, indent=4) + cls.reg_cfg.dump_result_dict() def setup_method(self): self.performance = {} @@ -74,6 +73,7 @@ def setup_method(self): @e2e_pytest_component @pytest.mark.parametrize("template", templates, ids=templates_ids) def test_otx_train(self, reg_cfg, template, tmp_dir_path): + test_type = "train" self.performance[template.name] = {} tmp_dir_path = tmp_dir_path / "multi_class_cls" @@ -87,14 +87,14 @@ def test_otx_train(self, reg_cfg, template, tmp_dir_path): tmp_dir_path, reg_cfg.otx_dir, reg_cfg.args, - reg_cfg.config_dict["regression_criteria"]["train"], + reg_cfg.config_dict["regression_criteria"][test_type], self.performance[template.name], ) infer_elapsed_time = timer() - infer_start_time self.performance[template.name][TIME_LOG["train_time"]] = round(train_elapsed_time, 3) self.performance[template.name][TIME_LOG["infer_time"]] = round(infer_elapsed_time, 3) - 
reg_cfg.result_dict[reg_cfg.task_type][reg_cfg.label_type][reg_cfg.train_type]["train"].append(self.performance) + reg_cfg.update_result(test_type, self.performance) assert test_result["passed"] is True, test_result["log"] @@ -102,6 +102,8 @@ def test_otx_train(self, reg_cfg, template, tmp_dir_path): @pytest.mark.parametrize("template", templates, ids=templates_ids) def test_otx_train_kpi_test(self, reg_cfg, template): performance = reg_cfg.get_template_performance(template) + if performance is None: + pytest.skip(reason="Cannot find performance data from results.") kpi_train_result = regression_train_time_testing( train_time_criteria=reg_cfg.config_dict["kpi_e2e_train_time_criteria"]["train"], @@ -121,7 +123,10 @@ def test_otx_train_kpi_test(self, reg_cfg, template): @e2e_pytest_component @pytest.mark.parametrize("template", templates, ids=templates_ids) def test_otx_train_cls_incr(self, reg_cfg, template, tmp_dir_path): + if template.name == "DeiT-Tiny": + pytest.skip(reason="Issue#2567: error while calculating IB loss for DeiT-Tiny") train_type = "class_incr" + test_type = "train" self.performance[template.name] = {} sl_template_work_dir = get_template_dir(template, tmp_dir_path / "multi_class_cls") @@ -146,14 +151,14 @@ def test_otx_train_cls_incr(self, reg_cfg, template, tmp_dir_path): tmp_dir_path, reg_cfg.otx_dir, args_cls_incr, - config_cls_incr["regression_criteria"]["train"], + config_cls_incr["regression_criteria"][test_type], self.performance[template.name], ) infer_elapsed_time = timer() - infer_start_time self.performance[template.name][TIME_LOG["train_time"]] = round(train_elapsed_time, 3) self.performance[template.name][TIME_LOG["infer_time"]] = round(infer_elapsed_time, 3) - reg_cfg.result_dict[reg_cfg.task_type][reg_cfg.label_type][train_type]["train"].append(self.performance) + reg_cfg.update_result(test_type, self.performance, train_type=train_type) assert test_result["passed"] is True, test_result["log"] @@ -164,6 +169,8 @@ def test_otx_train_cls_incr_kpi_test(self, reg_cfg, template): config_cls_incr = reg_cfg.load_config(train_type=train_type) performance = reg_cfg.get_template_performance(template, train_type=train_type) + if performance is None: + pytest.skip(reason="Cannot find performance data from results.") kpi_train_result = regression_train_time_testing( train_time_criteria=config_cls_incr["kpi_e2e_train_time_criteria"]["train"], @@ -184,6 +191,7 @@ def test_otx_train_cls_incr_kpi_test(self, reg_cfg, template): @pytest.mark.parametrize("template", templates, ids=templates_ids) def test_otx_train_semisl(self, reg_cfg, template, tmp_dir_path): train_type = "semi_supervised" + test_type = "train" self.performance[template.name] = {} tmp_dir_path = tmp_dir_path / "multi_class_cls/test_semisl" @@ -211,14 +219,14 @@ def test_otx_train_semisl(self, reg_cfg, template, tmp_dir_path): tmp_dir_path, reg_cfg.otx_dir, args_semisl, - config_semisl["regression_criteria"]["train"], + config_semisl["regression_criteria"][test_type], self.performance[template.name], ) infer_elapsed_time = timer() - infer_start_time self.performance[template.name][TIME_LOG["train_time"]] = round(train_elapsed_time, 3) self.performance[template.name][TIME_LOG["infer_time"]] = round(infer_elapsed_time, 3) - reg_cfg.result_dict[reg_cfg.task_type][reg_cfg.label_type][train_type]["train"].append(self.performance) + reg_cfg.update_result(test_type, self.performance, train_type=train_type) assert test_result["passed"] is True, test_result["log"] @@ -229,6 +237,8 @@ def test_otx_train_semisl_kpi_test(self,
reg_cfg, template): config_semisl = reg_cfg.load_config(train_type=train_type) performance = reg_cfg.get_template_performance(template, train_type=train_type) + if performance is None: + pytest.skip(reason="Cannot find performance data from results.") kpi_train_result = regression_train_time_testing( train_time_criteria=config_semisl["kpi_e2e_train_time_criteria"]["train"], @@ -251,6 +261,7 @@ def test_otx_train_selfsl(self, reg_cfg, template, tmp_dir_path): if template.name == "DeiT-Tiny": pytest.skip(reason="Self-SL for ViT template is not supported yet.") train_type = "self_supervised" + test_type = "train" self.performance[template.name] = {} tmp_dir_path = tmp_dir_path / "multi_class_cls/test_selfsl" @@ -295,14 +306,14 @@ def test_otx_train_selfsl(self, reg_cfg, template, tmp_dir_path): new_tmp_dir_path, reg_cfg.otx_dir, args_selfsl, - config_selfsl["regression_criteria"]["train"], + config_selfsl["regression_criteria"][test_type], self.performance[template.name], ) infer_elapsed_time = timer() - infer_start_time self.performance[template.name][TIME_LOG["train_time"]] = round(train_elapsed_time, 3) self.performance[template.name][TIME_LOG["infer_time"]] = round(infer_elapsed_time, 3) - reg_cfg.result_dict[reg_cfg.task_type][reg_cfg.label_type][train_type]["train"].append(self.performance) + reg_cfg.update_result(test_type, self.performance, train_type=train_type) assert test_result["passed"] is True, test_result["log"] @@ -313,6 +324,8 @@ def test_otx_train_selfsl_kpi_test(self, reg_cfg, template): config_selfsl = reg_cfg.load_config(train_type=train_type) performance = reg_cfg.get_template_performance(template, train_type=train_type) + if performance is None: + pytest.skip(reason="Cannot find performance data from results.") kpi_train_result = regression_train_time_testing( train_time_criteria=config_selfsl["kpi_e2e_train_time_criteria"]["train"], @@ -332,6 +345,7 @@ def test_otx_train_selfsl_kpi_test(self, reg_cfg, template): @e2e_pytest_component @pytest.mark.parametrize("template", templates, ids=templates_ids) def test_otx_export_eval_openvino(self, reg_cfg, template, tmp_dir_path): + test_type = "export" self.performance[template.name] = {} tmp_dir_path = tmp_dir_path / "multi_class_cls" @@ -346,7 +360,7 @@ def test_otx_export_eval_openvino(self, reg_cfg, template, tmp_dir_path): reg_cfg.otx_dir, reg_cfg.args, threshold=0.05, - criteria=reg_cfg.config_dict["regression_criteria"]["export"], + criteria=reg_cfg.config_dict["regression_criteria"][test_type], reg_threshold=0.10, result_dict=self.performance[template.name], ) @@ -354,15 +368,14 @@ def test_otx_export_eval_openvino(self, reg_cfg, template, tmp_dir_path): self.performance[template.name][TIME_LOG["export_time"]] = round(export_elapsed_time, 3) self.performance[template.name][TIME_LOG["export_eval_time"]] = round(export_eval_elapsed_time, 3) - reg_cfg.result_dict[reg_cfg.task_type][reg_cfg.label_type][reg_cfg.train_type]["export"].append( - self.performance - ) + reg_cfg.update_result(test_type, self.performance) assert test_result["passed"] is True, test_result["log"] @e2e_pytest_component @pytest.mark.parametrize("template", templates, ids=templates_ids) def test_otx_deploy_eval_deployment(self, reg_cfg, template, tmp_dir_path): + test_type = "deploy" self.performance[template.name] = {} tmp_dir_path = tmp_dir_path / "multi_class_cls" @@ -377,7 +390,7 @@ def test_otx_deploy_eval_deployment(self, reg_cfg, template, tmp_dir_path): reg_cfg.otx_dir, reg_cfg.args, threshold=0.0, - 
criteria=reg_cfg.config_dict["regression_criteria"]["deploy"], + criteria=reg_cfg.config_dict["regression_criteria"][test_type], reg_threshold=0.10, result_dict=self.performance[template.name], ) @@ -385,15 +398,14 @@ def test_otx_deploy_eval_deployment(self, reg_cfg, template, tmp_dir_path): self.performance[template.name][TIME_LOG["deploy_time"]] = round(deploy_elapsed_time, 3) self.performance[template.name][TIME_LOG["deploy_eval_time"]] = round(deploy_eval_elapsed_time, 3) - reg_cfg.result_dict[reg_cfg.task_type][reg_cfg.label_type][reg_cfg.train_type]["deploy"].append( - self.performance - ) + reg_cfg.update_result(test_type, self.performance) assert test_result["passed"] is True, test_result["log"] @e2e_pytest_component @pytest.mark.parametrize("template", templates, ids=templates_ids) def test_nncf_optimize_eval(self, reg_cfg, template, tmp_dir_path): + test_type = "nncf" self.performance[template.name] = {} tmp_dir_path = tmp_dir_path / "multi_class_cls" @@ -411,7 +423,7 @@ def test_nncf_optimize_eval(self, reg_cfg, template, tmp_dir_path): reg_cfg.otx_dir, reg_cfg.args, threshold=0.01, - criteria=reg_cfg.config_dict["regression_criteria"]["nncf"], + criteria=reg_cfg.config_dict["regression_criteria"][test_type], reg_threshold=0.10, result_dict=self.performance[template.name], ) @@ -419,13 +431,14 @@ def test_nncf_optimize_eval(self, reg_cfg, template, tmp_dir_path): self.performance[template.name][TIME_LOG["nncf_time"]] = round(nncf_elapsed_time, 3) self.performance[template.name][TIME_LOG["nncf_eval_time"]] = round(nncf_eval_elapsed_time, 3) - reg_cfg.result_dict[reg_cfg.task_type][reg_cfg.label_type][reg_cfg.train_type]["nncf"].append(self.performance) + reg_cfg.update_result(test_type, self.performance) assert test_result["passed"] is True, test_result["log"] @e2e_pytest_component @pytest.mark.parametrize("template", templates, ids=templates_ids) def test_ptq_optimize_eval(self, reg_cfg, template, tmp_dir_path): + test_type = "ptq" self.performance[template.name] = {} tmp_dir_path = tmp_dir_path / "multi_class_cls" @@ -439,7 +452,7 @@ def test_ptq_optimize_eval(self, reg_cfg, template, tmp_dir_path): tmp_dir_path, reg_cfg.otx_dir, reg_cfg.args, - criteria=reg_cfg.config_dict["regression_criteria"]["ptq"], + criteria=reg_cfg.config_dict["regression_criteria"][test_type], reg_threshold=0.10, result_dict=self.performance[template.name], ) @@ -447,7 +460,7 @@ def test_ptq_optimize_eval(self, reg_cfg, template, tmp_dir_path): self.performance[template.name][TIME_LOG["ptq_time"]] = round(ptq_elapsed_time, 3) self.performance[template.name][TIME_LOG["ptq_eval_time"]] = round(ptq_eval_elapsed_time, 3) - reg_cfg.result_dict[reg_cfg.task_type][reg_cfg.label_type][reg_cfg.train_type]["ptq"].append(self.performance) + reg_cfg.update_result(test_type, self.performance) assert test_result["passed"] is True, test_result["log"] @@ -467,20 +480,19 @@ class TestRegressionMultiLabelClassification: @classmethod @pytest.fixture(scope="class") def reg_cfg(cls, tmp_dir_path): + results_root = os.environ.get("REG_RESULTS_ROOT", tmp_dir_path) cls.reg_cfg = RegressionTestConfig( cls.TASK_TYPE, cls.TRAIN_TYPE, cls.LABEL_TYPE, os.getcwd(), train_params=cls.TRAIN_PARAMS, - tmp_results_root=tmp_dir_path, + results_root=results_root, ) yield cls.reg_cfg - print(f"writting regression result to {cls.reg_cfg.result_dir}/result_{cls.TRAIN_TYPE}_{cls.LABEL_TYPE}.json") - with open(f"{cls.reg_cfg.result_dir}/result_{cls.TRAIN_TYPE}_{cls.LABEL_TYPE}.json", "w") as result_file: - json.dump(cls.reg_cfg.result_dict, 
result_file, indent=4) + cls.reg_cfg.dump_result_dict() def setup_method(self): self.performance = {} @@ -488,6 +500,7 @@ def setup_method(self): @e2e_pytest_component @pytest.mark.parametrize("template", templates, ids=templates_ids) def test_otx_train(self, reg_cfg, template, tmp_dir_path): + test_type = "train" self.performance[template.name] = {} tmp_dir_path = tmp_dir_path / "multi_label_cls" @@ -501,14 +514,14 @@ def test_otx_train(self, reg_cfg, template, tmp_dir_path): tmp_dir_path, reg_cfg.otx_dir, reg_cfg.args, - reg_cfg.config_dict["regression_criteria"]["train"], + reg_cfg.config_dict["regression_criteria"][test_type], self.performance[template.name], ) infer_elapsed_time = timer() - infer_start_time self.performance[template.name][TIME_LOG["train_time"]] = round(train_elapsed_time, 3) self.performance[template.name][TIME_LOG["infer_time"]] = round(infer_elapsed_time, 3) - reg_cfg.result_dict[reg_cfg.task_type][reg_cfg.label_type][reg_cfg.train_type]["train"].append(self.performance) + reg_cfg.update_result(test_type, self.performance) assert test_result["passed"] is True, test_result["log"] @@ -516,6 +529,8 @@ def test_otx_train(self, reg_cfg, template, tmp_dir_path): @pytest.mark.parametrize("template", templates, ids=templates_ids) def test_otx_train_kpi_test(self, reg_cfg, template): performance = reg_cfg.get_template_performance(template) + if performance is None: + pytest.skip(reason="Cannot find performance data from results.") kpi_train_result = regression_train_time_testing( train_time_criteria=reg_cfg.config_dict["kpi_e2e_train_time_criteria"]["train"], @@ -536,6 +551,7 @@ def test_otx_train_kpi_test(self, reg_cfg, template): @pytest.mark.parametrize("template", templates, ids=templates_ids) def test_otx_train_cls_incr(self, reg_cfg, template, tmp_dir_path): train_type = "class_incr" + test_type = "train" self.performance[template.name] = {} sl_template_work_dir = get_template_dir(template, tmp_dir_path / "multi_label_cls") @@ -560,14 +576,14 @@ def test_otx_train_cls_incr(self, reg_cfg, template, tmp_dir_path): tmp_dir_path, reg_cfg.otx_dir, args_cls_incr, - config_cls_incr["regression_criteria"]["train"], + config_cls_incr["regression_criteria"][test_type], self.performance[template.name], ) infer_elapsed_time = timer() - infer_start_time self.performance[template.name][TIME_LOG["train_time"]] = round(train_elapsed_time, 3) self.performance[template.name][TIME_LOG["infer_time"]] = round(infer_elapsed_time, 3) - reg_cfg.result_dict[reg_cfg.task_type][reg_cfg.label_type][train_type]["train"].append(self.performance) + reg_cfg.update_result(test_type, self.performance, train_type=train_type) assert test_result["passed"] is True, test_result["log"] @@ -578,6 +594,8 @@ def test_otx_train_cls_incr_kpi_test(self, reg_cfg, template): config_cls_incr = reg_cfg.load_config(train_type=train_type) performance = reg_cfg.get_template_performance(template, train_type=train_type) + if performance is None: + pytest.skip(reason="Cannot find performance data from results.") kpi_train_result = regression_train_time_testing( train_time_criteria=config_cls_incr["kpi_e2e_train_time_criteria"]["train"], @@ -597,6 +615,7 @@ def test_otx_train_cls_incr_kpi_test(self, reg_cfg, template): @e2e_pytest_component @pytest.mark.parametrize("template", templates, ids=templates_ids) def test_otx_export_eval_openvino(self, reg_cfg, template, tmp_dir_path): + test_type = "export" self.performance[template.name] = {} tmp_dir_path = tmp_dir_path / "multi_label_cls" @@ -611,7 +630,7 @@ def 
test_otx_export_eval_openvino(self, reg_cfg, template, tmp_dir_path): reg_cfg.otx_dir, reg_cfg.args, threshold=0.05, - criteria=reg_cfg.config_dict["regression_criteria"]["export"], + criteria=reg_cfg.config_dict["regression_criteria"][test_type], reg_threshold=0.10, result_dict=self.performance[template.name], ) @@ -619,15 +638,14 @@ def test_otx_export_eval_openvino(self, reg_cfg, template, tmp_dir_path): self.performance[template.name][TIME_LOG["export_time"]] = round(export_elapsed_time, 3) self.performance[template.name][TIME_LOG["export_eval_time"]] = round(export_eval_elapsed_time, 3) - reg_cfg.result_dict[reg_cfg.task_type][reg_cfg.label_type][reg_cfg.train_type]["export"].append( - self.performance - ) + reg_cfg.update_result(test_type, self.performance) assert test_result["passed"] is True, test_result["log"] @e2e_pytest_component @pytest.mark.parametrize("template", templates, ids=templates_ids) def test_otx_deploy_eval_deployment(self, reg_cfg, template, tmp_dir_path): + test_type = "deploy" self.performance[template.name] = {} tmp_dir_path = tmp_dir_path / "multi_label_cls" @@ -642,7 +660,7 @@ def test_otx_deploy_eval_deployment(self, reg_cfg, template, tmp_dir_path): reg_cfg.otx_dir, reg_cfg.args, threshold=0.0, - criteria=reg_cfg.config_dict["regression_criteria"]["deploy"], + criteria=reg_cfg.config_dict["regression_criteria"][test_type], reg_threshold=0.10, result_dict=self.performance[template.name], ) @@ -650,15 +668,14 @@ def test_otx_deploy_eval_deployment(self, reg_cfg, template, tmp_dir_path): self.performance[template.name][TIME_LOG["deploy_time"]] = round(deploy_elapsed_time, 3) self.performance[template.name][TIME_LOG["deploy_eval_time"]] = round(deploy_eval_elapsed_time, 3) - reg_cfg.result_dict[reg_cfg.task_type][reg_cfg.label_type][reg_cfg.train_type]["deploy"].append( - self.performance - ) + reg_cfg.update_result(test_type, self.performance) assert test_result["passed"] is True, test_result["log"] @e2e_pytest_component @pytest.mark.parametrize("template", templates, ids=templates_ids) def test_nncf_optimize_eval(self, reg_cfg, template, tmp_dir_path): + test_type = "nncf" self.performance[template.name] = {} tmp_dir_path = tmp_dir_path / "multi_label_cls" @@ -676,7 +693,7 @@ def test_nncf_optimize_eval(self, reg_cfg, template, tmp_dir_path): reg_cfg.otx_dir, reg_cfg.args, threshold=0.01, - criteria=reg_cfg.config_dict["regression_criteria"]["nncf"], + criteria=reg_cfg.config_dict["regression_criteria"][test_type], reg_threshold=0.10, result_dict=self.performance[template.name], ) @@ -684,13 +701,14 @@ def test_nncf_optimize_eval(self, reg_cfg, template, tmp_dir_path): self.performance[template.name][TIME_LOG["nncf_time"]] = round(nncf_elapsed_time, 3) self.performance[template.name][TIME_LOG["nncf_eval_time"]] = round(nncf_eval_elapsed_time, 3) - reg_cfg.result_dict[reg_cfg.task_type][reg_cfg.label_type][reg_cfg.train_type]["nncf"].append(self.performance) + reg_cfg.update_result(test_type, self.performance) assert test_result["passed"] is True, test_result["log"] @e2e_pytest_component @pytest.mark.parametrize("template", templates, ids=templates_ids) def test_ptq_optimize_eval(self, reg_cfg, template, tmp_dir_path): + test_type = "ptq" self.performance[template.name] = {} tmp_dir_path = tmp_dir_path / "multi_label_cls" @@ -704,7 +722,7 @@ def test_ptq_optimize_eval(self, reg_cfg, template, tmp_dir_path): tmp_dir_path, reg_cfg.otx_dir, reg_cfg.args, - criteria=reg_cfg.config_dict["regression_criteria"]["ptq"], + 
criteria=reg_cfg.config_dict["regression_criteria"][test_type], reg_threshold=0.10, result_dict=self.performance[template.name], ) @@ -712,7 +730,7 @@ def test_ptq_optimize_eval(self, reg_cfg, template, tmp_dir_path): self.performance[template.name][TIME_LOG["ptq_time"]] = round(ptq_elapsed_time, 3) self.performance[template.name][TIME_LOG["ptq_eval_time"]] = round(ptq_eval_elapsed_time, 3) - reg_cfg.result_dict[reg_cfg.task_type][reg_cfg.label_type][reg_cfg.train_type]["ptq"].append(self.performance) + reg_cfg.update_result(test_type, self.performance) assert test_result["passed"] is True, test_result["log"] @@ -732,20 +750,19 @@ class TestRegressionHierarchicalLabelClassification: @classmethod @pytest.fixture(scope="class") def reg_cfg(cls, tmp_dir_path): + results_root = os.environ.get("REG_RESULTS_ROOT", tmp_dir_path) cls.reg_cfg = RegressionTestConfig( cls.TASK_TYPE, cls.TRAIN_TYPE, cls.LABEL_TYPE, os.getcwd(), train_params=cls.TRAIN_PARAMS, - tmp_results_root=tmp_dir_path, + results_root=results_root, ) yield cls.reg_cfg - print(f"writting regression result to {cls.reg_cfg.result_dir}/result_{cls.TRAIN_TYPE}_{cls.LABEL_TYPE}.json") - with open(f"{cls.reg_cfg.result_dir}/result_{cls.TRAIN_TYPE}_{cls.LABEL_TYPE}.json", "w") as result_file: - json.dump(cls.reg_cfg.result_dict, result_file, indent=4) + cls.reg_cfg.dump_result_dict() def setup_method(self): self.performance = {} @@ -753,6 +770,7 @@ def setup_method(self): @e2e_pytest_component @pytest.mark.parametrize("template", templates, ids=templates_ids) def test_otx_train(self, reg_cfg, template, tmp_dir_path): + test_type = "train" self.performance[template.name] = {} tmp_dir_path = tmp_dir_path / "h_label_cls" @@ -766,14 +784,14 @@ def test_otx_train(self, reg_cfg, template, tmp_dir_path): tmp_dir_path, reg_cfg.otx_dir, reg_cfg.args, - reg_cfg.config_dict["regression_criteria"]["train"], + reg_cfg.config_dict["regression_criteria"][test_type], self.performance[template.name], ) infer_elapsed_time = timer() - infer_start_time self.performance[template.name][TIME_LOG["train_time"]] = round(train_elapsed_time, 3) self.performance[template.name][TIME_LOG["infer_time"]] = round(infer_elapsed_time, 3) - reg_cfg.result_dict[reg_cfg.task_type][reg_cfg.label_type][reg_cfg.train_type]["train"].append(self.performance) + reg_cfg.update_result(test_type, self.performance) assert test_result["passed"] is True, test_result["log"] @@ -781,6 +799,8 @@ def test_otx_train(self, reg_cfg, template, tmp_dir_path): @pytest.mark.parametrize("template", templates, ids=templates_ids) def test_otx_train_kpi_test(self, reg_cfg, template): performance = reg_cfg.get_template_performance(template) + if performance is None: + pytest.skip(reason="Cannot find performance data from results.") kpi_train_result = regression_train_time_testing( train_time_criteria=reg_cfg.config_dict["kpi_e2e_train_time_criteria"]["train"], @@ -800,6 +820,7 @@ def test_otx_train_kpi_test(self, reg_cfg, template): @e2e_pytest_component @pytest.mark.parametrize("template", templates, ids=templates_ids) def test_otx_export_eval_openvino(self, reg_cfg, template, tmp_dir_path): + test_type = "export" self.performance[template.name] = {} tmp_dir_path = tmp_dir_path / "h_label_cls" @@ -814,7 +835,7 @@ def test_otx_export_eval_openvino(self, reg_cfg, template, tmp_dir_path): reg_cfg.otx_dir, reg_cfg.args, threshold=0.05, - criteria=reg_cfg.config_dict["regression_criteria"]["export"], + criteria=reg_cfg.config_dict["regression_criteria"][test_type], reg_threshold=0.10, 
result_dict=self.performance[template.name], ) @@ -822,15 +843,13 @@ def test_otx_export_eval_openvino(self, reg_cfg, template, tmp_dir_path): self.performance[template.name][TIME_LOG["export_time"]] = round(export_elapsed_time, 3) self.performance[template.name][TIME_LOG["export_eval_time"]] = round(export_eval_elapsed_time, 3) - reg_cfg.result_dict[reg_cfg.task_type][reg_cfg.label_type][reg_cfg.train_type]["export"].append( - self.performance - ) - + reg_cfg.update_result(test_type, self.performance) assert test_result["passed"] is True, test_result["log"] @e2e_pytest_component @pytest.mark.parametrize("template", templates, ids=templates_ids) def test_otx_deploy_eval_deployment(self, reg_cfg, template, tmp_dir_path): + test_type = "deploy" self.performance[template.name] = {} tmp_dir_path = tmp_dir_path / "h_label_cls" @@ -845,7 +864,7 @@ def test_otx_deploy_eval_deployment(self, reg_cfg, template, tmp_dir_path): reg_cfg.otx_dir, reg_cfg.args, threshold=0.0, - criteria=reg_cfg.config_dict["regression_criteria"]["deploy"], + criteria=reg_cfg.config_dict["regression_criteria"][test_type], reg_threshold=0.10, result_dict=self.performance[template.name], ) @@ -853,15 +872,14 @@ def test_otx_deploy_eval_deployment(self, reg_cfg, template, tmp_dir_path): self.performance[template.name][TIME_LOG["deploy_time"]] = round(deploy_elapsed_time, 3) self.performance[template.name][TIME_LOG["deploy_eval_time"]] = round(deploy_eval_elapsed_time, 3) - reg_cfg.result_dict[reg_cfg.task_type][reg_cfg.label_type][reg_cfg.train_type]["deploy"].append( - self.performance - ) + reg_cfg.update_result(test_type, self.performance) assert test_result["passed"] is True, test_result["log"] @e2e_pytest_component @pytest.mark.parametrize("template", templates, ids=templates_ids) def test_nncf_optimize_eval(self, reg_cfg, template, tmp_dir_path): + test_type = "nncf" self.performance[template.name] = {} tmp_dir_path = tmp_dir_path / "h_label_cls" @@ -879,7 +897,7 @@ def test_nncf_optimize_eval(self, reg_cfg, template, tmp_dir_path): reg_cfg.otx_dir, reg_cfg.args, threshold=0.01, - criteria=reg_cfg.config_dict["regression_criteria"]["nncf"], + criteria=reg_cfg.config_dict["regression_criteria"][test_type], reg_threshold=0.10, result_dict=self.performance[template.name], ) @@ -887,13 +905,14 @@ def test_nncf_optimize_eval(self, reg_cfg, template, tmp_dir_path): self.performance[template.name][TIME_LOG["nncf_time"]] = round(nncf_elapsed_time, 3) self.performance[template.name][TIME_LOG["nncf_eval_time"]] = round(nncf_eval_elapsed_time, 3) - reg_cfg.result_dict[reg_cfg.task_type][reg_cfg.label_type][reg_cfg.train_type]["nncf"].append(self.performance) + reg_cfg.update_result(test_type, self.performance) assert test_result["passed"] is True, test_result["log"] @e2e_pytest_component @pytest.mark.parametrize("template", templates, ids=templates_ids) def test_ptq_optimize_eval(self, reg_cfg, template, tmp_dir_path): + test_type = "ptq" self.performance[template.name] = {} tmp_dir_path = tmp_dir_path / "h_label_cls" @@ -907,7 +926,7 @@ def test_ptq_optimize_eval(self, reg_cfg, template, tmp_dir_path): tmp_dir_path, reg_cfg.otx_dir, reg_cfg.args, - criteria=reg_cfg.config_dict["regression_criteria"]["ptq"], + criteria=reg_cfg.config_dict["regression_criteria"][test_type], reg_threshold=0.10, result_dict=self.performance[template.name], ) @@ -915,7 +934,7 @@ def test_ptq_optimize_eval(self, reg_cfg, template, tmp_dir_path): self.performance[template.name][TIME_LOG["ptq_time"]] = round(ptq_elapsed_time, 3) 
self.performance[template.name][TIME_LOG["ptq_eval_time"]] = round(ptq_eval_elapsed_time, 3) - reg_cfg.result_dict[reg_cfg.task_type][reg_cfg.label_type][reg_cfg.train_type]["ptq"].append(self.performance) + reg_cfg.update_result(test_type, self.performance) assert test_result["passed"] is True, test_result["log"] @@ -940,20 +959,19 @@ class TestRegressionSupconClassification: @classmethod @pytest.fixture(scope="class") def reg_cfg(cls, tmp_dir_path): + results_root = os.environ.get("REG_RESULTS_ROOT", tmp_dir_path) cls.reg_cfg = RegressionTestConfig( cls.TASK_TYPE, cls.TRAIN_TYPE, cls.LABEL_TYPE, os.getcwd(), train_params=cls.TRAIN_PARAMS, - tmp_results_root=tmp_dir_path, + results_root=results_root, ) yield cls.reg_cfg - print(f"writting regression result to {cls.reg_cfg.result_dir}/result_{cls.TRAIN_TYPE}_{cls.LABEL_TYPE}.json") - with open(f"{cls.reg_cfg.result_dir}/result_{cls.TRAIN_TYPE}_{cls.LABEL_TYPE}.json", "w") as result_file: - json.dump(cls.reg_cfg.result_dict, result_file, indent=4) + cls.reg_cfg.dump_result_dict() def setup_method(self): self.performance = {} @@ -961,6 +979,9 @@ def setup_method(self): @e2e_pytest_component @pytest.mark.parametrize("template", templates, ids=templates_ids) def test_otx_train(self, reg_cfg, template, tmp_dir_path): + if template.name == "DeiT-Tiny": + pytest.skip(reason="Supcon for ViT template is not supported yet.") + test_type = "train" self.performance[template.name] = {} tmp_dir_path = tmp_dir_path / "supcon_cls" @@ -977,14 +998,14 @@ def test_otx_train(self, reg_cfg, template, tmp_dir_path): tmp_dir_path, reg_cfg.otx_dir, reg_cfg.args, - reg_cfg.config_dict["regression_criteria"]["train"], + reg_cfg.config_dict["regression_criteria"][test_type], self.performance[template.name], ) infer_elapsed_time = timer() - infer_start_time self.performance[template.name][TIME_LOG["train_time"]] = round(train_elapsed_time, 3) self.performance[template.name][TIME_LOG["infer_time"]] = round(infer_elapsed_time, 3) - reg_cfg.result_dict[reg_cfg.task_type][reg_cfg.label_type][reg_cfg.train_type]["train"].append(self.performance) + reg_cfg.update_result(test_type, self.performance) assert test_result["passed"] is True, test_result["log"] @@ -992,6 +1013,8 @@ def test_otx_train(self, reg_cfg, template, tmp_dir_path): @pytest.mark.parametrize("template", templates, ids=templates_ids) def test_otx_train_kpi_test(self, reg_cfg, template): performance = reg_cfg.get_template_performance(template) + if performance is None: + pytest.skip(reason="Cannot find performance data from results.") kpi_train_result = regression_train_time_testing( train_time_criteria=reg_cfg.config_dict["kpi_e2e_train_time_criteria"]["train"], diff --git a/tests/regression/conftest.py b/tests/regression/conftest.py index c1747a5dc53..37f1bc0d379 100644 --- a/tests/regression/conftest.py +++ b/tests/regression/conftest.py @@ -8,10 +8,13 @@ @pytest.fixture(autouse=True, scope="session") def run_regression_tests(tmp_dir_path): - print(f"tmp dir path = {tmp_dir_path}") + result_path = os.path.join(os.environ.get("REG_RESULTS_ROOT", tmp_dir_path), "reg_test_results") + print(f"reg results path = {result_path}") + if not os.path.exists(result_path): + os.makedirs(result_path) + yield - input_path = os.path.join(tmp_dir_path, "regression_test_results") output_path = os.environ.get("TOX_WORK_DIR", os.getcwd()) - summarize_results_data(input_path, output_path) + summarize_results_data(result_path, output_path) diff --git a/tests/regression/detection/test_detection.py 
b/tests/regression/detection/test_detection.py index 6ab44ced7fa..c6b5a508d26 100644 --- a/tests/regression/detection/test_detection.py +++ b/tests/regression/detection/test_detection.py @@ -52,20 +52,19 @@ class TestRegressionDetection: @classmethod @pytest.fixture(scope="class") def reg_cfg(cls, tmp_dir_path): + results_root = os.environ.get("REG_RESULTS_ROOT", tmp_dir_path) cls.reg_cfg = RegressionTestConfig( cls.TASK_TYPE, cls.TRAIN_TYPE, cls.LABEL_TYPE, os.getcwd(), train_params=cls.TRAIN_PARAMS, - tmp_results_root=tmp_dir_path, + results_root=results_root, ) yield cls.reg_cfg - print(f"writting regression result to {cls.reg_cfg.result_dir}/result_{cls.TRAIN_TYPE}_{cls.LABEL_TYPE}.json") - with open(f"{cls.reg_cfg.result_dir}/result_{cls.TRAIN_TYPE}_{cls.LABEL_TYPE}.json", "w") as result_file: - json.dump(cls.reg_cfg.result_dict, result_file, indent=4) + cls.reg_cfg.dump_result_dict() def setup_method(self): self.performance = {} @@ -73,6 +72,7 @@ def setup_method(self): @e2e_pytest_component @pytest.mark.parametrize("template", templates, ids=templates_ids) def test_otx_train(self, reg_cfg, template, tmp_dir_path): + test_type = "train" self.performance[template.name] = {} tmp_dir_path = tmp_dir_path / reg_cfg.task_type @@ -86,14 +86,14 @@ def test_otx_train(self, reg_cfg, template, tmp_dir_path): tmp_dir_path, reg_cfg.otx_dir, reg_cfg.args, - reg_cfg.config_dict["regression_criteria"]["train"], + reg_cfg.config_dict["regression_criteria"][test_type], self.performance[template.name], ) infer_elapsed_time = timer() - infer_start_time self.performance[template.name][TIME_LOG["train_time"]] = round(train_elapsed_time, 3) self.performance[template.name][TIME_LOG["infer_time"]] = round(infer_elapsed_time, 3) - reg_cfg.result_dict[reg_cfg.task_type][reg_cfg.label_type][reg_cfg.train_type]["train"].append(self.performance) + reg_cfg.update_result(test_type, self.performance) assert test_result["passed"] is True, test_result["log"] @@ -101,6 +101,8 @@ def test_otx_train(self, reg_cfg, template, tmp_dir_path): @pytest.mark.parametrize("template", templates, ids=templates_ids) def test_otx_train_kpi_test(self, reg_cfg, template): performance = reg_cfg.get_template_performance(template) + if performance is None: + pytest.skip(reason="Cannot find performance data from results.") kpi_train_result = regression_train_time_testing( train_time_criteria=reg_cfg.config_dict["kpi_e2e_train_time_criteria"]["train"], @@ -121,6 +123,7 @@ def test_otx_train_kpi_test(self, reg_cfg, template): @pytest.mark.parametrize("template", templates, ids=templates_ids) def test_otx_train_cls_incr(self, reg_cfg, template, tmp_dir_path): train_type = "class_incr" + test_type = "train" self.performance[template.name] = {} sl_template_work_dir = get_template_dir(template, tmp_dir_path / reg_cfg.task_type) @@ -152,7 +155,7 @@ def test_otx_train_cls_incr(self, reg_cfg, template, tmp_dir_path): self.performance[template.name][TIME_LOG["train_time"]] = round(train_elapsed_time, 3) self.performance[template.name][TIME_LOG["infer_time"]] = round(infer_elapsed_time, 3) - reg_cfg.result_dict[reg_cfg.task_type][reg_cfg.label_type][train_type]["train"].append(self.performance) + reg_cfg.update_result(test_type, self.performance, train_type=train_type) assert test_result["passed"] is True, test_result["log"] @@ -162,6 +165,8 @@ def test_otx_train_cls_incr_kpi_test(self, reg_cfg, template): train_type = "class_incr" config_cls_incr = reg_cfg.load_config(train_type=train_type) performance = reg_cfg.get_template_performance(template, 
train_type=train_type) + if performance is None: + pytest.skip(reason="Cannot find performance data from results.") kpi_train_result = regression_train_time_testing( train_time_criteria=config_cls_incr["kpi_e2e_train_time_criteria"]["train"], @@ -181,9 +186,10 @@ def test_otx_train_cls_incr_kpi_test(self, reg_cfg, template): @e2e_pytest_component @pytest.mark.parametrize("template", templates, ids=templates_ids) def test_otx_train_semisl(self, reg_cfg, template, tmp_dir_path): + train_type = "semi_supervised" + test_type = "train" self.performance[template.name] = {} - train_type = "semi_supervised" tmp_dir_path = tmp_dir_path / f"{reg_cfg.task_type}/test_semisl" config_semisl = reg_cfg.load_config(train_type=train_type) args_semisl = config_semisl["data_path"] @@ -215,7 +221,7 @@ def test_otx_train_semisl(self, reg_cfg, template, tmp_dir_path): self.performance[template.name][TIME_LOG["train_time"]] = round(train_elapsed_time, 3) self.performance[template.name][TIME_LOG["infer_time"]] = round(infer_elapsed_time, 3) - reg_cfg.result_dict[reg_cfg.task_type][reg_cfg.label_type][train_type]["train"].append(self.performance) + reg_cfg.update_result(test_type, self.performance, train_type=train_type) assert test_result["passed"] is True, test_result["log"] @@ -225,6 +231,8 @@ def test_otx_train_semisl_kpi_test(self, reg_cfg, template): train_type = "semi_supervised" config_semisl = reg_cfg.load_config(train_type=train_type) performance = reg_cfg.get_template_performance(template, train_type=train_type) + if performance is None: + pytest.skip(reason="Cannot find performance data from results.") kpi_train_result = regression_train_time_testing( train_time_criteria=config_semisl["kpi_e2e_train_time_criteria"]["train"], @@ -244,6 +252,7 @@ def test_otx_train_semisl_kpi_test(self, reg_cfg, template): @e2e_pytest_component @pytest.mark.parametrize("template", templates, ids=templates_ids) def test_otx_export_eval_openvino(self, reg_cfg, template, tmp_dir_path): + test_type = "export" self.performance[template.name] = {} tmp_dir_path = tmp_dir_path / reg_cfg.task_type @@ -258,7 +267,7 @@ def test_otx_export_eval_openvino(self, reg_cfg, template, tmp_dir_path): reg_cfg.otx_dir, reg_cfg.args, threshold=0.05, - criteria=reg_cfg.config_dict["regression_criteria"]["export"], + criteria=reg_cfg.config_dict["regression_criteria"][test_type], reg_threshold=0.10, result_dict=self.performance[template.name], ) @@ -266,15 +275,14 @@ def test_otx_export_eval_openvino(self, reg_cfg, template, tmp_dir_path): self.performance[template.name][TIME_LOG["export_time"]] = round(export_elapsed_time, 3) self.performance[template.name][TIME_LOG["export_eval_time"]] = round(export_eval_elapsed_time, 3) - reg_cfg.result_dict[reg_cfg.task_type][reg_cfg.label_type][reg_cfg.train_type]["export"].append( - self.performance - ) + reg_cfg.update_result(test_type, self.performance) assert test_result["passed"] is True, test_result["log"] @e2e_pytest_component @pytest.mark.parametrize("template", templates, ids=templates_ids) def test_otx_deploy_eval_deployment(self, reg_cfg, template, tmp_dir_path): + test_type = "deploy" self.performance[template.name] = {} tmp_dir_path = tmp_dir_path / reg_cfg.task_type @@ -289,7 +297,7 @@ def test_otx_deploy_eval_deployment(self, reg_cfg, template, tmp_dir_path): reg_cfg.otx_dir, reg_cfg.args, threshold=0.0, - criteria=reg_cfg.config_dict["regression_criteria"]["deploy"], + criteria=reg_cfg.config_dict["regression_criteria"][test_type], reg_threshold=0.10, 
result_dict=self.performance[template.name], ) @@ -297,15 +305,16 @@ def test_otx_deploy_eval_deployment(self, reg_cfg, template, tmp_dir_path): self.performance[template.name][TIME_LOG["deploy_time"]] = round(deploy_elapsed_time, 3) self.performance[template.name][TIME_LOG["deploy_eval_time"]] = round(deploy_eval_elapsed_time, 3) - reg_cfg.result_dict[reg_cfg.task_type][reg_cfg.label_type][reg_cfg.train_type]["deploy"].append( - self.performance - ) + reg_cfg.update_result(test_type, self.performance) assert test_result["passed"] is True, test_result["log"] @e2e_pytest_component @pytest.mark.parametrize("template", templates, ids=templates_ids) def test_nncf_optimize_eval(self, reg_cfg, template, tmp_dir_path): + if template.name == "YOLOX-S": + pytest.skip("Issue#2596: IndexError") + test_type = "nncf" self.performance[template.name] = {} tmp_dir_path = tmp_dir_path / reg_cfg.task_type @@ -323,7 +332,7 @@ def test_nncf_optimize_eval(self, reg_cfg, template, tmp_dir_path): reg_cfg.otx_dir, reg_cfg.args, threshold=0.01, - criteria=reg_cfg.config_dict["regression_criteria"]["nncf"], + criteria=reg_cfg.config_dict["regression_criteria"][test_type], reg_threshold=0.10, result_dict=self.performance[template.name], ) @@ -331,13 +340,14 @@ def test_nncf_optimize_eval(self, reg_cfg, template, tmp_dir_path): self.performance[template.name][TIME_LOG["nncf_time"]] = round(nncf_elapsed_time, 3) self.performance[template.name][TIME_LOG["nncf_eval_time"]] = round(nncf_eval_elapsed_time, 3) - reg_cfg.result_dict[reg_cfg.task_type][reg_cfg.label_type][reg_cfg.train_type]["nncf"].append(self.performance) + reg_cfg.update_result(test_type, self.performance) assert test_result["passed"] is True, test_result["log"] @e2e_pytest_component @pytest.mark.parametrize("template", templates, ids=templates_ids) def test_ptq_optimize_eval(self, reg_cfg, template, tmp_dir_path): + test_type = "ptq" self.performance[template.name] = {} tmp_dir_path = tmp_dir_path / reg_cfg.task_type @@ -351,7 +361,7 @@ def test_ptq_optimize_eval(self, reg_cfg, template, tmp_dir_path): tmp_dir_path, reg_cfg.otx_dir, reg_cfg.args, - criteria=reg_cfg.config_dict["regression_criteria"]["ptq"], + criteria=reg_cfg.config_dict["regression_criteria"][test_type], reg_threshold=0.10, result_dict=self.performance[template.name], ) @@ -359,6 +369,6 @@ def test_ptq_optimize_eval(self, reg_cfg, template, tmp_dir_path): self.performance[template.name][TIME_LOG["ptq_time"]] = round(ptq_elapsed_time, 3) self.performance[template.name][TIME_LOG["ptq_eval_time"]] = round(ptq_eval_elapsed_time, 3) - reg_cfg.result_dict[reg_cfg.task_type][reg_cfg.label_type][reg_cfg.train_type]["ptq"].append(self.performance) + reg_cfg.update_result(test_type, self.performance) assert test_result["passed"] is True, test_result["log"] diff --git a/tests/regression/detection/test_tiling_detection.py b/tests/regression/detection/test_tiling_detection.py index 4155580f947..b25f8990675 100644 --- a/tests/regression/detection/test_tiling_detection.py +++ b/tests/regression/detection/test_tiling_detection.py @@ -58,21 +58,19 @@ class TestRegressionTilingDetection: @classmethod @pytest.fixture(scope="class") def reg_cfg(cls, tmp_dir_path): + results_root = os.environ.get("REG_RESULTS_ROOT", tmp_dir_path) cls.reg_cfg = RegressionTestConfig( cls.TASK_TYPE, cls.TRAIN_TYPE, cls.LABEL_TYPE, os.getcwd(), train_params=cls.TRAIN_PARAMS, - result_dir="tiling", - tmp_results_root=tmp_dir_path, + results_root=results_root, ) yield cls.reg_cfg - print(f"writting regression result to 
{cls.reg_cfg.result_dir}/result_{cls.TRAIN_TYPE}_{cls.LABEL_TYPE}.json") - with open(f"{cls.reg_cfg.result_dir}/result_{cls.TRAIN_TYPE}_{cls.LABEL_TYPE}.json", "w") as result_file: - json.dump(cls.reg_cfg.result_dict, result_file, indent=4) + cls.reg_cfg.dump_result_dict() def setup_method(self): self.performance = {} @@ -80,6 +78,7 @@ def setup_method(self): @e2e_pytest_component @pytest.mark.parametrize("template", templates, ids=templates_ids) def test_otx_train(self, reg_cfg, template, tmp_dir_path): + test_type = "train" self.performance[template.name] = {} tmp_dir_path = tmp_dir_path / reg_cfg.task_type @@ -93,14 +92,14 @@ def test_otx_train(self, reg_cfg, template, tmp_dir_path): tmp_dir_path, reg_cfg.otx_dir, reg_cfg.args, - reg_cfg.config_dict["regression_criteria"]["train"], + reg_cfg.config_dict["regression_criteria"][test_type], self.performance[template.name], ) infer_elapsed_time = timer() - infer_start_time self.performance[template.name][TIME_LOG["train_time"]] = round(train_elapsed_time, 3) self.performance[template.name][TIME_LOG["infer_time"]] = round(infer_elapsed_time, 3) - reg_cfg.result_dict[reg_cfg.task_type][reg_cfg.label_type][reg_cfg.train_type]["train"].append(self.performance) + reg_cfg.update_result(test_type, self.performance) assert test_result["passed"] is True, test_result["log"] @@ -108,6 +107,8 @@ def test_otx_train(self, reg_cfg, template, tmp_dir_path): @pytest.mark.parametrize("template", templates, ids=templates_ids) def test_otx_train_kpi_test(self, reg_cfg, template): performance = reg_cfg.get_template_performance(template) + if performance is None: + pytest.skip(reason="Cannot find performance data from results.") kpi_train_result = regression_train_time_testing( train_time_criteria=reg_cfg.config_dict["kpi_e2e_train_time_criteria"]["train"], @@ -127,6 +128,7 @@ def test_otx_train_kpi_test(self, reg_cfg, template): @e2e_pytest_component @pytest.mark.parametrize("template", templates, ids=templates_ids) def test_otx_export_eval_openvino(self, reg_cfg, template, tmp_dir_path): + test_type = "export" self.performance[template.name] = {} tmp_dir_path = tmp_dir_path / reg_cfg.task_type @@ -141,7 +143,7 @@ def test_otx_export_eval_openvino(self, reg_cfg, template, tmp_dir_path): reg_cfg.otx_dir, reg_cfg.args, threshold=0.05, - criteria=reg_cfg.config_dict["regression_criteria"]["export"], + criteria=reg_cfg.config_dict["regression_criteria"][test_type], reg_threshold=0.10, result_dict=self.performance[template.name], ) @@ -149,15 +151,14 @@ def test_otx_export_eval_openvino(self, reg_cfg, template, tmp_dir_path): self.performance[template.name][TIME_LOG["export_time"]] = round(export_elapsed_time, 3) self.performance[template.name][TIME_LOG["export_eval_time"]] = round(export_eval_elapsed_time, 3) - reg_cfg.result_dict[reg_cfg.task_type][reg_cfg.label_type][reg_cfg.train_type]["export"].append( - self.performance - ) + reg_cfg.update_result(test_type, self.performance) assert test_result["passed"] is True, test_result["log"] @e2e_pytest_component @pytest.mark.parametrize("template", templates, ids=templates_ids) def test_otx_deploy_eval_deployment(self, reg_cfg, template, tmp_dir_path): + test_type = "deploy" self.performance[template.name] = {} tmp_dir_path = tmp_dir_path / reg_cfg.task_type @@ -172,7 +173,7 @@ def test_otx_deploy_eval_deployment(self, reg_cfg, template, tmp_dir_path): reg_cfg.otx_dir, reg_cfg.args, threshold=0.0, - criteria=reg_cfg.config_dict["regression_criteria"]["deploy"], + 
criteria=reg_cfg.config_dict["regression_criteria"][test_type], reg_threshold=0.10, result_dict=self.performance[template.name], ) @@ -180,15 +181,14 @@ def test_otx_deploy_eval_deployment(self, reg_cfg, template, tmp_dir_path): self.performance[template.name][TIME_LOG["deploy_time"]] = round(deploy_elapsed_time, 3) self.performance[template.name][TIME_LOG["deploy_eval_time"]] = round(deploy_eval_elapsed_time, 3) - reg_cfg.result_dict[reg_cfg.task_type][reg_cfg.label_type][reg_cfg.train_type]["deploy"].append( - self.performance - ) + reg_cfg.update_result(test_type, self.performance) assert test_result["passed"] is True, test_result["log"] @e2e_pytest_component @pytest.mark.parametrize("template", templates, ids=templates_ids) def test_nncf_optimize_eval(self, reg_cfg, template, tmp_dir_path): + test_type = "nncf" self.performance[template.name] = {} tmp_dir_path = tmp_dir_path / reg_cfg.task_type @@ -206,7 +206,7 @@ def test_nncf_optimize_eval(self, reg_cfg, template, tmp_dir_path): reg_cfg.otx_dir, reg_cfg.args, threshold=0.01, - criteria=reg_cfg.config_dict["regression_criteria"]["nncf"], + criteria=reg_cfg.config_dict["regression_criteria"][test_type], reg_threshold=0.10, result_dict=self.performance[template.name], ) @@ -214,13 +214,14 @@ def test_nncf_optimize_eval(self, reg_cfg, template, tmp_dir_path): self.performance[template.name][TIME_LOG["nncf_time"]] = round(nncf_elapsed_time, 3) self.performance[template.name][TIME_LOG["nncf_eval_time"]] = round(nncf_eval_elapsed_time, 3) - reg_cfg.result_dict[reg_cfg.task_type][reg_cfg.label_type][reg_cfg.train_type]["nncf"].append(self.performance) + reg_cfg.update_result(test_type, self.performance) assert test_result["passed"] is True, test_result["log"] @e2e_pytest_component @pytest.mark.parametrize("template", templates, ids=templates_ids) def test_ptq_optimize_eval(self, reg_cfg, template, tmp_dir_path): + test_type = "ptq" self.performance[template.name] = {} tmp_dir_path = tmp_dir_path / reg_cfg.task_type @@ -234,7 +235,7 @@ def test_ptq_optimize_eval(self, reg_cfg, template, tmp_dir_path): tmp_dir_path, reg_cfg.otx_dir, reg_cfg.args, - criteria=reg_cfg.config_dict["regression_criteria"]["ptq"], + criteria=reg_cfg.config_dict["regression_criteria"][test_type], reg_threshold=0.10, result_dict=self.performance[template.name], ) @@ -242,6 +243,6 @@ def test_ptq_optimize_eval(self, reg_cfg, template, tmp_dir_path): self.performance[template.name][TIME_LOG["ptq_time"]] = round(ptq_elapsed_time, 3) self.performance[template.name][TIME_LOG["ptq_eval_time"]] = round(ptq_eval_elapsed_time, 3) - reg_cfg.result_dict[reg_cfg.task_type][reg_cfg.label_type][reg_cfg.train_type]["ptq"].append(self.performance) + reg_cfg.update_result(test_type, self.performance) assert test_result["passed"] is True, test_result["log"] diff --git a/tests/regression/instance_segmentation/test_instance_segmentation.py b/tests/regression/instance_segmentation/test_instance_segmentation.py index 45666dedcc5..14029c56b7c 100644 --- a/tests/regression/instance_segmentation/test_instance_segmentation.py +++ b/tests/regression/instance_segmentation/test_instance_segmentation.py @@ -52,20 +52,19 @@ class TestRegressionInstanceSegmentation: @classmethod @pytest.fixture(scope="class") def reg_cfg(cls, tmp_dir_path): + results_root = os.environ.get("REG_RESULTS_ROOT", tmp_dir_path) cls.reg_cfg = RegressionTestConfig( cls.TASK_TYPE, cls.TRAIN_TYPE, cls.LABEL_TYPE, os.getcwd(), train_params=cls.TRAIN_PARAMS, - tmp_results_root=tmp_dir_path, + results_root=results_root, ) yield 
cls.reg_cfg - print(f"writting regression result to {cls.reg_cfg.result_dir}/result_{cls.TRAIN_TYPE}_{cls.LABEL_TYPE}.json") - with open(f"{cls.reg_cfg.result_dir}/result_{cls.TRAIN_TYPE}_{cls.LABEL_TYPE}.json", "w") as result_file: - json.dump(cls.reg_cfg.result_dict, result_file, indent=4) + cls.reg_cfg.dump_result_dict() def setup_method(self): self.performance = {} @@ -73,6 +72,7 @@ def setup_method(self): @e2e_pytest_component @pytest.mark.parametrize("template", templates, ids=templates_ids) def test_otx_train(self, reg_cfg, template, tmp_dir_path): + test_type = "train" self.performance[template.name] = {} tmp_dir_path = tmp_dir_path / reg_cfg.task_type @@ -86,14 +86,14 @@ def test_otx_train(self, reg_cfg, template, tmp_dir_path): tmp_dir_path, reg_cfg.otx_dir, reg_cfg.args, - reg_cfg.config_dict["regression_criteria"]["train"], + reg_cfg.config_dict["regression_criteria"][test_type], self.performance[template.name], ) infer_elapsed_time = timer() - infer_start_time self.performance[template.name][TIME_LOG["train_time"]] = round(train_elapsed_time, 3) self.performance[template.name][TIME_LOG["infer_time"]] = round(infer_elapsed_time, 3) - reg_cfg.result_dict[reg_cfg.task_type][reg_cfg.label_type][reg_cfg.train_type]["train"].append(self.performance) + reg_cfg.update_result(test_type, self.performance) assert test_result["passed"] is True, test_result["log"] @@ -101,6 +101,8 @@ def test_otx_train(self, reg_cfg, template, tmp_dir_path): @pytest.mark.parametrize("template", templates, ids=templates_ids) def test_otx_train_kpi_test(self, reg_cfg, template): performance = reg_cfg.get_template_performance(template) + if performance is None: + pytest.skip(reason="Cannot find performance data from results.") kpi_train_result = regression_train_time_testing( train_time_criteria=reg_cfg.config_dict["kpi_e2e_train_time_criteria"]["train"], @@ -121,6 +123,7 @@ def test_otx_train_kpi_test(self, reg_cfg, template): @pytest.mark.parametrize("template", templates, ids=templates_ids) def test_otx_train_cls_incr(self, reg_cfg, template, tmp_dir_path): train_type = "class_incr" + test_type = "train" self.performance[template.name] = {} sl_template_work_dir = get_template_dir(template, tmp_dir_path / reg_cfg.task_type) @@ -152,7 +155,7 @@ def test_otx_train_cls_incr(self, reg_cfg, template, tmp_dir_path): self.performance[template.name][TIME_LOG["train_time"]] = round(train_elapsed_time, 3) self.performance[template.name][TIME_LOG["infer_time"]] = round(infer_elapsed_time, 3) - reg_cfg.result_dict[reg_cfg.task_type][reg_cfg.label_type][train_type]["train"].append(self.performance) + reg_cfg.update_result(test_type, self.performance, train_type=train_type) assert test_result["passed"] is True, test_result["log"] @@ -162,6 +165,8 @@ def test_otx_train_cls_incr_kpi_test(self, reg_cfg, template): train_type = "class_incr" config_cls_incr = reg_cfg.load_config(train_type=train_type) performance = reg_cfg.get_template_performance(template) + if performance is None: + pytest.skip(reason="Cannot find performance data from results.") kpi_train_result = regression_train_time_testing( train_time_criteria=config_cls_incr["kpi_e2e_train_time_criteria"]["train"], @@ -182,6 +187,7 @@ def test_otx_train_cls_incr_kpi_test(self, reg_cfg, template): @pytest.mark.parametrize("template", templates, ids=templates_ids) # @pytest.mark.skip(reason="Issue#2290: MaskRCNN shows degraded performance when inferencing in OpenVINO") def test_otx_export_eval_openvino(self, reg_cfg, template, tmp_dir_path): + test_type = "export" 
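Note: every reg_cfg fixture now finishes with dump_result_dict() instead of an inline print-and-json.dump. A plausible implementation mirroring the removed block (the method body is assumed, not shown in this patch):

    import json

    def dump_result_dict(self):
        # Same file-name convention as the inline dump this replaces.
        result_path = f"{self.result_dir}/result_{self.train_type}_{self.label_type}.json"
        print(f"writing regression result to {result_path}")
        with open(result_path, "w") as result_file:
            json.dump(self.result_dict, result_file, indent=4)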
self.performance[template.name] = {} tmp_dir_path = tmp_dir_path / reg_cfg.task_type @@ -196,7 +202,7 @@ def test_otx_export_eval_openvino(self, reg_cfg, template, tmp_dir_path): reg_cfg.otx_dir, reg_cfg.args, threshold=0.05, - criteria=reg_cfg.config_dict["regression_criteria"]["export"], + criteria=reg_cfg.config_dict["regression_criteria"][test_type], reg_threshold=0.10, result_dict=self.performance[template.name], ) @@ -204,9 +210,7 @@ def test_otx_export_eval_openvino(self, reg_cfg, template, tmp_dir_path): self.performance[template.name][TIME_LOG["export_time"]] = round(export_elapsed_time, 3) self.performance[template.name][TIME_LOG["export_eval_time"]] = round(export_eval_elapsed_time, 3) - reg_cfg.result_dict[reg_cfg.task_type][reg_cfg.label_type][reg_cfg.train_type]["export"].append( - self.performance - ) + reg_cfg.update_result(test_type, self.performance) assert test_result["passed"] is True, test_result["log"] @@ -214,6 +218,7 @@ def test_otx_export_eval_openvino(self, reg_cfg, template, tmp_dir_path): @pytest.mark.parametrize("template", templates, ids=templates_ids) # @pytest.mark.skip(reason="Issue#2290: MaskRCNN shows degraded performance when inferencing in OpenVINO") def test_otx_deploy_eval_deployment(self, reg_cfg, template, tmp_dir_path): + test_type = "deploy" self.performance[template.name] = {} tmp_dir_path = tmp_dir_path / reg_cfg.task_type @@ -228,7 +233,7 @@ def test_otx_deploy_eval_deployment(self, reg_cfg, template, tmp_dir_path): reg_cfg.otx_dir, reg_cfg.args, threshold=0.0, - criteria=reg_cfg.config_dict["regression_criteria"]["deploy"], + criteria=reg_cfg.config_dict["regression_criteria"][test_type], reg_threshold=0.10, result_dict=self.performance[template.name], ) @@ -236,15 +241,14 @@ def test_otx_deploy_eval_deployment(self, reg_cfg, template, tmp_dir_path): self.performance[template.name][TIME_LOG["deploy_time"]] = round(deploy_elapsed_time, 3) self.performance[template.name][TIME_LOG["deploy_eval_time"]] = round(deploy_eval_elapsed_time, 3) - reg_cfg.result_dict[reg_cfg.task_type][reg_cfg.label_type][reg_cfg.train_type]["deploy"].append( - self.performance - ) + reg_cfg.update_result(test_type, self.performance) assert test_result["passed"] is True, test_result["log"] @e2e_pytest_component @pytest.mark.parametrize("template", templates, ids=templates_ids) def test_nncf_optimize_eval(self, reg_cfg, template, tmp_dir_path): + test_type = "nncf" self.performance[template.name] = {} tmp_dir_path = tmp_dir_path / reg_cfg.task_type @@ -262,7 +266,7 @@ def test_nncf_optimize_eval(self, reg_cfg, template, tmp_dir_path): reg_cfg.otx_dir, reg_cfg.args, threshold=0.01, - criteria=reg_cfg.config_dict["regression_criteria"]["nncf"], + criteria=reg_cfg.config_dict["regression_criteria"][test_type], reg_threshold=0.10, result_dict=self.performance[template.name], ) @@ -270,13 +274,14 @@ def test_nncf_optimize_eval(self, reg_cfg, template, tmp_dir_path): self.performance[template.name][TIME_LOG["nncf_time"]] = round(nncf_elapsed_time, 3) self.performance[template.name][TIME_LOG["nncf_eval_time"]] = round(nncf_eval_elapsed_time, 3) - reg_cfg.result_dict[reg_cfg.task_type][reg_cfg.label_type][reg_cfg.train_type]["nncf"].append(self.performance) + reg_cfg.update_result(test_type, self.performance) assert test_result["passed"] is True, test_result["log"] @e2e_pytest_component @pytest.mark.parametrize("template", templates, ids=templates_ids) def test_ptq_optimize_eval(self, reg_cfg, template, tmp_dir_path): + test_type = "ptq" self.performance[template.name] = {} 
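Note: the KPI tests now guard with `if performance is None: pytest.skip(...)`, which implies get_template_performance returns None instead of raising when the train stage recorded nothing. A sketch under that assumption (hypothetical body):

    def get_template_performance(self, template, train_type=None):
        # Return the train-stage entry recorded for this template, or None if
        # the train test was skipped or failed before appending any result.
        train_type = train_type or self.train_type
        for entry in self.result_dict[self.task_type][self.label_type][train_type]["train"]:
            if template.name in entry:
                return entry
        return None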
tmp_dir_path = tmp_dir_path / reg_cfg.task_type @@ -290,7 +295,7 @@ def test_ptq_optimize_eval(self, reg_cfg, template, tmp_dir_path): tmp_dir_path, reg_cfg.otx_dir, reg_cfg.args, - criteria=reg_cfg.config_dict["regression_criteria"]["ptq"], + criteria=reg_cfg.config_dict["regression_criteria"][test_type], reg_threshold=0.10, result_dict=self.performance[template.name], ) @@ -298,6 +303,6 @@ def test_ptq_optimize_eval(self, reg_cfg, template, tmp_dir_path): self.performance[template.name][TIME_LOG["ptq_time"]] = round(ptq_elapsed_time, 3) self.performance[template.name][TIME_LOG["ptq_eval_time"]] = round(ptq_eval_elapsed_time, 3) - reg_cfg.result_dict[reg_cfg.task_type][reg_cfg.label_type][reg_cfg.train_type]["ptq"].append(self.performance) + reg_cfg.update_result(test_type, self.performance) assert test_result["passed"] is True, test_result["log"] diff --git a/tests/regression/instance_segmentation/test_tiling_instance_segmentation.py b/tests/regression/instance_segmentation/test_tiling_instance_segmentation.py index 0dda9f7729c..5c45a0983cd 100644 --- a/tests/regression/instance_segmentation/test_tiling_instance_segmentation.py +++ b/tests/regression/instance_segmentation/test_tiling_instance_segmentation.py @@ -58,21 +58,19 @@ class TestRegressionTilingInstanceSegmentation: @classmethod @pytest.fixture(scope="class") def reg_cfg(cls, tmp_dir_path): + results_root = os.environ.get("REG_RESULTS_ROOT", tmp_dir_path) cls.reg_cfg = RegressionTestConfig( cls.TASK_TYPE, cls.TRAIN_TYPE, cls.LABEL_TYPE, os.getcwd(), train_params=cls.TRAIN_PARAMS, - result_dir="tiling", - tmp_results_root=tmp_dir_path, + results_root=results_root, ) yield cls.reg_cfg - print(f"writting regression result to {cls.reg_cfg.result_dir}/result_{cls.TRAIN_TYPE}_{cls.LABEL_TYPE}.json") - with open(f"{cls.reg_cfg.result_dir}/result_{cls.TRAIN_TYPE}_{cls.LABEL_TYPE}.json", "w") as result_file: - json.dump(cls.reg_cfg.result_dict, result_file, indent=4) + cls.reg_cfg.dump_result_dict() def setup_method(self): self.performance = {} @@ -81,6 +79,7 @@ def setup_method(self): @pytest.mark.parametrize("template", templates, ids=templates_ids) @pytest.mark.skip(reason="Issue#2381: Tiling isn't available at class incremental/deremental learning scenario") def test_otx_train(self, reg_cfg, template, tmp_dir_path): + test_type = "train" self.performance[template.name] = {} tmp_dir_path = tmp_dir_path / reg_cfg.task_type @@ -94,14 +93,14 @@ def test_otx_train(self, reg_cfg, template, tmp_dir_path): tmp_dir_path, reg_cfg.otx_dir, reg_cfg.args, - reg_cfg.config_dict["regression_criteria"]["train"], + reg_cfg.config_dict["regression_criteria"][test_type], self.performance[template.name], ) infer_elapsed_time = timer() - infer_start_time self.performance[template.name][TIME_LOG["train_time"]] = round(train_elapsed_time, 3) self.performance[template.name][TIME_LOG["infer_time"]] = round(infer_elapsed_time, 3) - reg_cfg.result_dict[reg_cfg.task_type][reg_cfg.label_type][reg_cfg.train_type]["train"].append(self.performance) + reg_cfg.update_result(test_type, self.performance) assert test_result["passed"] is True, test_result["log"] @@ -110,6 +109,9 @@ def test_otx_train(self, reg_cfg, template, tmp_dir_path): @pytest.mark.skip(reason="Issue#2381: Tiling isn't available at class incremental/deremental learning scenario") def test_otx_train_kpi_test(self, reg_cfg, template): performance = reg_cfg.get_template_performance(template) + if performance is None: + pytest.skip(reason="Cannot find performance data from results.") + kpi_train_result = 
regression_train_time_testing( train_time_criteria=reg_cfg.config_dict["kpi_e2e_train_time_criteria"]["train"], e2e_train_time=performance[template.name][TIME_LOG["train_time"]], @@ -129,6 +131,7 @@ def test_otx_train_kpi_test(self, reg_cfg, template): @pytest.mark.parametrize("template", templates, ids=templates_ids) @pytest.mark.skip(reason="Issue#2381: Tiling isn't available at class incremental/deremental learning scenario") def test_otx_export_eval_openvino(self, reg_cfg, template, tmp_dir_path): + test_type = "export" self.performance[template.name] = {} tmp_dir_path = tmp_dir_path / reg_cfg.task_type @@ -143,7 +146,7 @@ def test_otx_export_eval_openvino(self, reg_cfg, template, tmp_dir_path): reg_cfg.otx_dir, reg_cfg.args, threshold=0.05, - criteria=reg_cfg.config_dict["regression_criteria"]["export"], + criteria=reg_cfg.config_dict["regression_criteria"][test_type], reg_threshold=0.10, result_dict=self.performance[template.name], ) @@ -151,9 +154,7 @@ def test_otx_export_eval_openvino(self, reg_cfg, template, tmp_dir_path): self.performance[template.name][TIME_LOG["export_time"]] = round(export_elapsed_time, 3) self.performance[template.name][TIME_LOG["export_eval_time"]] = round(export_eval_elapsed_time, 3) - reg_cfg.result_dict[reg_cfg.task_type][reg_cfg.label_type][reg_cfg.train_type]["export"].append( - self.performance - ) + reg_cfg.update_result(test_type, self.performance) assert test_result["passed"] is True, test_result["log"] @@ -161,6 +162,7 @@ def test_otx_export_eval_openvino(self, reg_cfg, template, tmp_dir_path): @pytest.mark.parametrize("template", templates, ids=templates_ids) @pytest.mark.skip(reason="Issue#2381: Tiling isn't available at class incremental/deremental learning scenario") def test_otx_deploy_eval_deployment(self, reg_cfg, template, tmp_dir_path): + test_type = "deploy" self.performance[template.name] = {} tmp_dir_path = tmp_dir_path / reg_cfg.task_type @@ -175,7 +177,7 @@ def test_otx_deploy_eval_deployment(self, reg_cfg, template, tmp_dir_path): reg_cfg.otx_dir, reg_cfg.args, threshold=0.0, - criteria=reg_cfg.config_dict["regression_criteria"]["deploy"], + criteria=reg_cfg.config_dict["regression_criteria"][test_type], reg_threshold=0.10, result_dict=self.performance[template.name], ) @@ -183,9 +185,7 @@ def test_otx_deploy_eval_deployment(self, reg_cfg, template, tmp_dir_path): self.performance[template.name][TIME_LOG["deploy_time"]] = round(deploy_elapsed_time, 3) self.performance[template.name][TIME_LOG["deploy_eval_time"]] = round(deploy_eval_elapsed_time, 3) - reg_cfg.result_dict[reg_cfg.task_type][reg_cfg.label_type][reg_cfg.train_type]["deploy"].append( - self.performance - ) + reg_cfg.update_result(test_type, self.performance) assert test_result["passed"] is True, test_result["log"] @@ -193,6 +193,7 @@ def test_otx_deploy_eval_deployment(self, reg_cfg, template, tmp_dir_path): @pytest.mark.parametrize("template", templates, ids=templates_ids) @pytest.mark.skip(reason="Issue#2381: Tiling isn't available at class incremental/deremental learning scenario") def test_nncf_optimize_eval(self, reg_cfg, template, tmp_dir_path): + test_type = "nncf" self.performance[template.name] = {} tmp_dir_path = tmp_dir_path / reg_cfg.task_type @@ -210,7 +211,7 @@ def test_nncf_optimize_eval(self, reg_cfg, template, tmp_dir_path): reg_cfg.otx_dir, reg_cfg.args, threshold=0.01, - criteria=reg_cfg.config_dict["regression_criteria"]["nncf"], + criteria=reg_cfg.config_dict["regression_criteria"][test_type], reg_threshold=0.10, 
            result_dict=self.performance[template.name],
        )
@@ -218,7 +219,7 @@ def test_nncf_optimize_eval(self, reg_cfg, template, tmp_dir_path):
         self.performance[template.name][TIME_LOG["nncf_time"]] = round(nncf_elapsed_time, 3)
         self.performance[template.name][TIME_LOG["nncf_eval_time"]] = round(nncf_eval_elapsed_time, 3)
 
-        reg_cfg.result_dict[reg_cfg.task_type][reg_cfg.label_type][reg_cfg.train_type]["nncf"].append(self.performance)
+        reg_cfg.update_result(test_type, self.performance)
 
         assert test_result["passed"] is True, test_result["log"]
@@ -226,6 +227,7 @@ def test_nncf_optimize_eval(self, reg_cfg, template, tmp_dir_path):
     @pytest.mark.parametrize("template", templates, ids=templates_ids)
     @pytest.mark.skip(reason="Issue#2381: Tiling isn't available at class incremental/deremental learning scenario")
     def test_ptq_optimize_eval(self, reg_cfg, template, tmp_dir_path):
+        test_type = "ptq"
         self.performance[template.name] = {}
 
         tmp_dir_path = tmp_dir_path / reg_cfg.task_type
@@ -239,7 +241,7 @@ def test_ptq_optimize_eval(self, reg_cfg, template, tmp_dir_path):
             tmp_dir_path,
             reg_cfg.otx_dir,
             reg_cfg.args,
-            criteria=reg_cfg.config_dict["regression_criteria"]["ptq"],
+            criteria=reg_cfg.config_dict["regression_criteria"][test_type],
             reg_threshold=0.10,
             result_dict=self.performance[template.name],
         )
@@ -247,6 +249,6 @@ def test_ptq_optimize_eval(self, reg_cfg, template, tmp_dir_path):
         self.performance[template.name][TIME_LOG["ptq_time"]] = round(ptq_elapsed_time, 3)
         self.performance[template.name][TIME_LOG["ptq_eval_time"]] = round(ptq_eval_elapsed_time, 3)
 
-        reg_cfg.result_dict[reg_cfg.task_type][reg_cfg.label_type][reg_cfg.train_type]["ptq"].append(self.performance)
+        reg_cfg.update_result(test_type, self.performance)
 
         assert test_result["passed"] is True, test_result["log"]
diff --git a/tests/regression/regression_command.py b/tests/regression/regression_command.py
index 121f2c33efb..8fa0b1e919a 100644
--- a/tests/regression/regression_command.py
+++ b/tests/regression/regression_command.py
@@ -44,20 +44,23 @@ def regression_eval_testing(
     with open(performance_json_path) as read_file:
         trained_performance = json.load(read_file)
 
-    model_criteria = criteria[template.name] if template.name in criteria.keys() else 0.0
-    modified_criteria = model_criteria - (model_criteria * threshold)
     for k in trained_performance.keys():
         result_dict[k] = round(trained_performance[k], 3)
-        if trained_performance[k] < modified_criteria:
+        model_criteria = 0.0
+        if template.name not in criteria.keys():
             regression_result["passed"] = False
-            regression_result["log"] = f"Performance: ({trained_performance[k]}) < Criteria: ({modified_criteria})."
-            regression_result["raw"] = {
-                "metric": k,
-                "performance": trained_performance[k],
-                "template": template.name,
-                "criteria": model_criteria,
-                "threshold": threshold,
-            }
+            regression_result["log"] = (
+                f"Cannot find regression criteria for the template '{template.name}'. "
+                + f"train_performance = {trained_performance}"
+            )
+        else:
+            model_criteria = criteria[template.name] * (1.0 - threshold)
+            if trained_performance[k] < model_criteria:
+                regression_result["passed"] = False
+                regression_result["log"] = (
+                    f"[{template.name}] Performance: ({trained_performance[k]}) < Criteria: ({model_criteria}), "
+                    f"threshold: {threshold}."
+                )
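Note: the rewrite folds the old two-step bound, model_criteria - (model_criteria * threshold), into a single multiplication. A worked example with illustrative numbers:

    # A criterion of 0.70 and a threshold of 0.05 give a pass bound of
    # 0.70 * (1.0 - 0.05) = 0.665, so a measured 0.66 fails and 0.67 passes.
    # The old subtraction form, 0.70 - 0.70 * 0.05, computed the same bound.
    model_criteria = 0.70 * (1.0 - 0.05)
    assert round(model_criteria, 3) == 0.665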
result_dict["Model size (MB)"] = round( os.path.getsize(f"{template_work_dir}/trained_{template.model_template_id}/models/weights.pth") / 1e6, 2 @@ -115,36 +118,18 @@ def regression_openvino_testing( with open(perf_path) as read_file: exported_performance = json.load(read_file) - model_criteria = 0.0 # set default model critera for not existing reg config - if template.name not in criteria.keys(): - regression_result["passed"] = False - log_msg = ( - f"Cannot find regression criteria for the template '{template.name}'. " - + f"train_performance = {trained_performance}, export_performance = {exported_performance}" - ) - regression_result["log"] = log_msg - print(log_msg) - return regression_result - - if isinstance(criteria, dict): - model_criteria = criteria[template.name] * (1.0 - reg_threshold) - for k in trained_performance.keys(): if k == "avg_time_per_image": continue result_dict[k] = round(exported_performance[k], 3) - if exported_performance[k] < model_criteria: - regression_result["passed"] = False - regression_result[ - "log" - ] = f"Export performance: ({exported_performance[k]}) < Criteria: ({model_criteria})." - if ( exported_performance[k] < trained_performance[k] and abs(trained_performance[k] - exported_performance[k]) / (trained_performance[k] + 1e-10) > threshold ): regression_result["passed"] = False - regression_result["log"] = f"{trained_performance[k]=}, {exported_performance[k]=}" + regression_result[ + "log" + ] = f"[{template.name}] {trained_performance[k]=}, {exported_performance[k]=}, {threshold=}" return regression_result @@ -177,26 +162,18 @@ def regression_deployment_testing( with open(f"{template_work_dir}/deployed_{template.model_template_id}/performance.json") as read_file: deployed_performance = json.load(read_file) - if isinstance(criteria, dict) and template.name in criteria.keys(): - model_criteria = criteria[template.name] - modified_criteria = model_criteria - (model_criteria * reg_threshold) - for k in exported_performance.keys(): if k == "avg_time_per_image": continue - if isinstance(criteria, dict) and template.name in criteria.keys(): - result_dict[k] = round(deployed_performance[k], 3) - if deployed_performance[k] < modified_criteria: - regression_result["passed"] = False - regression_result[ - "log" - ] = f"Deploy performance: ({deployed_performance[k]}) < Criteria: ({modified_criteria})." 
+        result_dict[k] = round(deployed_performance[k], 3)
         if (
             deployed_performance[k] < exported_performance[k]
             and abs(exported_performance[k] - deployed_performance[k]) / (exported_performance[k] + 1e-10) > threshold
         ):
             regression_result["passed"] = False
-            regression_result["log"] = f"{exported_performance[k]=}, {deployed_performance[k]=}"
+            regression_result[
+                "log"
+            ] = f"[{template.name}] {exported_performance[k]=}, {deployed_performance[k]=}, {threshold=}"
 
     return regression_result
 
@@ -229,24 +206,35 @@ def regression_nncf_eval_testing(
     with open(f"{template_work_dir}/nncf_{template.model_template_id}/performance.json") as read_file:
         evaluated_performance = json.load(read_file)
 
-    if isinstance(criteria, dict) and template.name in criteria.keys():
-        model_criteria = criteria[template.name]
-        modified_criteria = model_criteria - (model_criteria * reg_threshold)
-
     for k in trained_performance.keys():
-        if isinstance(criteria, dict) and template.name in criteria.keys():
-            result_dict[k] = round(evaluated_performance[k], 3)
-            if evaluated_performance[k] < modified_criteria:
+        result_dict[k] = round(evaluated_performance[k], 3)
+        model_criteria = 0.0
+        if template.name not in criteria.keys():
+            regression_result["passed"] = False
+            regression_result["log"] = (
+                f"Cannot find regression criteria for the template '{template.name}'. "
+                + f"{trained_performance=}, {evaluated_performance=}"
+            )
+        else:
+            model_criteria = criteria[template.name] * (1.0 - threshold)
+            if evaluated_performance[k] < model_criteria:
                 regression_result["passed"] = False
                 regression_result[
                     "log"
-                ] = f"NNCF performance: ({evaluated_performance[k]}) < Criteria: ({modified_criteria})."
-        if (
-            evaluated_performance[k] < trained_performance[k]
-            and abs(trained_performance[k] - evaluated_performance[k]) / (trained_performance[k] + 1e-10) > threshold
-        ):
-            regression_result["passed"] = False
-            regression_result["log"] = f"{trained_performance[k]=}, {evaluated_performance[k]=}"
+                ] = (
+                    f"[{template.name}] NNCF performance is lower than criteria: {evaluated_performance[k]=}, "
+                    f"{model_criteria=}, {threshold=}"
+                )
+            elif evaluated_performance[k] < trained_performance[k]:
+                regression_result["passed"] = False
+                regression_result["log"] = (
+                    f"[{template.name}] NNCF eval performance is lower than train: {evaluated_performance[k]=}, "
+                    f"{trained_performance=}"
+                )
+            elif abs(trained_performance[k] - evaluated_performance[k]) / (trained_performance[k] + 1e-10) > threshold:
+                regression_result["passed"] = False
+                regression_result["log"] = (
+                    f"[{template.name}] NNCF train & eval delta is too big: {evaluated_performance[k]=}, "
+                    f"{trained_performance[k]=}, {threshold=}"
+                )
 
     return regression_result
 
@@ -276,16 +264,21 @@ def regression_ptq_eval_testing(template, root, otx_dir, args, criteria=None, re
     with open(f"{template_work_dir}/ptq_{template.model_template_id}/performance.json") as read_file:
         ptq_performance = json.load(read_file)
 
-    if isinstance(criteria, dict) and template.name in criteria.keys():
-        model_criteria = criteria[template.name]
-        modified_criteria = model_criteria - (model_criteria * reg_threshold)
-
     for k in ptq_performance.keys():
-        if isinstance(criteria, dict) and template.name in criteria.keys():
-            result_dict[k] = round(ptq_performance[k], 3)
-            if ptq_performance[k] < modified_criteria:
+        result_dict[k] = round(ptq_performance[k], 3)
+        model_criteria = 0.0
+        if template.name not in criteria.keys():
+            regression_result["passed"] = False
+            regression_result["log"] = (
+                f"Cannot find regression criteria for the template '{template.name}'. "
+                f"{ptq_performance=}"
+            )
+        else:
+            model_criteria = criteria[template.name] * (1.0 - reg_threshold)
+            if ptq_performance[k] < model_criteria:
                 regression_result["passed"] = False
-                regression_result["log"] = f"POT performance: ({ptq_performance[k]}) < Criteria: ({modified_criteria})."
+                regression_result[
+                    "log"
+                ] = f"[{template.name}] ptq performance: {ptq_performance[k]=}, {model_criteria=}, {reg_threshold=}"
 
     return regression_result
 
@@ -305,7 +298,9 @@ def regression_train_time_testing(train_time_criteria, e2e_train_time, template,
 
     if e2e_train_time > modified_train_criteria:
         regression_result["passed"] = False
-        regression_result["log"] = f"Train time: ({e2e_train_time}) < Criteria: ({modified_train_criteria})."
+        regression_result[
+            "log"
+        ] = f"[{template.name}] Train time: ({e2e_train_time}) > Criteria: ({modified_train_criteria})."
 
     return regression_result
 
@@ -325,6 +320,8 @@ def regression_eval_time_testing(eval_time_criteria, e2e_eval_time, template, th
 
     if e2e_eval_time > modified_eval_criteria:
         regression_result["passed"] = False
-        regression_result["log"] = f"Eval time: ({e2e_eval_time}) < criteria: ({modified_eval_criteria})."
+        regression_result[
+            "log"
+        ] = f"[{template.name}] Eval time: ({e2e_eval_time}) > criteria: ({modified_eval_criteria})."
 
     return regression_result
diff --git a/tests/regression/regression_config.json b/tests/regression/regression_config.json
index 20d3e3a50e5..6429a5fd1e2 100644
--- a/tests/regression/regression_config.json
+++ b/tests/regression/regression_config.json
@@ -3,10 +3,10 @@
     "classification": {
         "supervised": {
             "multi_class": {
-                "--train-data-roots": "classification/cifar10_subset_cls_decr/train",
-                "--val-data-roots": "classification/cifar10_subset_cls_decr/test",
-                "--test-data-roots": "classification/cifar10_subset_cls_decr/test",
-                "--input": "classification/cifar10_subset/test/airplane"
+                "--train-data-roots": "classification/multiclass_CUB_cls_decr/train",
+                "--val-data-roots": "classification/multiclass_CUB_cls_decr/test",
+                "--test-data-roots": "classification/multiclass_CUB_cls_decr/test",
+                "--input": "classification/multiclass_CUB/test/Acadian_Flycatcher"
             },
             "multi_label": {
                 "--train-data-roots": "classification/multi_label_coco_subset_cls_decr",
@@ -21,16 +21,16 @@
                 "--input": "classification/h_label_cifar10_subset/images/test/airplane"
             },
             "supcon": {
-                "--train-data-roots": "classification/cifar10_subset_cls_decr/train",
-                "--val-data-roots": "classification/cifar10_subset_cls_decr/test",
-                "--test-data-roots": "classification/cifar10_subset_cls_decr/test"
+                "--train-data-roots": "classification/multiclass_CUB_cls_decr/train",
+                "--val-data-roots": "classification/multiclass_CUB_cls_decr/test",
+                "--test-data-roots": "classification/multiclass_CUB_cls_decr/test"
             }
         },
        "class_incr": {
            "multi_class": {
-                "--train-data-roots": "classification/cifar10_subset/train",
-                "--val-data-roots": "classification/cifar10_subset/test",
-                "--test-data-roots": "classification/cifar10_subset/test"
+                "--train-data-roots": "classification/multiclass_CUB/train",
+                "--val-data-roots": "classification/multiclass_CUB/test",
+                "--test-data-roots": "classification/multiclass_CUB/test"
            },
            "multi_label": {
                "--train-data-roots": "classification/multi_label_coco_subset",
@@ -40,15 +40,15 @@
        },
        "semi_supervised": {
            "multi_class": {
-                "--train-data-roots": "classification/cifar10_subset_cls_decr/train",
-                "--val-data-roots": "classification/cifar10_subset_cls_decr/test",
-                "--test-data-roots":
"classification/cifar10_subset_cls_decr/test", - "--unlabeled-data-roots": "classification/cifar10_unlabeled" + "--train-data-roots": "classification/multiclass_CUB_cls_decr/train", + "--val-data-roots": "classification/multiclass_CUB_cls_decr/test", + "--test-data-roots": "classification/multiclass_CUB_cls_decr/test", + "--unlabeled-data-roots": "classification/CUB_unlabeled" } }, "self_supervised": { "multi_class": { - "--train-data-roots": "classification/cifar10_subset_cls_decr/train" + "--train-data-roots": "classification/multiclass_CUB_cls_decr/train" } } }, @@ -185,336 +185,716 @@ "train_params": [] } }, + "regression_criteria": { + "action_classification": { + "supervised": { + "multi_class": { + "train": { + "MoViNet": 0.519, + "X3D": 0.613 + }, + "export": { + "MoViNet": 0.0, + "X3D": 0.0 + }, + "deploy": { + "MoViNet": 0.0, + "X3D": 0.0 + }, + "nncf": { + "MoViNet": 0.0, + "X3D": 0.0 + }, + "ptq": { + "MoViNet": 0.0, + "X3D": 0.0 + } + } + } + }, + "action_detection": { + "supervised": { + "multi_class": { + "train": { + "X3D_FAST_RCNN": 0.613 + }, + "export": { + "X3D_FAST_RCNN": 0.0 + }, + "deploy": { + "X3D_FAST_RCNN": 0.0 + }, + "nncf": { + "X3D_FAST_RCNN": 0.0 + }, + "ptq": { + "X3D_FAST_RCNN": 0.0 + } + } + } + }, + "anomaly_segmentation": { + "train": { + "carpet": { + "STFPM": 0.322, + "PADIM": 0.313 + }, + "wood": { + "STFPM": 0.31, + "PADIM": 0.355 + }, + "zipper": { + "STFPM": 0.357, + "PADIM": 0.232 + } + }, + "export": { + "carpet": { + "STFPM": 0.034, + "PADIM": 0.205 + }, + "wood": { + "STFPM": 0.129, + "PADIM": 0.208 + }, + "zipper": { + "STFPM": 0.357, + "PADIM": 0.232 + } + }, + "deploy": { + "carpet": { + "STFPM": 0.034, + "PADIM": 0.205 + }, + "wood": { + "STFPM": 0.129, + "PADIM": 0.208 + }, + "zipper": { + "STFPM": 0.357, + "PADIM": 0.232 + } + }, + "nncf": { + "carpet": { + "STFPM": 0.43, + "PADIM": 0.374 + }, + "wood": { + "STFPM": 0.448, + "PADIM": 0.312 + }, + "zipper": { + "STFPM": 0.483, + "PADIM": 0.305 + } + }, + "ptq": { + "carpet": { + "STFPM": 0.034, + "PADIM": 0.225 + }, + "wood": { + "STFPM": 0.127, + "PADIM": 0.227 + }, + "zipper": { + "STFPM": 0.354, + "PADIM": 0.195 + } + } + }, + "anomaly_detection": { + "train": { + "carpet": { + "STFPM": 0.167, + "PADIM": 0.267 + }, + "wood": { + "STFPM": 0.103, + "PADIM": 0.159 + }, + "zipper": { + "STFPM": 0.108, + "PADIM": 0.063 + } + }, + "export": { + "carpet": { + "STFPM": 0.045, + "PADIM": 0.226 + }, + "wood": { + "STFPM": 0.063, + "PADIM": 0.153 + }, + "zipper": { + "STFPM": 0.101, + "PADIM": 0.062 + } + }, + "deploy": { + "carpet": { + "STFPM": 0.045, + "PADIM": 0.226 + }, + "wood": { + "STFPM": 0.063, + "PADIM": 0.153 + }, + "zipper": { + "STFPM": 0.101, + "PADIM": 0.062 + } + }, + "nncf": { + "carpet": { + "STFPM": 0.145, + "PADIM": 0.164 + }, + "wood": { + "STFPM": 0.09, + "PADIM": 0.223 + }, + "zipper": { + "STFPM": 0.166, + "PADIM": 0.067 + } + }, + "ptq": { + "carpet": { + "STFPM": 0.037, + "PADIM": 0.208 + }, + "wood": { + "STFPM": 0.053, + "PADIM": 0.159 + }, + "zipper": { + "STFPM": 0.125, + "PADIM": 0.043 + } + } + }, + "anomaly_classification": { + "train": { + "carpet": { + "STFPM": 0.829, + "PADIM": 0.761 + }, + "wood": { + "STFPM": 0.886, + "PADIM": 0.924 + }, + "zipper": { + "STFPM": 0.788, + "PADIM": 0.821 + } + }, + "export": { + "carpet": { + "STFPM": 0.784, + "PADIM": 0.761 + }, + "wood": { + "STFPM": 0.8, + "PADIM": 0.82 + }, + "zipper": { + "STFPM": 0.781, + "PADIM": 0.815 + } + }, + "deploy": { + "carpet": { + "STFPM": 0.784, + "PADIM": 0.761 + }, + "wood": { + "STFPM": 
0.8, + "PADIM": 0.82 + }, + "zipper": { + "STFPM": 0.781, + "PADIM": 0.815 + } + }, + "nncf": { + "carpet": { + "STFPM": 0.88, + "PADIM": 0.761 + }, + "wood": { + "STFPM": 0.886, + "PADIM": 0.937 + }, + "zipper": { + "STFPM": 0.821, + "PADIM": 0.808 + } + }, + "ptq": { + "carpet": { + "STFPM": 0.796, + "PADIM": 0.739 + }, + "wood": { + "STFPM": 0.8, + "PADIM": 0.826 + }, + "zipper": { + "STFPM": 0.788, + "PADIM": 0.768 + } + } + }, "classification": { "supervised": { "multi_class": { "train": { - "EfficientNet-V2-S": 0.772, - "MobileNet-V3-large-1x": 0.728, - "EfficientNet-B0": 0.806, - "Deti-Tiny": 0.0 + "EfficientNet-V2-S": 0.778, + "EfficientNet-B0": 0.699, + "MobileNet-V3-large-1x": 0.687, + "DeiT-Tiny": 0.596 }, "export": { - "EfficientNet-V2-S": 0.772, - "MobileNet-V3-large-1x": 0.728, - "EfficientNet-B0": 0.806, - "Deti-Tiny": 0.0 + "EfficientNet-V2-S": 0.778, + "EfficientNet-B0": 0.698, + "MobileNet-V3-large-1x": 0.686, + "DeiT-Tiny": 0.597 }, "deploy": { - "EfficientNet-V2-S": 0.772, - "MobileNet-V3-large-1x": 0.728, - "EfficientNet-B0": 0.806, - "Deti-Tiny": 0.0 + "EfficientNet-V2-S": 0.778, + "EfficientNet-B0": 0.698, + "MobileNet-V3-large-1x": 0.686, + "DeiT-Tiny": 0.597 }, "nncf": { - "EfficientNet-V2-S": 0.764, - "MobileNet-V3-large-1x": 0.733, - "EfficientNet-B0": 0.742, - "Deti-Tiny": 0.0 + "EfficientNet-V2-S": 0.776, + "EfficientNet-B0": 0.691, + "MobileNet-V3-large-1x": 0.677, + "DeiT-Tiny": 0.0 }, "ptq": { - "EfficientNet-V2-S": 0.711, - "MobileNet-V3-large-1x": 0.692, - "EfficientNet-B0": 0.725, - "Deti-Tiny": 0.0 + "EfficientNet-V2-S": 0.768, + "EfficientNet-B0": 0.681, + "MobileNet-V3-large-1x": 0.624, + "DeiT-Tiny": 0.594 } }, "multi_label": { "train": { "EfficientNet-V2-S": 0.968, - "MobileNet-V3-large-1x": 0.966, - "EfficientNet-B0": 0.965, - "Deti-Tiny": 0.0 + "EfficientNet-B0": 0.958, + "MobileNet-V3-large-1x": 0.965, + "DeiT-Tiny": 0.952 }, "export": { "EfficientNet-V2-S": 0.968, + "EfficientNet-B0": 0.958, "MobileNet-V3-large-1x": 0.965, - "EfficientNet-B0": 0.965, - "Deti-Tiny": 0.0 + "DeiT-Tiny": 0.952 }, "deploy": { "EfficientNet-V2-S": 0.968, + "EfficientNet-B0": 0.958, "MobileNet-V3-large-1x": 0.965, - "EfficientNet-B0": 0.965, - "Deti-Tiny": 0.0 + "DeiT-Tiny": 0.952 }, "nncf": { - "EfficientNet-V2-S": 0.967, + "EfficientNet-V2-S": 0.971, + "EfficientNet-B0": 0.961, "MobileNet-V3-large-1x": 0.965, - "EfficientNet-B0": 0.965, - "Deti-Tiny": 0.0 + "DeiT-Tiny": 0.0 }, "ptq": { - "EfficientNet-V2-S": 0.968, + "EfficientNet-V2-S": 0.971, + "EfficientNet-B0": 0.96, "MobileNet-V3-large-1x": 0.965, - "EfficientNet-B0": 0.964, - "Deti-Tiny": 0.0 + "DeiT-Tiny": 0.952 } }, "h_label": { "train": { - "EfficientNet-V2-S": 0.602, - "MobileNet-V3-large-1x": 0.547, - "EfficientNet-B0": 0.636, - "Deti-Tiny": 0.0 + "EfficientNet-V2-S": 0.0, + "EfficientNet-B0": 0.0, + "MobileNet-V3-large-1x": 0.0, + "DeiT-Tiny": 0.0 }, "export": { - "EfficientNet-V2-S": 0.602, - "MobileNet-V3-large-1x": 0.547, - "EfficientNet-B0": 0.636, - "Deti-Tiny": 0.0 + "EfficientNet-V2-S": 0.761, + "EfficientNet-B0": 0.737, + "MobileNet-V3-large-1x": 0.736, + "DeiT-Tiny": 0.768 }, "deploy": { - "EfficientNet-V2-S": 0.602, - "MobileNet-V3-large-1x": 0.547, - "EfficientNet-B0": 0.636, - "Deti-Tiny": 0.0 + "EfficientNet-V2-S": 0.761, + "EfficientNet-B0": 0.737, + "MobileNet-V3-large-1x": 0.736, + "DeiT-Tiny": 0.768 }, "nncf": { - "EfficientNet-V2-S": 0.594, - "MobileNet-V3-large-1x": 0.622, - "EfficientNet-B0": 0.638, - "Deti-Tiny": 0.0 + "EfficientNet-V2-S": 0.0, + "EfficientNet-B0": 0.0, + 
"MobileNet-V3-large-1x": 0.0, + "DeiT-Tiny": 0.0 }, "ptq": { - "EfficientNet-V2-S": 0.577, - "MobileNet-V3-large-1x": 0.472, - "EfficientNet-B0": 0.605, - "Deti-Tiny": 0.0 + "EfficientNet-V2-S": 0.757, + "EfficientNet-B0": 0.727, + "MobileNet-V3-large-1x": 0.691, + "DeiT-Tiny": 0.768 } }, "supcon": { "train": { - "EfficientNet-V2-S": 0.783, - "MobileNet-V3-large-1x": 0.728, - "EfficientNet-B0": 0.772, - "Deti-Tiny": 0.0 - } - } - }, - "class_incr": { - "multi_class": { - "train": { - "EfficientNet-V2-S": 0.79, - "MobileNet-V3-large-1x": 0.76, - "EfficientNet-B0": 0.81, - "Deti-Tiny": 0.0 - } - }, - "multi_label": { - "train": { - "EfficientNet-V2-S": 0.97, - "MobileNet-V3-large-1x": 0.97, - "EfficientNet-B0": 0.964, - "Deti-Tiny": 0.0 - } - }, - "h_label": { - "train": { + "EfficientNet-V2-S": 0.773, + "EfficientNet-B0": 0.675, + "MobileNet-V3-large-1x": 0.677 + }, + "export": { + "EfficientNet-V2-S": 0.0, + "EfficientNet-B0": 0.0, + "MobileNet-V3-large-1x": 0.0 + }, + "deploy": { + "EfficientNet-V2-S": 0.0, + "EfficientNet-B0": 0.0, + "MobileNet-V3-large-1x": 0.0 + }, + "nncf": { + "EfficientNet-V2-S": 0.0, + "EfficientNet-B0": 0.0, + "MobileNet-V3-large-1x": 0.0 + }, + "ptq": { "EfficientNet-V2-S": 0.0, - "MobileNet-V3-large-1x": 0.0, "EfficientNet-B0": 0.0, - "Deti-Tiny": 0.0 + "MobileNet-V3-large-1x": 0.0 } } }, "semi_supervised": { "multi_class": { "train": { - "EfficientNet-V2-S": 0.822, - "MobileNet-V3-large-1x": 0.764, - "EfficientNet-B0": 0.797, - "Deti-Tiny": 0.0 - } - } - }, - "self_supervised": { - "multi_class": { - "train": { - "EfficientNet-V2-S": 0.767, - "MobileNet-V3-large-1x": 0.744, - "EfficientNet-B0": 0.794, - "Deti-Tiny": 0.0 - } - } - } - }, - "detection": { - "supervised": { - "multi_class": { - "train": { - "YOLOX": 0.537, - "SSD": 0.179, - "MobileNetV2-ATSS": 0.446 + "EfficientNet-V2-S": 0.758, + "EfficientNet-B0": 0.674, + "MobileNet-V3-large-1x": 0.658, + "DeiT-Tiny": 0.656 }, "export": { - "YOLOX": 0.534, - "SSD": 0.179, - "MobileNetV2-ATSS": 0.455 + "EfficientNet-V2-S": 0.0, + "EfficientNet-B0": 0.0, + "MobileNet-V3-large-1x": 0.0, + "DeiT-Tiny": 0.0 }, "deploy": { - "YOLOX": 0.534, - "SSD": 0.179, - "MobileNetV2-ATSS": 0.455 + "EfficientNet-V2-S": 0.0, + "EfficientNet-B0": 0.0, + "MobileNet-V3-large-1x": 0.0, + "DeiT-Tiny": 0.0 }, "nncf": { - "YOLOX": 0.517, - "SSD": 0.181, - "MobileNetV2-ATSS": 0.446 + "EfficientNet-V2-S": 0.0, + "EfficientNet-B0": 0.0, + "MobileNet-V3-large-1x": 0.0, + "DeiT-Tiny": 0.0 }, "ptq": { - "YOLOX": 0.531, - "SSD": 0.18, - "MobileNetV2-ATSS": 0.458 - } - } - }, - "class_incr": { - "multi_class": { - "train": { - "YOLOX": 0.532, - "SSD": 0.2, - "MobileNetV2-ATSS": 0.443 - } - } - }, - "semi_supervised": { - "multi_class": { - "train": { - "YOLOX": 0.398, - "SSD": 0.142, - "MobileNetV2-ATSS": 0.414 + "EfficientNet-V2-S": 0.0, + "EfficientNet-B0": 0.0, + "MobileNet-V3-large-1x": 0.0, + "DeiT-Tiny": 0.0 } } }, - "tiling": { + "self_supervised": { "multi_class": { "train": { - "YOLOX": 0.545, - "SSD": 0.19, - "MobileNetV2-ATSS": 0.459 + "EfficientNet-V2-S": 0.776, + "EfficientNet-B0": 0.687, + "MobileNet-V3-large-1x": 0.662 }, "export": { - "YOLOX": 0.549, - "SSD": 0.19, - "MobileNetV2-ATSS": 0.469 + "EfficientNet-V2-S": 0.0, + "EfficientNet-B0": 0.0, + "MobileNet-V3-large-1x": 0.0 }, "deploy": { - "YOLOX": 0.549, - "SSD": 0.19, - "MobileNetV2-ATSS": 0.469 + "EfficientNet-V2-S": 0.0, + "EfficientNet-B0": 0.0, + "MobileNet-V3-large-1x": 0.0 }, "nncf": { - "YOLOX": 0.0, - "SSD": 0.0, - "MobileNetV2-ATSS": 0.0 + "EfficientNet-V2-S": 0.0, 
+ "EfficientNet-B0": 0.0, + "MobileNet-V3-large-1x": 0.0 }, "ptq": { - "YOLOX": 0.531, - "SSD": 0.192, - "MobileNetV2-ATSS": 0.459 - } + "EfficientNet-V2-S": 0.0, + "EfficientNet-B0": 0.0, + "MobileNet-V3-large-1x": 0.0 + } } - } - }, - "segmentation": { - "supervised": { + }, + "class_incr": { "multi_class": { "train": { - "Lite-HRNet-s-mod2": 0.636, - "Lite-HRNet-18-mod2": 0.692, - "Lite-HRNet-18": 0.801, - "Lite-HRNet-x-mod3": 0.693, - "SegNext-B": 0.0, - "SegNext-s": 0.0, - "SegNext-t": 0.0 + "EfficientNet-V2-S": 0.783, + "EfficientNet-B0": 0.702, + "MobileNet-V3-large-1x": 0.687 }, "export": { - "Lite-HRNet-s-mod2": 0.636, - "Lite-HRNet-18-mod2": 0.691, - "Lite-HRNet-18": 0.8, - "Lite-HRNet-x-mod3": 0.692, - "SegNext-B": 0.0, - "SegNext-s": 0.0, - "SegNext-t": 0.0 + "EfficientNet-V2-S": 0.0, + "EfficientNet-B0": 0.0, + "MobileNet-V3-large-1x": 0.0 }, "deploy": { - "Lite-HRNet-s-mod2": 0.636, - "Lite-HRNet-18-mod2": 0.691, - "Lite-HRNet-18": 0.8, - "Lite-HRNet-x-mod3": 0.692, - "SegNext-B": 0.0, - "SegNext-s": 0.0, - "SegNext-t": 0.0 + "EfficientNet-V2-S": 0.0, + "EfficientNet-B0": 0.0, + "MobileNet-V3-large-1x": 0.0 }, "nncf": { - "Lite-HRNet-s-mod2": 0.678, - "Lite-HRNet-18-mod2": 0.707, - "Lite-HRNet-18": 0.79, - "Lite-HRNet-x-mod3": 0.668, - "SegNext-B": 0.0, - "SegNext-s": 0.0, - "SegNext-t": 0.0 + "EfficientNet-V2-S": 0.0, + "EfficientNet-B0": 0.0, + "MobileNet-V3-large-1x": 0.0 }, "ptq": { - "Lite-HRNet-s-mod2": 0.725, - "Lite-HRNet-18-mod2": 0.269, - "Lite-HRNet-18": 0.115, - "Lite-HRNet-x-mod3": 0.041, - "SegNext-B": 0.0, - "SegNext-s": 0.0, - "SegNext-t": 0.0 + "EfficientNet-V2-S": 0.0, + "EfficientNet-B0": 0.0, + "MobileNet-V3-large-1x": 0.0 } }, - "supcon": { + "multi_label": { "train": { - "Lite-HRNet-s-mod2": 0.691, - "Lite-HRNet-18-mod2": 0.676, - "Lite-HRNet-18": 0.682, - "Lite-HRNet-x-mod3": 0.698, - "SegNext-B": 0.0, - "SegNext-s": 0.0, - "SegNext-t": 0.0 + "EfficientNet-V2-S": 0.97, + "EfficientNet-B0": 0.954, + "MobileNet-V3-large-1x": 0.96, + "DeiT-Tiny": 0.954 + }, + "export": { + "EfficientNet-V2-S": 0.0, + "EfficientNet-B0": 0.0, + "MobileNet-V3-large-1x": 0.0, + "DeiT-Tiny": 0.0 + }, + "deploy": { + "EfficientNet-V2-S": 0.0, + "EfficientNet-B0": 0.0, + "MobileNet-V3-large-1x": 0.0, + "DeiT-Tiny": 0.0 + }, + "nncf": { + "EfficientNet-V2-S": 0.0, + "EfficientNet-B0": 0.0, + "MobileNet-V3-large-1x": 0.0, + "DeiT-Tiny": 0.0 + }, + "ptq": { + "EfficientNet-V2-S": 0.0, + "EfficientNet-B0": 0.0, + "MobileNet-V3-large-1x": 0.0, + "DeiT-Tiny": 0.0 + } + } + } + }, + "detection": { + "tiling": { + "multi_class": { + "train": { + "ResNeXt101-ATSS": 0.662, + "YOLOX-S": 0.389, + "MobileNetV2-ATSS": 0.474, + "SSD": 0.228, + "YOLOX-TINY": 0.549, + "YOLOX-L": 0.655, + "YOLOX-X": 0.675 + }, + "export": { + "ResNeXt101-ATSS": 0.663, + "YOLOX-S": 0.386, + "MobileNetV2-ATSS": 0.475, + "SSD": 0.228, + "YOLOX-TINY": 0.549, + "YOLOX-L": 0.654, + "YOLOX-X": 0.674 + }, + "deploy": { + "ResNeXt101-ATSS": 0.663, + "YOLOX-S": 0.386, + "MobileNetV2-ATSS": 0.475, + "SSD": 0.228, + "YOLOX-TINY": 0.549, + "YOLOX-L": 0.654, + "YOLOX-X": 0.674 + }, + "nncf": { + "ResNeXt101-ATSS": 0.655, + "YOLOX-S": 0.0, + "MobileNetV2-ATSS": 0.0, + "SSD": 0.0, + "YOLOX-TINY": 0.0, + "YOLOX-L": 0.0, + "YOLOX-X": 0.0 + }, + "ptq": { + "ResNeXt101-ATSS": 0.651, + "YOLOX-S": 0.423, + "MobileNetV2-ATSS": 0.465, + "SSD": 0.229, + "YOLOX-TINY": 0.54, + "YOLOX-L": 0.677, + "YOLOX-X": 0.691 } } }, - "class_incr": { + "supervised": { "multi_class": { "train": { - "Lite-HRNet-s-mod2": 0.66, - "Lite-HRNet-18-mod2": 
0.768, - "Lite-HRNet-18": 0.669, - "Lite-HRNet-x-mod3": 0.712, - "SegNext-B": 0.0, - "SegNext-s": 0.0, - "SegNext-t": 0.0 + "ResNeXt101-ATSS": 0.635, + "YOLOX-S": 0.429, + "MobileNetV2-ATSS": 0.467, + "SSD": 0.221, + "YOLOX-TINY": 0.577, + "YOLOX-L": 0.684, + "YOLOX-X": 0.696 + }, + "export": { + "ResNeXt101-ATSS": 0.635, + "YOLOX-S": 0.429, + "MobileNetV2-ATSS": 0.466, + "SSD": 0.222, + "YOLOX-TINY": 0.575, + "YOLOX-L": 0.681, + "YOLOX-X": 0.69 + }, + "deploy": { + "ResNeXt101-ATSS": 0.635, + "YOLOX-S": 0.429, + "MobileNetV2-ATSS": 0.466, + "SSD": 0.222, + "YOLOX-TINY": 0.575, + "YOLOX-L": 0.681, + "YOLOX-X": 0.69 + }, + "nncf": { + "ResNeXt101-ATSS": 0.63, + "YOLOX-S": 0.429, + "MobileNetV2-ATSS": 0.462, + "SSD": 0.217, + "YOLOX-TINY": 0.571, + "YOLOX-L": 0.68, + "YOLOX-X": 0.691 + }, + "ptq": { + "ResNeXt101-ATSS": 0.632, + "YOLOX-S": 0.424, + "MobileNetV2-ATSS": 0.461, + "SSD": 0.219, + "YOLOX-TINY": 0.568, + "YOLOX-L": 0.675, + "YOLOX-X": 0.689 } } }, "semi_supervised": { "multi_class": { "train": { - "Lite-HRNet-s-mod2": 0.681, - "Lite-HRNet-18-mod2": 0.696, - "Lite-HRNet-18": 0.742, - "Lite-HRNet-x-mod3": 0.802, - "SegNext-B": 0.0, - "SegNext-s": 0.0, - "SegNext-t": 0.0 + "ResNeXt101-ATSS": 0.635, + "YOLOX-S": 0.074, + "MobileNetV2-ATSS": 0.374, + "SSD": 0.158, + "YOLOX-TINY": 0.453, + "YOLOX-L": 0.652, + "YOLOX-X": 0.673 + }, + "export": { + "ResNeXt101-ATSS": 0.0, + "YOLOX-S": 0.0, + "MobileNetV2-ATSS": 0.0, + "SSD": 0.0, + "YOLOX-TINY": 0.0, + "YOLOX-L": 0.0, + "YOLOX-X": 0.0 + }, + "deploy": { + "ResNeXt101-ATSS": 0.0, + "YOLOX-S": 0.0, + "MobileNetV2-ATSS": 0.0, + "SSD": 0.0, + "YOLOX-TINY": 0.0, + "YOLOX-L": 0.0, + "YOLOX-X": 0.0 + }, + "nncf": { + "ResNeXt101-ATSS": 0.0, + "YOLOX-S": 0.0, + "MobileNetV2-ATSS": 0.0, + "SSD": 0.0, + "YOLOX-TINY": 0.0, + "YOLOX-L": 0.0, + "YOLOX-X": 0.0 + }, + "ptq": { + "ResNeXt101-ATSS": 0.0, + "YOLOX-S": 0.0, + "MobileNetV2-ATSS": 0.0, + "SSD": 0.0, + "YOLOX-TINY": 0.0, + "YOLOX-L": 0.0, + "YOLOX-X": 0.0 } } }, - "self_supervised": { + "class_incr": { "multi_class": { "train": { - "Lite-HRNet-s-mod2": 0.785, - "Lite-HRNet-18-mod2": 0.798, - "Lite-HRNet-18": 0.782, - "Lite-HRNet-x-mod3": 0.785, - "SegNext-B": 0.0, - "SegNext-s": 0.0, - "SegNext-t": 0.0 + "ResNeXt101-ATSS": 0.631, + "YOLOX-S": 0.425, + "MobileNetV2-ATSS": 0.463, + "SSD": 0.23, + "YOLOX-TINY": 0.572, + "YOLOX-L": 0.679, + "YOLOX-X": 0.691 + }, + "export": { + "ResNeXt101-ATSS": 0.0, + "YOLOX-S": 0.0, + "MobileNetV2-ATSS": 0.0, + "SSD": 0.0, + "YOLOX-TINY": 0.0, + "YOLOX-L": 0.0, + "YOLOX-X": 0.0 + }, + "deploy": { + "ResNeXt101-ATSS": 0.0, + "YOLOX-S": 0.0, + "MobileNetV2-ATSS": 0.0, + "SSD": 0.0, + "YOLOX-TINY": 0.0, + "YOLOX-L": 0.0, + "YOLOX-X": 0.0 + }, + "nncf": { + "ResNeXt101-ATSS": 0.0, + "YOLOX-S": 0.0, + "MobileNetV2-ATSS": 0.0, + "SSD": 0.0, + "YOLOX-TINY": 0.0, + "YOLOX-L": 0.0, + "YOLOX-X": 0.0 + }, + "ptq": { + "ResNeXt101-ATSS": 0.0, + "YOLOX-S": 0.0, + "MobileNetV2-ATSS": 0.0, + "SSD": 0.0, + "YOLOX-TINY": 0.0, + "YOLOX-L": 0.0, + "YOLOX-X": 0.0 } } } @@ -523,79 +903,285 @@ "supervised": { "multi_class": { "train": { - "MaskRCNN-ResNet50": 0.466, - "MaskRCNN-EfficientNetB2B": 0.27, - "MaskRCNN-SwinT-FP16": 0.438 + "MaskRCNN-SwinT-FP16": 0.477, + "MaskRCNN-EfficientNetB2B": 0.333, + "MaskRCNN-ResNet50": 0.448 + }, + "export": { + "MaskRCNN-SwinT-FP16": 0.482, + "MaskRCNN-EfficientNetB2B": 0.347, + "MaskRCNN-ResNet50": 0.47 + }, + "deploy": { + "MaskRCNN-SwinT-FP16": 0.482, + "MaskRCNN-EfficientNetB2B": 0.347, + "MaskRCNN-ResNet50": 0.47 + }, + "nncf": { + 
"MaskRCNN-SwinT-FP16": 0.457, + "MaskRCNN-EfficientNetB2B": 0.0, + "MaskRCNN-ResNet50": 0.0 + }, + "ptq": { + "MaskRCNN-SwinT-FP16": 0.453, + "MaskRCNN-EfficientNetB2B": 0.337, + "MaskRCNN-ResNet50": 0.467 + } + } + }, + "class_incr": { + "multi_class": { + "train": { + "MaskRCNN-SwinT-FP16": 0.476, + "MaskRCNN-EfficientNetB2B": 0.306, + "MaskRCNN-ResNet50": 0.487 }, "export": { - "MaskRCNN-ResNet50": 0.0, + "MaskRCNN-SwinT-FP16": 0.0, "MaskRCNN-EfficientNetB2B": 0.0, - "MaskRCNN-SwinT-FP16": 0.438 + "MaskRCNN-ResNet50": 0.0 }, "deploy": { - "MaskRCNN-ResNet50": 0.0, + "MaskRCNN-SwinT-FP16": 0.0, "MaskRCNN-EfficientNetB2B": 0.0, - "MaskRCNN-SwinT-FP16": 0.438 + "MaskRCNN-ResNet50": 0.0 }, "nncf": { - "MaskRCNN-ResNet50": 0.437, - "MaskRCNN-EfficientNetB2B": 0.286, - "MaskRCNN-SwinT-FP16": 0.438 + "MaskRCNN-SwinT-FP16": 0.0, + "MaskRCNN-EfficientNetB2B": 0.0, + "MaskRCNN-ResNet50": 0.0 }, "ptq": { - "MaskRCNN-ResNet50": 0.0, + "MaskRCNN-SwinT-FP16": 0.0, "MaskRCNN-EfficientNetB2B": 0.0, - "MaskRCNN-SwinT-FP16": 0.438 + "MaskRCNN-ResNet50": 0.0 + } + } + } + }, + "segmentation": { + "supervised": { + "multi_class": { + "train": { + "SegNext-B": 0.823, + "Lite-HRNet-18": 0.706, + "SegNext-t": 0.769, + "SegNext-s": 0.796, + "Lite-HRNet-18-mod2": 0.709, + "Lite-HRNet-s-mod2": 0.681, + "Lite-HRNet-x-mod3": 0.678 + }, + "export": { + "SegNext-B": 0.823, + "Lite-HRNet-18": 0.705, + "SegNext-t": 0.769, + "SegNext-s": 0.795, + "Lite-HRNet-18-mod2": 0.708, + "Lite-HRNet-s-mod2": 0.681, + "Lite-HRNet-x-mod3": 0.678 + }, + "deploy": { + "SegNext-B": 0.823, + "Lite-HRNet-18": 0.705, + "SegNext-t": 0.769, + "SegNext-s": 0.795, + "Lite-HRNet-18-mod2": 0.708, + "Lite-HRNet-s-mod2": 0.681, + "Lite-HRNet-x-mod3": 0.678 + }, + "nncf": { + "SegNext-B": 0.0, + "Lite-HRNet-18": 0.0, + "SegNext-t": 0.0, + "SegNext-s": 0.0, + "Lite-HRNet-18-mod2": 0.0, + "Lite-HRNet-s-mod2": 0.0, + "Lite-HRNet-x-mod3": 0.0 + }, + "ptq": { + "SegNext-B": 0.821, + "Lite-HRNet-18": 0.121, + "SegNext-t": 0.756, + "SegNext-s": 0.794, + "Lite-HRNet-18-mod2": 0.706, + "Lite-HRNet-s-mod2": 0.619, + "Lite-HRNet-x-mod3": 0.623 + } + }, + "supcon": { + "train": { + "Lite-HRNet-18": 0.551, + "Lite-HRNet-18-mod2": 0.539, + "Lite-HRNet-s-mod2": 0.466, + "Lite-HRNet-x-mod3": 0.551 + }, + "export": { + "Lite-HRNet-18": 0.0, + "Lite-HRNet-18-mod2": 0.0, + "Lite-HRNet-s-mod2": 0.0, + "Lite-HRNet-x-mod3": 0.0 + }, + "deploy": { + "Lite-HRNet-18": 0.0, + "Lite-HRNet-18-mod2": 0.0, + "Lite-HRNet-s-mod2": 0.0, + "Lite-HRNet-x-mod3": 0.0 + }, + "nncf": { + "Lite-HRNet-18": 0.0, + "Lite-HRNet-18-mod2": 0.0, + "Lite-HRNet-s-mod2": 0.0, + "Lite-HRNet-x-mod3": 0.0 + }, + "ptq": { + "Lite-HRNet-18": 0.0, + "Lite-HRNet-18-mod2": 0.0, + "Lite-HRNet-s-mod2": 0.0, + "Lite-HRNet-x-mod3": 0.0 } } }, - "class_incr": { + "semi_supervised": { "multi_class": { "train": { - "MaskRCNN-ResNet50": 0.481, - "MaskRCNN-EfficientNetB2B": 0.307 + "SegNext-B": 0.825, + "Lite-HRNet-18": 0.665, + "SegNext-t": 0.73, + "SegNext-s": 0.746, + "Lite-HRNet-18-mod2": 0.677, + "Lite-HRNet-s-mod2": 0.681, + "Lite-HRNet-x-mod3": 0.658 + }, + "export": { + "SegNext-B": 0.0, + "Lite-HRNet-18": 0.0, + "SegNext-t": 0.0, + "SegNext-s": 0.0, + "Lite-HRNet-18-mod2": 0.0, + "Lite-HRNet-s-mod2": 0.0, + "Lite-HRNet-x-mod3": 0.0 + }, + "deploy": { + "SegNext-B": 0.0, + "Lite-HRNet-18": 0.0, + "SegNext-t": 0.0, + "SegNext-s": 0.0, + "Lite-HRNet-18-mod2": 0.0, + "Lite-HRNet-s-mod2": 0.0, + "Lite-HRNet-x-mod3": 0.0 + }, + "nncf": { + "SegNext-B": 0.0, + "Lite-HRNet-18": 0.0, + "SegNext-t": 0.0, + 
"SegNext-s": 0.0, + "Lite-HRNet-18-mod2": 0.0, + "Lite-HRNet-s-mod2": 0.0, + "Lite-HRNet-x-mod3": 0.0 + }, + "ptq": { + "SegNext-B": 0.0, + "Lite-HRNet-18": 0.0, + "SegNext-t": 0.0, + "SegNext-s": 0.0, + "Lite-HRNet-18-mod2": 0.0, + "Lite-HRNet-s-mod2": 0.0, + "Lite-HRNet-x-mod3": 0.0 } } }, - "tiling": { + "self_supervised": { "multi_class": { "train": { - "MaskRCNN-ResNet50": 0.471, - "MaskRCNN-EfficientNetB2B": 0.304 + "SegNext-B": 0.797, + "Lite-HRNet-18": 0.657, + "SegNext-t": 0.653, + "SegNext-s": 0.759, + "Lite-HRNet-18-mod2": 0.647, + "Lite-HRNet-s-mod2": 0.633, + "Lite-HRNet-x-mod3": 0.658 }, "export": { - "MaskRCNN-ResNet50": 0.0, - "MaskRCNN-EfficientNetB2B": 0.0 + "SegNext-B": 0.0, + "Lite-HRNet-18": 0.0, + "SegNext-t": 0.0, + "SegNext-s": 0.0, + "Lite-HRNet-18-mod2": 0.0, + "Lite-HRNet-s-mod2": 0.0, + "Lite-HRNet-x-mod3": 0.0 }, "deploy": { - "MaskRCNN-ResNet50": 0.0, - "MaskRCNN-EfficientNetB2B": 0.0 + "SegNext-B": 0.0, + "Lite-HRNet-18": 0.0, + "SegNext-t": 0.0, + "SegNext-s": 0.0, + "Lite-HRNet-18-mod2": 0.0, + "Lite-HRNet-s-mod2": 0.0, + "Lite-HRNet-x-mod3": 0.0 }, "nncf": { - "MaskRCNN-ResNet50": 0.0, - "MaskRCNN-EfficientNetB2B": 0.0 + "SegNext-B": 0.0, + "Lite-HRNet-18": 0.0, + "SegNext-t": 0.0, + "SegNext-s": 0.0, + "Lite-HRNet-18-mod2": 0.0, + "Lite-HRNet-s-mod2": 0.0, + "Lite-HRNet-x-mod3": 0.0 }, "ptq": { - "MaskRCNN-ResNet50": 0.0, - "MaskRCNN-EfficientNetB2B": 0.0 + "SegNext-B": 0.0, + "Lite-HRNet-18": 0.0, + "SegNext-t": 0.0, + "SegNext-s": 0.0, + "Lite-HRNet-18-mod2": 0.0, + "Lite-HRNet-s-mod2": 0.0, + "Lite-HRNet-x-mod3": 0.0 } } - } - }, - "action_classification": { - "supervised": { + }, + "class_incr": { "multi_class": { "train": { - "X3D": 0.612, - "MoViNet": 0.492 + "Lite-HRNet-18": 0.635, + "Lite-HRNet-18-mod2": 0.656, + "Lite-HRNet-s-mod2": 0.639, + "Lite-HRNet-x-mod3": 0.565 }, "export": { - "X3D": 0.589, - "MoViNet": 0.0 + "Lite-HRNet-18": 0.0, + "Lite-HRNet-18-mod2": 0.0, + "Lite-HRNet-s-mod2": 0.0, + "Lite-HRNet-x-mod3": 0.0 + }, + "deploy": { + "Lite-HRNet-18": 0.0, + "Lite-HRNet-18-mod2": 0.0, + "Lite-HRNet-s-mod2": 0.0, + "Lite-HRNet-x-mod3": 0.0 + }, + "nncf": { + "Lite-HRNet-18": 0.0, + "Lite-HRNet-18-mod2": 0.0, + "Lite-HRNet-s-mod2": 0.0, + "Lite-HRNet-x-mod3": 0.0 }, "ptq": { - "X3D": 0.58, - "MoViNet": 0.0 + "Lite-HRNet-18": 0.0, + "Lite-HRNet-18-mod2": 0.0, + "Lite-HRNet-s-mod2": 0.0, + "Lite-HRNet-x-mod3": 0.0 + } + } + } + } + }, + "kpi_e2e_train_time_criteria": { + "action_classification": { + "supervised": { + "multi_class": { + "train": { + "MoViNet": 261.901, + "X3D": 293.573 } } } @@ -604,1166 +1190,271 @@ "supervised": { "multi_class": { "train": { - "X3D_FAST_RCNN": 0.59 + "X3D_FAST_RCNN": 1324.578 } } } }, - "anomaly_classification": { + "anomaly_segmentation": { "train": { - "bottle": { - "STFPM": 0.759, - "PADIM": 0.94 - }, - "cable": { - "STFPM": 0.613, - "PADIM": 0.68 - }, - "capsule": { - "STFPM": 0.826, - "PADIM": 0.856 - }, "carpet": { - "STFPM": 0.829, - "PADIM": 0.761 - }, - "grid": { - "STFPM": 0.756, - "PADIM": 0.744 - }, - "hazelnut": { - "STFPM": 0.827, - "PADIM": 0.891 - }, - "leather": { - "STFPM": 0.887, - "PADIM": 0.815 - }, - "metal_nut": { - "STFPM": 0.809, - "PADIM": 0.835 - }, - "pill": { - "STFPM": 0.844, - "PADIM": 0.844 - }, - "screw": { - "STFPM": 0.762, - "PADIM": 0.744 - }, - "tile": { - "STFPM": 0.829, - "PADIM": 0.761 - }, - "toothbrush": { - "STFPM": 0.714, - "PADIM": 0.762 - }, - "transistor": { - "STFPM": 0.53, - "PADIM": 0.66 + "STFPM": 606.461, + "PADIM": 51.744 }, "wood": { - "STFPM": 0.899, - 
"PADIM": 0.949 + "STFPM": 843.012, + "PADIM": 54.485 }, "zipper": { - "STFPM": 0.854, - "PADIM": 0.848 + "STFPM": 570.453, + "PADIM": 52.987 } - }, - "export": { - "bottle": { - "STFPM": 0.759, - "PADIM": 0.94 - }, - "cable": { - "STFPM": 0.613, - "PADIM": 0.68 - }, - "capsule": { - "STFPM": 0.826, - "PADIM": 0.856 - }, + } + }, + "anomaly_detection": { + "train": { "carpet": { - "STFPM": 0.829, - "PADIM": 0.761 - }, - "grid": { - "STFPM": 0.756, - "PADIM": 0.744 - }, - "hazelnut": { - "STFPM": 0.827, - "PADIM": 0.891 - }, - "leather": { - "STFPM": 0.887, - "PADIM": 0.815 - }, - "metal_nut": { - "STFPM": 0.809, - "PADIM": 0.835 - }, - "pill": { - "STFPM": 0.844, - "PADIM": 0.844 - }, - "screw": { - "STFPM": 0.762, - "PADIM": 0.744 - }, - "tile": { - "STFPM": 0.829, - "PADIM": 0.761 - }, - "toothbrush": { - "STFPM": 0.714, - "PADIM": 0.762 - }, - "transistor": { - "STFPM": 0.53, - "PADIM": 0.66 + "STFPM": 380.418, + "PADIM": 35.742 }, "wood": { - "STFPM": 0.899, - "PADIM": 0.949 + "STFPM": 233.758, + "PADIM": 35.502 }, "zipper": { - "STFPM": 0.854, - "PADIM": 0.848 - } - }, - "deploy": { - "bottle": { - "STFPM": 0.759, - "PADIM": 0.94 - }, - "cable": { - "STFPM": 0.613, - "PADIM": 0.68 - }, - "capsule": { - "STFPM": 0.826, - "PADIM": 0.856 - }, - "carpet": { - "STFPM": 0.829, - "PADIM": 0.761 - }, - "grid": { - "STFPM": 0.756, - "PADIM": 0.744 - }, - "hazelnut": { - "STFPM": 0.827, - "PADIM": 0.891 - }, - "leather": { - "STFPM": 0.887, - "PADIM": 0.815 - }, - "metal_nut": { - "STFPM": 0.809, - "PADIM": 0.835 - }, - "pill": { - "STFPM": 0.844, - "PADIM": 0.844 - }, - "screw": { - "STFPM": 0.762, - "PADIM": 0.744 - }, - "tile": { - "STFPM": 0.829, - "PADIM": 0.761 - }, - "toothbrush": { - "STFPM": 0.714, - "PADIM": 0.762 - }, - "transistor": { - "STFPM": 0.53, - "PADIM": 0.66 - }, - "wood": { - "STFPM": 0.899, - "PADIM": 0.949 - }, - "zipper": { - "STFPM": 0.854, - "PADIM": 0.848 - } - }, - "nncf": { - "bottle": { - "STFPM": 0.759, - "PADIM": 0.94 - }, - "cable": { - "STFPM": 0.613, - "PADIM": 0.68 - }, - "capsule": { - "STFPM": 0.826, - "PADIM": 0.856 - }, - "carpet": { - "STFPM": 0.829, - "PADIM": 0.761 - }, - "grid": { - "STFPM": 0.756, - "PADIM": 0.744 - }, - "hazelnut": { - "STFPM": 0.827, - "PADIM": 0.891 - }, - "leather": { - "STFPM": 0.887, - "PADIM": 0.815 - }, - "metal_nut": { - "STFPM": 0.809, - "PADIM": 0.835 - }, - "pill": { - "STFPM": 0.844, - "PADIM": 0.844 - }, - "screw": { - "STFPM": 0.762, - "PADIM": 0.744 - }, - "tile": { - "STFPM": 0.829, - "PADIM": 0.761 - }, - "toothbrush": { - "STFPM": 0.714, - "PADIM": 0.762 - }, - "transistor": { - "STFPM": 0.53, - "PADIM": 0.66 - }, - "wood": { - "STFPM": 0.899, - "PADIM": 0.949 - }, - "zipper": { - "STFPM": 0.854, - "PADIM": 0.848 - } - }, - "ptq": { - "bottle": { - "STFPM": 0.759, - "PADIM": 0.94 - }, - "cable": { - "STFPM": 0.613, - "PADIM": 0.68 - }, - "capsule": { - "STFPM": 0.826, - "PADIM": 0.856 - }, - "carpet": { - "STFPM": 0.829, - "PADIM": 0.761 - }, - "grid": { - "STFPM": 0.756, - "PADIM": 0.744 - }, - "hazelnut": { - "STFPM": 0.827, - "PADIM": 0.891 - }, - "leather": { - "STFPM": 0.887, - "PADIM": 0.815 - }, - "metal_nut": { - "STFPM": 0.809, - "PADIM": 0.835 - }, - "pill": { - "STFPM": 0.844, - "PADIM": 0.844 - }, - "screw": { - "STFPM": 0.762, - "PADIM": 0.744 - }, - "tile": { - "STFPM": 0.829, - "PADIM": 0.761 - }, - "toothbrush": { - "STFPM": 0.714, - "PADIM": 0.762 - }, - "transistor": { - "STFPM": 0.53, - "PADIM": 0.66 - }, - "wood": { - "STFPM": 0.899, - "PADIM": 0.949 - }, - "zipper": { - "STFPM": 0.854, - 
"PADIM": 0.848 + "STFPM": 389.025, + "PADIM": 36.59 } } }, - "anomaly_detection": { + "anomaly_classification": { "train": { - "bottle": { - "STFPM": 0.029, - "PADIM": 0.156 - }, - "cable": { - "STFPM": 0.0, - "PADIM": 0.061 - }, - "capsule": { - "STFPM": 0.004, - "PADIM": 0.127 - }, - "carpet": { - "STFPM": 0.185, - "PADIM": 0.244 - }, - "grid": { - "STFPM": 0.0, - "PADIM": 0.012 - }, - "hazelnut": { - "STFPM": 0.0, - "PADIM": 0.116 - }, - "leather": { - "STFPM": 0.049, - "PADIM": 0.18 - }, - "metal_nut": { - "STFPM": 0.021, - "PADIM": 0.129 - }, - "pill": { - "STFPM": 0.032, - "PADIM": 0.028 - }, - "screw": { - "STFPM": 0.0, - "PADIM": 0.023 - }, - "tile": { - "STFPM": 0.016, - "PADIM": 0.125 - }, - "toothbrush": { - "STFPM": 0.015, - "PADIM": 0.176 - }, - "transistor": { - "STFPM": 0.0, - "PADIM": 0.116 - }, - "wood": { - "STFPM": 0.062, - "PADIM": 0.152 - }, - "zipper": { - "STFPM": 0.191, - "PADIM": 0.08 - } - }, - "export": { - "bottle": { - "STFPM": 0.029, - "PADIM": 0.156 - }, - "cable": { - "STFPM": 0.0, - "PADIM": 0.061 - }, - "capsule": { - "STFPM": 0.004, - "PADIM": 0.127 - }, - "carpet": { - "STFPM": 0.185, - "PADIM": 0.244 - }, - "grid": { - "STFPM": 0.0, - "PADIM": 0.012 - }, - "hazelnut": { - "STFPM": 0.0, - "PADIM": 0.116 - }, - "leather": { - "STFPM": 0.049, - "PADIM": 0.18 - }, - "metal_nut": { - "STFPM": 0.021, - "PADIM": 0.129 - }, - "pill": { - "STFPM": 0.032, - "PADIM": 0.028 - }, - "screw": { - "STFPM": 0.0, - "PADIM": 0.023 - }, - "tile": { - "STFPM": 0.016, - "PADIM": 0.125 - }, - "toothbrush": { - "STFPM": 0.015, - "PADIM": 0.176 - }, - "transistor": { - "STFPM": 0.0, - "PADIM": 0.116 - }, - "wood": { - "STFPM": 0.062, - "PADIM": 0.152 - }, - "zipper": { - "STFPM": 0.191, - "PADIM": 0.08 - } - }, - "deploy": { - "bottle": { - "STFPM": 0.029, - "PADIM": 0.156 - }, - "cable": { - "STFPM": 0.0, - "PADIM": 0.061 - }, - "capsule": { - "STFPM": 0.004, - "PADIM": 0.127 - }, - "carpet": { - "STFPM": 0.185, - "PADIM": 0.244 - }, - "grid": { - "STFPM": 0.0, - "PADIM": 0.012 - }, - "hazelnut": { - "STFPM": 0.0, - "PADIM": 0.116 - }, - "leather": { - "STFPM": 0.049, - "PADIM": 0.18 - }, - "metal_nut": { - "STFPM": 0.021, - "PADIM": 0.129 - }, - "pill": { - "STFPM": 0.032, - "PADIM": 0.028 - }, - "screw": { - "STFPM": 0.0, - "PADIM": 0.023 - }, - "tile": { - "STFPM": 0.016, - "PADIM": 0.125 - }, - "toothbrush": { - "STFPM": 0.015, - "PADIM": 0.176 - }, - "transistor": { - "STFPM": 0.0, - "PADIM": 0.116 - }, - "wood": { - "STFPM": 0.062, - "PADIM": 0.152 - }, - "zipper": { - "STFPM": 0.191, - "PADIM": 0.08 - } - }, - "nncf": { - "bottle": { - "STFPM": 0.029, - "PADIM": 0.156 - }, - "cable": { - "STFPM": 0.0, - "PADIM": 0.061 - }, - "capsule": { - "STFPM": 0.004, - "PADIM": 0.127 - }, - "carpet": { - "STFPM": 0.185, - "PADIM": 0.244 - }, - "grid": { - "STFPM": 0.0, - "PADIM": 0.012 - }, - "hazelnut": { - "STFPM": 0.0, - "PADIM": 0.116 - }, - "leather": { - "STFPM": 0.049, - "PADIM": 0.18 - }, - "metal_nut": { - "STFPM": 0.021, - "PADIM": 0.129 - }, - "pill": { - "STFPM": 0.032, - "PADIM": 0.028 - }, - "screw": { - "STFPM": 0.0, - "PADIM": 0.023 - }, - "tile": { - "STFPM": 0.016, - "PADIM": 0.125 - }, - "toothbrush": { - "STFPM": 0.015, - "PADIM": 0.176 - }, - "transistor": { - "STFPM": 0.0, - "PADIM": 0.116 - }, - "wood": { - "STFPM": 0.062, - "PADIM": 0.152 - }, - "zipper": { - "STFPM": 0.191, - "PADIM": 0.08 - } - }, - "ptq": { - "bottle": { - "STFPM": 0.029, - "PADIM": 0.156 - }, - "cable": { - "STFPM": 0.0, - "PADIM": 0.061 - }, - "capsule": { - "STFPM": 0.004, - "PADIM": 
0.127 - }, "carpet": { - "STFPM": 0.185, - "PADIM": 0.244 - }, - "grid": { - "STFPM": 0.0, - "PADIM": 0.012 - }, - "hazelnut": { - "STFPM": 0.0, - "PADIM": 0.116 - }, - "leather": { - "STFPM": 0.049, - "PADIM": 0.18 - }, - "metal_nut": { - "STFPM": 0.021, - "PADIM": 0.129 - }, - "pill": { - "STFPM": 0.032, - "PADIM": 0.028 - }, - "screw": { - "STFPM": 0.0, - "PADIM": 0.023 - }, - "tile": { - "STFPM": 0.016, - "PADIM": 0.125 - }, - "toothbrush": { - "STFPM": 0.015, - "PADIM": 0.176 - }, - "transistor": { - "STFPM": 0.0, - "PADIM": 0.116 + "STFPM": 483.17, + "PADIM": 33.738 }, "wood": { - "STFPM": 0.062, - "PADIM": 0.152 + "STFPM": 391.582, + "PADIM": 33.527 }, "zipper": { - "STFPM": 0.191, - "PADIM": 0.08 + "STFPM": 284.238, + "PADIM": 33.347 } } }, - "anomaly_segmentation": { - "train": { - "bottle": { - "STFPM": 0.281, - "PADIM": 0.614 - }, - "cable": { - "STFPM": 0.189, - "PADIM": 0.212 - }, - "capsule": { - "STFPM": 0.131, - "PADIM": 0.307 - }, - "carpet": { - "STFPM": 0.225, - "PADIM": 0.348 - }, - "grid": { - "STFPM": 0.045, - "PADIM": 0.037 - }, - "hazelnut": { - "STFPM": 0.453, - "PADIM": 0.515 - }, - "leather": { - "STFPM": 0.262, - "PADIM": 0.417 - }, - "metal_nut": { - "STFPM": 0.389, - "PADIM": 0.428 - }, - "pill": { - "STFPM": 0.166, - "PADIM": 0.262 - }, - "screw": { - "STFPM": 0.029, - "PADIM": 0.11 - }, - "tile": { - "STFPM": 0.304, - "PADIM": 0.223 - }, - "toothbrush": { - "STFPM": 0.166, - "PADIM": 0.379 - }, - "transistor": { - "STFPM": 0.158, - "PADIM": 0.412 - }, - "wood": { - "STFPM": 0.275, - "PADIM": 0.389 - }, - "zipper": { - "STFPM": 0.352, - "PADIM": 0.235 - } - }, - "export": { - "bottle": { - "STFPM": 0.281, - "PADIM": 0.614 - }, - "cable": { - "STFPM": 0.189, - "PADIM": 0.212 - }, - "capsule": { - "STFPM": 0.131, - "PADIM": 0.307 - }, - "carpet": { - "STFPM": 0.225, - "PADIM": 0.348 - }, - "grid": { - "STFPM": 0.045, - "PADIM": 0.037 - }, - "hazelnut": { - "STFPM": 0.453, - "PADIM": 0.515 - }, - "leather": { - "STFPM": 0.262, - "PADIM": 0.417 - }, - "metal_nut": { - "STFPM": 0.389, - "PADIM": 0.428 - }, - "pill": { - "STFPM": 0.166, - "PADIM": 0.262 - }, - "screw": { - "STFPM": 0.029, - "PADIM": 0.11 - }, - "tile": { - "STFPM": 0.304, - "PADIM": 0.223 - }, - "toothbrush": { - "STFPM": 0.166, - "PADIM": 0.379 - }, - "transistor": { - "STFPM": 0.158, - "PADIM": 0.412 - }, - "wood": { - "STFPM": 0.275, - "PADIM": 0.389 - }, - "zipper": { - "STFPM": 0.352, - "PADIM": 0.235 - } - }, - "deploy": { - "bottle": { - "STFPM": 0.281, - "PADIM": 0.614 - }, - "cable": { - "STFPM": 0.189, - "PADIM": 0.212 - }, - "capsule": { - "STFPM": 0.131, - "PADIM": 0.307 - }, - "carpet": { - "STFPM": 0.225, - "PADIM": 0.348 - }, - "grid": { - "STFPM": 0.045, - "PADIM": 0.037 - }, - "hazelnut": { - "STFPM": 0.453, - "PADIM": 0.515 - }, - "leather": { - "STFPM": 0.262, - "PADIM": 0.417 - }, - "metal_nut": { - "STFPM": 0.389, - "PADIM": 0.428 - }, - "pill": { - "STFPM": 0.166, - "PADIM": 0.262 - }, - "screw": { - "STFPM": 0.029, - "PADIM": 0.11 - }, - "tile": { - "STFPM": 0.304, - "PADIM": 0.223 - }, - "toothbrush": { - "STFPM": 0.166, - "PADIM": 0.379 - }, - "transistor": { - "STFPM": 0.158, - "PADIM": 0.412 - }, - "wood": { - "STFPM": 0.275, - "PADIM": 0.389 - }, - "zipper": { - "STFPM": 0.352, - "PADIM": 0.235 - } - }, - "nncf": { - "bottle": { - "STFPM": 0.281, - "PADIM": 0.614 - }, - "cable": { - "STFPM": 0.189, - "PADIM": 0.212 - }, - "capsule": { - "STFPM": 0.131, - "PADIM": 0.307 - }, - "carpet": { - "STFPM": 0.225, - "PADIM": 0.348 - }, - "grid": { - "STFPM": 0.045, - "PADIM": 
0.037 - }, - "hazelnut": { - "STFPM": 0.453, - "PADIM": 0.515 - }, - "leather": { - "STFPM": 0.262, - "PADIM": 0.417 - }, - "metal_nut": { - "STFPM": 0.389, - "PADIM": 0.428 - }, - "pill": { - "STFPM": 0.166, - "PADIM": 0.262 - }, - "screw": { - "STFPM": 0.029, - "PADIM": 0.11 - }, - "tile": { - "STFPM": 0.304, - "PADIM": 0.223 - }, - "toothbrush": { - "STFPM": 0.166, - "PADIM": 0.379 - }, - "transistor": { - "STFPM": 0.158, - "PADIM": 0.412 - }, - "wood": { - "STFPM": 0.275, - "PADIM": 0.389 - }, - "zipper": { - "STFPM": 0.352, - "PADIM": 0.235 - } - }, - "ptq": { - "bottle": { - "STFPM": 0.281, - "PADIM": 0.614 - }, - "cable": { - "STFPM": 0.189, - "PADIM": 0.212 - }, - "capsule": { - "STFPM": 0.131, - "PADIM": 0.307 - }, - "carpet": { - "STFPM": 0.225, - "PADIM": 0.348 - }, - "grid": { - "STFPM": 0.045, - "PADIM": 0.037 - }, - "hazelnut": { - "STFPM": 0.453, - "PADIM": 0.515 - }, - "leather": { - "STFPM": 0.262, - "PADIM": 0.417 - }, - "metal_nut": { - "STFPM": 0.389, - "PADIM": 0.428 - }, - "pill": { - "STFPM": 0.166, - "PADIM": 0.262 - }, - "screw": { - "STFPM": 0.029, - "PADIM": 0.11 - }, - "tile": { - "STFPM": 0.304, - "PADIM": 0.223 - }, - "toothbrush": { - "STFPM": 0.166, - "PADIM": 0.379 - }, - "transistor": { - "STFPM": 0.158, - "PADIM": 0.412 - }, - "wood": { - "STFPM": 0.275, - "PADIM": 0.389 - }, - "zipper": { - "STFPM": 0.352, - "PADIM": 0.235 - } - } - } - }, - "kpi_e2e_train_time_criteria": { - "classification": { - "supervised": { - "multi_class": { - "train": { - "EfficientNet-V2-S": 141.137, - "MobileNet-V3-large-1x": 72.732, - "EfficientNet-B0": 82.521, - "Deti-Tiny": 0.0 - } - }, - "multi_label": { - "train": { - "EfficientNet-V2-S": 214.508, - "MobileNet-V3-large-1x": 185.442, - "EfficientNet-B0": 187.332, - "Deti-Tiny": 0.0 - } - }, - "h_label": { - "train": { - "EfficientNet-V2-S": 149.627, - "MobileNet-V3-large-1x": 76.041, - "EfficientNet-B0": 87.159, - "Deti-Tiny": 0.0 - } - }, - "supcon": { - "train": { - "EfficientNet-V2-S": 136.036, - "MobileNet-V3-large-1x": 134.635, - "EfficientNet-B0": 124.474, - "Deti-Tiny": 0.0 - } - } - }, - "class_incr": { + "classification": { + "supervised": { "multi_class": { "train": { - "EfficientNet-V2-S": 141.137, - "MobileNet-V3-large-1x": 80.603, - "EfficientNet-B0": 82.521, - "Deti-Tiny": 0.0 + "EfficientNet-V2-S": 218.197, + "EfficientNet-B0": 178.446, + "MobileNet-V3-large-1x": 166.941, + "DeiT-Tiny": 164.726 } }, "multi_label": { "train": { - "EfficientNet-V2-S": 214.508, - "MobileNet-V3-large-1x": 185.442, - "EfficientNet-B0": 187.332, - "Deti-Tiny": 0.0 + "EfficientNet-V2-S": 184.384, + "EfficientNet-B0": 126.295, + "MobileNet-V3-large-1x": 102.924, + "DeiT-Tiny": 123.242 } }, "h_label": { "train": { - "EfficientNet-V2-S": 0.0, - "MobileNet-V3-large-1x": 0.0, - "EfficientNet-B0": 0.0, - "Deti-Tiny": 0.0 + "EfficientNet-V2-S": 110.326, + "EfficientNet-B0": 130.742, + "MobileNet-V3-large-1x": 115.362, + "DeiT-Tiny": 117.818 + } + }, + "supcon": { + "train": { + "EfficientNet-V2-S": 262.529, + "EfficientNet-B0": 218.38, + "MobileNet-V3-large-1x": 201.337 } } }, "semi_supervised": { "multi_class": { "train": { - "EfficientNet-V2-S": 423.184, - "MobileNet-V3-large-1x": 227.445, - "EfficientNet-B0": 230.227, - "Deti-Tiny": 0.0 + "EfficientNet-V2-S": 1114.203, + "EfficientNet-B0": 659.838, + "MobileNet-V3-large-1x": 545.86, + "DeiT-Tiny": 403.769 } } }, "self_supervised": { "multi_class": { "train": { - "EfficientNet-V2-S": 141.137, - "MobileNet-V3-large-1x": 72.239, - "EfficientNet-B0": 82.521, - "Deti-Tiny": 0.0 + 
"EfficientNet-V2-S": 19.834, + "EfficientNet-B0": 16.462, + "MobileNet-V3-large-1x": 15.144 + } + } + }, + "class_incr": { + "multi_class": { + "train": { + "EfficientNet-V2-S": 202.416, + "EfficientNet-B0": 229.533, + "MobileNet-V3-large-1x": 144.591 + } + }, + "multi_label": { + "train": { + "EfficientNet-V2-S": 175.599, + "EfficientNet-B0": 180.583, + "MobileNet-V3-large-1x": 118.757, + "DeiT-Tiny": 139.84 } } } }, "detection": { - "supervised": { + "tiling": { "multi_class": { "train": { - "YOLOX": 4226.84, - "SSD": 1992.196, - "MobileNetV2-ATSS": 1843.627 + "ResNeXt101-ATSS": 1252.666, + "YOLOX-S": 280.936, + "MobileNetV2-ATSS": 354.523, + "SSD": 298.68, + "YOLOX-TINY": 212.431, + "YOLOX-L": 395.392, + "YOLOX-X": 800.653 } } }, - "class_incr": { + "supervised": { "multi_class": { "train": { - "YOLOX": 3059.355, - "SSD": 1439.16, - "MobileNetV2-ATSS": 1531.103 + "ResNeXt101-ATSS": 2121.852, + "YOLOX-S": 620.867, + "MobileNetV2-ATSS": 316.917, + "SSD": 271.298, + "YOLOX-TINY": 464.486, + "YOLOX-L": 816.651, + "YOLOX-X": 1672.582 } } }, "semi_supervised": { "multi_class": { "train": { - "YOLOX": 4962.307, - "SSD": 6410.017, - "MobileNetV2-ATSS": 6263.435 + "ResNeXt101-ATSS": 2666.141, + "YOLOX-S": 443.316, + "MobileNetV2-ATSS": 536.618, + "SSD": 429.656, + "YOLOX-TINY": 406.971, + "YOLOX-L": 728.398, + "YOLOX-X": 1814.394 } } }, - "tiling": { + "class_incr": { "multi_class": { "train": { - "YOLOX": 1689.117, - "SSD": 2052.821, - "MobileNetV2-ATSS": 2074.501 + "ResNeXt101-ATSS": 2098.555, + "YOLOX-S": 626.248, + "MobileNetV2-ATSS": 518.843, + "SSD": 401.059, + "YOLOX-TINY": 471.214, + "YOLOX-L": 848.263, + "YOLOX-X": 1671.449 } } } }, - "segmentation": { + "instance_segmentation": { "supervised": { "multi_class": { "train": { - "Lite-HRNet-s-mod2": 393.137, - "Lite-HRNet-18-mod2": 443.94, - "Lite-HRNet-18": 446.99, - "Lite-HRNet-x-mod3": 667.493, - "SegNext-B": 0.0, - "SegNext-s": 0.0, - "SegNext-t": 0.0 - } - }, - "supcon": { - "train": { - "Lite-HRNet-s-mod2": 241.88, - "Lite-HRNet-18-mod2": 279.19, - "Lite-HRNet-18": 279.71, - "Lite-HRNet-x-mod3": 454.31, - "SegNext-B": 0.0, - "SegNext-s": 0.0, - "SegNext-t": 0.0 + "MaskRCNN-SwinT-FP16": 1364.765, + "MaskRCNN-EfficientNetB2B": 605.386, + "MaskRCNN-ResNet50": 1284.017 } } }, "class_incr": { "multi_class": { "train": { - "Lite-HRNet-s-mod2": 447.762, - "Lite-HRNet-18-mod2": 474.511, - "Lite-HRNet-18": 486.038, - "Lite-HRNet-x-mod3": 759.906, - "SegNext-B": 0.0, - "SegNext-s": 0.0, - "SegNext-t": 0.0 + "MaskRCNN-SwinT-FP16": 1369.209, + "MaskRCNN-EfficientNetB2B": 984.011, + "MaskRCNN-ResNet50": 1285.941 } } - }, - "semi_supervised": { + } + }, + "segmentation": { + "supervised": { "multi_class": { "train": { - "Lite-HRNet-s-mod2": 564.257, - "Lite-HRNet-18-mod2": 715.882, - "Lite-HRNet-18": 601.665, - "Lite-HRNet-x-mod3": 902.047, - "SegNext-B": 0.0, - "SegNext-s": 0.0, - "SegNext-t": 0.0 + "SegNext-B": 125.655, + "Lite-HRNet-18": 100.721, + "SegNext-t": 75.879, + "SegNext-s": 84.361, + "Lite-HRNet-18-mod2": 99.325, + "Lite-HRNet-s-mod2": 87.493, + "Lite-HRNet-x-mod3": 152.836 } - } - }, - "self_supervised": { - "multi_class": { + }, + "supcon": { "train": { - "Lite-HRNet-s-mod2": 727.372, - "Lite-HRNet-18-mod2": 756.383, - "Lite-HRNet-18": 750.812, - "Lite-HRNet-x-mod3": 1017.624, - "SegNext-B": 0.0, - "SegNext-s": 0.0, - "SegNext-t": 0.0 + "Lite-HRNet-18": 130.635, + "Lite-HRNet-18-mod2": 129.879, + "Lite-HRNet-s-mod2": 117.903, + "Lite-HRNet-x-mod3": 181.323 } } - } - }, - "instance_segmentation": { - "supervised": { + }, + 
"semi_supervised": { "multi_class": { "train": { - "MaskRCNN-ResNet50": 7234.698, - "MaskRCNN-EfficientNetB2B": 3450.991 + "SegNext-B": 159.307, + "Lite-HRNet-18": 147.132, + "SegNext-t": 106.195, + "SegNext-s": 114.734, + "Lite-HRNet-18-mod2": 143.903, + "Lite-HRNet-s-mod2": 122.088, + "Lite-HRNet-x-mod3": 236.362 } } }, - "class_incr": { + "self_supervised": { "multi_class": { "train": { - "MaskRCNN-ResNet50": 5154.329, - "MaskRCNN-EfficientNetB2B": 4342.104 + "SegNext-B": 185.828, + "Lite-HRNet-18": 158.973, + "SegNext-t": 143.487, + "SegNext-s": 149.674, + "Lite-HRNet-18-mod2": 161.458, + "Lite-HRNet-s-mod2": 150.652, + "Lite-HRNet-x-mod3": 205.917 } } }, - "tiling": { + "class_incr": { "multi_class": { "train": { - "MaskRCNN-ResNet50": 8086.079, - "MaskRCNN-EfficientNetB2B": 7274.234 + "Lite-HRNet-18": 148.859, + "Lite-HRNet-18-mod2": 148.226, + "Lite-HRNet-s-mod2": 125.109, + "Lite-HRNet-x-mod3": 255.65 } } } - }, + } + }, + "kpi_e2e_eval_time_criteria": { "action_classification": { "supervised": { "multi_class": { "train": { - "X3D": 3604.046, - "MoViNet": 2091.719 + "MoViNet": 39.944, + "X3D": 42.252 } } } @@ -1772,626 +1463,262 @@ "supervised": { "multi_class": { "train": { - "X3D_FAST_RCNN": 11349.635 + "X3D_FAST_RCNN": 94.44 } } } }, - "anomaly_classification": { + "anomaly_segmentation": { "train": { - "bottle": { - "STFPM": 473.916, - "PADIM": 94.682 - }, - "cable": { - "STFPM": 782.332, - "PADIM": 91.326 - }, - "capsule": { - "STFPM": 485.159, - "PADIM": 91.746 - }, "carpet": { - "STFPM": 903.789, - "PADIM": 92.068 - }, - "grid": { - "STFPM": 473.575, - "PADIM": 90.083 - }, - "hazelnut": { - "STFPM": 1070.262, - "PADIM": 92.554 - }, - "leather": { - "STFPM": 1632.646, - "PADIM": 91.489 - }, - "metal_nut": { - "STFPM": 471.863, - "PADIM": 83.262 - }, - "pill": { - "STFPM": 474.212, - "PADIM": 97.604 - }, - "screw": { - "STFPM": 1435.737, - "PADIM": 93.66 - }, - "tile": { - "STFPM": 1282.178, - "PADIM": 95.382 - }, - "toothbrush": { - "STFPM": 798.572, - "PADIM": 77.555 - }, - "transistor": { - "STFPM": 844.123, - "PADIM": 80.574 + "STFPM": 11.986, + "PADIM": 10.973 }, "wood": { - "STFPM": 472.098, - "PADIM": 86.524 + "STFPM": 14.704, + "PADIM": 13.279 }, "zipper": { - "STFPM": 1176.295, - "PADIM": 82.333 + "STFPM": 14.002, + "PADIM": 13.41 } } }, "anomaly_detection": { "train": { - "bottle": { - "STFPM": 471.13, - "PADIM": 91.643 - }, - "cable": { - "STFPM": 412.08, - "PADIM": 95.515 - }, - "capsule": { - "STFPM": 409.549, - "PADIM": 91.765 - }, "carpet": { - "STFPM": 1007.763, - "PADIM": 93.797 - }, - "grid": { - "STFPM": 639.249, - "PADIM": 95.983 - }, - "hazelnut": { - "STFPM": 397.998, - "PADIM": 91.448 - }, - "leather": { - "STFPM": 418.21, - "PADIM": 92.509 - }, - "metal_nut": { - "STFPM": 435.304, - "PADIM": 85.566 - }, - "pill": { - "STFPM": 419.626, - "PADIM": 92.345 - }, - "screw": { - "STFPM": 736.772, - "PADIM": 89.448 - }, - "tile": { - "STFPM": 584.306, - "PADIM": 83.021 - }, - "toothbrush": { - "STFPM": 602.712, - "PADIM": 88.732 - }, - "transistor": { - "STFPM": 413.1, - "PADIM": 89.572 + "STFPM": 8.138, + "PADIM": 8.243 }, "wood": { - "STFPM": 963.139, - "PADIM": 95.301 + "STFPM": 7.5, + "PADIM": 7.6 }, "zipper": { - "STFPM": 1313.441, - "PADIM": 91.329 + "STFPM": 8.728, + "PADIM": 8.825 } } }, - "anomaly_segmentation": { + "anomaly_classification": { "train": { - "bottle": { - "STFPM": 2423.196, - "PADIM": 122.496 - }, - "cable": { - "STFPM": 1061.665, - "PADIM": 134.752 - }, - "capsule": { - "STFPM": 673.885, - "PADIM": 126.904 - }, "carpet": { - "STFPM": 
1053.201, - "PADIM": 126.328 - }, - "grid": { - "STFPM": 946.958, - "PADIM": 115.046 - }, - "hazelnut": { - "STFPM": 1362.821, - "PADIM": 129.905 - }, - "leather": { - "STFPM": 1317.973, - "PADIM": 117.569 - }, - "metal_nut": { - "STFPM": 1420.045, - "PADIM": 120.641 - }, - "pill": { - "STFPM": 682.699, - "PADIM": 113.66 - }, - "screw": { - "STFPM": 1298.055, - "PADIM": 98.299 - }, - "tile": { - "STFPM": 942.646, - "PADIM": 124.92 - }, - "toothbrush": { - "STFPM": 1180.837, - "PADIM": 105.42 - }, - "transistor": { - "STFPM": 710.488, - "PADIM": 107.729 + "STFPM": 5.954, + "PADIM": 6.075 }, "wood": { - "STFPM": 1202.055, - "PADIM": 117.939 + "STFPM": 5.839, + "PADIM": 5.853 }, "zipper": { - "STFPM": 1456.587, - "PADIM": 108.119 + "STFPM": 5.778, + "PADIM": 5.972 } } - } - }, - "kpi_e2e_eval_time_criteria": { + }, "classification": { "supervised": { "multi_class": { "train": { - "EfficientNet-V2-S": 17.8, - "MobileNet-V3-large-1x": 16.453, - "EfficientNet-B0": 15.145, - "Deti-Tiny": 0.0 + "EfficientNet-V2-S": 15.683, + "EfficientNet-B0": 14.97, + "MobileNet-V3-large-1x": 15.073, + "DeiT-Tiny": 55.993 } }, "multi_label": { "train": { - "EfficientNet-V2-S": 18.706, - "MobileNet-V3-large-1x": 20.311, - "EfficientNet-B0": 19.497, - "Deti-Tiny": 0.0 + "EfficientNet-V2-S": 6.952, + "EfficientNet-B0": 6.299, + "MobileNet-V3-large-1x": 6.105, + "DeiT-Tiny": 8.112 } }, "h_label": { "train": { - "EfficientNet-V2-S": 17.99, - "MobileNet-V3-large-1x": 18.068, - "EfficientNet-B0": 13.558, - "Deti-Tiny": 0.0 + "EfficientNet-V2-S": 10.636, + "EfficientNet-B0": 9.836, + "MobileNet-V3-large-1x": 9.835, + "DeiT-Tiny": 26.898 } }, "supcon": { "train": { - "EfficientNet-V2-S": 17.1, - "MobileNet-V3-large-1x": 17.988, - "EfficientNet-B0": 14.586, - "Deti-Tiny": 0.0 + "EfficientNet-V2-S": 15.597, + "EfficientNet-B0": 14.843, + "MobileNet-V3-large-1x": 14.996 } } }, - "class_incr": { + "semi_supervised": { "multi_class": { "train": { - "EfficientNet-V2-S": 16.715, - "MobileNet-V3-large-1x": 16.722, - "EfficientNet-B0": 15.488, - "Deti-Tiny": 0.0 - } - }, - "multi_label": { - "train": { - "EfficientNet-V2-S": 19.618, - "MobileNet-V3-large-1x": 20.686, - "EfficientNet-B0": 17.046, - "Deti-Tiny": 0.0 - } - }, - "h_label": { - "train": { - "EfficientNet-V2-S": 0.0, - "MobileNet-V3-large-1x": 0.0, - "EfficientNet-B0": 0.0, - "Deti-Tiny": 0.0 + "EfficientNet-V2-S": 16.646, + "EfficientNet-B0": 16.055, + "MobileNet-V3-large-1x": 15.865, + "DeiT-Tiny": 66.312 } } }, - "semi_supervised": { + "self_supervised": { "multi_class": { "train": { - "EfficientNet-V2-S": 16.885, - "MobileNet-V3-large-1x": 17.905, - "EfficientNet-B0": 15.107, - "Deti-Tiny": 0.0 + "EfficientNet-V2-S": 15.71, + "EfficientNet-B0": 15.043, + "MobileNet-V3-large-1x": 15.007 } } }, - "self_supervised": { + "class_incr": { "multi_class": { "train": { - "EfficientNet-V2-S": 15.552, - "MobileNet-V3-large-1x": 14.267, - "EfficientNet-B0": 15.179, - "Deti-Tiny": 0.0 + "EfficientNet-V2-S": 16.179, + "EfficientNet-B0": 15.279, + "MobileNet-V3-large-1x": 15.256 + } + }, + "multi_label": { + "train": { + "EfficientNet-V2-S": 6.893, + "EfficientNet-B0": 6.312, + "MobileNet-V3-large-1x": 6.107, + "DeiT-Tiny": 8.018 } } } }, "detection": { - "supervised": { + "tiling": { "multi_class": { "train": { - "YOLOX": 82.169, - "SSD": 86.989, - "MobileNetV2-ATSS": 73.888 + "ResNeXt101-ATSS": 104.089, + "YOLOX-S": 105.713, + "MobileNetV2-ATSS": 59.865, + "SSD": 135.033, + "YOLOX-TINY": 58.541, + "YOLOX-L": 110.035, + "YOLOX-X": 124.0 } } }, - "class_incr": { + "supervised": { 
"multi_class": { "train": { - "YOLOX": 59.154, - "SSD": 89.412, - "MobileNetV2-ATSS": 62.492 + "ResNeXt101-ATSS": 44.526, + "YOLOX-S": 14.622, + "MobileNetV2-ATSS": 14.839, + "SSD": 13.216, + "YOLOX-TINY": 13.123, + "YOLOX-L": 27.983, + "YOLOX-X": 30.28 } } }, "semi_supervised": { "multi_class": { "train": { - "YOLOX": 47.199, - "SSD": 76.703, - "MobileNetV2-ATSS": 50.34 + "ResNeXt101-ATSS": 41.62, + "YOLOX-S": 12.791, + "MobileNetV2-ATSS": 14.857, + "SSD": 13.156, + "YOLOX-TINY": 16.424, + "YOLOX-L": 22.869, + "YOLOX-X": 37.766 } } }, - "tiling": { + "class_incr": { "multi_class": { "train": { - "YOLOX": 146.561, - "SSD": 169.937, - "MobileNetV2-ATSS": 84.882 + "ResNeXt101-ATSS": 44.94, + "YOLOX-S": 26.482, + "MobileNetV2-ATSS": 24.495, + "SSD": 16.785, + "YOLOX-TINY": 22.293, + "YOLOX-L": 30.249, + "YOLOX-X": 35.993 } } } }, - "segmentation": { + "instance_segmentation": { "supervised": { "multi_class": { "train": { - "Lite-HRNet-s-mod2": 36.656, - "Lite-HRNet-18-mod2": 42.2, - "Lite-HRNet-18": 43.046, - "Lite-HRNet-x-mod3": 59.053, - "SegNext-B": 0.0, - "SegNext-s": 0.0, - "SegNext-t": 0.0 - } - }, - "supcon": { - "train": { - "Lite-HRNet-s-mod2": 13.835, - "Lite-HRNet-18-mod2": 16.39, - "Lite-HRNet-18": 16.23, - "Lite-HRNet-x-mod3": 26.549, - "SegNext-B": 0.0, - "SegNext-s": 0.0, - "SegNext-t": 0.0 + "MaskRCNN-SwinT-FP16": 47.091, + "MaskRCNN-EfficientNetB2B": 30.544, + "MaskRCNN-ResNet50": 40.727 } } }, "class_incr": { "multi_class": { "train": { - "Lite-HRNet-s-mod2": 43.425, - "Lite-HRNet-18-mod2": 48.536, - "Lite-HRNet-18": 43.817, - "Lite-HRNet-x-mod3": 74.163, - "SegNext-B": 0.0, - "SegNext-s": 0.0, - "SegNext-t": 0.0 - } - } - }, - "semi_supervised": { - "multi_class": { - "train": { - "Lite-HRNet-s-mod2": 43.347, - "Lite-HRNet-18-mod2": 51.605, - "Lite-HRNet-18": 47.816, - "Lite-HRNet-x-mod3": 56.125, - "SegNext-B": 0.0, - "SegNext-s": 0.0, - "SegNext-t": 0.0 - } - } - }, - "self_supervised": { - "multi_class": { - "train": { - "Lite-HRNet-s-mod2": 37.616, - "Lite-HRNet-18-mod2": 36.786, - "Lite-HRNet-18": 44.935, - "Lite-HRNet-x-mod3": 52.59, - "SegNext-B": 0.0, - "SegNext-s": 0.0, - "SegNext-t": 0.0 + "MaskRCNN-SwinT-FP16": 47.186, + "MaskRCNN-EfficientNetB2B": 30.885, + "MaskRCNN-ResNet50": 39.559 } } } }, - "instance_segmentation": { + "segmentation": { "supervised": { "multi_class": { "train": { - "MaskRCNN-ResNet50": 443.869, - "MaskRCNN-EfficientNetB2B": 451.211 + "SegNext-B": 12.568, + "Lite-HRNet-18": 14.135, + "SegNext-t": 10.923, + "SegNext-s": 10.89, + "Lite-HRNet-18-mod2": 14.046, + "Lite-HRNet-s-mod2": 12.14, + "Lite-HRNet-x-mod3": 21.656 } - } - }, - "class_incr": { - "multi_class": { + }, + "supcon": { "train": { - "MaskRCNN-ResNet50": 103.552, - "MaskRCNN-EfficientNetB2B": 118.353 + "Lite-HRNet-18": 15.703, + "Lite-HRNet-18-mod2": 16.187, + "Lite-HRNet-s-mod2": 12.206, + "Lite-HRNet-x-mod3": 27.72 } } }, - "tiling": { + "semi_supervised": { "multi_class": { "train": { - "MaskRCNN-ResNet50": 522.271, - "MaskRCNN-EfficientNetB2B": 321.017 + "SegNext-B": 13.182, + "Lite-HRNet-18": 14.826, + "SegNext-t": 11.236, + "SegNext-s": 11.108, + "Lite-HRNet-18-mod2": 14.542, + "Lite-HRNet-s-mod2": 12.768, + "Lite-HRNet-x-mod3": 21.772 } } - } - }, - "action_classification": { - "supervised": { + }, + "self_supervised": { "multi_class": { "train": { - "X3D": 384.374, - "MoViNet": 244.952 + "SegNext-B": 12.84, + "Lite-HRNet-18": 12.651, + "SegNext-t": 10.582, + "SegNext-s": 12.587, + "Lite-HRNet-18-mod2": 18.138, + "Lite-HRNet-s-mod2": 13.276, + "Lite-HRNet-x-mod3": 20.736 } 
} - } - }, - "action_detection": { - "supervised": { + }, + "class_incr": { "multi_class": { "train": { - "X3D_FAST_RCNN": 289.228 + "Lite-HRNet-18": 17.608, + "Lite-HRNet-18-mod2": 14.745, + "Lite-HRNet-s-mod2": 13.395, + "Lite-HRNet-x-mod3": 23.336 } } } - }, - "anomaly_classification": { - "train": { - "bottle": { - "STFPM": 13.302, - "PADIM": 14.602 - }, - "cable": { - "STFPM": 0.0, - "PADIM": 0.0 - }, - "capsule": { - "STFPM": 13.433, - "PADIM": 14.363 - }, - "carpet": { - "STFPM": 13.341, - "PADIM": 14.946 - }, - "grid": { - "STFPM": 13.273, - "PADIM": 15.915 - }, - "hazelnut": { - "STFPM": 13.385, - "PADIM": 15.577 - }, - "leather": { - "STFPM": 12.521, - "PADIM": 15.257 - }, - "metal_nut": { - "STFPM": 12.02, - "PADIM": 13.63 - }, - "pill": { - "STFPM": 11.368, - "PADIM": 15.067 - }, - "screw": { - "STFPM": 13.29, - "PADIM": 15.839 - }, - "tile": { - "STFPM": 12.391, - "PADIM": 15.928 - }, - "toothbrush": { - "STFPM": 11.391, - "PADIM": 14.91 - }, - "transistor": { - "STFPM": 0.0, - "PADIM": 0.0 - }, - "wood": { - "STFPM": 12.308, - "PADIM": 13.998 - }, - "zipper": { - "STFPM": 12.046, - "PADIM": 14.165 - } - } - }, - "anomaly_detection": { - "train": { - "bottle": { - "STFPM": 12.329, - "PADIM": 15.616 - }, - "cable": { - "STFPM": 11.788, - "PADIM": 15.858 - }, - "capsule": { - "STFPM": 12.491, - "PADIM": 14.235 - }, - "carpet": { - "STFPM": 12.582, - "PADIM": 14.774 - }, - "grid": { - "STFPM": 0.0, - "PADIM": 0.0 - }, - "hazelnut": { - "STFPM": 12.366, - "PADIM": 14.574 - }, - "leather": { - "STFPM": 12.924, - "PADIM": 13.225 - }, - "metal_nut": { - "STFPM": 11.543, - "PADIM": 16.109 - }, - "pill": { - "STFPM": 14.454, - "PADIM": 13.279 - }, - "screw": { - "STFPM": 13.163, - "PADIM": 15.934 - }, - "tile": { - "STFPM": 0.0, - "PADIM": 0.0 - }, - "toothbrush": { - "STFPM": 13.33, - "PADIM": 15.593 - }, - "transistor": { - "STFPM": 13.294, - "PADIM": 14.333 - }, - "wood": { - "STFPM": 13.265, - "PADIM": 15.228 - }, - "zipper": { - "STFPM": 13.609, - "PADIM": 14.914 - } - } - }, - "anomaly_segmentation": { - "train": { - "bottle": { - "STFPM": 11.796, - "PADIM": 14.88 - }, - "cable": { - "STFPM": 12.265, - "PADIM": 13.511 - }, - "capsule": { - "STFPM": 11.977, - "PADIM": 15.504 - }, - "carpet": { - "STFPM": 13.315, - "PADIM": 14.945 - }, - "grid": { - "STFPM": 12.072, - "PADIM": 15.049 - }, - "hazelnut": { - "STFPM": 12.515, - "PADIM": 15.165 - }, - "leather": { - "STFPM": 12.547, - "PADIM": 13.179 - }, - "metal_nut": { - "STFPM": 0.0, - "PADIM": 0.0 - }, - "pill": { - "STFPM": 12.404, - "PADIM": 17.594 - }, - "screw": { - "STFPM": 0.0, - "PADIM": 0.0 - }, - "tile": { - "STFPM": 11.851, - "PADIM": 15.183 - }, - "toothbrush": { - "STFPM": 13.301, - "PADIM": 14.932 - }, - "transistor": { - "STFPM": 13.137, - "PADIM": 15.278 - }, - "wood": { - "STFPM": 14.124, - "PADIM": 13.865 - }, - "zipper": { - "STFPM": 11.624, - "PADIM": 13.555 - } - } } } } diff --git a/tests/regression/regression_test_helpers.py b/tests/regression/regression_test_helpers.py index 1b1b34e41e4..5cc308720c0 100644 --- a/tests/regression/regression_test_helpers.py +++ b/tests/regression/regression_test_helpers.py @@ -14,6 +14,7 @@ import json import os +from copy import copy from pathlib import Path from typing import Any, Dict, List, Union @@ -31,25 +32,30 @@ "action_detection", "anomaly", ] +TASKS_TO_RUN_SINGLE_GPU = [ + "detection", + "semantic_segmentation", + "instance_segmentation", +] TRAIN_TYPES = ["supervised", "semi_supervised", "self_supervised", "class_incr", "tiling"] LABEL_TYPES = ["multi_class",
"multi_label", "h_label", "supcon"] REGRESSION_TEST_EPOCHS = "10" ANOMALY_DATASET_CATEGORIES = [ - "bottle", - "cable", - "capsule", + # "bottle", + # "cable", + # "capsule", "carpet", - "grid", - "hazelnut", - "leather", - "metal_nut", - "pill", - "screw", - "tile", - "toothbrush", - "transistor", + # "grid", + # "hazelnut", + # "leather", + # "metal_nut", + # "pill", + # "screw", + # "tile", + # "toothbrush", + # "transistor", "wood", "zipper", ] @@ -78,14 +84,15 @@ def __init__(self, task_type, train_type, label_type, otx_dir, **kwargs): self.label_type = label_type self.otx_dir = otx_dir - self.result_dict = self._init_result_dict() - result_dir_prefix = kwargs.get("result_dir", "") - if len(result_dir_prefix) > 0: - result_dir_prefix = result_dir_prefix + "_" - tmp_results_root = kwargs.get("tmp_results_root", "tmp/regression_test_results") - self.result_dir = os.path.join(tmp_results_root, "regression_test_results", f"{result_dir_prefix}{task_type}") + self._result_dict = {} + results_root = kwargs.get("results_root", "/tmp/reg_test_results") + result_suffix = copy(self.task_type) + if result_suffix.startswith("action_"): + result_suffix = "action" + elif result_suffix.startswith("anomaly_"): + result_suffix = "anomaly" + self.result_dir = os.path.join(results_root, "reg_test_results", f"{result_suffix}") Path(self.result_dir).mkdir(parents=True, exist_ok=True) - self.config_dict = self.load_config() self.args = self.config_dict["data_path"] train_params = kwargs.get("train_params") @@ -94,8 +101,24 @@ def __init__(self, task_type, train_type, label_type, otx_dir, **kwargs): self.args["train_params"].extend(train_params) self.num_cuda_devices = torch.cuda.device_count() + if self.task_type in TASKS_TO_RUN_SIGNLE_GPU and self.num_cuda_devices > 0: + self.num_cuda_devices = 1 self.update_gpu_args(self.args, enable_auto_num_worker=kwargs.get("enable_auto_num_worker", True)) + @property + def result_dict(self): + return self._result_dict + + def dump_result_dict(self, dump_path=None): + dump_path_ = ( + dump_path + if dump_path is not None + else os.path.join(self.result_dir, f"result_{self.task_type}_{self.train_type}_{self.label_type}.json") + ) + print(f"writing regression result to {dump_path_}") + with open(dump_path_, "w") as result_file: + json.dump(self.result_dict, result_file, indent=4) + def update_gpu_args(self, args, enable_auto_num_worker=True): if self.num_cuda_devices > 1: if enable_auto_num_worker: @@ -167,22 +190,31 @@ def load_config(self, **kwargs) -> Dict[str, Union[int, float]]: return result - def _init_result_dict(self) -> Dict[str, Any]: - result_dict = {self.task_type: {}} - if "anomaly" not in self.task_type: - for label_type in LABEL_TYPES: - result_dict[self.task_type][label_type] = {} - for train_type in TRAIN_TYPES: - result_dict[self.task_type][label_type][train_type] = {} - for test_type in TEST_TYPES: - result_dict[self.task_type][label_type][train_type][test_type] = [] + def update_result(self, test_type, result, is_anomaly=False, **kwargs): + task_type = self.task_type + if task_type not in self._result_dict: + self._result_dict[task_type] = {} + + if not is_anomaly: + label_type = kwargs.get("label_type", self.label_type) + train_type = kwargs.get("train_type", self.train_type) + + if label_type not in self._result_dict[task_type]: + self._result_dict[task_type][label_type] = {} + if train_type not in self._result_dict[task_type][label_type]: + self._result_dict[task_type][label_type][train_type] = {} + if test_type not in 
self._result_dict[task_type][label_type][train_type]: + self._result_dict[task_type][label_type][train_type][test_type] = [] + self._result_dict[task_type][label_type][train_type][test_type].append(result) + print(f"update_result({task_type=}, {label_type=}, {train_type=}, {test_type=}, {result=}, {is_anomaly=})") else: - for test_type in TEST_TYPES: - result_dict[self.task_type][test_type] = {} - for category in ANOMALY_DATASET_CATEGORIES: - result_dict[self.task_type][test_type][category] = [] - - return result_dict + category = kwargs.get("category", "unknown") + if test_type not in self._result_dict[task_type]: + self._result_dict[task_type][test_type] = {} + if category not in self._result_dict[task_type][test_type]: + self._result_dict[task_type][test_type][category] = [] + self._result_dict[task_type][test_type][category].append(result) + print(f"update_result({task_type=}, {test_type=}, {category=}, {result=}, {is_anomaly=})") def get_template_performance(self, template: ModelTemplate, **kwargs): """Get proper template performance inside of performance list.""" @@ -196,15 +228,13 @@ def get_template_performance(self, template: ModelTemplate, **kwargs): category = kwargs.get("category") if category is None: raise RuntimeError("missing required keyword arg 'category'") - results = self.result_dict[task_type]["train"][category] + results = self._result_dict[task_type]["train"][category] else: - results = self.result_dict[task_type][label_type][train_type]["train"] + results = self._result_dict[task_type][label_type][train_type]["train"] for result in results: template_name = list(result.keys())[0] if template_name == template.name: performance = result break - if performance is None: - raise ValueError("Performance is None.") return performance diff --git a/tests/regression/semantic_segmentation/test_segmentation.py b/tests/regression/semantic_segmentation/test_segmentation.py index dee04b4c8f3..e8870fa54c6 100644 --- a/tests/regression/semantic_segmentation/test_segmentation.py +++ b/tests/regression/semantic_segmentation/test_segmentation.py @@ -53,20 +53,19 @@ class TestRegressionSegmentation: @classmethod @pytest.fixture(scope="class") def reg_cfg(cls, tmp_dir_path): + results_root = os.environ.get("REG_RESULTS_ROOT", tmp_dir_path) cls.reg_cfg = RegressionTestConfig( cls.TASK_TYPE, cls.TRAIN_TYPE, cls.LABEL_TYPE, os.getcwd(), train_params=cls.TRAIN_PARAMS, - tmp_results_root=tmp_dir_path, + results_root=results_root, ) yield cls.reg_cfg - print(f"writting regression result to {cls.reg_cfg.result_dir}/result_{cls.TRAIN_TYPE}_{cls.LABEL_TYPE}.json") - with open(f"{cls.reg_cfg.result_dir}/result_{cls.TRAIN_TYPE}_{cls.LABEL_TYPE}.json", "w") as result_file: - json.dump(cls.reg_cfg.result_dict, result_file, indent=4) + cls.reg_cfg.dump_result_dict() def setup_method(self): self.performance = {} @@ -74,6 +73,7 @@ def setup_method(self): @e2e_pytest_component @pytest.mark.parametrize("template", templates, ids=templates_ids) def test_otx_train(self, reg_cfg, template, tmp_dir_path): + test_type = "train" self.performance[template.name] = {} tmp_dir_path = tmp_dir_path / reg_cfg.task_type @@ -87,14 +87,14 @@ def test_otx_train(self, reg_cfg, template, tmp_dir_path): tmp_dir_path, reg_cfg.otx_dir, reg_cfg.args, - reg_cfg.config_dict["regression_criteria"]["train"], + reg_cfg.config_dict["regression_criteria"][test_type], self.performance[template.name], ) infer_elapsed_time = timer() - infer_start_time self.performance[template.name][TIME_LOG["train_time"]] = round(train_elapsed_time, 3)
self.performance[template.name][TIME_LOG["infer_time"]] = round(infer_elapsed_time, 3) - reg_cfg.result_dict[reg_cfg.task_type][reg_cfg.label_type][reg_cfg.train_type]["train"].append(self.performance) + reg_cfg.update_result(test_type, self.performance) assert test_result["passed"] is True, test_result["log"] @@ -102,6 +102,8 @@ def test_otx_train(self, reg_cfg, template, tmp_dir_path): @pytest.mark.parametrize("template", templates, ids=templates_ids) def test_otx_train_kpi_test(self, reg_cfg, template): performance = reg_cfg.get_template_performance(template) + if performance is None: + pytest.skip(reason="Cannot find performance data from results.") kpi_train_result = regression_train_time_testing( train_time_criteria=reg_cfg.config_dict["kpi_e2e_train_time_criteria"]["train"], @@ -121,7 +123,10 @@ def test_otx_train_kpi_test(self, reg_cfg, template): @e2e_pytest_component @pytest.mark.parametrize("template", templates, ids=templates_ids) def test_otx_train_cls_incr(self, reg_cfg, template, tmp_dir_path): + if "SegNext" in template.name: + pytest.skip("Issue#2600: RuntimeError - can't cast ComplexFloat to Float") train_type = "class_incr" + test_type = "train" self.performance[template.name] = {} sl_template_work_dir = get_template_dir(template, tmp_dir_path / reg_cfg.task_type) @@ -153,7 +158,7 @@ def test_otx_train_cls_incr(self, reg_cfg, template, tmp_dir_path): self.performance[template.name][TIME_LOG["train_time"]] = round(train_elapsed_time, 3) self.performance[template.name][TIME_LOG["infer_time"]] = round(infer_elapsed_time, 3) - reg_cfg.result_dict[reg_cfg.task_type][reg_cfg.label_type][train_type]["train"].append(self.performance) + reg_cfg.update_result(test_type, self.performance, train_type=train_type) assert test_result["passed"] is True, test_result["log"] @@ -163,6 +168,8 @@ def test_otx_train_cls_incr_kpi_test(self, reg_cfg, template): train_type = "class_incr" config_cls_incr = reg_cfg.load_config(train_type=train_type) performance = reg_cfg.get_template_performance(template, train_type=train_type) + if performance is None: + pytest.skip(reason="Cannot find performance data from results.") kpi_train_result = regression_train_time_testing( train_time_criteria=config_cls_incr["kpi_e2e_train_time_criteria"]["train"], @@ -183,6 +190,7 @@ def test_otx_train_cls_incr_kpi_test(self, reg_cfg, template): @pytest.mark.parametrize("template", templates, ids=templates_ids) def test_otx_train_semisl(self, reg_cfg, template, tmp_dir_path): train_type = "semi_supervised" + test_type = "train" self.performance[template.name] = {} tmp_dir_path = tmp_dir_path / f"{reg_cfg.task_type}/test_semisl" @@ -217,7 +225,7 @@ def test_otx_train_semisl(self, reg_cfg, template, tmp_dir_path): self.performance[template.name][TIME_LOG["train_time"]] = round(train_elapsed_time, 3) self.performance[template.name][TIME_LOG["infer_time"]] = round(infer_elapsed_time, 3) - reg_cfg.result_dict[reg_cfg.task_type][reg_cfg.label_type][train_type]["train"].append(self.performance) + reg_cfg.update_result(test_type, self.performance, train_type=train_type) assert test_result["passed"] is True, test_result["log"] @@ -227,6 +235,8 @@ def test_otx_train_semisl_kpi_test(self, reg_cfg, template): train_type = "semi_supervised" config_semisl = reg_cfg.load_config(train_type=train_type) performance = reg_cfg.get_template_performance(template, train_type=train_type) + if performance is None: + pytest.skip(reason="Cannot find performance data from results.") kpi_train_result = regression_train_time_testing( 
train_time_criteria=config_semisl["kpi_e2e_train_time_criteria"]["train"], @@ -247,6 +257,7 @@ def test_otx_train_semisl_kpi_test(self, reg_cfg, template): @pytest.mark.parametrize("template", templates, ids=templates_ids) def test_otx_train_selfsl(self, reg_cfg, template, tmp_dir_path): train_type = "self_supervised" + test_type = "train" self.performance[template.name] = {} tmp_dir_path = tmp_dir_path / f"{reg_cfg.task_type}/test_selfsl" @@ -290,7 +301,7 @@ def test_otx_train_selfsl(self, reg_cfg, template, tmp_dir_path): self.performance[template.name][TIME_LOG["train_time"]] = round(train_elapsed_time, 3) self.performance[template.name][TIME_LOG["infer_time"]] = round(infer_elapsed_time, 3) - reg_cfg.result_dict[reg_cfg.task_type][reg_cfg.label_type][train_type]["train"].append(self.performance) + reg_cfg.update_result(test_type, self.performance, train_type=train_type) assert test_result["passed"] is True, test_result["log"] @@ -300,6 +311,8 @@ def test_otx_train_selfsl_kpi_test(self, reg_cfg, template): train_type = "self_supervised" config_selfsl = reg_cfg.load_config(train_type=train_type) performance = reg_cfg.get_template_performance(template, train_type=train_type) + if performance is None: + pytest.skip(reason="Cannot find performance data from results.") kpi_train_result = regression_train_time_testing( train_time_criteria=config_selfsl["kpi_e2e_train_time_criteria"]["train"], @@ -319,6 +332,7 @@ def test_otx_train_selfsl_kpi_test(self, reg_cfg, template): @e2e_pytest_component @pytest.mark.parametrize("template", templates, ids=templates_ids) def test_otx_export_eval_openvino(self, reg_cfg, template, tmp_dir_path): + test_type = "export" self.performance[template.name] = {} tmp_dir_path = tmp_dir_path / reg_cfg.task_type @@ -333,7 +347,7 @@ def test_otx_export_eval_openvino(self, reg_cfg, template, tmp_dir_path): reg_cfg.otx_dir, reg_cfg.args, threshold=0.05, - criteria=reg_cfg.config_dict["regression_criteria"]["export"], + criteria=reg_cfg.config_dict["regression_criteria"][test_type], reg_threshold=0.10, result_dict=self.performance[template.name], ) @@ -341,15 +355,14 @@ def test_otx_export_eval_openvino(self, reg_cfg, template, tmp_dir_path): self.performance[template.name][TIME_LOG["export_time"]] = round(export_elapsed_time, 3) self.performance[template.name][TIME_LOG["export_eval_time"]] = round(export_eval_elapsed_time, 3) - reg_cfg.result_dict[reg_cfg.task_type][reg_cfg.label_type][reg_cfg.train_type]["export"].append( - self.performance - ) + reg_cfg.update_result(test_type, self.performance) assert test_result["passed"] is True, test_result["log"] @e2e_pytest_component @pytest.mark.parametrize("template", templates, ids=templates_ids) def test_otx_deploy_eval_deployment(self, reg_cfg, template, tmp_dir_path): + test_type = "deploy" self.performance[template.name] = {} tmp_dir_path = tmp_dir_path / reg_cfg.task_type @@ -364,7 +377,7 @@ def test_otx_deploy_eval_deployment(self, reg_cfg, template, tmp_dir_path): reg_cfg.otx_dir, reg_cfg.args, threshold=0.0, - criteria=reg_cfg.config_dict["regression_criteria"]["deploy"], + criteria=reg_cfg.config_dict["regression_criteria"][test_type], reg_threshold=0.10, result_dict=self.performance[template.name], ) @@ -372,15 +385,14 @@ def test_otx_deploy_eval_deployment(self, reg_cfg, template, tmp_dir_path): self.performance[template.name][TIME_LOG["deploy_time"]] = round(deploy_elapsed_time, 3) self.performance[template.name][TIME_LOG["deploy_eval_time"]] = round(deploy_eval_elapsed_time, 3) - 
reg_cfg.result_dict[reg_cfg.task_type][reg_cfg.label_type][reg_cfg.train_type]["deploy"].append( - self.performance - ) + reg_cfg.update_result(test_type, self.performance) assert test_result["passed"] is True, test_result["log"] @e2e_pytest_component @pytest.mark.parametrize("template", templates, ids=templates_ids) def test_nncf_optimize_eval(self, reg_cfg, template, tmp_dir_path): + test_type = "nncf" self.performance[template.name] = {} tmp_dir_path = tmp_dir_path / reg_cfg.task_type @@ -398,7 +410,7 @@ def test_nncf_optimize_eval(self, reg_cfg, template, tmp_dir_path): reg_cfg.otx_dir, reg_cfg.args, threshold=0.01, - criteria=reg_cfg.config_dict["regression_criteria"]["nncf"], + criteria=reg_cfg.config_dict["regression_criteria"][test_type], reg_threshold=0.10, result_dict=self.performance[template.name], ) @@ -406,13 +418,14 @@ def test_nncf_optimize_eval(self, reg_cfg, template, tmp_dir_path): self.performance[template.name][TIME_LOG["nncf_time"]] = round(nncf_elapsed_time, 3) self.performance[template.name][TIME_LOG["nncf_eval_time"]] = round(nncf_eval_elapsed_time, 3) - reg_cfg.result_dict[reg_cfg.task_type][reg_cfg.label_type][reg_cfg.train_type]["nncf"].append(self.performance) + reg_cfg.update_result(test_type, self.performance) assert test_result["passed"] is True, test_result["log"] @e2e_pytest_component @pytest.mark.parametrize("template", templates, ids=templates_ids) def test_ptq_optimize_eval(self, reg_cfg, template, tmp_dir_path): + test_type = "ptq" self.performance[template.name] = {} tmp_dir_path = tmp_dir_path / reg_cfg.task_type @@ -426,7 +439,7 @@ def test_ptq_optimize_eval(self, reg_cfg, template, tmp_dir_path): tmp_dir_path, reg_cfg.otx_dir, reg_cfg.args, - criteria=reg_cfg.config_dict["regression_criteria"]["ptq"], + criteria=reg_cfg.config_dict["regression_criteria"][test_type], reg_threshold=0.10, result_dict=self.performance[template.name], ) @@ -434,7 +447,7 @@ def test_ptq_optimize_eval(self, reg_cfg, template, tmp_dir_path): self.performance[template.name][TIME_LOG["ptq_time"]] = round(ptq_elapsed_time, 3) self.performance[template.name][TIME_LOG["ptq_eval_time"]] = round(ptq_eval_elapsed_time, 3) - reg_cfg.result_dict[reg_cfg.task_type][reg_cfg.label_type][reg_cfg.train_type]["ptq"].append(self.performance) + reg_cfg.update_result(test_type, self.performance) assert test_result["passed"] is True, test_result["log"] @@ -460,20 +473,19 @@ class TestRegressionSupconSegmentation: @classmethod @pytest.fixture(scope="class") def reg_cfg(cls, tmp_dir_path): + results_root = os.environ.get("REG_RESULTS_ROOT", tmp_dir_path) cls.reg_cfg = RegressionTestConfig( cls.TASK_TYPE, cls.TRAIN_TYPE, cls.LABEL_TYPE, os.getcwd(), train_params=cls.TRAIN_PARAMS, - tmp_results_root=tmp_dir_path, + results_root=results_root, ) yield cls.reg_cfg - print(f"writting regression result to {cls.reg_cfg.result_dir}/result_{cls.TRAIN_TYPE}_{cls.LABEL_TYPE}.json") - with open(f"{cls.reg_cfg.result_dir}/result_{cls.TRAIN_TYPE}_{cls.LABEL_TYPE}.json", "w") as result_file: - json.dump(cls.reg_cfg.result_dict, result_file, indent=4) + cls.reg_cfg.dump_result_dict() def setup_method(self): self.performance = {} @@ -483,6 +495,7 @@ def setup_method(self): def test_otx_train(self, reg_cfg, template, tmp_dir_path): if not (Path(template.model_template_path).parent / "supcon").is_dir(): pytest.skip("Supcon training type isn't available for this template") + test_type = "train" self.performance[template.name] = {} tmp_dir_path = tmp_dir_path / "supcon_seg" @@ -499,14 +512,14 @@ def 
test_otx_train(self, reg_cfg, template, tmp_dir_path): tmp_dir_path, reg_cfg.otx_dir, reg_cfg.args, - reg_cfg.config_dict["regression_criteria"]["train"], + reg_cfg.config_dict["regression_criteria"][test_type], self.performance[template.name], ) infer_elapsed_time = timer() - infer_start_time self.performance[template.name][TIME_LOG["train_time"]] = round(train_elapsed_time, 3) self.performance[template.name][TIME_LOG["infer_time"]] = round(infer_elapsed_time, 3) - reg_cfg.result_dict[reg_cfg.task_type][reg_cfg.label_type][reg_cfg.train_type]["train"].append(self.performance) + reg_cfg.update_result(test_type, self.performance) assert test_result["passed"] is True, test_result["log"] @@ -517,6 +530,8 @@ def test_otx_train_kpi_test(self, reg_cfg, template): pytest.skip("Supcon training type isn't available for this template") performance = reg_cfg.get_template_performance(template) + if performance is None: + pytest.skip(reason="Cannot find performance data from results.") kpi_train_result = regression_train_time_testing( train_time_criteria=reg_cfg.config_dict["kpi_e2e_train_time_criteria"]["train"], diff --git a/tests/regression/summarize_test_results.py b/tests/regression/summarize_test_results.py index 73cc43198de..6dec6f6d088 100644 --- a/tests/regression/summarize_test_results.py +++ b/tests/regression/summarize_test_results.py @@ -132,31 +132,77 @@ def fill_model_performance(items: Union[list, str], test_type: str, result_data: result_data[f"{test_type} Eval Time (Sec.)"].append(items) -def summarize_non_anomaly_data(task: str, task_key: str, json_data: dict, result_data: dict) -> dict: +def summarize_non_anomaly_data(json_data: dict, result_data: dict) -> dict: """Make DataFrame by gathering all results.""" - for label_type in LABEL_TYPES: - for train_type in TRAIN_TYPES: - task_data = json_data[task_key][label_type][train_type] - - train_data = task_data.get("train") - if train_data is None: - raise ValueError("Train data can't be empty.") - export_data = task_data.get("export", None) - deploy_data = task_data.get("deploy", None) - nncf_data = task_data.get("nncf", None) - ptq_data = task_data.get("ptq", None) - - for i, per_model_data in enumerate(train_data): + for task_key in json_data.keys(): + for label_type in LABEL_TYPES: + if label_type not in json_data[task_key].keys(): + continue + for train_type in TRAIN_TYPES: + if train_type not in json_data[task_key][label_type].keys(): + continue + task_data = json_data[task_key][label_type][train_type] + + train_data = task_data.get("train") + if train_data is None: + raise ValueError("Train data can't be empty.") + export_data = task_data.get("export", None) + deploy_data = task_data.get("deploy", None) + nncf_data = task_data.get("nncf", None) + ptq_data = task_data.get("ptq", None) + + for i, per_model_data in enumerate(train_data): + for model in per_model_data: + train_items = get_metric_items(get_metric_dict(train_data, i, model)) + export_items = get_metric_items(get_metric_dict(export_data, i, model)) + deploy_items = get_metric_items(get_metric_dict(deploy_data, i, model)) + nncf_items = get_metric_items(get_metric_dict(nncf_data, i, model)) + ptq_items = get_metric_items(get_metric_dict(ptq_data, i, model)) + + result_data["Task type"].append(task_key) + result_data["Train type"].append(train_type) + result_data["Label type"].append(label_type) + result_data["Model"].append(model) + + fill_model_performance(train_items, "train", result_data) + fill_model_performance(export_items, "export", result_data) + 
fill_model_performance(deploy_items, "deploy", result_data) + fill_model_performance(nncf_items, "nncf", result_data) + fill_model_performance(ptq_items, "ptq", result_data) + + +def summarize_anomaly_data(json_data: dict, result_data: dict) -> dict: + """Make DataFrame by gathering all results.""" + for task_key in json_data.keys(): + task_data = json_data[task_key] + + train_data = task_data.get("train") + if train_data is None: + raise ValueError("Train data can't be empty.") + export_data = task_data.get("export") + deploy_data = task_data.get("deploy") + nncf_data = task_data.get("nncf") + ptq_data = task_data.get("ptq") + + for anomaly_category in ANOMALY_DATASET_CATEGORIES: + train_cat_data = train_data.get(anomaly_category) + if train_cat_data is None: + continue + export_cat_data = export_data.get(anomaly_category) + deploy_cat_data = deploy_data.get(anomaly_category) + nncf_cat_data = nncf_data.get(anomaly_category) + ptq_cat_data = ptq_data.get(anomaly_category) + + for i, per_model_data in enumerate(train_cat_data): for model in per_model_data: - train_items = get_metric_items(get_metric_dict(train_data, i, model)) - export_items = get_metric_items(get_metric_dict(export_data, i, model)) - deploy_items = get_metric_items(get_metric_dict(deploy_data, i, model)) - nncf_items = get_metric_items(get_metric_dict(nncf_data, i, model)) - ptq_items = get_metric_items(get_metric_dict(ptq_data, i, model)) - - result_data["Task type"].append(task) - result_data["Train type"].append(train_type) - result_data["Label type"].append(label_type) + train_items = get_metric_items(get_metric_dict(train_cat_data, i, model)) + export_items = get_metric_items(get_metric_dict(export_cat_data, i, model)) + deploy_items = get_metric_items(get_metric_dict(deploy_cat_data, i, model)) + nncf_items = get_metric_items(get_metric_dict(nncf_cat_data, i, model)) + ptq_items = get_metric_items(get_metric_dict(ptq_cat_data, i, model)) + + result_data["Task type"].append(task_key) + result_data["MVTec Category"].append(anomaly_category) result_data["Model"].append(model) fill_model_performance(train_items, "train", result_data) @@ -166,44 +212,6 @@ def summarize_non_anomaly_data(task: str, task_key: str, json_data: dict, result fill_model_performance(ptq_items, "ptq", result_data) -def summarize_anomaly_data(task: str, task_key: str, json_data: dict, result_data: dict) -> dict: - """Make DataFrame by gathering all results.""" - task_data = json_data[task_key] - - train_data = task_data.get("train") - if train_data is None: - raise ValueError("Train data can't be empty.") - export_data = task_data.get("export") - deploy_data = task_data.get("deploy") - nncf_data = task_data.get("nncf") - ptq_data = task_data.get("ptq") - - for anomaly_category in ANOMALY_DATASET_CATEGORIES: - train_cat_data = train_data.get(anomaly_category) - export_cat_data = export_data.get(anomaly_category) - deploy_cat_data = deploy_data.get(anomaly_category) - nncf_cat_data = nncf_data.get(anomaly_category) - ptq_cat_data = ptq_data.get(anomaly_category) - - for i, per_model_data in enumerate(train_cat_data): - for model in per_model_data: - train_items = get_metric_items(get_metric_dict(train_cat_data, i, model)) - export_items = get_metric_items(get_metric_dict(export_cat_data, i, model)) - deploy_items = get_metric_items(get_metric_dict(deploy_cat_data, i, model)) - nncf_items = get_metric_items(get_metric_dict(nncf_cat_data, i, model)) - ptq_items = get_metric_items(get_metric_dict(ptq_cat_data, i, model)) - - result_data["Task 
type"].append(task) - result_data["MVTec Category"].append(anomaly_category) - result_data["Model"].append(model) - - fill_model_performance(train_items, "train", result_data) - fill_model_performance(export_items, "export", result_data) - fill_model_performance(deploy_items, "deploy", result_data) - fill_model_performance(nncf_items, "nncf", result_data) - fill_model_performance(ptq_items, "ptq", result_data) - - def save_file(result_data: dict, output_path: str, file_name: str): df = pd.DataFrame(result_data) if not os.path.exists(output_path): @@ -241,7 +249,7 @@ def summarize_results_data(input_path: str, output_path: str): for entity in os.listdir(input_path): entity_path = os.path.join(input_path, entity) if os.path.isdir(entity_path): - task_key, task = filter_task(entity_path) + _, task = filter_task(entity_path) results_list = [] for result_json in os.listdir(entity_path): result_json_path = os.path.join(entity_path, result_json) @@ -250,9 +258,11 @@ def summarize_results_data(input_path: str, output_path: str): results_list.append(json.load(f)) json_data = merge_results_list(results_list) + assert len(json_data) != 0, "no json results to summary" + if is_anomaly_task(task) is True: - summarize_anomaly_data(task, task_key, json_data, ANOMALY_DATA) - save_file(ANOMALY_DATA, output_path, f"tests-reg_{task}_{task_key}.csv") + summarize_anomaly_data(json_data, ANOMALY_DATA) + save_file(ANOMALY_DATA, output_path, f"tests-reg_{task}.csv") else: - summarize_non_anomaly_data(task, task_key, json_data, NON_ANOMALY_DATA) - save_file(NON_ANOMALY_DATA, output_path, f"tests-reg_{task}_{task_key}.csv") + summarize_non_anomaly_data(json_data, NON_ANOMALY_DATA) + save_file(NON_ANOMALY_DATA, output_path, f"tests-reg_{task}.csv") diff --git a/tests/test_suite/run_test_command.py b/tests/test_suite/run_test_command.py index c40d092bffd..d56259c8dda 100644 --- a/tests/test_suite/run_test_command.py +++ b/tests/test_suite/run_test_command.py @@ -8,13 +8,12 @@ import os import shutil import sys -import torch from pathlib import Path from typing import Dict, Union import onnx import onnxruntime - import pytest +import torch import yaml from otx.api.entities.model_template import ModelCategory, ModelStatus @@ -129,6 +128,8 @@ def otx_train_testing(template, root, otx_dir, args, deterministic=True): command_line.extend(["--output", f"{template_work_dir}/trained_{template.model_template_id}"]) command_line.extend(["--workspace", f"{template_work_dir}"]) if "--load-weights" in args: + if not os.path.exists(args["--load-weights"]): + pytest.skip(reason=f"required file is not exist - {args['--load-weights']}") command_line.extend(["--load-weights", args["--load-weights"]]) if "--gpus" in args: command_line.extend(["--gpus", args["--gpus"]]) @@ -164,6 +165,10 @@ def otx_resume_testing(template, root, otx_dir, args): if option in args: command_line.extend([option, f"{os.path.join(otx_dir, args[option])}"]) + if "--resume-from" in args: + if not os.path.exists(args["--resume-from"]): + pytest.skip(reason=f"required file is not exist - {args['--resume-from']}") + command_line.extend(["--output", f"{template_work_dir}/trained_for_resume_{template.model_template_id}"]) command_line.extend(["--workspace", f"{template_work_dir}"]) command_line.extend(args["train_params"]) @@ -209,13 +214,18 @@ def otx_hpo_testing(template, root, otx_dir, args): def otx_export_testing(template, root, dump_features=False, half_precision=False, check_ir_meta=False, is_onnx=False): template_work_dir = get_template_dir(template, root) + + 
weights_path = f"{template_work_dir}/trained_{template.model_template_id}/models/weights.pth" + if not os.path.exists(weights_path): + pytest.skip(reason=f"required file is not exist - {weights_path}") + save_path = f"{template_work_dir}/exported_{template.model_template_id}" command_line = [ "otx", "export", template.model_template_path, "--load-weights", - f"{template_work_dir}/trained_{template.model_template_id}/models/weights.pth", + weights_path, "--output", save_path, ] @@ -295,6 +305,10 @@ def otx_export_testing(template, root, dump_features=False, half_precision=False def otx_eval_testing(template, root, otx_dir, args): template_work_dir = get_template_dir(template, root) + weights_path = f"{template_work_dir}/trained_{template.model_template_id}/models/weights.pth" + if not os.path.exists(weights_path): + pytest.skip(reason=f"required file is not exist - {weights_path}") + command_line = [ "otx", "eval", @@ -302,7 +316,7 @@ def otx_eval_testing(template, root, otx_dir, args): "--test-data-roots", f'{os.path.join(otx_dir, args["--test-data-roots"])}', "--load-weights", - f"{template_work_dir}/trained_{template.model_template_id}/models/weights.pth", + weights_path, "--output", f"{template_work_dir}/trained_{template.model_template_id}", ] @@ -330,6 +344,9 @@ def otx_eval_openvino_testing( output_path = f"{template_work_dir}/exported_{template.model_template_id}_fp16" perf_path = f"{template_work_dir}/exported_{template.model_template_id}_fp16/performance.json" + if not os.path.exists(weights_path): + pytest.skip(reason=f"required file is not exist - {weights_path}") + command_line = [ "otx", "eval", @@ -354,12 +371,17 @@ def otx_eval_openvino_testing( def otx_demo_testing(template, root, otx_dir, args): template_work_dir = get_template_dir(template, root) + + weights_path = f"{template_work_dir}/trained_{template.model_template_id}/models/weights.pth" + if not os.path.exists(weights_path): + pytest.skip(reason=f"required file is not exist - {weights_path}") + command_line = [ "otx", "demo", template.model_template_path, "--load-weights", - f"{template_work_dir}/trained_{template.model_template_id}/models/weights.pth", + weights_path, "--input", os.path.join(otx_dir, args["--input"]), "--delay", @@ -373,12 +395,17 @@ def otx_demo_testing(template, root, otx_dir, args): def otx_demo_openvino_testing(template, root, otx_dir, args): template_work_dir = get_template_dir(template, root) + + weights_path = f"{template_work_dir}/exported_{template.model_template_id}/openvino.xml" + if not os.path.exists(weights_path): + pytest.skip(reason=f"required file is not exist - {weights_path}") + command_line = [ "otx", "demo", template.model_template_path, "--load-weights", - f"{template_work_dir}/exported_{template.model_template_id}/openvino.xml", + weights_path, "--input", os.path.join(otx_dir, args["--input"]), "--delay", @@ -392,13 +419,18 @@ def otx_demo_openvino_testing(template, root, otx_dir, args): def otx_deploy_openvino_testing(template, root, otx_dir, args): template_work_dir = get_template_dir(template, root) + + weights_path = f"{template_work_dir}/exported_{template.model_template_id}/openvino.xml" + if not os.path.exists(weights_path): + pytest.skip(reason=f"required file is not exist - {weights_path}") + deployment_dir = f"{template_work_dir}/deployed_{template.model_template_id}" command_line = [ "otx", "deploy", template.model_template_path, "--load-weights", - f"{template_work_dir}/exported_{template.model_template_id}/openvino.xml", + weights_path, "--output", 
@@ -209,13 +214,18 @@ def otx_hpo_testing(template, root, otx_dir, args):
 
 def otx_export_testing(template, root, dump_features=False, half_precision=False, check_ir_meta=False, is_onnx=False):
     template_work_dir = get_template_dir(template, root)
+
+    weights_path = f"{template_work_dir}/trained_{template.model_template_id}/models/weights.pth"
+    if not os.path.exists(weights_path):
+        pytest.skip(reason=f"required file does not exist - {weights_path}")
+
     save_path = f"{template_work_dir}/exported_{template.model_template_id}"
     command_line = [
         "otx",
         "export",
         template.model_template_path,
         "--load-weights",
-        f"{template_work_dir}/trained_{template.model_template_id}/models/weights.pth",
+        weights_path,
         "--output",
         save_path,
     ]
@@ -295,6 +305,10 @@ def otx_export_testing(template, root, dump_features=False, half_precision=False
 
 def otx_eval_testing(template, root, otx_dir, args):
     template_work_dir = get_template_dir(template, root)
+    weights_path = f"{template_work_dir}/trained_{template.model_template_id}/models/weights.pth"
+    if not os.path.exists(weights_path):
+        pytest.skip(reason=f"required file does not exist - {weights_path}")
+
     command_line = [
         "otx",
         "eval",
@@ -302,7 +316,7 @@ def otx_eval_testing(template, root, otx_dir, args):
         "--test-data-roots",
         f'{os.path.join(otx_dir, args["--test-data-roots"])}',
         "--load-weights",
-        f"{template_work_dir}/trained_{template.model_template_id}/models/weights.pth",
+        weights_path,
         "--output",
         f"{template_work_dir}/trained_{template.model_template_id}",
     ]
@@ -330,6 +344,9 @@ def otx_eval_openvino_testing(
     output_path = f"{template_work_dir}/exported_{template.model_template_id}_fp16"
     perf_path = f"{template_work_dir}/exported_{template.model_template_id}_fp16/performance.json"
 
+    if not os.path.exists(weights_path):
+        pytest.skip(reason=f"required file does not exist - {weights_path}")
+
     command_line = [
         "otx",
         "eval",
@@ -354,12 +371,17 @@ def otx_demo_testing(template, root, otx_dir, args):
     template_work_dir = get_template_dir(template, root)
+
+    weights_path = f"{template_work_dir}/trained_{template.model_template_id}/models/weights.pth"
+    if not os.path.exists(weights_path):
+        pytest.skip(reason=f"required file does not exist - {weights_path}")
+
     command_line = [
         "otx",
         "demo",
         template.model_template_path,
         "--load-weights",
-        f"{template_work_dir}/trained_{template.model_template_id}/models/weights.pth",
+        weights_path,
         "--input",
         os.path.join(otx_dir, args["--input"]),
         "--delay",
@@ -373,12 +395,17 @@ def otx_demo_openvino_testing(template, root, otx_dir, args):
     template_work_dir = get_template_dir(template, root)
+
+    weights_path = f"{template_work_dir}/exported_{template.model_template_id}/openvino.xml"
+    if not os.path.exists(weights_path):
+        pytest.skip(reason=f"required file does not exist - {weights_path}")
+
     command_line = [
         "otx",
         "demo",
         template.model_template_path,
         "--load-weights",
-        f"{template_work_dir}/exported_{template.model_template_id}/openvino.xml",
+        weights_path,
         "--input",
         os.path.join(otx_dir, args["--input"]),
         "--delay",
@@ -392,13 +419,18 @@ def otx_deploy_openvino_testing(template, root, otx_dir, args):
     template_work_dir = get_template_dir(template, root)
+
+    weights_path = f"{template_work_dir}/exported_{template.model_template_id}/openvino.xml"
+    if not os.path.exists(weights_path):
+        pytest.skip(reason=f"required file does not exist - {weights_path}")
+
     deployment_dir = f"{template_work_dir}/deployed_{template.model_template_id}"
     command_line = [
         "otx",
         "deploy",
         template.model_template_path,
         "--load-weights",
-        f"{template_work_dir}/exported_{template.model_template_id}/openvino.xml",
+        weights_path,
         "--output",
         deployment_dir,
     ]
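For orientation, each guard added in this file gates a stage on the artifact written by the stage before it, so a failed upstream stage now yields SKIPPED rather than FAILED downstream. A compact summary, paraphrased from the guards in this file, with "<id>" abbreviating template.model_template_id:

# Stage -> required artifact, relative to the template work dir.
REQUIRED_ARTIFACTS = {
    "eval / demo / export / explain / nncf": "trained_<id>/models/weights.pth",
    "demo_openvino / deploy / ptq": "exported_<id>/openvino.xml",          # visual prompting: visual_prompting_decoder.xml
    "eval_deployment / demo_deployment": "deployed_<id>/openvino.zip",
    "nncf_export / nncf_eval": "nncf_<id>/weights.pth",
    "nncf_eval_openvino / nncf_validate_fq": "exported_nncf_<id>/openvino.xml",
    "explain_openvino": "exported_<id>_w_features/openvino.xml",
}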
@@ -471,6 +503,11 @@ def otx_deploy_openvino_testing(template, root, otx_dir, args):
 
 def otx_eval_deployment_testing(template, root, otx_dir, args, threshold=0.0):
     template_work_dir = get_template_dir(template, root)
+
+    weights_path = f"{template_work_dir}/deployed_{template.model_template_id}/openvino.zip"
+    if not os.path.exists(weights_path):
+        pytest.skip(reason=f"required file does not exist - {weights_path}")
+
     command_line = [
         "otx",
         "eval",
@@ -478,7 +515,7 @@ def otx_eval_deployment_testing(template, root, otx_dir, args, threshold=0.0):
         "--test-data-roots",
         f'{os.path.join(otx_dir, args["--test-data-roots"])}',
         "--load-weights",
-        f"{template_work_dir}/deployed_{template.model_template_id}/openvino.zip",
+        weights_path,
         "--output",
         f"{template_work_dir}/deployed_{template.model_template_id}",
     ]
@@ -496,12 +533,17 @@ def otx_eval_deployment_testing(template, root, otx_dir, args, threshold=0.0):
 def otx_demo_deployment_testing(template, root, otx_dir, args):
     template_work_dir = get_template_dir(template, root)
     deployment_dir = f"{template_work_dir}/deployed_{template.model_template_id}"
+
+    weights_path = f"{deployment_dir}/openvino.zip"
+    if not os.path.exists(weights_path):
+        pytest.skip(reason=f"required file does not exist - {weights_path}")
+
     command_line = [
         "otx",
         "demo",
         template.model_template_path,
         "--load-weights",
-        f"{deployment_dir}/openvino.zip",
+        weights_path,
         "--input",
         os.path.join(otx_dir, args["--input"]),
         "--delay",
@@ -515,6 +557,13 @@ def otx_demo_deployment_testing(template, root, otx_dir, args):
 
 def ptq_optimize_testing(template, root, otx_dir, args, is_visual_prompting=False):
     template_work_dir = get_template_dir(template, root)
+
+    weights_path = f"{template_work_dir}/exported_{template.model_template_id}/openvino.xml"
+    if is_visual_prompting:
+        weights_path = f"{template_work_dir}/exported_{template.model_template_id}/visual_prompting_decoder.xml"
+    if not os.path.exists(weights_path):
+        pytest.skip(reason=f"required file does not exist - {weights_path}")
+
     command_line = [
         "otx",
         "optimize",
@@ -525,21 +574,9 @@ def ptq_optimize_testing(template, root, otx_dir, args, is_visual_prompting=Fals
         f'{os.path.join(otx_dir, args["--val-data-roots"])}',
         "--output",
         f"{template_work_dir}/ptq_{template.model_template_id}",
+        "--load-weights",
+        weights_path,
     ]
-    if is_visual_prompting:
-        command_line.extend(
-            [
-                "--load-weights",
-                f"{template_work_dir}/exported_{template.model_template_id}/visual_prompting_decoder.xml",
-            ]
-        )
-    else:
-        command_line.extend(
-            [
-                "--load-weights",
-                f"{template_work_dir}/exported_{template.model_template_id}/openvino.xml",
-            ]
-        )
     command_line.extend(["--workspace", f"{template_work_dir}"])
     check_run(command_line)
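The next hunks touch ptq_validate_fq_testing, which appears to delegate to _validate_fq_in_xml to compare the quantized IR against the reference compressed_model.yml. As background, one plausible way to count FakeQuantize layers in an OpenVINO IR is sketched below; this is a standalone illustration, not the code _validate_fq_in_xml actually uses:

from xml.etree import ElementTree


def count_fake_quantize(xml_path: str) -> int:
    # OpenVINO IR lists operations as <layer type="..."> nodes; count the
    # quantization ops inserted by PTQ/NNCF.
    root = ElementTree.parse(xml_path).getroot()
    return sum(1 for layer in root.iter("layer") if layer.get("type") == "FakeQuantize")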
@@ -575,11 +612,17 @@ def _validate_fq_in_xml(xml_path, path_to_ref_data, compression_type, test_name,
 
 def ptq_validate_fq_testing(template, root, otx_dir, task_type, test_name):
     template_work_dir = get_template_dir(template, root)
+    xml_paths = [f"{template_work_dir}/ptq_{template.model_template_id}/openvino.xml"]
     if task_type == "visual_prompting":
         xml_paths = [
             f"{template_work_dir}/ptq_{template.model_template_id}/visual_prompting_image_encoder.xml",
             f"{template_work_dir}/ptq_{template.model_template_id}/visual_prompting_decoder.xml",
         ]
+    for xml_path in xml_paths:
+        if not os.path.exists(xml_path):
+            pytest.skip(reason=f"required file does not exist - {xml_path}")
+
+    if task_type == "visual_prompting":
         paths_to_ref_data = [
             os.path.join(
                 otx_dir,
@@ -601,7 +644,6 @@ def ptq_validate_fq_testing(template, root, otx_dir, task_type, test_name):
             ),
         ]
     else:
-        xml_paths = [f"{template_work_dir}/ptq_{template.model_template_id}/openvino.xml"]
         paths_to_ref_data = [
             os.path.join(
                 otx_dir, "tests", "e2e/cli", task_type, "reference", template.model_template_id, "compressed_model.yml"
             )
@@ -614,6 +656,13 @@ def ptq_eval_testing(template, root, otx_dir, args, is_visual_prompting=False):
     template_work_dir = get_template_dir(template, root)
+
+    weights_path = f"{template_work_dir}/ptq_{template.model_template_id}/openvino.xml"
+    if is_visual_prompting:
+        weights_path = f"{template_work_dir}/ptq_{template.model_template_id}/visual_prompting_decoder.xml"
+    if not os.path.exists(weights_path):
+        pytest.skip(reason=f"required file does not exist - {weights_path}")
+
     command_line = [
         "otx",
         "eval",
@@ -622,34 +671,24 @@ def ptq_eval_testing(template, root, otx_dir, args, is_visual_prompting=False):
         "--test-data-roots",
         f'{os.path.join(otx_dir, args["--test-data-roots"])}',
         "--output",
         f"{template_work_dir}/ptq_{template.model_template_id}",
+        "--load-weights",
+        weights_path,
     ]
-    if is_visual_prompting:
-        command_line.extend(
-            [
-                "--load-weights",
-                f"{template_work_dir}/ptq_{template.model_template_id}/visual_prompting_decoder.xml",
-            ]
-        )
-    else:
-        command_line.extend(
-            [
-                "--load-weights",
-                f"{template_work_dir}/ptq_{template.model_template_id}/openvino.xml",
-            ]
-        )
     command_line.extend(["--workspace", f"{template_work_dir}"])
     check_run(command_line)
 
     assert os.path.exists(f"{template_work_dir}/ptq_{template.model_template_id}/performance.json")
-    with open(f"{template_work_dir}/ptq_{template.model_template_id}/performance.json") as read_file:
-        ptq_performance = json.load(read_file)
-
 
 def nncf_optimize_testing(template, root, otx_dir, args):
     if template.entrypoints.nncf is None:
         pytest.skip("NNCF QAT is disabled: entrypoints.nncf in template is not specified")
     template_work_dir = get_template_dir(template, root)
+
+    weights_path = f"{template_work_dir}/trained_{template.model_template_id}/models/weights.pth"
+    if not os.path.exists(weights_path):
+        pytest.skip(reason=f"required file does not exist - {weights_path}")
+
     command_line = [
         "otx",
         "optimize",
@@ -659,7 +698,7 @@ def nncf_optimize_testing(template, root, otx_dir, args):
         "--val-data-roots",
         f'{os.path.join(otx_dir, args["--val-data-roots"])}',
         "--load-weights",
-        f"{template_work_dir}/trained_{template.model_template_id}/models/weights.pth",
+        weights_path,
         "--output",
         f"{template_work_dir}/nncf_{template.model_template_id}",
     ]
@@ -674,12 +713,17 @@ def nncf_export_testing(template, root):
     if template.entrypoints.nncf is None:
         pytest.skip("NNCF QAT is disabled: entrypoints.nncf in template is not specified")
     template_work_dir = get_template_dir(template, root)
+
+    weights_path = f"{template_work_dir}/nncf_{template.model_template_id}/weights.pth"
+    if not os.path.exists(weights_path):
+        pytest.skip(reason=f"required file does not exist - {weights_path}")
+
     command_line = [
         "otx",
         "export",
         template.model_template_path,
         "--load-weights",
-        f"{template_work_dir}/nncf_{template.model_template_id}/weights.pth",
+        weights_path,
         "--output",
         f"{template_work_dir}/exported_nncf_{template.model_template_id}",
     ]
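ptq_optimize_testing and ptq_eval_testing above now pick the input IR with an inline conditional; the same choice could be expressed once, as in this hypothetical helper (not part of the patch):

def ptq_model_path(template_work_dir: str, template_id: str, stage: str, is_visual_prompting: bool = False) -> str:
    # stage is "exported" for the optimization input and "ptq" for the
    # evaluation input; visual prompting templates quantize the decoder IR.
    file_name = "visual_prompting_decoder.xml" if is_visual_prompting else "openvino.xml"
    return f"{template_work_dir}/{stage}_{template_id}/{file_name}"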
@@ -706,7 +750,11 @@ def nncf_validate_fq_testing(template, root, otx_dir, task_type, test_name):
     if template.entrypoints.nncf is None:
         pytest.skip("NNCF QAT is disabled: entrypoints.nncf in template is not specified")
     template_work_dir = get_template_dir(template, root)
+
     xml_path = f"{template_work_dir}/exported_nncf_{template.model_template_id}/openvino.xml"
+    if not os.path.exists(xml_path):
+        pytest.skip(reason=f"required file does not exist - {xml_path}")
+
     path_to_ref_data = os.path.join(
         otx_dir, "tests", "e2e/cli", task_type, "reference", template.model_template_id, "compressed_model.yml"
     )
@@ -718,6 +766,11 @@ def nncf_eval_testing(template, root, otx_dir, args, threshold=0.01):
     if template.entrypoints.nncf is None:
         pytest.skip("NNCF QAT is disabled: entrypoints.nncf in template is not specified")
     template_work_dir = get_template_dir(template, root)
+
+    weights_path = f"{template_work_dir}/nncf_{template.model_template_id}/weights.pth"
+    if not os.path.exists(weights_path):
+        pytest.skip(reason=f"required file does not exist - {weights_path}")
+
     command_line = [
         "otx",
         "eval",
@@ -725,7 +778,7 @@ def nncf_eval_testing(template, root, otx_dir, args, threshold=0.01):
         "--test-data-roots",
         f'{os.path.join(otx_dir, args["--test-data-roots"])}',
         "--load-weights",
-        f"{template_work_dir}/nncf_{template.model_template_id}/weights.pth",
+        weights_path,
         "--output",
         f"{template_work_dir}/nncf_{template.model_template_id}",
     ]
@@ -744,6 +797,11 @@ def nncf_eval_openvino_testing(template, root, otx_dir, args):
     if template.entrypoints.nncf is None:
         pytest.skip("NNCF QAT is disabled: entrypoints.nncf in template is not specified")
     template_work_dir = get_template_dir(template, root)
+
+    weights_path = f"{template_work_dir}/exported_nncf_{template.model_template_id}/openvino.xml"
+    if not os.path.exists(weights_path):
+        pytest.skip(reason=f"required file does not exist - {weights_path}")
+
     command_line = [
         "otx",
         "eval",
@@ -751,7 +809,7 @@ def nncf_eval_openvino_testing(template, root, otx_dir, args):
         "--test-data-roots",
         f'{os.path.join(otx_dir, args["--test-data-roots"])}',
         "--load-weights",
-        f"{template_work_dir}/exported_nncf_{template.model_template_id}/openvino.xml",
+        weights_path,
         "--output",
         f"{template_work_dir}/exported_nncf_{template.model_template_id}",
     ]
@@ -779,6 +837,11 @@ def xfail_templates(templates, xfail_template_ids_reasons):
 
 def otx_explain_testing(template, root, otx_dir, args, trained=False):
     template_work_dir = get_template_dir(template, root)
+
+    weights_path = f"{template_work_dir}/trained_{template.model_template_id}/models/weights.pth"
+    if not os.path.exists(weights_path):
+        pytest.skip(reason=f"required file does not exist - {weights_path}")
+
     test_algorithm = "ClassWiseSaliencyMap"
 
     train_ann_file = args.get("--train-ann-file", "")
@@ -791,16 +854,16 @@ def otx_explain_testing(template, root, otx_dir, args, trained=False):
     save_dir = f"explain_{template.model_template_id}/{test_algorithm}/{train_type}/"
     output_dir = os.path.join(template_work_dir, save_dir)
-    explain_data_root = os.path.join(otx_dir, args["--input"])
+    data_input = os.path.join(otx_dir, args["--input"])
     command_line = [
         "otx",
         "explain",
         template.model_template_path,
         "--load-weights",
-        f"{template_work_dir}/trained_{template.model_template_id}/models/weights.pth",
-        "--explain-data-root",
-        explain_data_root,
-        "--save-explanation-to",
+        weights_path,
+        "--input",
+        data_input,
+        "--output",
         output_dir,
         "--explain-algorithm",
         test_algorithm,
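The remaining hunks apply the same three changes to every explain helper: the missing-weights guard, the rename of the otx explain options (--explain-data-root becomes --input, --save-explanation-to becomes --output), and tolerating a .log file next to the saliency-map outputs. Equivalent invocations before and after the rename, with illustrative paths:

# Old and new otx explain command lines; template and data paths are placeholders.
old_cmd = [
    "otx", "explain", "template.yaml",
    "--load-weights", "trained/models/weights.pth",
    "--explain-data-root", "data/images",
    "--save-explanation-to", "explain_out",
    "--explain-algorithm", "ClassWiseSaliencyMap",
]
new_cmd = [
    "otx", "explain", "template.yaml",
    "--load-weights", "trained/models/weights.pth",
    "--input", "data/images",
    "--output", "explain_out",
    "--explain-algorithm", "ClassWiseSaliencyMap",
]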
@@ -809,11 +872,16 @@ def otx_explain_testing(template, root, otx_dir, args, trained=False):
     assert os.path.exists(output_dir)
     if trained:
         assert len(os.listdir(output_dir)) > 0
-        assert all([os.path.splitext(fname)[1] == ".tiff" for fname in os.listdir(output_dir)])
+        assert all([os.path.splitext(fname)[1] in [".tiff", ".log"] for fname in os.listdir(output_dir)])
 
 
 def otx_explain_testing_all_classes(template, root, otx_dir, args):
     template_work_dir = get_template_dir(template, root)
+
+    weights_path = f"{template_work_dir}/trained_{template.model_template_id}/models/weights.pth"
+    if not os.path.exists(weights_path):
+        pytest.skip(reason=f"required file does not exist - {weights_path}")
+
     test_algorithm = "ClassWiseSaliencyMap"
 
     train_ann_file = args.get("--train-ann-file", "")
@@ -826,16 +894,16 @@ def otx_explain_testing_all_classes(template, root, otx_dir, args):
     save_dir = f"explain_all_classes_{template.model_template_id}/{test_algorithm}/{train_type}/"
     output_dir = os.path.join(template_work_dir, save_dir)
-    explain_data_root = os.path.join(otx_dir, args["--input"])
+    data_input = os.path.join(otx_dir, args["--input"])
     command_line = [
         "otx",
         "explain",
         template.model_template_path,
         "--load-weights",
-        f"{template_work_dir}/trained_{template.model_template_id}/models/weights.pth",
-        "--explain-data-root",
-        explain_data_root,
-        "--save-explanation-to",
+        weights_path,
+        "--input",
+        data_input,
+        "--output",
         output_dir,
         "--explain-algorithm",
         test_algorithm,
@@ -850,11 +918,16 @@ def otx_explain_testing_all_classes(template, root, otx_dir, args):
         assert len(os.listdir(output_dir)) == len(os.listdir(output_dir_explain_only_predicted_classes))
     else:
         assert len(os.listdir(output_dir)) >= len(os.listdir(output_dir_explain_only_predicted_classes))
-    assert all([os.path.splitext(fname)[1] == ".tiff" for fname in os.listdir(output_dir)])
+    assert all([os.path.splitext(fname)[1] in [".tiff", ".log"] for fname in os.listdir(output_dir)])
 
 
 def otx_explain_testing_process_saliency_maps(template, root, otx_dir, args, trained=False):
     template_work_dir = get_template_dir(template, root)
+
+    weights_path = f"{template_work_dir}/trained_{template.model_template_id}/models/weights.pth"
+    if not os.path.exists(weights_path):
+        pytest.skip(reason=f"required file does not exist - {weights_path}")
+
     test_algorithm = "ClassWiseSaliencyMap"
 
     train_ann_file = args.get("--train-ann-file", "")
@@ -867,16 +940,16 @@ def otx_explain_testing_process_saliency_maps(template, root, otx_dir, args, tra
     save_dir = f"explain_process_saliency_maps_{template.model_template_id}/{test_algorithm}/{train_type}/"
     output_dir = os.path.join(template_work_dir, save_dir)
-    explain_data_root = os.path.join(otx_dir, args["--input"])
+    data_input = os.path.join(otx_dir, args["--input"])
     command_line = [
         "otx",
         "explain",
         template.model_template_path,
         "--load-weights",
-        f"{template_work_dir}/trained_{template.model_template_id}/models/weights.pth",
-        "--explain-data-root",
-        explain_data_root,
-        "--save-explanation-to",
+        weights_path,
+        "--input",
+        data_input,
+        "--output",
         output_dir,
         "--explain-algorithm",
         test_algorithm,
@@ -886,11 +959,16 @@ def otx_explain_testing_process_saliency_maps(template, root, otx_dir, args, tra
     assert os.path.exists(output_dir)
     if trained:
         assert len(os.listdir(output_dir)) > 0
-        assert all([os.path.splitext(fname)[1] == ".png" for fname in os.listdir(output_dir)])
+        assert all([os.path.splitext(fname)[1] in [".png", ".log"] for fname in os.listdir(output_dir)])
 
 
 def otx_explain_openvino_testing(template, root, otx_dir, args, trained=False):
     template_work_dir = get_template_dir(template, root)
+
+    weights_path = f"{template_work_dir}/exported_{template.model_template_id}_w_features/openvino.xml"
+    if not os.path.exists(weights_path):
+        pytest.skip(reason=f"required file does not exist - {weights_path}")
+
     test_algorithm = "ClassWiseSaliencyMap"
 
     train_ann_file = args.get("--train-ann-file", "")
@@ -903,16 +981,16 @@ def otx_explain_openvino_testing(template, root, otx_dir, args, trained=False):
     save_dir = f"explain_ov_{template.model_template_id}/{test_algorithm}/{train_type}/"
     output_dir = os.path.join(template_work_dir, save_dir)
-    explain_data_root = os.path.join(otx_dir, args["--input"])
+    data_input = os.path.join(otx_dir, args["--input"])
     command_line = [
         "otx",
         "explain",
         template.model_template_path,
         "--load-weights",
-        f"{template_work_dir}/exported_{template.model_template_id}_w_features/openvino.xml",
-        "--explain-data-root",
-        explain_data_root,
-        "--save-explanation-to",
+        weights_path,
+        "--input",
+        data_input,
+        "--output",
         output_dir,
         "--explain-algorithm",
         test_algorithm,
@@ -922,11 +1000,16 @@ def otx_explain_openvino_testing(template, root, otx_dir, args, trained=False):
     assert os.path.exists(output_dir)
     if trained:
         assert len(os.listdir(output_dir)) > 0
-        assert all([os.path.splitext(fname)[1] == ".tiff" for fname in os.listdir(output_dir)])
+        assert all([os.path.splitext(fname)[1] in [".tiff", ".log"] for fname in os.listdir(output_dir)])
 
 
 def otx_explain_all_classes_openvino_testing(template, root, otx_dir, args):
     template_work_dir = get_template_dir(template, root)
+
+    weights_path = f"{template_work_dir}/exported_{template.model_template_id}_w_features/openvino.xml"
+    if not os.path.exists(weights_path):
+        pytest.skip(reason=f"required file does not exist - {weights_path}")
+
     test_algorithm = "ClassWiseSaliencyMap"
 
     train_ann_file = args.get("--train-ann-file", "")
@@ -939,16 +1022,16 @@ def otx_explain_all_classes_openvino_testing(template, root, otx_dir, args):
     save_dir = f"explain_ov_all_classes_{template.model_template_id}/{test_algorithm}/{train_type}/"
     output_dir = os.path.join(template_work_dir, save_dir)
-    explain_data_root = os.path.join(otx_dir, args["--input"])
+    data_input = os.path.join(otx_dir, args["--input"])
     command_line = [
         "otx",
         "explain",
         template.model_template_path,
         "--load-weights",
-        f"{template_work_dir}/exported_{template.model_template_id}_w_features/openvino.xml",
-        "--explain-data-root",
-        explain_data_root,
-        "--save-explanation-to",
+        weights_path,
+        "--input",
+        data_input,
+        "--output",
        output_dir,
         "--explain-algorithm",
         test_algorithm,
@@ -964,11 +1047,16 @@ def otx_explain_all_classes_openvino_testing(template, root, otx_dir, args):
         assert len(os.listdir(output_dir)) == len(os.listdir(output_dir_explain_only_predicted_classes))
     else:
         assert len(os.listdir(output_dir)) >= len(os.listdir(output_dir_explain_only_predicted_classes))
-    assert all([os.path.splitext(fname)[1] == ".tiff" for fname in os.listdir(output_dir)])
+    assert all([os.path.splitext(fname)[1] in [".tiff", ".log"] for fname in os.listdir(output_dir)])
 
 
 def otx_explain_process_saliency_maps_openvino_testing(template, root, otx_dir, args, trained=False):
     template_work_dir = get_template_dir(template, root)
+
+    weights_path = f"{template_work_dir}/exported_{template.model_template_id}_w_features/openvino.xml"
+    if not os.path.exists(weights_path):
+        pytest.skip(reason=f"required file does not exist - {weights_path}")
+
     test_algorithm = "ClassWiseSaliencyMap"
 
     train_ann_file = args.get("--train-ann-file", "")
@@ -981,16 +1069,16 @@ def otx_explain_process_saliency_maps_openvino_testing(template, root, otx_dir,
     save_dir = f"explain_ov_process_saliency_maps_{template.model_template_id}/{test_algorithm}/{train_type}/"
     output_dir = 
os.path.join(template_work_dir, save_dir) - explain_data_root = os.path.join(otx_dir, args["--input"]) + data_input = os.path.join(otx_dir, args["--input"]) command_line = [ "otx", "explain", template.model_template_path, "--load-weights", - f"{template_work_dir}/exported_{template.model_template_id}_w_features/openvino.xml", - "--explain-data-root", - explain_data_root, - "--save-explanation-to", + weights_path, + "--input", + data_input, + "--output", output_dir, "--explain-algorithm", test_algorithm, @@ -1001,7 +1089,7 @@ def otx_explain_process_saliency_maps_openvino_testing(template, root, otx_dir, assert os.path.exists(output_dir) if trained: assert len(os.listdir(output_dir)) > 0 - assert all([os.path.splitext(fname)[1] == ".png" for fname in os.listdir(output_dir)]) + assert all([os.path.splitext(fname)[1] in [".png", ".log"] for fname in os.listdir(output_dir)]) def otx_find_testing(): diff --git a/tests/unit/algorithms/detection/adapters/mmdet/test_configurer.py b/tests/unit/algorithms/detection/adapters/mmdet/test_configurer.py index 4ae43b9be6f..1d6d2db1237 100644 --- a/tests/unit/algorithms/detection/adapters/mmdet/test_configurer.py +++ b/tests/unit/algorithms/detection/adapters/mmdet/test_configurer.py @@ -77,18 +77,33 @@ def test_configure(self, mocker): model_cfg.model_task = "detection" data_cfg = copy.deepcopy(self.data_cfg) returned_value = self.configurer.configure( - model_cfg, self.data_pipeline_path, None, "", data_cfg, train_dataset=self.det_dataset + model_cfg, + self.data_pipeline_path, + None, + "", + data_cfg, + train_dataset=self.det_dataset, + max_num_detections=100, ) mock_cfg_merge.assert_called_once_with( - model_cfg, data_cfg, self.data_pipeline_path, None, train_dataset=self.det_dataset + model_cfg, + data_cfg, + self.data_pipeline_path, + None, + train_dataset=self.det_dataset, + max_num_detections=100, ) mock_cfg_ckpt.assert_called_once_with(model_cfg, "") mock_cfg_env.assert_called_once_with(model_cfg) - mock_cfg_data_pipeline.assert_called_once_with(model_cfg, None, "", train_dataset=self.det_dataset) - mock_cfg_recipe.assert_called_once_with(model_cfg, train_dataset=self.det_dataset) + mock_cfg_data_pipeline.assert_called_once_with( + model_cfg, None, "", train_dataset=self.det_dataset, max_num_detections=100 + ) + mock_cfg_recipe.assert_called_once_with(model_cfg, train_dataset=self.det_dataset, max_num_detections=100) mock_cfg_hook.assert_called_once_with(model_cfg) - mock_cfg_model.assert_called_once_with(model_cfg, None, None, None, train_dataset=self.det_dataset) + mock_cfg_model.assert_called_once_with( + model_cfg, None, None, None, train_dataset=self.det_dataset, max_num_detections=100 + ) mock_cfg_compat_cfg.assert_called_once_with(model_cfg) assert returned_value == model_cfg diff --git a/tests/unit/algorithms/detection/test_helpers.py b/tests/unit/algorithms/detection/test_helpers.py index 32c8a8d67a7..d93d5531d68 100644 --- a/tests/unit/algorithms/detection/test_helpers.py +++ b/tests/unit/algorithms/detection/test_helpers.py @@ -33,7 +33,7 @@ "src/otx/algorithms/detection/configs/instance_segmentation", "efficientnetb2b_maskrcnn" ) DEFAULT_DET_RECIPE_CONFIG_PATH = "src/otx/recipes/stages/detection/incremental.py" -DEFAULT_ISEG_RECIPE_CONFIG_PATH = "src/otx/recipes/stages/instance-segmentation/incremental.py" +DEFAULT_ISEG_RECIPE_CONFIG_PATH = "src/otx/recipes/stages/instance_segmentation/incremental.py" class MockImage(Image): diff --git 
a/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/callbacks/test_inference_callback.py b/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/callbacks/test_inference_callback.py index 5e572aa464d..39aec2025d4 100644 --- a/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/callbacks/test_inference_callback.py +++ b/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/callbacks/test_inference_callback.py @@ -13,6 +13,7 @@ from otx.algorithms.visual_prompting.adapters.pytorch_lightning.callbacks import ( InferenceCallback, + ZeroShotInferenceCallback, ) from otx.api.entities.annotation import Annotation from otx.api.entities.id import ID @@ -23,6 +24,7 @@ from tests.test_suite.e2e_test_system import e2e_pytest_unit from tests.unit.algorithms.visual_prompting.test_helpers import ( generate_visual_prompting_dataset, + generate_otx_label_schema, ) @@ -99,3 +101,62 @@ def test_on_predict_epoch_end(self, use_mask: bool, expected: Any): assert annotation.shape.points == expected assert annotation.get_labels()[0].name == "foreground" assert annotation.get_labels()[0].probability == 0.5 + + +class TestZeroShotInferenceCallback: + @pytest.fixture(autouse=True) + def setup(self, mocker, monkeypatch): + monkeypatch.setattr( + "otx.api.utils.segmentation_utils.create_annotation_from_segmentation_map", + lambda *args, **kwargs: Annotation( + shape=Image(data=np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]]), size=(3, 3)), + labels=[ScoredLabel(label=LabelEntity("foreground", domain=Domain.VISUAL_PROMPTING), probability=0.9)], + id=ID(ObjectId()), + ), + ) + monkeypatch.setattr( + "otx.api.utils.segmentation_utils.create_hard_prediction_from_soft_prediction", + lambda *args, **kwargs: np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]]), + ) + + self.mocker_trainer = mocker.patch("pytorch_lightning.Trainer") + self.mocker_lightning_module = mocker.patch("pytorch_lightning.LightningModule") + + @e2e_pytest_unit + @pytest.mark.parametrize( + "expected", + [[Point(0.5, 0.0), Point(0.0, 0.5), Point(0.5, 1.0), Point(1.0, 0.5)]], + ) + def test_on_predict_epoch_end(self, expected: Any): + """Test on_predict_epoch_end.""" + otx_dataset = generate_visual_prompting_dataset(use_mask=False) + labels_schema = generate_otx_label_schema() + inference_callback = ZeroShotInferenceCallback(otx_dataset, labels_schema) + + outputs = [ + [ + [ + { + 0: [ + torch.Tensor([[0, 1, 0], [1, 1, 1], [0, 1, 0]]).to(torch.uint8), + ] + } + ] + ] + ] + + inference_callback.on_predict_epoch_end(self.mocker_trainer, self.mocker_lightning_module, outputs) + predicted_otx_dataset = inference_callback.otx_dataset + + assert len(predicted_otx_dataset) == 4 + dataset_item = predicted_otx_dataset[0] + assert len(dataset_item.annotation_scene.annotations) == 1 + + annotation = dataset_item.annotation_scene.annotations[0] + assert isinstance(annotation, Annotation) + + # TODO (sungchul): consider use_mask + assert isinstance(annotation.shape, Polygon) + assert annotation.shape.points == expected + assert annotation.get_labels()[0].name == "rectangle" + assert annotation.get_labels()[0].probability == 1.0 diff --git a/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/config/test_visual_prompting_config.py b/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/config/test_visual_prompting_config.py index 105047526b8..32f3e9c6fe7 100644 --- a/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/config/test_visual_prompting_config.py +++ 
b/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/config/test_visual_prompting_config.py @@ -61,15 +61,16 @@ def test_update_visual_prompting_config(): """Test update_visual_prompting_config.""" otx_config = OmegaConf.create( { - "groups": ["learning_parameters", "pot_parameters", "postprocessing"], + "groups": ["learning_parameters", "pot_parameters", "postprocessing", "algo_backend"], "learning_parameters": {"parameters": ["param1"], "param1": "updated_value1"}, "pot_parameters": {"parameters": ["param2"], "param2": "updated_value2"}, "postprocessing": {"parameters": ["param3"], "param3": "updated_value3"}, + "algo_backend": {"parameters": ["param4"], "param4": "updated_value4"}, "parameters": [], } ) visual_prompting_config = OmegaConf.create( - {"param1": "value1", "param2": "value2", "param3": "value3", "param4": "value4"} + {"param1": "value1", "param2": "value2", "param3": "value3", "param4": "value4", "param5": "value5"} ) update_visual_prompting_config(visual_prompting_config, otx_config) @@ -77,4 +78,5 @@ def test_update_visual_prompting_config(): assert visual_prompting_config["param1"] == "updated_value1" assert visual_prompting_config["param2"] == "updated_value2" assert visual_prompting_config["param3"] == "updated_value3" - assert visual_prompting_config["param4"] == "value4" + assert visual_prompting_config["param4"] == "updated_value4" + assert visual_prompting_config["param5"] == "value5" diff --git a/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/datasets/pipelines/test_sam_transforms.py b/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/datasets/pipelines/test_sam_transforms.py index 35c00c0198b..68e06b14482 100644 --- a/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/datasets/pipelines/test_sam_transforms.py +++ b/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/datasets/pipelines/test_sam_transforms.py @@ -47,7 +47,7 @@ def test_apply_image(self, image: np.ndarray, expected: Tuple[int, int, int]): ) def test_apply_coords(self, coords: np.ndarray, original_size: Tuple[int, int], expected: np.ndarray): """Test apply_coords.""" - result = self.resize_longest_side.apply_coords(coords, original_size) + result = self.resize_longest_side.apply_coords(coords, original_size, self.resize_longest_side.target_length) assert np.array_equal(result, expected) diff --git a/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/datasets/test_dataset.py b/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/datasets/test_dataset.py index ec16356882a..c3701eb3f58 100644 --- a/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/datasets/test_dataset.py +++ b/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/datasets/test_dataset.py @@ -5,17 +5,21 @@ # import numpy as np +from typing import Callable import pytest from torch.utils.data import DataLoader from torchvision import transforms +from otx.algorithms.common.configs.training_base import TrainType from otx.algorithms.visual_prompting.adapters.pytorch_lightning.datasets.dataset import ( OTXVisualPromptingDataModule, + OTXZeroShotVisualPromptingDataset, OTXVisualPromptingDataset, convert_polygon_to_mask, generate_bbox, generate_bbox_from_mask, get_transform, + # generate_point_from_mask, ) from otx.algorithms.visual_prompting.adapters.pytorch_lightning.datasets.pipelines import ( MultipleInputsCompose, @@ -142,6 +146,11 @@ def test_generate_bbox_from_mask(mocker) -> None: assert bbox[3] >= 0 and 
bbox[3] <= height +@e2e_pytest_unit +def test_generate_point_from_mask() -> None: + """TODO""" + + class TestOTXVIsualPromptingDataset: @e2e_pytest_unit def test_len(self, mocker, dataset_polygon, transform, image_size, mean, std) -> None: @@ -183,20 +192,69 @@ def test_getitem( assert item["points"] == [] +class TestOTXZeroShotVisualPromptingDataset: + """Test OTXZeroShotVisualPromptingDataset. + + To be updated. + """ + + @e2e_pytest_unit + @pytest.mark.parametrize("use_mask", [False, True]) + def test_getitem( + self, mocker, dataset_polygon, dataset_mask, transform, image_size, mean, std, use_mask: bool + ) -> None: + """Test __getitem__.""" + mocker.patch( + "otx.algorithms.visual_prompting.adapters.pytorch_lightning.datasets.dataset.get_transform", + return_value=transform, + ) + dataset = dataset_mask if use_mask else dataset_polygon + otx_dataset = OTXZeroShotVisualPromptingDataset(dataset, image_size, mean, std) + + item = otx_dataset[0] + + # Check the returned item's keys + expected_keys = {"index", "original_size", "images", "path", "gt_masks", "bboxes", "points", "labels"} + assert set(item.keys()) == expected_keys + + # Check specific values in the item + assert item["index"] == 0 + assert (item["images"] == dataset[0].media.numpy).all() + assert item["original_size"] == dataset[0].media.numpy.shape[:2] + assert item["path"] == dataset[0].media.path + assert isinstance(item["gt_masks"], list) + assert isinstance(item["gt_masks"][0], np.ndarray) + assert isinstance(item["bboxes"], np.ndarray) + assert item["points"] == [] + + class TestOTXVisualPromptingDataModule: @pytest.fixture - def datamodule(self) -> OTXVisualPromptingDataModule: - dataset = generate_visual_prompting_dataset() + def set_datamodule(self) -> Callable: + def datamodule(train_type: TrainType = TrainType.Incremental) -> OTXVisualPromptingDataModule: + dataset = generate_visual_prompting_dataset() - # Create a mock config - config = MockDatasetConfig() + # Create a mock config + config = MockDatasetConfig() + + # Create an instance of OTXVisualPromptingDataModule + return OTXVisualPromptingDataModule(config, dataset, train_type) + + return datamodule + + @e2e_pytest_unit + def test_init_zeroshot(self, set_datamodule): + """Test __init__ when train_type is TrainType.Zeroshot.""" + datamodule = set_datamodule(train_type=TrainType.Zeroshot) - # Create an instance of OTXVisualPromptingDataModule - return OTXVisualPromptingDataModule(config, dataset) + assert datamodule.config.get("train_batch_size") == 1 + # assert "generate_point" in datamodule.kwargs + # assert "generate_bbox" in datamodule.kwargs @e2e_pytest_unit - def test_setup(self, mocker, datamodule) -> None: + def test_setup(self, mocker, set_datamodule) -> None: """Test setup.""" + datamodule = set_datamodule() mocker.patch.object(datamodule, "summary", return_value=None) datamodule.setup() @@ -205,8 +263,9 @@ def test_setup(self, mocker, datamodule) -> None: assert isinstance(datamodule.val_dataset, OTXVisualPromptingDataset) @e2e_pytest_unit - def test_train_dataloader(self, mocker, datamodule) -> None: + def test_train_dataloader(self, mocker, set_datamodule) -> None: """Test train_dataloader.""" + datamodule = set_datamodule() mocker.patch.object(datamodule, "summary", return_value=None) datamodule.setup(stage="fit") @@ -219,8 +278,9 @@ def test_train_dataloader(self, mocker, datamodule) -> None: assert dataloader.collate_fn == collate_fn @e2e_pytest_unit - def test_val_dataloader(self, mocker, datamodule) -> None: + def test_val_dataloader(self, 
mocker, set_datamodule) -> None: """Test val_dataloader.""" + datamodule = set_datamodule() mocker.patch.object(datamodule, "summary", return_value=None) datamodule.setup(stage="fit") @@ -233,8 +293,9 @@ def test_val_dataloader(self, mocker, datamodule) -> None: assert dataloader.collate_fn == collate_fn @e2e_pytest_unit - def test_test_dataloader(self, mocker, datamodule) -> None: + def test_test_dataloader(self, mocker, set_datamodule) -> None: """Test test_dataloader.""" + datamodule = set_datamodule() mocker.patch.object(datamodule, "summary", return_value=None) datamodule.setup(stage="test") @@ -247,8 +308,9 @@ def test_test_dataloader(self, mocker, datamodule) -> None: assert dataloader.collate_fn == collate_fn @e2e_pytest_unit - def test_predict_dataloader(self, datamodule) -> None: + def test_predict_dataloader(self, set_datamodule) -> None: """Test predict_dataloader.""" + datamodule = set_datamodule() datamodule.setup(stage="predict") # Call the predict_dataloader method diff --git a/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/test_segment_anything.py b/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/test_segment_anything.py index 27258658808..799d06f846b 100644 --- a/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/test_segment_anything.py +++ b/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/test_segment_anything.py @@ -16,44 +16,9 @@ from otx.algorithms.visual_prompting.adapters.pytorch_lightning.models.visual_prompters.segment_anything import ( SegmentAnything, - CKPT_PATHS, ) from tests.test_suite.e2e_test_system import e2e_pytest_unit - - -class MockImageEncoder(nn.Module): - def __init__(self, *args, **kwargs): - super().__init__() - self.backbone = nn.Linear(1, 1) - - def forward(self, *args, **kwargs): - return torch.Tensor([[1]]) - - -class MockPromptEncoder(nn.Module): - def __init__(self, *args, **kwargs): - super().__init__() - self.layer = nn.Linear(1, 1) - self.embed_dim = 4 - self.pe_layer = None - self.mask_downscaling = None - - def forward(self, *args, **kwargs): - return torch.Tensor([[1]]), torch.Tensor([[1]]) - - def get_dense_pe(self): - return torch.Tensor([[1]]) - - -class MockMaskDecoder(nn.Module): - def __init__(self, *args, **kwargs): - super().__init__() - self.layer = nn.Linear(1, 1) - self.num_mask_tokens = 4 - self.predict_masks = None - - def forward(self, *args, **kwargs): - return torch.Tensor([[1]]), torch.Tensor([[1]]) +from tests.unit.algorithms.visual_prompting.test_helpers import MockImageEncoder, MockPromptEncoder, MockMaskDecoder class TestSegmentAnything: diff --git a/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/test_zero_shot_segment_anything.py b/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/test_zero_shot_segment_anything.py new file mode 100644 index 00000000000..b4ac5343147 --- /dev/null +++ b/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/test_zero_shot_segment_anything.py @@ -0,0 +1,321 @@ +"""Tests Segment Anything for zero-shot learning.""" + +# Copyright (C) 2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + + +import pytest +from typing import Dict, Any, Optional +from collections import OrderedDict +from tests.test_suite.e2e_test_system import e2e_pytest_unit +import torch +from omegaconf import DictConfig + +from 
otx.algorithms.visual_prompting.adapters.pytorch_lightning.models.visual_prompters.zero_shot_segment_anything import ( + PromptGetter, + ZeroShotSegmentAnything, +) +from tests.unit.algorithms.visual_prompting.test_helpers import MockScoredLabel, MockImageEncoder, MockPromptGetter + + +class TestPromptGetter: + @pytest.fixture(autouse=True) + def setup(self) -> None: + self.prompt_getter = PromptGetter(image_size=3) + + @e2e_pytest_unit + def test_initialize(self) -> None: + """Test initialize.""" + assert not self.prompt_getter.reference_feats + assert not self.prompt_getter.reference_prompts + + @e2e_pytest_unit + def test_set_default_thresholds(self) -> None: + """Test set_default_thresholds.""" + assert self.prompt_getter.default_threshold_reference == 0.3 + assert self.prompt_getter.default_threshold_target == 0.65 + + self.prompt_getter.set_default_thresholds(default_threshold_reference=0.5, default_threshold_target=0.7) + + assert self.prompt_getter.default_threshold_reference == 0.5 + assert self.prompt_getter.default_threshold_target == 0.7 + + @e2e_pytest_unit + def test_set_reference(self) -> None: + """Test set_reference.""" + self.prompt_getter.set_reference( + label=MockScoredLabel(label=1), + reference_feats=torch.ones((self.prompt_getter.image_size, self.prompt_getter.image_size)), + reference_prompts=torch.zeros((self.prompt_getter.image_size, self.prompt_getter.image_size)), + ) + + assert self.prompt_getter.reference_feats[1].sum() == 9 + assert self.prompt_getter.reference_prompts[1].sum() == 0 + + @e2e_pytest_unit + def test_forward(self, mocker) -> None: + """Test forward.""" + mocker.patch( + "otx.algorithms.visual_prompting.adapters.pytorch_lightning.models.visual_prompters.zero_shot_segment_anything.ZeroShotSegmentAnything" + ) + mocker.patch.object(self.prompt_getter, "_point_selection", return_value=("points_scores", "bg_coords")) + + image_embeddings = torch.rand(1, 2, self.prompt_getter.image_size, self.prompt_getter.image_size) + self.prompt_getter.reference_feats = {1: torch.rand(1, 2)} + + prompts = self.prompt_getter( + image_embeddings=image_embeddings, + padding=(0, 0, 0, 0), + original_size=(self.prompt_getter.image_size, self.prompt_getter.image_size), + ) + + assert 1 in prompts + assert prompts[1] == ("points_scores", "bg_coords") + + @e2e_pytest_unit + def test_preprocess_target_feat(self) -> None: + """Test _preprocess_target_feat.""" + old_target_feat = torch.arange(1, self.prompt_getter.image_size**2 + 1, dtype=torch.float).reshape( + 1, 1, self.prompt_getter.image_size, self.prompt_getter.image_size + ) + new_target_feat = self.prompt_getter._preprocess_target_feat( + target_feat=old_target_feat, + c_feat=1, + h_feat=self.prompt_getter.image_size, + w_feat=self.prompt_getter.image_size, + ) + + assert new_target_feat.sum() == 9 + assert new_target_feat.shape == (1, self.prompt_getter.image_size**2) + + @e2e_pytest_unit + def test_point_selection(self) -> None: + """Test _point_selection.""" + mask_sim = torch.arange(0.1, 1.0, 0.1).reshape(self.prompt_getter.image_size, self.prompt_getter.image_size) + + points_scores, bg_coords = self.prompt_getter._point_selection( + mask_sim=mask_sim, + original_size=(self.prompt_getter.image_size, self.prompt_getter.image_size), + threshold=0.5, + downsizing=1, + ) + + assert torch.equal(points_scores, torch.tensor([[2, 2, 0.9], [1, 2, 0.8], [0, 2, 0.7], [2, 1, 0.6]])) + assert torch.equal(bg_coords, torch.tensor([[0, 0]])) + + +class TestZeroShotSegmentAnything: + @pytest.fixture + def 
set_zero_shot_segment_anything(self, monkeypatch): + def zero_shot_segment_anything(state_dict: Optional[OrderedDict] = None): + monkeypatch.setattr( + "otx.algorithms.visual_prompting.adapters.pytorch_lightning.models.visual_prompters.segment_anything.SAMImageEncoder", + MockImageEncoder, + ) + return ZeroShotSegmentAnything(state_dict=state_dict) + + return zero_shot_segment_anything + + @e2e_pytest_unit + @pytest.mark.parametrize( + "state_dict", + [ + None, + { + "prompt_getter.reference_feats": "prompt_getter.reference_feats", + "prompt_getter.reference_prompts": "prompt_getter.reference_prompts", + }, + ], + ) + def test_init(self, set_zero_shot_segment_anything, state_dict: Dict[str, Any]) -> None: + """Test __init__.""" + zero_shot_segment_anything = set_zero_shot_segment_anything(state_dict=state_dict) + + assert zero_shot_segment_anything.config.model.freeze_image_encoder + assert zero_shot_segment_anything.config.model.freeze_prompt_encoder + assert zero_shot_segment_anything.config.model.freeze_mask_decoder + + if state_dict: + zero_shot_segment_anything.prompt_getter.reference_feats = "prompt_getter.reference_feats" + zero_shot_segment_anything.prompt_getter.reference_prompts = "prompt_getter.reference_prompts" + + @e2e_pytest_unit + def test_set_default_config(self, set_zero_shot_segment_anything) -> None: + """Test set_default_config.""" + zero_shot_segment_anything = set_zero_shot_segment_anything() + + default_config = zero_shot_segment_anything.set_default_config() + + assert isinstance(default_config, DictConfig) + assert "model" in default_config + assert "backbone" in default_config.model + assert "checkpoint" in default_config.model + assert "default_threshold_reference" in default_config.model + assert "default_threshold_target" in default_config.model + assert "freeze_image_encoder" in default_config.model + assert "freeze_mask_decoder" in default_config.model + assert "freeze_prompt_encoder" in default_config.model + assert "image_size" in default_config.model + assert "mask_threshold" in default_config.model + + @e2e_pytest_unit + def test_learn(self, mocker, set_zero_shot_segment_anything) -> None: + """Test learn.""" + zero_shot_segment_anything = set_zero_shot_segment_anything() + mocker.patch.object( + zero_shot_segment_anything, + "_predict_mask", + return_value=( + torch.tensor([[[[0, 0, 0], [0, 1, 0], [0, 0, 0]]]]), + torch.tensor([1, 0, 0]), + torch.tensor([[[[0, 0, 0], [0, 1, 0], [0, 0, 0]]]]), + ), + ) + + processed_prompts = {MockScoredLabel(label=1, name="label"): [{"box": torch.tensor([[0, 0, 1, 1]])}]} + zero_shot_segment_anything.learn( + images=torch.ones((1, 3, 8, 8)), + processed_prompts=processed_prompts, + padding=(0, 0, 0, 0), + original_size=(8, 8), + ) + + assert zero_shot_segment_anything.prompt_getter.reference_feats.get(1).shape == (1, 2) + assert zero_shot_segment_anything.prompt_getter.reference_prompts.get(1).shape == (8, 8) + + @e2e_pytest_unit + @pytest.mark.parametrize( + "expected", [[torch.tensor([[0, 0, 0], [0, 1, 0], [0, 0, 0]]), torch.tensor([0.0, 0.0, 0.5])]] + ) + def test_infer(self, monkeypatch, mocker, set_zero_shot_segment_anything, expected: torch.Tensor) -> None: + """Test infer.""" + monkeypatch.setattr( + "otx.algorithms.visual_prompting.adapters.pytorch_lightning.models.visual_prompters.zero_shot_segment_anything.PromptGetter", + MockPromptGetter, + ) + + zero_shot_segment_anything = set_zero_shot_segment_anything() + zero_shot_segment_anything.prompt_getter.reference_feats = {1: torch.rand((1, 2))} + 
zero_shot_segment_anything.prompt_getter.reference_prompts = {1: torch.zeros((8, 8))} + mocker.patch.object( + zero_shot_segment_anything, + "_predict_mask", + return_value=( + torch.tensor([[[[0, 0, 0], [0, 1, 0], [0, 0, 0]]]]), + torch.tensor([1, 0, 0]), + torch.tensor([[[[0, 0, 0], [0, 1, 0], [0, 0, 0]]]]), + ), + ) + + total_results = zero_shot_segment_anything.infer( + images=torch.ones((1, 3, 8, 8)), padding=(0, 0, 0, 0), original_size=(8, 8) + ) + + for i, results in enumerate(total_results[0]): + for _, result in results.items(): + assert torch.equal(result[0], expected[i]) + + @e2e_pytest_unit + def test_preprocess_prompts(self, set_zero_shot_segment_anything) -> None: + """Test _preprocess_prompts. + + TODO (sungchul) + - get inputs grouped as label and prompts + - use points and annotations. + """ + zero_shot_segment_anything = set_zero_shot_segment_anything() + bboxes = [torch.tensor([0, 0, 1, 1])] + labels = [MockScoredLabel(label=1)] + processed_prompts = zero_shot_segment_anything._preprocess_prompts( + bboxes=bboxes, + labels=labels, + ) + + # processed_prompts = {labels[0]: [{"box": torch.tensor([[0, 0, 1, 1]])}]} + assert torch.equal(processed_prompts[labels[0]][0].get("box")[0], bboxes[0]) + + @e2e_pytest_unit + def test_generate_masked_features(self, set_zero_shot_segment_anything) -> None: + """Test _generate_masked_features.""" + zero_shot_segment_anything = set_zero_shot_segment_anything() + zero_shot_segment_anything.config.model.image_size = 16 + feats = torch.rand((8, 8, 1)) + masks = torch.zeros((16, 16), dtype=torch.float32) + masks[4:12, 4:12] = 1.0 + + masked_feat = zero_shot_segment_anything._generate_masked_features(feats=feats, masks=masks, threshold_mask=0.3) + + assert masked_feat.shape == (1, 1) + + @e2e_pytest_unit + def test_preprocess_mask(self, set_zero_shot_segment_anything) -> None: + """Test _preprocess_mask.""" + zero_shot_segment_anything = set_zero_shot_segment_anything() + zero_shot_segment_anything.config.model.image_size = 16 + + result = zero_shot_segment_anything._preprocess_mask(x=torch.ones(1, 1, 8, 8)) + + assert result[:8, :8].sum() == 8**2 + assert result[:8, 8:].sum() == 0 + assert result[8:, :8].sum() == 0 + assert result[8:, 8:].sum() == 0 + + @e2e_pytest_unit + @pytest.mark.parametrize("use_only_background", [True, False]) + def test_merge_prompts(self, set_zero_shot_segment_anything, use_only_background: bool) -> None: + """Test _merge_prompts.""" + zero_shot_segment_anything = set_zero_shot_segment_anything() + + input_prompts = {"point_coords": torch.tensor([1]), "point_labels": torch.tensor([1])} + processed_prompts = { + MockScoredLabel(label=0): [{"point_coords": torch.tensor([0]), "point_labels": torch.tensor([0])}], + MockScoredLabel(label=2): [{"point_coords": torch.tensor([2]), "point_labels": torch.tensor([1])}], + } + + merged_input_prompts = zero_shot_segment_anything._merge_prompts( + label=MockScoredLabel(label=1), + input_prompts=input_prompts, + processed_prompts=processed_prompts, + use_only_background=use_only_background, + ) + + if use_only_background: + assert torch.equal(merged_input_prompts.get("point_coords"), torch.tensor([1, 0])) + assert torch.equal(merged_input_prompts.get("point_labels"), torch.tensor([1, 0])) + else: + assert torch.equal(merged_input_prompts.get("point_coords"), torch.tensor([1, 0, 2])) + assert torch.equal(merged_input_prompts.get("point_labels"), torch.tensor([1, 0, 0])) + + @e2e_pytest_unit + def test_predict_target_mask(self, mocker, set_zero_shot_segment_anything) -> None: + 
"""Test _predict_target_mask.""" + zero_shot_segment_anything = set_zero_shot_segment_anything() + mocker.patch.object( + zero_shot_segment_anything, + "_predict_mask", + return_value=( + torch.tensor([[[[0, 0, 0], [0, 1, 0], [0, 0, 0]]]]), + torch.tensor([1, 0, 0]), + torch.tensor([[[[0, 0, 0], [0, 1, 0], [0, 0, 0]]]]), + ), + ) + + mask = zero_shot_segment_anything._predict_target_mask( + image_embeddings=torch.rand(1), input_prompts={}, padding=(0, 0, 0, 0), original_size=(1, 1) + ) + + assert mask.shape == (3, 3) + + @e2e_pytest_unit + def test_predict_mask(self, mocker, set_zero_shot_segment_anything) -> None: + """Test _predict_mask.""" + zero_shot_segment_anything = set_zero_shot_segment_anything() + mocker.patch.object(zero_shot_segment_anything, "postprocess_masks", return_value=torch.Tensor([[1]])) + + masks, scores, low_res_masks = zero_shot_segment_anything._predict_mask( + image_embeddings=torch.rand(1), input_prompts={}, padding=(0, 0, 0, 0), original_size=(1, 1) + ) + + assert masks.dtype == torch.bool + assert scores.shape[1] == 3 + assert low_res_masks.shape[1] == 3 diff --git a/tests/unit/algorithms/visual_prompting/tasks/test_inference.py b/tests/unit/algorithms/visual_prompting/tasks/test_inference.py index 12bfb36817c..996d1f97cd1 100644 --- a/tests/unit/algorithms/visual_prompting/tasks/test_inference.py +++ b/tests/unit/algorithms/visual_prompting/tasks/test_inference.py @@ -4,22 +4,24 @@ # SPDX-License-Identifier: Apache-2.0 # -from collections import OrderedDict from typing import Optional, Dict, Any import pytest from omegaconf import DictConfig -from otx.algorithms.visual_prompting.tasks.inference import InferenceTask +from otx.algorithms.visual_prompting.tasks.inference import InferenceTask, ZeroShotTask from otx.api.usecases.tasks.interfaces.export_interface import ExportType from otx.api.entities.metrics import NullPerformance from otx.api.entities.model import ModelEntity, ModelFormat, ModelOptimizationType from otx.api.entities.resultset import ResultSetEntity from tests.test_suite.e2e_test_system import e2e_pytest_unit +from otx.algorithms.common.configs.training_base import TrainType +from otx.api.entities.train_parameters import TrainParameters from otx.utils.logger import get_logger from tests.unit.algorithms.visual_prompting.test_helpers import ( generate_visual_prompting_dataset, init_environment, + MockImageEncoder, ) logger = get_logger() @@ -29,9 +31,10 @@ class TestInferenceTask: @pytest.fixture def load_inference_task(self, tmpdir, mocker): def _load_inference_task( - output_path: Optional[str] = str(tmpdir.mkdir("visual_prompting_training_test")), + output_path: Optional[str] = str(tmpdir.mkdir("visual_prompting_inference_test")), path: Optional[str] = None, resume: bool = False, + mode: str = "visual_prompt", ): if path is None: mocker_model = None @@ -41,7 +44,7 @@ def _load_inference_task( mocker.patch.dict(mocker_model.model_adapters, {"path": path, "resume": resume}) mocker.patch("pathlib.Path.write_text") - self.task_environment = init_environment(mocker_model) + self.task_environment = init_environment(mocker_model, mode=mode) return InferenceTask(self.task_environment, output_path) @@ -108,6 +111,22 @@ def test_load_model(self, mocker, load_inference_task, path: str, resume: bool, mocker_io_bytes_io.assert_called_once() mocker_torch_load.assert_called_once() + @e2e_pytest_unit + def test_load_model_zeroshot(self, mocker, load_inference_task): + """Test load_model when zero-shot.""" + mocker_segment_anything = mocker.patch( + 
"otx.algorithms.visual_prompting.adapters.pytorch_lightning.models.ZeroShotSegmentAnything" + ) + + inference_task = load_inference_task(mode="zero_shot") + + assert inference_task.hyper_parameters.algo_backend.train_type == TrainType.Zeroshot + + model = inference_task.load_model(otx_model=inference_task.task_environment.model) + + mocker_segment_anything.assert_called_once() + assert "ZeroShotSegmentAnything" in str(model) + @e2e_pytest_unit def test_infer(self, mocker, load_inference_task): """Test infer.""" @@ -210,3 +229,71 @@ def test_export(self, mocker, load_inference_task, export_type: ExportType): assert "visual_prompting_decoder.xml" in output_model.model_adapters assert not output_model.has_xai + + +class TestZeroShotTask: + @pytest.fixture(autouse=True) + def setup(self, tmpdir, mocker): + mocker.patch("pathlib.Path.write_text") + self.task_environment = init_environment(mode="zero_shot") + + self.output_path = str(tmpdir.mkdir("visual_prompting_zeroshot_test")) + + self.zero_shot_task = ZeroShotTask(self.task_environment, self.output_path) + + @e2e_pytest_unit + def test_train(self, mocker): + """Test train.""" + mocker_trainer = mocker.patch("otx.algorithms.visual_prompting.tasks.inference.Trainer") + mocker_save = mocker.patch("torch.save") + mocker.patch.object(self.zero_shot_task, "model_info") + + dataset = generate_visual_prompting_dataset() + output_model = ModelEntity( + dataset, + self.task_environment.get_model_configuration(), + ) + + self.zero_shot_task.train(dataset, output_model, TrainParameters()) + + mocker_trainer.assert_called_once() + mocker_save.assert_called_once() + assert isinstance(output_model.performance, NullPerformance) + assert output_model.model_adapters.get("weights.pth", None) + assert output_model.model_adapters.get("label_schema.json", None) + + @e2e_pytest_unit + def test_infer(self, mocker): + """Test infer.""" + mocker.patch( + "otx.algorithms.visual_prompting.adapters.pytorch_lightning.models.visual_prompters.segment_anything.SegmentAnything.load_checkpoint" + ) + mocker_trainer = mocker.patch("otx.algorithms.visual_prompting.tasks.inference.Trainer") + + dataset = generate_visual_prompting_dataset() + model = ModelEntity(dataset, self.zero_shot_task.task_environment.get_model_configuration()) + + self.zero_shot_task.infer(dataset, model) + + mocker_trainer.assert_called_once() + + @e2e_pytest_unit + def test_save_model(self, mocker): + """Test save_model.""" + mocker.patch( + "otx.algorithms.visual_prompting.adapters.pytorch_lightning.models.visual_prompters.segment_anything.SegmentAnything.load_checkpoint" + ) + + self.zero_shot_task.model = MockImageEncoder() + mocker_otx_model = mocker.patch("otx.api.entities.model.ModelEntity") + mocker_io_bytes_io = mocker.patch("io.BytesIO") + mocker_torch_save = mocker.patch("torch.save") + + self.zero_shot_task.model.prompt_getter = mocker.MagicMock() + self.zero_shot_task.model.prompt_getter.reference_feats.return_value = "reference_feats" + self.zero_shot_task.model.prompt_getter.reference_prompts.return_value = "reference_prompts" + + self.zero_shot_task.save_model(mocker_otx_model) + + mocker_io_bytes_io.assert_called_once() + mocker_torch_save.assert_called_once() diff --git a/tests/unit/algorithms/visual_prompting/test_helpers.py b/tests/unit/algorithms/visual_prompting/test_helpers.py index c01dd322c0b..a9f22c7bf95 100644 --- a/tests/unit/algorithms/visual_prompting/test_helpers.py +++ b/tests/unit/algorithms/visual_prompting/test_helpers.py @@ -5,7 +5,9 @@ # import os -from typing import 
List, Optional, Tuple, Dict +import torch +import torch.nn as nn +from typing import List, Optional, Tuple, Any import numpy as np @@ -30,7 +32,10 @@ from otx.api.entities.task_environment import TaskEnvironment from tests.test_helpers import generate_random_annotated_image -DEFAULT_VISUAL_PROMPTING_TEMPLATE_DIR = os.path.join("src/otx/algorithms/visual_prompting/configs", "sam_vit_b") +DEFAULT_VISUAL_PROMPTING_TEMPLATE_DIR = { + "visual_prompt": os.path.join("src/otx/algorithms/visual_prompting/configs", "sam_vit_b"), + "zero_shot": os.path.join("src/otx/algorithms/visual_prompting/configs", "zero_shot_sam_tiny_vit"), +} labels_names = ("rectangle", "ellipse", "triangle") @@ -103,9 +108,9 @@ def generate_visual_prompting_dataset(use_mask: bool = False) -> DatasetEntity: return DatasetEntity(items) -def init_environment(model: Optional[ModelEntity] = None): +def init_environment(model: Optional[ModelEntity] = None, mode: str = "visual_prompt"): model_template = parse_model_template( - os.path.join(DEFAULT_VISUAL_PROMPTING_TEMPLATE_DIR, "template_experimental.yaml") + os.path.join(DEFAULT_VISUAL_PROMPTING_TEMPLATE_DIR.get(mode), "template_experimental.yaml") ) hyper_parameters = create(model_template.hyper_parameters.data) labels_schema = generate_otx_label_schema() @@ -133,7 +138,68 @@ def __init__(self, use_mask: bool = False): self.offset_bbox: int = 0 self.normalize = self._normalize + def get(self, value: str, default: Optional[Any] = None) -> Any: + return getattr(self, value, default) + class MockConfig: def __init__(self, use_mask: bool = False): self.dataset = MockDatasetConfig(use_mask=use_mask) + + +class MockImageEncoder(nn.Module): + def __init__(self, *args, **kwargs): + super().__init__() + self.backbone = nn.Linear(1, 1) + + def forward(self, *args, **kwargs): + # return torch.Tensor([[1]]) + return torch.ones((1, 2, 4, 4)) + + +class MockPromptEncoder(nn.Module): + def __init__(self, *args, **kwargs): + super().__init__() + self.layer = nn.Linear(1, 1) + self.embed_dim = 4 + self.pe_layer = None + self.mask_downscaling = None + + def forward(self, *args, **kwargs): + return torch.Tensor([[1]]), torch.Tensor([[1]]) + + def get_dense_pe(self): + return torch.Tensor([[1]]) + + +class MockMaskDecoder(nn.Module): + def __init__(self, *args, **kwargs): + super().__init__() + self.layer = nn.Linear(1, 1) + self.num_mask_tokens = 4 + self.predict_masks = None + + def forward(self, *args, **kwargs): + return torch.Tensor([[1]]), torch.Tensor([[1]]) + + +class MockScoredLabel: + def __init__(self, label: int, name: str = "background"): + self.name = name + self.id_ = label + + +class MockPromptGetter(nn.Module): + def __init__(self, *args, **kwargs): + super().__init__() + + def initialize(self): + pass + + def set_default_thresholds(self, *args, **kwargs): + pass + + def forward(self, *args, **kwargs): + return { + MockScoredLabel(label=1, name="label"): (torch.tensor([[0, 0, 0.5], [1, 1, 0.7]]), torch.tensor([[2, 2]])) + } diff --git a/tests/unit/core/ov/graph/test_ov_graph_utils.py b/tests/unit/core/ov/graph/test_ov_graph_utils.py index 7133f523da4..9e3a865dfc4 100644 --- a/tests/unit/core/ov/graph/test_ov_graph_utils.py +++ b/tests/unit/core/ov/graph/test_ov_graph_utils.py @@ -2,6 +2,7 @@ # SPDX-License-Identifier: Apache-2.0 # +import pytest from otx.core.ov.graph.graph import Graph from otx.core.ov.graph.utils import ( get_constant_input_nodes, @@ -38,6 +39,7 @@ def test_handle_merging_into_batchnorm(): @e2e_pytest_unit +@pytest.mark.skip(reason="Updated models are not 
compatible with the paired batchnorm converter")
 def test_handle_paired_batchnorm():
 graph = get_graph()
 handle_paired_batchnorm(graph)
diff --git a/tools/README.md b/tools/README.md
new file mode 100644
index 00000000000..dcc82a39007
--- /dev/null
+++ b/tools/README.md
@@ -0,0 +1,94 @@
+# Experiment helper
+
+experiment.py is a powerful tool designed to streamline and automate the process of conducting experiments using OTX.
+It simplifies the execution of multiple test cases, automatically parses output values,
+and organizes results efficiently.
+The primary goal is to reduce the manual effort required in running experiments and enhance overall productivity.
+
+## Key features
+
+### Automated Experiment Execution
+
+- Given multiple variables, it automatically generates all combinations and runs the experiments.
+- Proper model files are selected automatically when the "otx eval" or "otx optimize" command is executed, based on the preceding command.
+
+### Fault Tolerance
+
+- Subsequent jobs are executed independently, irrespective of whether the previous job raised an error.
+- All failed commands are printed and saved in a file after the entire experiment is finished.
+
+### Automated Result Aggregation
+
+- All possible values from a single workspace are organized and saved in a file.
+- Experiment results are aggregated after the completion of all commands.
+
+## How to Use
+
+### Feature 1 : run experiments & aggregate results
+
+Arguments
+
+- -f / --file : Path to the YAML file describing the experiment setup. After all runs, results are aggregated and saved.
+- -d / --dryrun : Preview the experiment list before execution. Use with the '-f / --file' argument.
+
+Sample Experiment Recipe YAML File:
+
+ output_path: research_framework_demo/det_model_test
+ constants: # values in constants can't reference other constants or variables.
+ det_model_dir: otx/src/otx/algorithms/detection/configs/detection
+ dataset_path: dataset
+ variables:
+ model:
+ - cspdarknet_yolox
+ - mobilenetv2_atss
+ dataset:
+ - diopsis/12
+ repeat: 2
+ command:
+ - otx train ${det_model_dir}/${model}/template.yaml
+ --train-data-roots ${dataset_path}/${dataset}
+ --val-data-roots ${dataset_path}/${dataset}
+ --track-resource-usage
+ params
+ --learning_parameters.num_iters 20
+ - otx eval
+ --test-data-roots ${dataset_path}/${dataset}
+ - otx export
+ - otx eval
+ --test-data-roots ${dataset_path}/${dataset}
+
+Arguments for recipe
+
+- output_path (optional) : Output path where all experiment outputs are saved. Default is "./experiment\_{executed_time}"
+- constants (optional) :
+ Similar to a constant in programming languages.
+ You can use it to replace duplicated strings by writing ${constant_name} in variables or commands.
+- variables (optional) :
+ Used in a similar way to "constants", with one difference: "otx experiment" makes all combinations of variable values and summarizes experiment results based on them.
+ For example, if two models and two datasets are given as variables, a total of 4 cases will be run as experiments. The key of each variable also becomes a row header of the experiment result table (see the sketch after this section for how combinations expand).
+- repeat (optional) : Number of times to run experiments. Each repeated experiment gets a different random seed in the "otx train" command.
+- command (required) : Specifies the commands to run. Supports both single commands and lists of commands.
+
+Upon completion of each experiment, the results are organized within its own workspace.
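To make the combination behavior concrete, below is a minimal, hypothetical sketch of how a recipe's variables expand into individual experiment cases. It is illustrative only — the function and variable names are invented here and are not the actual implementation in experiment.py:

    import itertools
    from typing import Dict, List

    def expand_variables(variables: Dict[str, List[str]]) -> List[Dict[str, str]]:
        """Return one ${...} substitution mapping per combination of variable values."""
        keys = list(variables)
        return [dict(zip(keys, combo)) for combo in itertools.product(*(variables[k] for k in keys))]

    variables = {"model": ["cspdarknet_yolox", "mobilenetv2_atss"], "dataset": ["diopsis/12"]}
    for case in expand_variables(variables):
        command = "otx train ${model}/template.yaml --train-data-roots data/${dataset}"
        for key, value in case.items():
            command = command.replace(f"${{{key}}}", value)  # substitute each ${...} placeholder
        print(command)  # 2 models x 1 dataset -> 2 experiment cases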
+Following the conclusion of all experiments, all experiment results are aggregated in two distinct formats:
+"all experiments result" and "experiment summary" within the specified output_path.
+If the repeat parameter is set to a value greater than 1, the results of repeated experiments are averaged in the summary format.
+
+All TensorBoard log files are automatically copied to the output_path/tensorboard directory.
+To browse every experiment result in TensorBoard, simply pass this directory as the TensorBoard log directory argument.
+If there are failed cases, variables and error logs are both printed and saved as a file after the execution of all commands.
+
+Note that all commands within each case are executed within the same workspace,
+obviating the need to set a template path from the second command onward.
+When the "otx eval" or "otx optimize" command is executed, the model file (model weight or exported model, etc.)
+is automatically selected based on the preceding command.
+The output file of "otx eval" is then stored at "workspace_path/outputs/XXXX\_{train, export, optimize, etc.}/"
+under the name "performance.json".
+
+### Feature 2 : organize experiment result from single workspace
+
+Arguments
+
+- -p / --parse : Path to the workspace. Experiment results in the workspace are organized and saved.
+
+This feature parses all possible values from a single workspace and saves them as a file.
diff --git a/tools/experiment.py b/tools/experiment.py
index 311b7641c2d..6d9a271e547 100644
--- a/tools/experiment.py
+++ b/tools/experiment.py
@@ -6,6 +6,7 @@
 import argparse
 import csv
 import dataclasses
+import gc
 import json
 import os
 import re
@@ -31,6 +32,7 @@ def get_parser() -> argparse.ArgumentParser:
 parser = argparse.ArgumentParser()
 parser.add_argument("-f", "--file", type=str, help="Experiment recipe file.")
 parser.add_argument("-p", "--parse", type=str, help="Workspace path to parse.")
+ parser.add_argument("-d", "--dryrun", action="store_true", help="Print experiment commands without execution.")
 return parser
@@ -566,9 +568,9 @@ def _product_all_cases(
 ) -> List[Dict[str, str]]:
 if isinstance(target_str, str):
 target_str = [target_str]
- found_keys = []
+ found_keys = set()
 for each_str in target_str:
- found_keys.extend([x for x in set(self._replace_pat.findall(each_str)) if x in variable])
+ found_keys.update([x for x in set(self._replace_pat.findall(each_str)) if x in variable])
 if not found_keys:
 return []
@@ -594,9 +596,11 @@ def _replace_var_in_target(
 for key, val in variable.items():
 target = target.replace(f"${{{key}}}", val)
 elif isinstance(target, list):
+ target = target.copy()
 for i in range(len(target)):
 target[i] = self._replace_var_in_target(variable, target[i])
 elif isinstance(target, dict):
+ target = target.copy()
 for key in target.keys():
 target[key] = self._replace_var_in_target(variable, target[key])
 else:
@@ -664,7 +668,10 @@ class OtxCommandRunner:
 repeat_idx (int): repeat index.
""" - OUTPUT_FILE_NAME = {"export": "openvino.bin", "optimize": "weights.pth"} + OUTPUT_FILE_NAME: Dict[str, List[str]] = { + "export": ["openvino.bin"], + "optimize": ["weights.pth", "openvino.bin"] + } def __init__(self, command_ins: Command, repeat_idx: int): self._command_ins = command_ins @@ -673,35 +680,46 @@ def __init__(self, command_ins: Command, repeat_idx: int): self._workspace = Path("_".join(self._command_var.values()).replace("/", "_") + f"_repeat_{repeat_idx}") self._command_var["repeat"] = str(repeat_idx) self._fail_logs: List[CommandFailInfo] = [] - self._previous_cmd_entry: Optional[str] = None + self._previous_cmd_entry: Optional[List[str]] = [] @property def fail_logs(self) -> List[CommandFailInfo]: """Information of all failed cases.""" return self._fail_logs - def run_command_list(self): + def run_command_list(self, dryrun: bool = False): """Run all commands and organize experiment results.""" for command in self._command_ins.command: command = command.split() - if not self._prepare_run_command(command): + if not self._prepare_run_command(command) and not dryrun: print(f"otx {command[1]} is skipped.") continue - self._run_otx_command(command) + if not dryrun: + self._run_otx_command(command) + else: + print(" ".join(command)) + + self._previous_cmd_entry.append(command[1]) - self._previous_cmd_entry = command[1] + gc.collect() - organize_exp_result(self._workspace, self._command_var) + if not dryrun: + organize_exp_result(self._workspace, self._command_var) def _prepare_run_command(self, command: List[str]) -> bool: self.set_arguments_to_cmd(command, "--workspace", str(self._workspace)) cmd_entry = command[1] + previous_cmd = None + for previous_cmd in reversed(self._previous_cmd_entry): + if previous_cmd != "eval": + break + if cmd_entry == "train": self.set_arguments_to_cmd(command, "--seed", str(self._repeat_idx)) elif cmd_entry == "eval": - if self._previous_cmd_entry in self.OUTPUT_FILE_NAME: - file_path = self._find_model_path(self._previous_cmd_entry) + if previous_cmd in ["export", "optimize"]: + file_path = self._find_model_path(previous_cmd) if file_path is None: return False self.set_arguments_to_cmd(command, "--load-weights", str(file_path)) @@ -709,6 +727,12 @@ def _prepare_run_command(self, command: List[str]) -> bool: else: output_path = str(self._workspace / "outputs" / "latest_trained_model") self.set_arguments_to_cmd(command, "--output", output_path) + elif cmd_entry == "optimize": + if previous_cmd == "export": # execute PTQ. 
If not, execute QAT + file_path = self._find_model_path(previous_cmd) + if file_path is None: + return False + self.set_arguments_to_cmd(command, "--load-weights", str(file_path)) return True @@ -724,11 +748,13 @@ def _find_model_path(self, cmd_entry: str): if output_dir is None: print(f"There is no {cmd_entry} output directory.") return None - file_path = list(output_dir.rglob(self.OUTPUT_FILE_NAME[cmd_entry])) - if not file_path: - print(f"{self.OUTPUT_FILE_NAME[cmd_entry]} can't be found.") - return None - return file_path[0] + for file_name in self.OUTPUT_FILE_NAME[cmd_entry]: + file_path = list(output_dir.rglob(file_name)) + if file_path: + return file_path[0] + + print(f"{', '.join(self.OUTPUT_FILE_NAME[cmd_entry])} can't be found.") + return None @staticmethod def set_arguments_to_cmd(command: List[str], key: str, value: Optional[str] = None, before_params: bool = True): @@ -755,11 +781,12 @@ def set_arguments_to_cmd(command: List[str], key: str, value: Optional[str] = No command.insert(index, key) -def run_experiment_recipe(recipe_file: Union[str, Path]): +def run_experiment_recipe(recipe_file: Union[str, Path], dryrun: bool = False): """Run experiments based on the recipe. Args: recipe_file (Union[str, Path]): Recipe file to run. + dryrun (bool, optional): Whether to only print experiment commands. Defaults to False. """ exp_recipe = ExpRecipeParser(recipe_file) output_path = exp_recipe.output_path @@ -771,7 +798,7 @@ def run_experiment_recipe(recipe_file: Union[str, Path]): for command_ins in exp_recipe.commands: for repeat_idx in range(exp_recipe.repeat): otx_cmd_runner = OtxCommandRunner(command_ins, repeat_idx) - otx_cmd_runner.run_command_list() + otx_cmd_runner.run_command_list(dryrun) fail_cases.extend(otx_cmd_runner.fail_logs) os.chdir(current_dir) @@ -779,7 +806,8 @@ def run_experiment_recipe(recipe_file: Union[str, Path]): if fail_cases: log_fail_cases(fail_cases, output_path) - aggregate_all_exp_result(output_path) + if not dryrun: + aggregate_all_exp_result(output_path) def main(): @@ -790,7 +818,7 @@ def main(): if args.file is not None and args.parse is not None: print("Please give either --file or --parse argument.") elif args.file is not None: - run_experiment_recipe(args.file) + run_experiment_recipe(args.file, args.dryrun) elif args.parse is not None: organize_exp_result(args.parse) else: diff --git a/tox.ini b/tox.ini index b5de386f04d..1d38a6e391b 100644 --- a/tox.ini +++ b/tox.ini @@ -14,6 +14,7 @@ passenv = HTTPS_PROXY CUDA_VISIBLE_DEVICES CI_DATA_ROOT + REG_RESULTS_ROOT test_dir = all: cli ano: cli/anomaly @@ -83,6 +84,7 @@ commands = deps = {[testenv:tests-all-py310-pt1]deps} atheris +extras = full commands = coverage erase - coverage run tests/fuzzing/cli_fuzzing.py {posargs:-dict=tests/fuzzing/assets/cli/operations.dict -artifact_prefix={toxworkdir}/ -print_final_stats=1 -atheris_runs=500000} @@ -97,7 +99,9 @@ deps = change_dir = {toxinidir}/docs allowlist_externals = make +extras = full commands = + make clean make html From dee113c4dfc5c25bc2b248ca46fa966722e2b973 Mon Sep 17 00:00:00 2001 From: Eunwoo Shin Date: Thu, 28 Dec 2023 23:37:02 +0900 Subject: [PATCH 16/39] Deal with dynamic input shape on XPU (#2740) * set keep_ratio to False and decrease number of yolox input shape when using XPU * use model_xpu.py when using XPU * use data_pipeline file for xpu when training on XPU * logging when cfg file is changed for xpu * revert configurer.py * align with pre-commit * fix typo --- src/otx/algorithms/common/utils/__init__.py | 2 + 
src/otx/algorithms/common/utils/utils.py | 21 +++- .../detection/adapters/mmdet/task.py | 3 +- .../iseg_efficientnet_data_pipeline_xpu.py | 107 +++++++++++++++++ .../data/iseg_resnet_data_pipeline_xpu.py | 107 +++++++++++++++++ .../detection/cspdarknet_yolox_l/model_xpu.py | 26 +++++ .../detection/cspdarknet_yolox_s/model_xpu.py | 26 +++++ .../cspdarknet_yolox_tiny/model_xpu.py | 37 ++++++ .../detection/cspdarknet_yolox_x/model_xpy.py | 26 +++++ .../data_pipeline_xpu.py | 7 ++ .../maskrcnn_swin_t/data_pipeline_xpu.py | 108 ++++++++++++++++++ .../resnet50_maskrcnn/data_pipeline_xpu.py | 7 ++ src/otx/algorithms/detection/task.py | 4 +- src/otx/cli/manager/config_manager.py | 7 ++ src/otx/utils/utils.py | 22 ++++ 15 files changed, 506 insertions(+), 4 deletions(-) create mode 100644 src/otx/algorithms/detection/configs/base/data/iseg_efficientnet_data_pipeline_xpu.py create mode 100644 src/otx/algorithms/detection/configs/base/data/iseg_resnet_data_pipeline_xpu.py create mode 100644 src/otx/algorithms/detection/configs/detection/cspdarknet_yolox_l/model_xpu.py create mode 100644 src/otx/algorithms/detection/configs/detection/cspdarknet_yolox_s/model_xpu.py create mode 100644 src/otx/algorithms/detection/configs/detection/cspdarknet_yolox_tiny/model_xpu.py create mode 100644 src/otx/algorithms/detection/configs/detection/cspdarknet_yolox_x/model_xpy.py create mode 100644 src/otx/algorithms/detection/configs/instance_segmentation/efficientnetb2b_maskrcnn/data_pipeline_xpu.py create mode 100644 src/otx/algorithms/detection/configs/instance_segmentation/maskrcnn_swin_t/data_pipeline_xpu.py create mode 100644 src/otx/algorithms/detection/configs/instance_segmentation/resnet50_maskrcnn/data_pipeline_xpu.py create mode 100644 src/otx/utils/utils.py diff --git a/src/otx/algorithms/common/utils/__init__.py b/src/otx/algorithms/common/utils/__init__.py index fd5bccd3657..2a6d2475724 100644 --- a/src/otx/algorithms/common/utils/__init__.py +++ b/src/otx/algorithms/common/utils/__init__.py @@ -28,6 +28,7 @@ UncopiableDefaultDict, cast_bf16_to_fp32, get_arg_spec, + get_cfg_based_on_device, get_default_async_reqs_num, get_task_class, is_hpu_available, @@ -57,6 +58,7 @@ "is_xpu_available", "is_hpu_available", "cast_bf16_to_fp32", + "get_cfg_based_on_device", ] diff --git a/src/otx/algorithms/common/utils/utils.py b/src/otx/algorithms/common/utils/utils.py index 675aab5bb9b..89ef89545bc 100644 --- a/src/otx/algorithms/common/utils/utils.py +++ b/src/otx/algorithms/common/utils/utils.py @@ -10,7 +10,7 @@ import sys from collections import defaultdict from pathlib import Path -from typing import Any, Callable, Dict, Optional, Tuple +from typing import Any, Callable, Dict, Optional, Tuple, Union import numpy as np import onnx @@ -18,6 +18,12 @@ import yaml from addict import Dict as adict +from otx.utils.logger import get_logger +from otx.utils.utils import add_suffix_to_filename + +logger = get_logger() + + HPU_AVAILABLE = None try: import habana_frameworks.torch as htorch @@ -200,3 +206,16 @@ def cast_bf16_to_fp32(tensor: torch.Tensor) -> torch.Tensor: if tensor.dtype == torch.bfloat16: tensor = tensor.to(torch.float32) return tensor + + +def get_cfg_based_on_device(cfg_file_path: Union[str, Path]) -> str: + """Find a config file according to device.""" + if is_xpu_available(): + cfg_for_device = add_suffix_to_filename(cfg_file_path, "_xpu") + if cfg_for_device.exists(): + logger.info( + f"XPU is detected. 
XPU config file will be used : {Path(cfg_file_path).name} -> {cfg_for_device.name}" + ) + cfg_file_path = cfg_for_device + + return str(cfg_file_path) diff --git a/src/otx/algorithms/detection/adapters/mmdet/task.py b/src/otx/algorithms/detection/adapters/mmdet/task.py index 3a210a6d30d..142a340ade8 100644 --- a/src/otx/algorithms/detection/adapters/mmdet/task.py +++ b/src/otx/algorithms/detection/adapters/mmdet/task.py @@ -40,6 +40,7 @@ from otx.algorithms.common.configs.training_base import TrainType from otx.algorithms.common.tasks.nncf_task import NNCFBaseTask from otx.algorithms.common.utils.data import get_dataset +from otx.algorithms.common.utils.utils import get_cfg_based_on_device from otx.algorithms.detection.adapters.mmdet.apis.train import train_detector from otx.algorithms.detection.adapters.mmdet.configurer import ( DetectionConfigurer, @@ -94,7 +95,7 @@ def __init__(self, task_environment: TaskEnvironment, output_path: Optional[str] def _init_task(self): # noqa """Initialize task.""" - self._recipe_cfg = OTXConfig.fromfile(os.path.join(self._model_dir, "model.py")) + self._recipe_cfg = OTXConfig.fromfile(get_cfg_based_on_device(os.path.join(self._model_dir, "model.py"))) self._recipe_cfg.domain = self._task_type.domain self._config = self._recipe_cfg diff --git a/src/otx/algorithms/detection/configs/base/data/iseg_efficientnet_data_pipeline_xpu.py b/src/otx/algorithms/detection/configs/base/data/iseg_efficientnet_data_pipeline_xpu.py new file mode 100644 index 00000000000..b6a8f94e76e --- /dev/null +++ b/src/otx/algorithms/detection/configs/base/data/iseg_efficientnet_data_pipeline_xpu.py @@ -0,0 +1,107 @@ +"""Data Pipeline of EfficientNetB2B model for Instance-Seg Task.""" + +# Copyright (C) 2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +# pylint: disable=invalid-name + +__img_size = (1024, 1024) + +# TODO: A comparison experiment is needed to determine which value is appropriate for to_rgb. 
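+# (Assuming mmcv's standard Normalize behavior, not verified in this PR: to_rgb=True converts
+# BGR-loaded images to RGB before normalization, so the mean/std below are interpreted in the
+# post-conversion RGB channel order — that ordering is what the comparison above would pin down.)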
+__img_norm_cfg = dict(mean=(103.53, 116.28, 123.675), std=(1.0, 1.0, 1.0), to_rgb=True) + +train_pipeline = [ + dict( + type="LoadResizeDataFromOTXDataset", + load_ann_cfg=dict( + type="LoadAnnotationFromOTXDataset", + domain="instance_segmentation", + with_bbox=True, + with_mask=True, + poly2mask=False, + ), + resize_cfg=dict( + type="Resize", + img_scale=__img_size, + keep_ratio=True, + ), + enable_memcache=True, # Cache after resizing image & annotations + ), + dict(type="RandomFlip", flip_ratio=0.5), + dict(type="Normalize", **__img_norm_cfg), + dict(type="Pad", size=__img_size), + dict(type="DefaultFormatBundle"), + dict( + type="Collect", + keys=["img", "gt_bboxes", "gt_labels", "gt_masks"], + meta_keys=[ + "ori_filename", + "flip_direction", + "scale_factor", + "img_norm_cfg", + "gt_ann_ids", + "flip", + "ignored_labels", + "ori_shape", + "filename", + "img_shape", + "pad_shape", + ], + ), +] + +val_pipeline = [ + dict( + type="LoadResizeDataFromOTXDataset", + resize_cfg=dict(type="Resize", img_scale=__img_size, keep_ratio=True), + enable_memcache=True, # Cache after resizing image + ), + dict( + type="MultiScaleFlipAug", + img_scale=__img_size, + flip=False, + transforms=[ + dict(type="RandomFlip"), + dict(type="Normalize", **__img_norm_cfg), + dict(type="Pad", size=__img_size), + dict(type="ImageToTensor", keys=["img"]), + dict(type="Collect", keys=["img"]), + ], + ), +] + +test_pipeline = [ + dict(type="LoadImageFromOTXDataset"), + dict( + type="MultiScaleFlipAug", + img_scale=__img_size, + flip=False, + transforms=[ + dict(type="Resize", keep_ratio=False), + dict(type="RandomFlip"), + dict(type="Normalize", **__img_norm_cfg), + dict(type="Pad", size=__img_size), + dict(type="ImageToTensor", keys=["img"]), + dict(type="Collect", keys=["img"]), + ], + ), +] + +__dataset_type = "OTXDetDataset" + +data = dict( + train=dict( + type=__dataset_type, + pipeline=train_pipeline, + ), + val=dict( + type=__dataset_type, + test_mode=True, + pipeline=val_pipeline, + ), + test=dict( + type=__dataset_type, + test_mode=True, + pipeline=test_pipeline, + ), +) diff --git a/src/otx/algorithms/detection/configs/base/data/iseg_resnet_data_pipeline_xpu.py b/src/otx/algorithms/detection/configs/base/data/iseg_resnet_data_pipeline_xpu.py new file mode 100644 index 00000000000..4205e2f42f9 --- /dev/null +++ b/src/otx/algorithms/detection/configs/base/data/iseg_resnet_data_pipeline_xpu.py @@ -0,0 +1,107 @@ +"""Data Pipeline of Resnet model for Instance-Seg Task.""" + +# Copyright (C) 2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +# pylint: disable=invalid-name + +__img_size = (1024, 1024) + +# TODO: A comparison experiment is needed to determine which value is appropriate for to_rgb. 
+__img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) + +train_pipeline = [ + dict( + type="LoadResizeDataFromOTXDataset", + load_ann_cfg=dict( + type="LoadAnnotationFromOTXDataset", + domain="instance_segmentation", + with_bbox=True, + with_mask=True, + poly2mask=False, + ), + resize_cfg=dict( + type="Resize", + img_scale=__img_size, + keep_ratio=True, + ), + enable_memcache=True, # Cache after resizing image & annotations + ), + dict(type="RandomFlip", flip_ratio=0.5), + dict(type="Normalize", **__img_norm_cfg), + dict(type="Pad", size=__img_size), + dict(type="DefaultFormatBundle"), + dict( + type="Collect", + keys=["img", "gt_bboxes", "gt_labels", "gt_masks"], + meta_keys=[ + "ori_filename", + "flip_direction", + "scale_factor", + "img_norm_cfg", + "gt_ann_ids", + "flip", + "ignored_labels", + "ori_shape", + "filename", + "img_shape", + "pad_shape", + ], + ), +] + +val_pipeline = [ + dict( + type="LoadResizeDataFromOTXDataset", + resize_cfg=dict(type="Resize", img_scale=__img_size, keep_ratio=True), + enable_memcache=True, # Cache after resizing image + ), + dict( + type="MultiScaleFlipAug", + img_scale=__img_size, + flip=False, + transforms=[ + dict(type="RandomFlip"), + dict(type="Normalize", **__img_norm_cfg), + dict(type="Pad", size=__img_size), + dict(type="ImageToTensor", keys=["img"]), + dict(type="Collect", keys=["img"]), + ], + ), +] + +test_pipeline = [ + dict(type="LoadImageFromOTXDataset"), + dict( + type="MultiScaleFlipAug", + img_scale=__img_size, + flip=False, + transforms=[ + dict(type="Resize", keep_ratio=True), + dict(type="RandomFlip"), + dict(type="Normalize", **__img_norm_cfg), + dict(type="Pad", size=__img_size), + dict(type="ImageToTensor", keys=["img"]), + dict(type="Collect", keys=["img"]), + ], + ), +] + +__dataset_type = "OTXDetDataset" + +data = dict( + train=dict( + type=__dataset_type, + pipeline=train_pipeline, + ), + val=dict( + type=__dataset_type, + test_mode=True, + pipeline=val_pipeline, + ), + test=dict( + type=__dataset_type, + test_mode=True, + pipeline=test_pipeline, + ), +) diff --git a/src/otx/algorithms/detection/configs/detection/cspdarknet_yolox_l/model_xpu.py b/src/otx/algorithms/detection/configs/detection/cspdarknet_yolox_l/model_xpu.py new file mode 100644 index 00000000000..d83f70f6c60 --- /dev/null +++ b/src/otx/algorithms/detection/configs/detection/cspdarknet_yolox_l/model_xpu.py @@ -0,0 +1,26 @@ +"""Model configuration of YOLOX_L model for Detection Task.""" + +# Copyright (C) 2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +# pylint: disable=invalid-name + +_base_ = ["../../../../../recipes/stages/detection/incremental.py", "../../base/models/detector.py"] + +model = dict( + type="CustomYOLOX", + backbone=dict(type="CSPDarknet", deepen_factor=1.0, widen_factor=1.0, out_indices=(2, 3, 4)), + neck=dict(type="YOLOXPAFPN", in_channels=[256, 512, 1024], out_channels=256, num_csp_blocks=3), + bbox_head=dict(type="CustomYOLOXHead", num_classes=80, in_channels=256, feat_channels=256), + train_cfg=dict(assigner=dict(type="SimOTAAssigner", center_radius=2.5)), + # In order to align the source code, the threshold of the val phase is + # 0.01, and the threshold of the test phase is 0.001. 
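+    # (Assumption, inferred from this PR's goal of taming dynamic input shapes on XPU:
+    # size_multiplier=160 combined with random_size_range=(3, 5) below limits multi-scale
+    # training to three candidate resolutions — 480, 640, and 800 — so XPU kernels see a
+    # small, fixed set of input shapes instead of a fully dynamic range.)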
+ test_cfg=dict(score_thr=0.01, nms=dict(type="nms", iou_threshold=0.65), max_per_img=100), + size_multiplier=160, + random_size_range=(3, 5), +) +load_from = "https://download.openmmlab.com/mmdetection/v2.0/yolox/\ +yolox_l_8x8_300e_coco/yolox_l_8x8_300e_coco_20211126_140236-d3bd2b23.pth" + +fp16 = dict(loss_scale=512.0, bf16_training=False) +ignore = False diff --git a/src/otx/algorithms/detection/configs/detection/cspdarknet_yolox_s/model_xpu.py b/src/otx/algorithms/detection/configs/detection/cspdarknet_yolox_s/model_xpu.py new file mode 100644 index 00000000000..b5f07241f66 --- /dev/null +++ b/src/otx/algorithms/detection/configs/detection/cspdarknet_yolox_s/model_xpu.py @@ -0,0 +1,26 @@ +"""Model configuration of YOLOX_S model for Detection Task.""" + +# Copyright (C) 2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +# pylint: disable=invalid-name + +_base_ = ["../../../../../recipes/stages/detection/incremental.py", "../../base/models/detector.py"] + +model = dict( + type="CustomYOLOX", + backbone=dict(type="CSPDarknet", deepen_factor=0.33, widen_factor=0.5, out_indices=(2, 3, 4)), + neck=dict(type="YOLOXPAFPN", in_channels=[128, 256, 512], out_channels=128, num_csp_blocks=4), + bbox_head=dict(type="CustomYOLOXHead", num_classes=80, in_channels=128, feat_channels=128), + train_cfg=dict(assigner=dict(type="SimOTAAssigner", center_radius=2.5)), + # In order to align the source code, the threshold of the val phase is + # 0.01, and the threshold of the test phase is 0.001. + test_cfg=dict(score_thr=0.01, nms=dict(type="nms", iou_threshold=0.65), max_per_img=100), + size_multiplier=160, + random_size_range=(3, 5), +) +load_from = "https://download.openmmlab.com/mmdetection/v2.0/yolox/\ +yolox_s_8x8_300e_coco/yolox_s_8x8_300e_coco_20211121_095711-4592a793.pth" + +fp16 = dict(loss_scale=512.0, bf16_training=False) +ignore = False diff --git a/src/otx/algorithms/detection/configs/detection/cspdarknet_yolox_tiny/model_xpu.py b/src/otx/algorithms/detection/configs/detection/cspdarknet_yolox_tiny/model_xpu.py new file mode 100644 index 00000000000..e7269d687fd --- /dev/null +++ b/src/otx/algorithms/detection/configs/detection/cspdarknet_yolox_tiny/model_xpu.py @@ -0,0 +1,37 @@ +"""Model configuration of YOLOX Tiny model for Detection Task.""" + +# Copyright (C) 2022 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions +# and limitations under the License. 
+ +# pylint: disable=invalid-name + +_base_ = ["../../../../../recipes/stages/detection/incremental.py", "../../base/models/detector.py"] + +model = dict( + type="CustomYOLOX", + backbone=dict(type="CSPDarknet", deepen_factor=0.33, widen_factor=0.375, out_indices=(2, 3, 4)), + neck=dict(type="YOLOXPAFPN", in_channels=[96, 192, 384], out_channels=96, num_csp_blocks=1), + bbox_head=dict(type="CustomYOLOXHead", num_classes=80, in_channels=96, feat_channels=96), + train_cfg=dict(assigner=dict(type="SimOTAAssigner", center_radius=2.5)), + # In order to align the source code, the threshold of the val phase is + # 0.01, and the threshold of the test phase is 0.001. + test_cfg=dict(score_thr=0.01, nms=dict(type="nms", iou_threshold=0.65), max_per_img=100), + size_multiplier=160, + random_size_range=(3, 5), +) +load_from = "https://storage.openvinotoolkit.org/repositories/openvino_training_extensions\ +/models/object_detection/v2/yolox_tiny_8x8.pth" + +fp16 = dict(loss_scale=512.0, bf16_training=False) +ignore = False diff --git a/src/otx/algorithms/detection/configs/detection/cspdarknet_yolox_x/model_xpy.py b/src/otx/algorithms/detection/configs/detection/cspdarknet_yolox_x/model_xpy.py new file mode 100644 index 00000000000..d54001b888d --- /dev/null +++ b/src/otx/algorithms/detection/configs/detection/cspdarknet_yolox_x/model_xpy.py @@ -0,0 +1,26 @@ +"""Model configuration of YOLOX_X model for Detection Task.""" + +# Copyright (C) 2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +# pylint: disable=invalid-name + +_base_ = ["../../../../../recipes/stages/detection/incremental.py", "../../base/models/detector.py"] + +model = dict( + type="CustomYOLOX", + backbone=dict(type="CSPDarknet", deepen_factor=1.33, widen_factor=1.25, out_indices=(2, 3, 4)), + neck=dict(type="YOLOXPAFPN", in_channels=[320, 640, 1280], out_channels=320, num_csp_blocks=4), + bbox_head=dict(type="CustomYOLOXHead", num_classes=80, in_channels=320, feat_channels=320), + train_cfg=dict(assigner=dict(type="SimOTAAssigner", center_radius=2.5)), + # In order to align the source code, the threshold of the val phase is + # 0.01, and the threshold of the test phase is 0.001. 
+ test_cfg=dict(score_thr=0.01, nms=dict(type="nms", iou_threshold=0.65), max_per_img=100), + size_multiplier=160, + random_size_range=(3, 5), +) +load_from = "https://download.openmmlab.com/mmdetection/v2.0/yolox\ +/yolox_x_8x8_300e_coco/yolox_x_8x8_300e_coco_20211126_140254-1ef88d67.pth" + +fp16 = dict(loss_scale=512.0, bf16_training=False) +ignore = False diff --git a/src/otx/algorithms/detection/configs/instance_segmentation/efficientnetb2b_maskrcnn/data_pipeline_xpu.py b/src/otx/algorithms/detection/configs/instance_segmentation/efficientnetb2b_maskrcnn/data_pipeline_xpu.py new file mode 100644 index 00000000000..b6b58f84442 --- /dev/null +++ b/src/otx/algorithms/detection/configs/instance_segmentation/efficientnetb2b_maskrcnn/data_pipeline_xpu.py @@ -0,0 +1,7 @@ +"""Data Pipeline of EfficientNetB2B model for Instance-Seg Task.""" + +# Copyright (C) 2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + + +_base_ = ["../../base/data/iseg_efficientnet_data_pipeline_xpu.py"] diff --git a/src/otx/algorithms/detection/configs/instance_segmentation/maskrcnn_swin_t/data_pipeline_xpu.py b/src/otx/algorithms/detection/configs/instance_segmentation/maskrcnn_swin_t/data_pipeline_xpu.py new file mode 100644 index 00000000000..6ff89c87dc6 --- /dev/null +++ b/src/otx/algorithms/detection/configs/instance_segmentation/maskrcnn_swin_t/data_pipeline_xpu.py @@ -0,0 +1,108 @@ +"""Data Pipeline of MaskRCNN-SwinT-FP16 model for Instance-Seg Task.""" + +# Copyright (C) 2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +# pylint: disable=invalid-name + +__img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) + +__img_size = (1344, 1344) + +meta_keys = [ + "ori_filename", + "flip_direction", + "scale_factor", + "img_norm_cfg", + "gt_ann_ids", + "flip", + "ignored_labels", + "ori_shape", + "filename", + "img_shape", + "pad_shape", +] + +train_pipeline = [ + dict( + type="LoadResizeDataFromOTXDataset", + load_ann_cfg=dict( + type="LoadAnnotationFromOTXDataset", + domain="instance_segmentation", + with_bbox=True, + with_mask=True, + poly2mask=False, + ), + resize_cfg=dict( + type="Resize", + img_scale=__img_size, + keep_ratio=True, + ), + enable_memcache=True, # Cache after resizing image & annotations + ), + dict(type="RandomFlip", flip_ratio=0.5), + dict(type="Normalize", **__img_norm_cfg), + dict(type="Pad", size=__img_size), + dict(type="DefaultFormatBundle"), + dict( + type="Collect", + keys=["img", "gt_bboxes", "gt_labels", "gt_masks"], + meta_keys=meta_keys, + ), +] + +val_pipeline = [ + dict( + type="LoadResizeDataFromOTXDataset", + resize_cfg=dict(type="Resize", img_scale=__img_size, keep_ratio=True), + enable_memcache=True, # Cache after resizing image + ), + dict( + type="MultiScaleFlipAug", + img_scale=__img_size, + flip=False, + transforms=[ + dict(type="RandomFlip"), + dict(type="Normalize", **__img_norm_cfg), + dict(type="Pad", size=__img_size), + dict(type="ImageToTensor", keys=["img"]), + dict(type="Collect", keys=["img"]), + ], + ), +] + +test_pipeline = [ + dict(type="LoadImageFromOTXDataset"), + dict( + type="MultiScaleFlipAug", + img_scale=__img_size, + flip=False, + transforms=[ + dict(type="Resize", keep_ratio=True), + dict(type="RandomFlip"), + dict(type="Normalize", **__img_norm_cfg), + dict(type="Pad", size=__img_size), + dict(type="ImageToTensor", keys=["img"]), + dict(type="Collect", keys=["img"]), + ], + ), +] + +__dataset_type = "OTXDetDataset" + +data = dict( + train=dict( + type=__dataset_type, + 
pipeline=train_pipeline, + ), + val=dict( + type=__dataset_type, + test_mode=True, + pipeline=val_pipeline, + ), + test=dict( + type=__dataset_type, + test_mode=True, + pipeline=test_pipeline, + ), +) diff --git a/src/otx/algorithms/detection/configs/instance_segmentation/resnet50_maskrcnn/data_pipeline_xpu.py b/src/otx/algorithms/detection/configs/instance_segmentation/resnet50_maskrcnn/data_pipeline_xpu.py new file mode 100644 index 00000000000..4efb33a8fb0 --- /dev/null +++ b/src/otx/algorithms/detection/configs/instance_segmentation/resnet50_maskrcnn/data_pipeline_xpu.py @@ -0,0 +1,7 @@ +"""Data Pipeline of Resnet model for Instance-Seg Task.""" + +# Copyright (C) 2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + + +_base_ = ["../../base/data/iseg_resnet_data_pipeline_xpu.py"] diff --git a/src/otx/algorithms/detection/task.py b/src/otx/algorithms/detection/task.py index 78af633e2a1..7174315481f 100644 --- a/src/otx/algorithms/detection/task.py +++ b/src/otx/algorithms/detection/task.py @@ -20,7 +20,7 @@ TrainingProgressCallback, ) from otx.algorithms.common.utils.ir import embed_ir_model_data -from otx.algorithms.common.utils.utils import embed_onnx_model_data +from otx.algorithms.common.utils.utils import embed_onnx_model_data, get_cfg_based_on_device from otx.algorithms.detection.configs.base import DetectionConfig from otx.algorithms.detection.utils import create_detection_shapes, create_mask_shapes, get_det_model_api_configuration from otx.api.configuration import cfg_helper @@ -91,7 +91,7 @@ def __init__(self, task_environment: TaskEnvironment, output_path: Optional[str] if self._hyperparams.tiling_parameters.enable_tiling: self.data_pipeline_path = os.path.join(self._model_dir, "tile_pipeline.py") else: - self.data_pipeline_path = os.path.join(self._model_dir, "data_pipeline.py") + self.data_pipeline_path = get_cfg_based_on_device(os.path.join(self._model_dir, "data_pipeline.py")) if hasattr(self._hyperparams.learning_parameters, "input_size"): input_size_cfg = InputSizePreset(self._hyperparams.learning_parameters.input_size.value) diff --git a/src/otx/cli/manager/config_manager.py b/src/otx/cli/manager/config_manager.py index 1143010cd33..67528710090 100644 --- a/src/otx/cli/manager/config_manager.py +++ b/src/otx/cli/manager/config_manager.py @@ -30,6 +30,7 @@ from otx.cli.utils.parser import gen_param_help, gen_params_dict_from_args from otx.core.data.manager.dataset_manager import DatasetManager from otx.utils.logger import get_logger +from otx.utils.utils import add_suffix_to_filename logger = get_logger() @@ -636,6 +637,12 @@ def build_workspace(self, new_workspace_path: Optional[str] = None) -> None: ] for target_dir, file_name, dest_dir in config_files: self._copy_config_files(target_dir, file_name, dest_dir) + + # check xpu file exists + xpu_file = add_suffix_to_filename(target_dir / file_name, "_xpu") + if xpu_file.exists(): + self._copy_config_files(xpu_file.parent, xpu_file.name, dest_dir) + (self.workspace_root / "template.yaml").write_text(OmegaConf.to_yaml(template_config)) # Copy deployment_tile_classifier for Instance Segmentation diff --git a/src/otx/utils/utils.py b/src/otx/utils/utils.py new file mode 100644 index 00000000000..0c0cfe66e31 --- /dev/null +++ b/src/otx/utils/utils.py @@ -0,0 +1,22 @@ +"""Utility functions collection.""" +# Copyright (C) 2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 +# + +from pathlib import Path +from typing import Union + + +def add_suffix_to_filename(file_path: Union[str, Path], suffix: str) -> 
Path: + """Add suffix to file name. + + Args: + file_path (Union[str, Path]): File path to add suffix to. + suffix (str): Suffix to add. + + Returns: + Path: Suffix added path. + """ + if isinstance(file_path, str): + file_path = Path(file_path) + return file_path.parent / f"{file_path.stem}{suffix}{file_path.suffix}" From 54751ca009a0e110bcc1cef5bd1b6a2ff704127a Mon Sep 17 00:00:00 2001 From: Prokofiev Kirill Date: Tue, 20 Feb 2024 09:23:48 +0100 Subject: [PATCH 17/39] Merge develop to Intel dGPU branch (#2927) * merge develop * fix pre-commit * fix pre-commit * minor * revert change back * revert change back for now * fix typo --- .ci/Dockerfile | 2 +- .github/workflows/code_scan.yml | 19 +- .github/workflows/daily.yml | 3 + .github/workflows/docs.yml | 11 +- .github/workflows/docs_stable.yml | 11 +- .github/workflows/labeler.yml | 5 +- .github/workflows/perf-accuracy.yml | 76 +++ .github/workflows/perf-speed.yml | 62 +++ .github/workflows/pre_merge.yml | 53 +- .github/workflows/publish.yml | 35 +- .github/workflows/publish_internal.yml | 38 +- .github/workflows/run_tests_in_tox.yml | 21 +- .github/workflows/run_tests_in_tox_custom.yml | 80 +++ .github/workflows/scorecard.yml | 72 +++ .github/workflows/stale_marker.yml | 5 +- .github/workflows/weekly.yml | 3 + CHANGELOG.md | 9 +- Dockerfile | 2 +- .../visual_prompting/fine_tuning.rst | 113 ++++ .../algorithms/visual_prompting/index.rst | 114 +--- .../algorithms/visual_prompting/zero_shot.rst | 103 ++++ docs/utils/images/vpm_ref_prediction.png | Bin 0 -> 198185 bytes docs/utils/images/vpm_ref_result.png | Bin 0 -> 190022 bytes docs/utils/images/vpm_tgt_prediction.png | Bin 0 -> 302006 bytes requirements/base.txt | 2 +- requirements/dev.txt | 1 + requirements/openvino.txt | 8 +- requirements/publish.txt | 2 + .../configs/classification/configuration.yaml | 4 +- .../configs/detection/configuration.yaml | 4 +- .../anomaly/configs/base/configuration.py | 4 +- .../classification/draem/configuration.yaml | 2 +- .../classification/padim/configuration.yaml | 2 +- .../classification/stfpm/configuration.yaml | 2 +- .../detection/draem/configuration.yaml | 2 +- .../detection/padim/configuration.yaml | 2 +- .../detection/stfpm/configuration.yaml | 2 +- .../segmentation/draem/configuration.yaml | 2 +- .../segmentation/padim/configuration.yaml | 2 +- .../segmentation/stfpm/configuration.yaml | 2 +- src/otx/algorithms/anomaly/tasks/openvino.py | 11 +- .../adapters/mmcls/datasets/otx_datasets.py | 2 +- .../classification/configs/configuration.yaml | 4 +- .../selfsl/hparam.yaml | 2 +- .../selfsl/hparam.yaml | 2 +- .../selfsl/hparam.yaml | 2 +- .../selfsl/hparam.yaml | 2 +- .../selfsl/hparam.yaml | 2 +- .../common/configs/training_base.py | 4 +- .../mmdet/models/detectors/mean_teacher.py | 37 +- .../detection/adapters/mmdet/task.py | 2 + .../configs/detection/configuration.yaml | 4 +- .../instance_segmentation/configuration.yaml | 4 +- .../convnext_maskrcnn/compression_config.json | 5 + .../compression_config.json | 5 + .../maskrcnn_swin_t/compression_config.json | 5 + .../resnet50_maskrcnn/compression_config.json | 5 + .../rotated_detection/configuration.yaml | 4 +- .../configs/base/configuration.py | 4 +- .../segmentation/configs/configuration.yaml | 4 +- .../openvino/model_wrappers/__init__.py | 2 +- .../model_wrappers/openvino_models.py | 19 +- .../pytorch_lightning/callbacks/inference.py | 25 +- .../pytorch_lightning/datasets/dataset.py | 38 +- .../datasets/pipelines/sam_transforms.py | 43 +- .../datasets/pipelines/transforms.py | 9 +- 
.../visual_prompters/segment_anything.py | 51 +- .../zero_shot_segment_anything.py | 519 +++++++++++------- .../configs/base/configuration.py | 30 + .../configs/configuration.yaml | 2 +- .../zero_shot_sam_tiny_vit/config.yaml | 2 + .../zero_shot_sam_tiny_vit/configuration.yaml | 2 +- .../template_experimental.yaml | 6 +- .../visual_prompting/tasks/inference.py | 198 ++++++- .../visual_prompting/tasks/openvino.py | 471 ++++++++++++++-- .../exportable_code/demo/requirements.txt | 6 +- .../prediction_to_annotation_converter.py | 4 +- src/otx/cli/tools/train.py | 10 +- src/otx/cli/utils/experiment.py | 19 +- src/otx/cli/utils/hpo.py | 47 +- src/otx/cli/utils/io.py | 2 + .../core/data/adapter/base_dataset_adapter.py | 13 +- .../data/adapter/detection_dataset_adapter.py | 7 +- .../visual_prompting_dataset_adapter.py | 2 +- tests/conftest.py | 16 + .../compressed_model.yml | 6 +- .../compressed_model.yml | 6 +- .../compressed_model.yml | 8 +- .../compressed_model.yml | 8 +- .../compressed_model.yml | 2 +- .../compressed_decoder.yml | 3 + .../compressed_image_encoder.yml | 3 + .../compressed_prompt_getter.yml | 3 + .../visual_prompting/test_visual_prompting.py | 12 +- .../cli/visual_prompting/test_zero_shot.py | 127 +++++ .../visual_prompting/test_visual_prompting.py | 10 +- .../cli/visual_prompting/test_zero_shot.py | 37 +- tests/perf/__init__.py | 4 + tests/perf/benchmark-reference.csv | 198 +++++++ tests/perf/benchmark.py | 210 +++++++ tests/perf/conftest.py | 309 +++++++++++ tests/perf/test_anomaly.py | 281 ++++++++++ tests/perf/test_classification.py | 336 ++++++++++++ tests/perf/test_detection.py | 122 ++++ tests/perf/test_instance_segmentation.py | 237 ++++++++ tests/perf/test_semantic_segmentation.py | 125 +++++ tests/test_suite/run_test_command.py | 41 +- .../adapters/mmdet/nncf/test_task.py | 38 +- .../datasets/pipelines/test_transforms.py | 8 +- .../model_wrappers/test_openvino_models.py | 11 + .../callbacks/test_inference_callback.py | 15 +- .../datasets/pipelines/test_sam_transforms.py | 31 +- .../datasets/pipelines/test_transforms.py | 9 +- .../datasets/test_dataset.py | 10 +- .../visual_prompters/test_segment_anything.py | 18 +- .../test_zero_shot_segment_anything.py | 198 ++++--- .../visual_prompting/tasks/test_inference.py | 90 +++ .../visual_prompting/tasks/test_openvino.py | 323 ++++++++++- .../visual_prompting/test_helpers.py | 11 +- tests/unit/cli/utils/test_experiment.py | 21 +- tests/unit/cli/utils/test_hpo.py | 14 + tools/README.md | 46 +- tools/experiment.py | 434 +++++++++++---- tox.ini | 6 + 124 files changed, 4978 insertions(+), 926 deletions(-) create mode 100644 .github/workflows/perf-accuracy.yml create mode 100644 .github/workflows/perf-speed.yml create mode 100644 .github/workflows/run_tests_in_tox_custom.yml create mode 100644 .github/workflows/scorecard.yml create mode 100644 docs/source/guide/explanation/algorithms/visual_prompting/fine_tuning.rst create mode 100644 docs/source/guide/explanation/algorithms/visual_prompting/zero_shot.rst create mode 100644 docs/utils/images/vpm_ref_prediction.png create mode 100644 docs/utils/images/vpm_ref_result.png create mode 100644 docs/utils/images/vpm_tgt_prediction.png create mode 100644 requirements/publish.txt create mode 100644 tests/e2e/cli/visual_prompting/reference/Zero_Shot_SAM_Tiny_ViT/compressed_decoder.yml create mode 100644 tests/e2e/cli/visual_prompting/reference/Zero_Shot_SAM_Tiny_ViT/compressed_image_encoder.yml create mode 100644 
tests/e2e/cli/visual_prompting/reference/Zero_Shot_SAM_Tiny_ViT/compressed_prompt_getter.yml create mode 100644 tests/e2e/cli/visual_prompting/test_zero_shot.py create mode 100644 tests/perf/__init__.py create mode 100644 tests/perf/benchmark-reference.csv create mode 100644 tests/perf/benchmark.py create mode 100644 tests/perf/conftest.py create mode 100644 tests/perf/test_anomaly.py create mode 100644 tests/perf/test_classification.py create mode 100644 tests/perf/test_detection.py create mode 100644 tests/perf/test_instance_segmentation.py create mode 100644 tests/perf/test_semantic_segmentation.py diff --git a/.ci/Dockerfile b/.ci/Dockerfile index 03912d478a5..cb4dd924d5d 100644 --- a/.ci/Dockerfile +++ b/.ci/Dockerfile @@ -3,7 +3,7 @@ ######################################################### ARG ver_cuda="11.7.1" -FROM nvidia/cuda:${ver_cuda}-devel-ubuntu20.04 AS python_base_cuda +FROM nvidia/cuda:${ver_cuda}-devel-ubuntu20.04@sha256:f663a1cf01a46daa469c75bf246ac00098bd5179aff2c75367c44f475cd4c8f4 AS python_base_cuda LABEL maintainer="OpenVINO Training Extensions Development Team" ARG HTTP_PROXY diff --git a/.github/workflows/code_scan.yml b/.github/workflows/code_scan.yml index cbf35ef22dc..d644c7d5751 100644 --- a/.github/workflows/code_scan.yml +++ b/.github/workflows/code_scan.yml @@ -6,24 +6,27 @@ on: # every UTC 6PM from Mon to Fri - cron: "0 18 * * 1-5" +# Declare default permissions as read only. +permissions: read-all + jobs: Trivy-scan: runs-on: ubuntu-20.04 steps: - name: Checkout code - uses: actions/checkout@v3 + uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # v3.6.0 - name: Set up Python - uses: actions/setup-python@v4 + uses: actions/setup-python@65d7f2d534ac1bc67fcd62888c5f4f3d2cb2b236 # v4.7.1 with: python-version: "3.10" - name: Install dependencies - run: python -m pip install tox + run: python -m pip install tox==4.21.1 - name: Trivy Scanning env: TRIVY_DOWNLOAD_URL: ${{ vars.TRIVY_DOWNLOAD_URL }} run: tox -vv -e trivy-scan - name: Upload Trivy results artifact - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@a8a3f3ad30e3422c9c7b888a15615d19a852ae32 # v3.1.3 with: name: trivy-results path: | @@ -34,17 +37,17 @@ jobs: runs-on: ubuntu-20.04 steps: - name: Checkout repository - uses: actions/checkout@v3 + uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # v3.6.0 - name: Set up Python - uses: actions/setup-python@v4 + uses: actions/setup-python@65d7f2d534ac1bc67fcd62888c5f4f3d2cb2b236 # v4.7.1 with: python-version: "3.10" - name: Install dependencies - run: python -m pip install tox + run: python -m pip install tox==4.21.1 - name: Bandit Scanning run: tox -e bandit-scan - name: Upload Bandit artifact - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@a8a3f3ad30e3422c9c7b888a15615d19a852ae32 # v3.1.3 with: name: bandit-report path: .tox/bandit-report.txt diff --git a/.github/workflows/daily.yml b/.github/workflows/daily.yml index 0884c580aed..2acb496760a 100644 --- a/.github/workflows/daily.yml +++ b/.github/workflows/daily.yml @@ -6,6 +6,9 @@ on: # every UTC 7PM from Mon to Fri - cron: "0 19 * * 1-5" +# Declare default permissions as read only. +permissions: read-all + jobs: E2E-tests: strategy: diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml index ac2b09b7def..d94d0b738ae 100644 --- a/.github/workflows/docs.yml +++ b/.github/workflows/docs.yml @@ -5,16 +5,19 @@ on: branches: - develop +# Declare default permissions as read only. 
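+# (Note: read-all grants read-only access to every permission scope; jobs that need write
+# access re-declare it at the job level, as some workflows below do with `pages: write`.)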
+permissions: read-all + jobs: Build-Docs: runs-on: ubuntu-20.04 permissions: - contents: write + pages: write steps: - name: Checkout repository - uses: actions/checkout@v3 + uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # v3.6.0 - name: Set up Python - uses: actions/setup-python@v4 + uses: actions/setup-python@65d7f2d534ac1bc67fcd62888c5f4f3d2cb2b236 # v4.7.1 with: python-version: "3.10" - name: Install dependencies @@ -62,7 +65,7 @@ jobs: git add ./latest ${{ env.RELEASE_VERSION }} git commit -m "Update documentation" -a || true - name: Push changes - uses: ad-m/github-push-action@master + uses: ad-m/github-push-action@fcea09907c44d7a7a3331c9c04080d55d87c95fe # master with: github_token: ${{ secrets.GITHUB_TOKEN }} branch: gh-pages diff --git a/.github/workflows/docs_stable.yml b/.github/workflows/docs_stable.yml index f4c11be074b..1a6c5e58733 100644 --- a/.github/workflows/docs_stable.yml +++ b/.github/workflows/docs_stable.yml @@ -4,18 +4,21 @@ on: release: types: [published] +# Declare default permissions as read only. +permissions: read-all + jobs: Build-Docs: runs-on: ubuntu-20.04 permissions: - contents: write + pages: write steps: - name: Checkout repository - uses: actions/checkout@v3 + uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # v3.6.0 with: fetch-depth: 0 # otherwise, you will failed to push refs to dest repo - name: Set up Python - uses: actions/setup-python@v4 + uses: actions/setup-python@65d7f2d534ac1bc67fcd62888c5f4f3d2cb2b236 # v4.7.1 with: python-version: "3.10" - name: Install dependencies @@ -62,7 +65,7 @@ jobs: git add ./stable ${{ env.RELEASE_VERSION }} git commit -m "Update documentation" -a || true - name: Push changes - uses: ad-m/github-push-action@master + uses: ad-m/github-push-action@fcea09907c44d7a7a3331c9c04080d55d87c95fe # master with: github_token: ${{ secrets.GITHUB_TOKEN }} branch: gh-pages diff --git a/.github/workflows/labeler.yml b/.github/workflows/labeler.yml index 356bbb91112..93e119c9336 100644 --- a/.github/workflows/labeler.yml +++ b/.github/workflows/labeler.yml @@ -2,6 +2,9 @@ name: "Pull Request Labeler" on: - pull_request_target +# Declare default permissions as read only. +permissions: read-all + jobs: triage: permissions: @@ -9,6 +12,6 @@ jobs: pull-requests: write runs-on: ubuntu-latest steps: - - uses: actions/labeler@v4 + - uses: actions/labeler@ac9175f8a1f3625fd0d4fb234536d26811351594 # v4.3.0 with: repo-token: "${{ secrets.GITHUB_TOKEN }}" diff --git a/.github/workflows/perf-accuracy.yml b/.github/workflows/perf-accuracy.yml new file mode 100644 index 00000000000..1318403c3be --- /dev/null +++ b/.github/workflows/perf-accuracy.yml @@ -0,0 +1,76 @@ +name: Performance-Accuracy Benchmark Test + +on: + workflow_dispatch: # run on request (no need for PR) + inputs: + model-type: + type: choice + description: Model type to run benchmark + options: + - default # speed, balance, accuracy models only + - all # default + other models + default: default + data-size: + type: choice + description: Dataset size to run benchmark + options: + - small + - medium + - large + - all + default: all + num-repeat: + description: Overrides default per-data-size number of repeat setting + default: 0 + num-epoch: + description: Overrides default per-model number of epoch setting + default: 0 + eval-upto: + type: choice + description: The last operation to evaluate. 'optimize' means all. + options: + - train + - export + - optimize + default: optimize + +# Declare default permissions as read only. 
+permissions: read-all + +jobs: + Perf-Accuracy-Benchmark: + strategy: + fail-fast: false + matrix: + include: + - toxenv_task: "iseg" + task: "instance_segmentation" + - toxenv_task: "seg" + task: "semantic_segmentation" + - toxenv_task: "det" + task: "detection" + - toxenv_task: "ano" + task: "anomaly" + - toxenv_task: "cls" + task: "classification" + name: Perf-Accuracy-Benchmark-${{ matrix.toxenv_task }}-py310 + uses: ./.github/workflows/run_tests_in_tox.yml + with: + python-version: "3.10" + toxenv-pyver: "py310" + toxenv-task: ${{ matrix.toxenv_task }} + tests-dir: > + tests/perf/test_${{ matrix.task }}.py + -k accuracy + --model-type ${{ inputs.model-type }} + --data-root /home/validation/data/new/ + --data-size ${{ inputs.data-size }} + --num-repeat ${{ inputs.num-repeat }} + --num-epoch ${{ inputs.num-epoch }} + --eval-upto ${{ inputs.eval-upto }} + --summary-csv .tox/perf-accuracy-benchmark-${{ matrix.toxenv_task }}.csv + runs-on: "['self-hosted', 'Linux', 'X64', 'dmount']" + task: ${{ matrix.task }} + timeout-minutes: 8640 + upload-artifact: true + artifact-prefix: perf-accuracy-benchmark diff --git a/.github/workflows/perf-speed.yml b/.github/workflows/perf-speed.yml new file mode 100644 index 00000000000..3e33a782c2b --- /dev/null +++ b/.github/workflows/perf-speed.yml @@ -0,0 +1,62 @@ +name: Performance-Speed Benchmark Test + +on: + workflow_dispatch: # run on request (no need for PR) + inputs: + model-type: + type: choice + description: Model type to run benchmark + options: + - default # speed, balance, accuracy models only + - all # default + other models + default: default + data-size: + type: choice + description: Dataset size to run benchmark + options: + - small + - medium + - large + - all + default: medium + num-repeat: + description: Overrides default per-data-size number of repeat setting + default: 1 + num-epoch: + description: Overrides default per-model number of epoch setting + default: 3 + eval-upto: + type: choice + description: The last operation to evaluate. 'optimize' means all. + options: + - train + - export + - optimize + default: optimize + +# Declare default permissions as read only. +permissions: read-all + +jobs: + Perf-Speed-Benchmark: + name: Perf-Speed-Benchmark-all-py310 + uses: ./.github/workflows/run_tests_in_tox.yml + with: + python-version: "3.10" + toxenv-pyver: "py310" + toxenv-task: all + tests-dir: > + tests/perf/ + -k speed + --model-type ${{ inputs.model-type }} + --data-root /home/validation/data/new/ + --data-size ${{ inputs.data-size }} + --num-repeat ${{ inputs.num-repeat }} + --num-epoch ${{ inputs.num-epoch }} + --eval-upto ${{ inputs.eval-upto }} + --summary-csv .tox/perf-speed-benchmark-all.csv + runs-on: "['self-hosted', 'Linux', 'X64', 'dmount']" + task: all + timeout-minutes: 8640 + upload-artifact: true + artifact-prefix: perf-speed-benchmark diff --git a/.github/workflows/pre_merge.yml b/.github/workflows/pre_merge.yml index 142b3b38781..fd2cbddbe12 100644 --- a/.github/workflows/pre_merge.yml +++ b/.github/workflows/pre_merge.yml @@ -12,6 +12,9 @@ on: - synchronize workflow_dispatch: # run on request (no need for PR) +# Declare default permissions as read only. 
+permissions: read-all + jobs: Code-Quality-Checks: # This is what will cancel the job concurrency @@ -21,25 +24,24 @@ jobs: runs-on: ubuntu-20.04 steps: - name: Checkout repository - uses: actions/checkout@v3 + uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # v3.6.0 - name: Set up Python - uses: actions/setup-python@v4 + uses: actions/setup-python@65d7f2d534ac1bc67fcd62888c5f4f3d2cb2b236 # v4.7.1 with: python-version: "3.10" - name: Install dependencies - run: python -m pip install -r requirements/dev.txt + run: | + pip install pip-tools==7.3.0 + pip-compile --generate-hashes -o /tmp/otx-dev-requirements.txt requirements/dev.txt + pip install --require-hashes --no-deps -r /tmp/otx-dev-requirements.txt - name: Code quality checks run: tox -vv -e pre-commit-all-py310-pt1 Unit-Test: - runs-on: [self-hosted, linux, x64, dev] needs: Code-Quality-Checks - timeout-minutes: 120 strategy: fail-fast: false matrix: include: - - python-version: "3.8" - tox-env: "py38" - python-version: "3.9" tox-env: "py39" - python-version: "3.10" @@ -49,20 +51,41 @@ jobs: concurrency: group: ${{ github.workflow }}-Unit-${{ github.event.pull_request.number || github.ref }}-${{ matrix.tox-env }} cancel-in-progress: true + uses: ./.github/workflows/run_tests_in_tox_custom.yml + with: + python-version: ${{ matrix.python-version }} + toxenv-pyver: ${{ matrix.tox-env }} + toxenv-task: all + tests-dir: tests/unit + timeout-minutes: 120 + upload-artifact: true + artifact-prefix: "unit-test-results" + runs-on: "['otx-gpu-v100-1']" + Coverage-Test: + needs: Code-Quality-Checks + concurrency: + group: ${{ github.workflow }}-Coverage-${{ github.event.pull_request.number || github.ref }} + cancel-in-progress: true + runs-on: ["otx-gpu-v100-1"] + container: + image: 219678651685.dkr.ecr.eu-central-1.amazonaws.com/ote-ci:pr-194-1334bef936df3b06d28bc3d1bd90e7248359b99e + options: --runtime=nvidia --env-file=/home/runner/.nvidia.env --shm-size=24g steps: - name: Checkout repository - uses: actions/checkout@v3 + uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # v3.6.0 - name: Install Python - uses: actions/setup-python@v4 + uses: actions/setup-python@65d7f2d534ac1bc67fcd62888c5f4f3d2cb2b236 # v4.7.1 with: - python-version: ${{ matrix.python-version }} + python-version: "3.8" - name: Install dependencies - run: python -m pip install -r requirements/dev.txt + run: | + pip install pip-tools==7.3.0 + pip-compile --generate-hashes -o /tmp/otx-dev-requirements.txt requirements/dev.txt + pip install --require-hashes --no-deps -r /tmp/otx-dev-requirements.txt - name: Run unit test - run: tox -vv -e unittest-all-${{ matrix.tox-env }}-pt1 + run: tox -vv -e unittest-all-py38-pt1 - name: Upload coverage artifact - if: ${{ matrix.python-version == '3.8' }} - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@a8a3f3ad30e3422c9c7b888a15615d19a852ae32 # v3.1.3 with: name: coverage path: .tox/coverage.xml @@ -85,7 +108,7 @@ jobs: chmod +x codecov ./codecov -t ${{ secrets.CODECOV_TOKEN }} --sha $COMMIT_ID -U $HTTP_PROXY -f .tox/coverage.xml -F ${{ matrix.tox-env }} Integration-Test: - needs: Unit-Test + needs: [Unit-Test, Coverage-Test] strategy: fail-fast: false matrix: diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml index 3885e3ec9cf..776b9507352 100644 --- a/.github/workflows/publish.yml +++ b/.github/workflows/publish.yml @@ -5,16 +5,19 @@ on: release: types: [published] +# Declare default permissions as read only.
+permissions: read-all + jobs: build_wheels: name: Build wheels runs-on: ubuntu-latest steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # v3.6.0 - name: Build wheels - uses: pypa/cibuildwheel@v2.13.1 - - uses: actions/upload-artifact@v3 + uses: pypa/cibuildwheel@0ecddd92b62987d7a2ae8911f4bb8ec9e2e4496a # v2.13.1 + - uses: actions/upload-artifact@a8a3f3ad30e3422c9c7b888a15615d19a852ae32 # v3.1.3 with: path: ./wheelhouse/*.whl @@ -23,16 +26,19 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # v3.6.0 - name: Set up Python 3.10 - uses: actions/setup-python@v3 + uses: actions/setup-python@65d7f2d534ac1bc67fcd62888c5f4f3d2cb2b236 # v4.7.1 with: python-version: "3.10" - name: Install pypa/build - run: python -m pip install build + run: | + pip install pip-tools==7.3.0 + pip-compile --generate-hashes -o /tmp/otx-publish-requirements.txt requirements/publish.txt + pip install --require-hashes --no-deps -r /tmp/otx-publish-requirements.txt - name: Build sdist run: python -m build --sdist - - uses: actions/upload-artifact@v3 + - uses: actions/upload-artifact@a8a3f3ad30e3422c9c7b888a15615d19a852ae32 # v3.1.3 with: path: dist/*.tar.gz @@ -41,10 +47,11 @@ jobs: needs: [build_wheels, build_sdist] environment: pypi runs-on: ubuntu-latest - permissions: write-all + permissions: + packages: write steps: - name: Download artifacts - uses: actions/download-artifact@v3 + uses: actions/download-artifact@9bc31d5ccc31df68ecc42ccf4149144866c47d8a # v3.0.2 with: # unpacks default artifact into dist/ # if `name: artifact` is omitted, the action will create extra parent dir @@ -53,13 +60,13 @@ jobs: # to determine where to publish the source distribution to PyPI or TestPyPI - name: Check tag id: check-tag - uses: actions-ecosystem/action-regex-match@v2 + uses: actions-ecosystem/action-regex-match@9e6c4fb3d5e898f505be7a1fb6e7b0a278f6665b # v2.0.2 with: text: ${{ github.ref }} regex: '^refs/tags/[0-9]+\.[0-9]+\.[0-9]+(\.[0-9]+rc[0-9]+|rc[0-9]+)?$' - name: Upload package distributions to github if: ${{ steps.check-tag.outputs.match != '' }} - uses: svenstaro/upload-release-action@v2 + uses: svenstaro/upload-release-action@1beeb572c19a9242f4361f4cee78f8e0d9aec5df # v2 with: repo_token: ${{ secrets.GITHUB_TOKEN }} file: dist/* @@ -68,13 +75,13 @@ jobs: file_glob: true - name: Publish package distributions to PyPI if: ${{ steps.check-tag.outputs.match != '' }} - uses: pypa/gh-action-pypi-publish@v1.7.1 + uses: pypa/gh-action-pypi-publish@22b4d1f12511f2696162c08546dafbaa903448a2 # v1.7.1 with: password: ${{ secrets.PYPI_API_TOKEN }} - name: Publish package distributions to TestPyPI if: ${{ steps.check-tag.outputs.match == '' }} - uses: pypa/gh-action-pypi-publish@v1.7.1 + uses: pypa/gh-action-pypi-publish@22b4d1f12511f2696162c08546dafbaa903448a2 # v1.7.1 with: - password: ${{ secrets.TESTPYPI_API_TOKEN }} + password: ${{ secrets.TEST_PYPI_API_TOKEN }} repository-url: https://test.pypi.org/legacy/ verbose: true diff --git a/.github/workflows/publish_internal.yml b/.github/workflows/publish_internal.yml index 800cc2c60ac..802ba7b10a5 100644 --- a/.github/workflows/publish_internal.yml +++ b/.github/workflows/publish_internal.yml @@ -3,16 +3,19 @@ name: Build and upload to internal PyPI on: workflow_dispatch: # run on request (no need for PR) +# Declare default permissions as read only. 
+permissions: read-all + jobs: build_wheels: name: Build wheels runs-on: ubuntu-latest steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # v3.6.0 - name: Build wheels - uses: pypa/cibuildwheel@v2.13.1 - - uses: actions/upload-artifact@v3 + uses: pypa/cibuildwheel@0ecddd92b62987d7a2ae8911f4bb8ec9e2e4496a # v2.13.1 + - uses: actions/upload-artifact@a8a3f3ad30e3422c9c7b888a15615d19a852ae32 # v3.1.3 with: path: ./wheelhouse/*.whl @@ -21,16 +24,19 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # v3.6.0 - name: Set up Python 3.10 - uses: actions/setup-python@v3 + uses: actions/setup-python@65d7f2d534ac1bc67fcd62888c5f4f3d2cb2b236 # v4.7.1 with: python-version: "3.10" - name: Install pypa/build - run: python -m pip install build + run: | + pip install pip-tools==7.3.0 + pip-compile --generate-hashes -o /tmp/otx-publish-requirements.txt requirements/publish.txt + pip install --require-hashes --no-deps -r /tmp/otx-publish-requirements.txt - name: Build sdist run: python -m build --sdist - - uses: actions/upload-artifact@v3 + - uses: actions/upload-artifact@a8a3f3ad30e3422c9c7b888a15615d19a852ae32 # v3.1.3 with: path: dist/*.tar.gz @@ -39,16 +45,22 @@ jobs: needs: [build_wheels, build_sdist] environment: pypi runs-on: [self-hosted, linux, x64, dev] - permissions: write-all + permissions: + packages: write steps: + - name: Checkout + uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # v3.6.0 - name: Set up Python - uses: actions/setup-python@v4 + uses: actions/setup-python@65d7f2d534ac1bc67fcd62888c5f4f3d2cb2b236 # v4.7.1 with: python-version: "3.10" - name: Install dependencies - run: python -m pip install twine + run: | + pip install pip-tools==7.3.0 + pip-compile --generate-hashes -o /tmp/otx-publish-requirements.txt requirements/publish.txt + pip install --require-hashes --no-deps -r /tmp/otx-publish-requirements.txt - name: Download artifacts - uses: actions/download-artifact@v3 + uses: actions/download-artifact@9bc31d5ccc31df68ecc42ccf4149144866c47d8a # v3.0.2 with: # unpacks default artifact into dist/ # if `name: artifact` is omitted, the action will create extra parent dir @@ -56,7 +68,7 @@ jobs: path: dist - name: Check tag id: check-tag - uses: actions-ecosystem/action-regex-match@v2 + uses: actions-ecosystem/action-regex-match@9e6c4fb3d5e898f505be7a1fb6e7b0a278f6665b # v2.0.2 with: text: ${{ github.ref }} regex: '^refs/heads/releases/[0-9]+\.[0-9]+\.[0-9]+(\.[0-9]+rc[0-9]+|rc[0-9]+)?$' @@ -72,7 +84,7 @@ jobs: if: ${{ steps.check-tag.outputs.match == '' }} run: | export REPOSITORY_URL=https://test.pypi.org/legacy/ - twine upload --verbose --repository-url $REPOSITORY_URL dist/* -u __token__ -p ${{ secrets.TESTPYPI_API_TOKEN }} + twine upload --verbose --repository-url $REPOSITORY_URL dist/* -u __token__ -p ${{ secrets.TEST_PYPI_API_TOKEN }} - name: Clean up dist if: ${{ always() }} run: | diff --git a/.github/workflows/run_tests_in_tox.yml b/.github/workflows/run_tests_in_tox.yml index fac265aede3..1adc0c2c641 100644 --- a/.github/workflows/run_tests_in_tox.yml +++ b/.github/workflows/run_tests_in_tox.yml @@ -33,6 +33,10 @@ on: toxenv-ptver: type: string default: "pt1" + +# Declare default permissions as read only. 
+permissions: read-all + jobs: run_tests_in_tox: # tricky workaround to pass list from the string input type @@ -41,21 +45,30 @@ jobs: timeout-minutes: ${{ inputs.timeout-minutes }} steps: - name: Checkout repository - uses: actions/checkout@v3 + uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # v3.6.0 - name: Set up Python - uses: actions/setup-python@v4 + uses: actions/setup-python@65d7f2d534ac1bc67fcd62888c5f4f3d2cb2b236 # v4.7.1 with: python-version: ${{ inputs.python-version }} - name: Install dependencies - run: python -m pip install -r requirements/dev.txt + run: | + pip install pip-tools==7.3.0 + pip-compile --generate-hashes -o /tmp/otx-dev-requirements.txt requirements/dev.txt + pip install --require-hashes --no-deps -r /tmp/otx-dev-requirements.txt - name: Run Tests + env: + MLFLOW_TRACKING_SERVER_URI: ${{ vars.MLFLOW_TRACKING_SERVER_URI }} + BENCHMARK_RESULTS_CLEAR: ${{ vars.BENCHMARK_RESULTS_CLEAR }} + GH_CTX_REF_NAME: ${{ github.ref_name }} + GH_CTX_SHA: ${{ github.sha }} run: tox -vv -e tests-${{ inputs.toxenv-task }}-${{ inputs.toxenv-pyver }}-${{ inputs.toxenv-ptver }} -- ${{ inputs.tests-dir }} - name: Upload test results - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@a8a3f3ad30e3422c9c7b888a15615d19a852ae32 # v3.1.3 with: name: ${{ inputs.artifact-prefix }}-${{ inputs.toxenv-task }}-${{ inputs.toxenv-pyver }}-${{ inputs.toxenv-ptver }} path: | .tox/tests-${{ inputs.toxenv-task }}-${{ inputs.toxenv-pyver }}-${{ inputs.toxenv-ptver }}.csv .tox/tests-reg_${{ inputs.task }}*.csv + .tox/perf-*.csv # Use always() to always run this step to publish test results when there are test failures if: ${{ inputs.upload-artifact && always() }} diff --git a/.github/workflows/run_tests_in_tox_custom.yml b/.github/workflows/run_tests_in_tox_custom.yml new file mode 100644 index 00000000000..8bb28ade61a --- /dev/null +++ b/.github/workflows/run_tests_in_tox_custom.yml @@ -0,0 +1,80 @@ +on: + workflow_call: + inputs: + python-version: + type: string + default: "3.10" + toxenv-pyver: + description: "[py38, py39, py310]" + type: string + default: "py310" + toxenv-task: + description: "[all, act, ano, cls, det, seg, iseg]" + type: string + default: "all" + tests-dir: + type: string + default: "" + timeout-minutes: + type: number + default: 720 + upload-artifact: + type: boolean + default: false + runs-on: + type: string + default: "['self-hosted', 'Linux', 'X64', 'dev']" + task: + type: string + default: "undefined" + artifact-prefix: + type: string + default: "test-results" + toxenv-ptver: + type: string + default: "pt1" + container-options: + type: string + default: "--runtime=nvidia --env-file=/home/runner/.nvidia.env --shm-size=24g" + +# Declare default permissions as read only. 
+permissions: read-all + +jobs: + run_tests_on_custom: + # tricky workaround to pass list from the string input type + # https://github.com/orgs/community/discussions/11692 + runs-on: ${{ fromJson(inputs.runs-on) }} + container: + image: 219678651685.dkr.ecr.eu-central-1.amazonaws.com/ote-ci:pr-194-1334bef936df3b06d28bc3d1bd90e7248359b99e + options: ${{ inputs.container-options }} + timeout-minutes: ${{ inputs.timeout-minutes }} + steps: + - name: Checkout repository + uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # v3.6.0 + - name: Set up Python + uses: actions/setup-python@65d7f2d534ac1bc67fcd62888c5f4f3d2cb2b236 # v4.7.1 + with: + python-version: ${{ inputs.python-version }} + - name: Install dependencies + run: | + pip install pip-tools==7.3.0 + pip-compile --generate-hashes -o /tmp/otx-dev-requirements.txt requirements/dev.txt + pip install --require-hashes --no-deps -r /tmp/otx-dev-requirements.txt + - name: Run Tests + env: + MLFLOW_TRACKING_SERVER_URI: ${{ vars.MLFLOW_TRACKING_SERVER_URI }} + BENCHMARK_RESULTS_CLEAR: ${{ vars.BENCHMARK_RESULTS_CLEAR }} + GH_CTX_REF_NAME: ${{ github.ref_name }} + GH_CTX_SHA: ${{ github.sha }} + run: tox -vv -e tests-${{ inputs.toxenv-task }}-${{ inputs.toxenv-pyver }}-${{ inputs.toxenv-ptver }} -- ${{ inputs.tests-dir }} + - name: Upload test results + uses: actions/upload-artifact@a8a3f3ad30e3422c9c7b888a15615d19a852ae32 # v3.1.3 + with: + name: ${{ inputs.artifact-prefix }}-${{ inputs.toxenv-task }}-${{ inputs.toxenv-pyver }}-${{ inputs.toxenv-ptver }} + path: | + .tox/tests-${{ inputs.toxenv-task }}-${{ inputs.toxenv-pyver }}-${{ inputs.toxenv-ptver }}.csv + .tox/tests-reg_${{ inputs.task }}*.csv + .tox/perf-*.csv + # Use always() to always run this step to publish test results when there are test failures + if: ${{ inputs.upload-artifact && always() }} diff --git a/.github/workflows/scorecard.yml b/.github/workflows/scorecard.yml new file mode 100644 index 00000000000..11effedb64f --- /dev/null +++ b/.github/workflows/scorecard.yml @@ -0,0 +1,72 @@ +# This workflow uses actions that are not certified by GitHub. They are provided +# by a third-party and are governed by separate terms of service, privacy +# policy, and support documentation. + +name: Scorecard supply-chain security +on: + # For Branch-Protection check. Only the default branch is supported. See + # https://github.com/ossf/scorecard/blob/main/docs/checks.md#branch-protection + # branch_protection_rule: + # To guarantee Maintained check is occasionally updated. See + # https://github.com/ossf/scorecard/blob/main/docs/checks.md#maintained + schedule: + - cron: "0 18 * * 6" + push: + branches: ["develop"] + +# Declare default permissions as read only. +permissions: read-all + +jobs: + analysis: + name: Scorecard analysis + runs-on: ubuntu-latest + permissions: + # Needed to upload the results to code-scanning dashboard. + security-events: write + # Needed to publish results and get a badge (see publish_results below). + id-token: write + # Uncomment the permissions below if installing in a private repository. + # contents: read + # actions: read + + steps: + - name: "Checkout code" + uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # v3.6.0 + with: + persist-credentials: false + + - name: "Run analysis" + uses: ossf/scorecard-action@e38b1902ae4f44df626f11ba0734b14fb91f8f86 # v2.1.2 + with: + results_file: results.sarif + results_format: sarif + # (Optional) "write" PAT token. 
Uncomment the `repo_token` line below if: + # - you want to enable the Branch-Protection check on a *public* repository, or + # - you are installing Scorecard on a *private* repository + # To create the PAT, follow the steps in https://github.com/ossf/scorecard-action#authentication-with-pat. + # repo_token: ${{ secrets.SCORECARD_TOKEN }} + + # Public repositories: + # - Publish results to OpenSSF REST API for easy access by consumers + # - Allows the repository to include the Scorecard badge. + # - See https://github.com/ossf/scorecard-action#publishing-results. + # For private repositories: + # - `publish_results` will always be set to `false`, regardless + # of the value entered here. + publish_results: true + + # Upload the results as artifacts (optional). Commenting out will disable uploads of run results in SARIF + # format to the repository Actions tab. + - name: "Upload artifact" + uses: actions/upload-artifact@a8a3f3ad30e3422c9c7b888a15615d19a852ae32 # v3.1.3 + with: + name: SARIF file + path: results.sarif + retention-days: 5 + + # Upload the results to GitHub's code scanning dashboard. + - name: "Upload to code-scanning" + uses: github/codeql-action/upload-sarif@dc021d495cb77b369e4d9d04a501700fd83b8c51 # v2.24.0 + with: + sarif_file: results.sarif diff --git a/.github/workflows/stale_marker.yml b/.github/workflows/stale_marker.yml index a88822f2d44..3cc85425def 100644 --- a/.github/workflows/stale_marker.yml +++ b/.github/workflows/stale_marker.yml @@ -3,11 +3,14 @@ on: schedule: - cron: "30 1 * * *" +# Declare default permissions as read only. +permissions: read-all + jobs: stale: runs-on: ubuntu-latest steps: - - uses: actions/stale@v4 + - uses: actions/stale@a20b814fb01b71def3bd6f56e7494d667ddf28da # v4.1.1 with: stale-issue-message: "This issue is stale because it has been open 90 days with no activity. Remove stale label or comment or this will be closed in 7 days." stale-pr-message: "This PR is stale because it has been open 90 days with no activity." diff --git a/.github/workflows/weekly.yml b/.github/workflows/weekly.yml index e492737431b..3badd5ab79a 100644 --- a/.github/workflows/weekly.yml +++ b/.github/workflows/weekly.yml @@ -6,6 +6,9 @@ on: # every 12AM on Sunday - cron: "0 0 * * 0" +# Declare default permissions as read only. +permissions: read-all + jobs: Regression-Tests: strategy: diff --git a/CHANGELOG.md b/CHANGELOG.md index d63284cd749..7b003c2a92c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,7 +6,12 @@ All notable changes to this project will be documented in this file. ### New features -- Add zero-shot visual prompting (https://github.com/openvinotoolkit/training_extensions/pull/2616) +- Add zero-shot visual prompting (, , ) + +### Enhancements + +- Upgrade NNCF to 2.7 and OpenVINO to 2023.2 () +- Automate performance benchmark () ## \[v1.5.0\] @@ -15,7 +20,7 @@ All notable changes to this project will be documented in this file. 
- Enable configurable confidence threshold for otx eval and export () - Add YOLOX variants as new object detector models () - Enable FeatureVectorHook to support action tasks () -- Add ONNX metadata to detection, instance segmantation, and segmentation models () +- Add ONNX metadata to detection, instance segmentation, and segmentation models () - Add a new feature to configure input size () - Introduce the OTXSampler and AdaptiveRepeatDataHook to achieve faster training at the small data regime () - Add a new object detector Lite-DINO () diff --git a/Dockerfile b/Dockerfile index 0fab59ec3d5..221af69b797 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,5 +1,5 @@ -ARG UBUNTU_VER=20.04 +ARG UBUNTU_VER=20.04@sha256:a4fab1802f08df089c4b2e0a1c8f1a06f573bd1775687d07fef4076d3a2e4900 FROM ubuntu:$UBUNTU_VER ARG PYTHON_VER=3.9 diff --git a/docs/source/guide/explanation/algorithms/visual_prompting/fine_tuning.rst b/docs/source/guide/explanation/algorithms/visual_prompting/fine_tuning.rst new file mode 100644 index 00000000000..67bc77ff4f8 --- /dev/null +++ b/docs/source/guide/explanation/algorithms/visual_prompting/fine_tuning.rst @@ -0,0 +1,113 @@ +Visual Prompting (Fine-tuning) +============================== + +Visual prompting is a computer vision task that uses a combination of an image and prompts, such as texts, bounding boxes, points, and so on, to troubleshoot problems. +Using these prompts, the main purpose of this task is to obtain labels from unlabeled datasets and to use the generated label information on particular domains or to develop a new model with it. + +This section examines the solutions for visual prompting offered by the OpenVINO Training Extensions library. +`Segment Anything (SAM) `_ is one of the most famous visual prompting methods, and this model will be used to adapt to a new dataset domain. +Because `SAM `_ was trained on a web-scale dataset and has a huge backbone network, fine-tuning the whole network is difficult and requires a lot of resources. +Therefore, in this section, we fine-tune only the mask decoder for several epochs to increase performance on the new dataset domain. +For fine-tuning `SAM `_, we use the following algorithm components: + +.. _visual_prompting_finetuning_pipeline: + +- ``Pre-processing``: Resize an image according to the longest axis and pad the rest with zero. + +- ``Optimizer``: We use the `Adam `_ optimizer. + +- ``Loss function``: We use the standard loss combination used in `SAM `_ as it is, 20 * focal loss + dice loss + IoU loss (see the sketch after this list). + +- ``Additional training techniques`` + - ``Early stopping``: To add adaptability to the training pipeline and prevent overfitting. Early stopping will be automatically applied.
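+
+As a minimal, illustrative sketch of that loss combination (not the actual OTX implementation; the tensor names and the ``gamma`` value are assumptions), it could be written as:
+
+.. code-block:: python
+
+    import torch
+    import torch.nn.functional as F
+
+    def sam_finetune_loss(mask_logits, gt_masks, pred_iou, gamma=2.0):
+        """Sketch of 20 * focal + dice + iou over a batch of (B, H, W) masks."""
+        prob = mask_logits.sigmoid()
+        # Focal loss: binary cross-entropy re-weighted towards hard pixels.
+        bce = F.binary_cross_entropy_with_logits(mask_logits, gt_masks, reduction="none")
+        p_t = prob * gt_masks + (1 - prob) * (1 - gt_masks)
+        focal = ((1 - p_t) ** gamma * bce).mean()
+        # Dice loss: one minus the soft overlap ratio with the ground truth.
+        inter = (prob * gt_masks).sum(dim=(-2, -1))
+        union = prob.sum(dim=(-2, -1)) + gt_masks.sum(dim=(-2, -1))
+        dice = (1 - (2 * inter + 1) / (union + 1)).mean()
+        # IoU loss: regress the predicted IoU score to the actual soft IoU.
+        actual_iou = inter / (union - inter + 1e-6)
+        iou = F.mse_loss(pred_iou, actual_iou)
+        return 20 * focal + dice + iou
+
+The ``20`` weight makes the focal term dominate the combination, mirroring the 20:1 focal-to-dice ratio used to train `SAM `_.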
+ +.. note:: + + Currently, only fine-tuning `SAM `_ with bounding boxes is supported in OpenVINO Training Extensions. + We will support fine-tuning with other prompts (points and texts) and continuous fine-tuning with predicted mask information in the near future. + +.. note:: + + Currently, only Post-Training Quantization (PTQ) is supported for `SAM `_, not Quantization Aware Training (QAT). + + +************** +Dataset Format +************** +.. _visual_prompting_dataset: + +For the dataset handling inside OpenVINO™ Training Extensions, we use `Dataset Management Framework (Datumaro) `_. + +We support three dataset formats for visual prompting: + +- `Common Semantic Segmentation `_ for semantic segmentation + +- `COCO `_ for instance segmentation + +- `Pascal VOC `_ for instance segmentation and semantic segmentation + + +If your dataset is organized in one of the supported formats, starting training is very simple. You just need to pass a path to the root folder and the desired model template to start training: + +.. code-block:: + + $ otx train \ + --train-data-roots \ + --val-data-roots + +.. note:: + + During training, mDice for binary masks without label information is used as the train/validation metric. + After training, if you use ``otx eval`` to evaluate performance, mDice for binary or multi-class masks with label information will be used. + As you can expect, performance will differ between ``otx train`` and ``otx eval``, but if the unlabeled mask performance is high, the labeled mask performance is high as well. + + +****** +Models +****** +.. _visual_prompting_model: + +We support the following model templates in the experimental phase: + ++--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+--------------+---------------------+-----------------+ +| Template ID | Name | Complexity (GFLOPs) | Model size (MB) | +============================================================================================================================================================================================+==============+=====================+=================+ +| `Visual_Prompting_SAM_Tiny_ViT `_ | SAM_Tiny_ViT | 38.95 | 47 | +--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+--------------+---------------------+-----------------+ +| `Visual_Prompting_SAM_ViT_B `_ | SAM_ViT_B | 483.71 | 362 | +--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+--------------+---------------------+-----------------+ + +To check the feasibility of `SAM `_, we ran experiments using three public datasets from different domains: `WGISD `_, `Trashcan `_, and `FLARE22 `_, and checked the `Dice score `_. +We used sampled training data from `Trashcan `_ and `FLARE22 `_, and the full training data (=110) from `WGISD `_. + ++---------------------------------------------------------------+--------------------+ +| Dataset | #samples | +===============================================================+====================+ +| `WGISD `_ | 110 | +---------------------------------------------------------------+--------------------+ +| `Trashcan `_ | 500 | +---------------------------------------------------------------+--------------------+ +| `FLARE22 `_ | 1 CT (=100 slices) | +---------------------------------------------------------------+--------------------+ + +The table below shows the performance improvement after fine-tuning.
+ ++------------+--------------------------------------------+---------------------------------------------------------------+---------------------------------------------------+ +| Model name | `WGISD `_ | `Trashcan `_ | `FLARE22 `_ | +============+============================================+===============================================================+===================================================+ +| Tiny_ViT | 90.32 → 92.29 (+1.97) | 82.38 → 85.01 (+2.63) | 89.69 → 93.05 (+3.36) | +------------+--------------------------------------------+---------------------------------------------------------------+---------------------------------------------------+ +| ViT_B | 92.32 → 92.46 (+0.14) | 79.61 → 81.50 (+1.89) | 91.48 → 91.68 (+0.20) | +------------+--------------------------------------------+---------------------------------------------------------------+---------------------------------------------------+ + +Depending on the dataset, the ``learning rate`` and ``batch size`` can be adjusted as below: + +.. code-block:: + + $ otx train \ + --train-data-roots \ + --val-data-roots \ + params \ + --learning_parameters.dataset.train_batch_size \ + --learning_parameters.optimizer.lr diff --git a/docs/source/guide/explanation/algorithms/visual_prompting/index.rst b/docs/source/guide/explanation/algorithms/visual_prompting/index.rst index 61e6df9c46f..c9d6abac31b 100644 --- a/docs/source/guide/explanation/algorithms/visual_prompting/index.rst +++ b/docs/source/guide/explanation/algorithms/visual_prompting/index.rst @@ -1,113 +1,9 @@ Visual Prompting -================= +================ -Visual prompting is a computer vision task that uses a combination of an image and prompts, such as texts, bounding boxes, points, and so on to troubleshoot problems. -Using these useful prompts, the main purpose of this task is to obtain labels from unlabeled datasets, and to use generated label information on particular domains or to develop a new model with the generated information. +.. toctree:: + :maxdepth: 1 -This section examines the solutions for visual prompting offered by the OpenVINO Training Extensions library. -`Segment Anything (SAM) `_, is one of the most famous visual prompting methods and this model will be used to adapt a new dataset domain. -Because `SAM `_ was trained by using web-scale dataset and has huge backbone network, fine-tuning the whole network is difficult and lots of resources are required. -Therefore, in this section, we try to fine-tune only mask decoder only for several epochs to increase performance on the new dataset domain. -For fine-tuning `SAM `_, we use following algorithms components: -.. _visual_prompting_finetuning_pipeline: - -- ``Pre-processing``: Resize an image according to the longest axis and pad the rest with zero. - -- ``Optimizer``: We use `Adam `_ optimizer. - -- ``Loss function``: We use standard loss combination, 20 * focal loss + dice loss + iou loss, used in `SAM `_ as it is. - -- ``Additional training techniques`` - - ``Early stopping``: To add adaptability to the training pipeline and prevent overfitting. Early stopping will be automatically applied. - - -.. note:: - - Currently, fine-tuning `SAM `_ with bounding boxes in the OpenVINO Training Extensions is only supported. - We will support fine-tuning with other prompts (points and texts) and continuous fine-tuning with predicted mask information in the near future. - -.. note:: - - Currently, Post-Training Quantization (PTQ) for `SAM `_ is only supported, not Quantization Aware Training (QAT).
- - -************** -Dataset Format -************** -.. _visual_prompting_dataset: - -For the dataset handling inside OpenVINO™ Training Extensions, we use `Dataset Management Framework (Datumaro) `_. - -We support three dataset formats for visual prompting: - -- `Common Semantic Segmentation `_ for semantic segmentation - -- `COCO `_ for instance segmentation - -- `Pascal VOC `_ for instance segmentation and semantic segmentation - - -If you organized supported dataset format, starting training will be very simple. We just need to pass a path to the root folder and desired model template to start training: - -.. code-block:: - - $ otx train \ - --train-data-roots \ - --val-data-roots - -.. note:: - - During training, mDice for binary mask without label information is used for train/validation metric. - After training, if using ``otx eval`` to evaluate performance, mDice for binary or multi-class masks with label information will be used. - As you can expect, performance will be different between ``otx train`` and ``otx eval``, but if unlabeled mask performance is high, labeld mask performance is high as well. - - -****** -Models -****** -.. _visual_prompting_model: - -We support the following model templates in experimental phase: - -+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+--------------+---------------------+-----------------+ -| Template ID | Name | Complexity (GFLOPs) | Model size (MB) | -+============================================================================================================================================================================================+==============+=====================+=================+ -| `Visual_Prompting_SAM_Tiny_ViT `_ | SAM_Tiny_ViT | 38.95 | 47 | -+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+--------------+---------------------+-----------------+ -| `Visual_Prompting_SAM_ViT_B `_ | SAM_ViT_B | 483.71 | 362 | -+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+--------------+---------------------+-----------------+ - -To check feasibility of `SAM `_, we did experiments using three public datasets with each other domains: `WGISD `_, `Trashcan `_, and `FLARE22 `_, and checked `Dice score `_. -We used sampled training data from `Trashcan `_ and `FLARE22 `_, and full training data (=110) from `WGISD `_. - -+---------------------------------------------------------------+--------------------+ -| Dataset | #samples | -+===============================================================+====================+ -| `WGISD `_ | 110 | -+---------------------------------------------------------------+--------------------+ -| `Trashcan `_ | 500 | -+---------------------------------------------------------------+--------------------+ -| `FLARE22 `_ | 1 CT (=100 slices) | -+---------------------------------------------------------------+--------------------+ - -The below table shows performance improvement after fine-tuning. 
- -+------------+--------------------------------------------+---------------------------------------------------------------+---------------------------------------------------+ -| Model name | `WGISD `_ | `Trashcan `_ | `FLARE22 `_ | -+============+============================================+===============================================================+===================================================+ -| Tiny_ViT | 90.32 → 92.29 (+1.97) | 82.38 → 85.01 (+2.63) | 89.69 → 93.05 (+3.36) | -+------------+--------------------------------------------+---------------------------------------------------------------+---------------------------------------------------+ -| ViT_B | 92.32 → 92.46 (+0.14) | 79.61 → 81.50 (+1.89) | 91.48 → 91.68 (+0.20) | -+------------+--------------------------------------------+---------------------------------------------------------------+---------------------------------------------------+ - -According to datasets, ``learning rate`` and ``batch size`` can be adjusted like below: - -.. code-block:: - - $ otx train \ - --train-data-roots \ - --val-data-roots \ - params \ - --learning_parameters.dataset.train_batch_size \ - --learning_parameters.optimizer.lr + fine_tuning + zero_shot diff --git a/docs/source/guide/explanation/algorithms/visual_prompting/zero_shot.rst b/docs/source/guide/explanation/algorithms/visual_prompting/zero_shot.rst new file mode 100644 index 00000000000..4923a2b73c6 --- /dev/null +++ b/docs/source/guide/explanation/algorithms/visual_prompting/zero_shot.rst @@ -0,0 +1,103 @@ +Visual Prompting (Zero-shot learning) +===================================== + +Visual prompting is a computer vision task that uses a combination of an image and prompts, such as texts, bounding boxes, points, and so on, to troubleshoot problems. +Using these prompts, the main purpose of this task is to obtain labels from unlabeled datasets and to use the generated label information on particular domains or to develop a new model with it. + +This section examines the solutions for visual prompting offered by the OpenVINO Training Extensions library. +`Segment Anything (SAM) `_ is one of the most famous visual prompting methods, and this model will be used to adapt to a new dataset domain. +In particular, in this section we automatically predict masks on given images without any training, which is called ``zero-shot learning``. +Unlike fine-tuning, zero-shot learning needs only the pre-processing component. + + +.. _visual_prompting_zeroshot_pipeline: + +- ``Pre-processing``: Resize an image according to the longest axis and pad the rest with zero (see the sketch below).
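+
+A minimal sketch of this pre-processing step (illustrative only; the function name and the 1024-pixel target size are assumptions, the latter matching SAM's usual input resolution):
+
+.. code-block:: python
+
+    import numpy as np
+
+    def resize_longest_and_pad(image: np.ndarray, target: int = 1024) -> np.ndarray:
+        """Resize so the longest side equals `target`, then zero-pad to a square."""
+        h, w = image.shape[:2]
+        scale = target / max(h, w)
+        new_h, new_w = int(round(h * scale)), int(round(w * scale))
+        # Nearest-neighbor resize via index sampling keeps the sketch dependency-free.
+        ys = (np.arange(new_h) / scale).astype(int).clip(0, h - 1)
+        xs = (np.arange(new_w) / scale).astype(int).clip(0, w - 1)
+        resized = image[ys][:, xs]
+        padded = np.zeros((target, target) + image.shape[2:], dtype=image.dtype)
+        padded[:new_h, :new_w] = resized  # "pad the rest with zero" (bottom/right)
+        return padded
+
+Prompt coordinates (e.g. bounding boxes or points) must be scaled by the same factor so they stay aligned with the resized image.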
+ + +.. note:: + + Currently, only zero-shot learning for `SAM `_ with bounding boxes is supported in OpenVINO Training Extensions. + We will support zero-shot learning with other prompts (points and texts) in the near future. + +.. note:: + + Currently, only Post-Training Quantization (PTQ) is supported for `SAM `_, not Quantization Aware Training (QAT). + + +************** +Dataset Format +************** +.. _visual_prompting_dataset: + +For the dataset handling inside OpenVINO™ Training Extensions, we use `Dataset Management Framework (Datumaro) `_. + +We support three dataset formats for visual prompting: + +- `Common Semantic Segmentation `_ for semantic segmentation + +- `COCO `_ for instance segmentation + +- `Pascal VOC `_ for instance segmentation and semantic segmentation + + +If your dataset is organized in one of the supported formats, starting training is very simple. You just need to pass a path to the root folder and the desired model template to start training: + +.. code-block:: + + $ otx train \ + --train-data-roots \ + --val-data-roots + + +****** +Models +****** +.. _visual_prompting_zero_shot_model: + +We support the following model templates in the experimental phase: + ++-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+------------------------+---------------------+-----------------+ +| Template ID | Name | Complexity (GFLOPs) | Model size (MB) | +===============================================================================================================================================================================================+========================+=====================+=================+ +| `Zero_Shot_SAM_Tiny_ViT `_ | Zero_Shot_SAM_Tiny_ViT | 38.18 | 25 | +-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+------------------------+---------------------+-----------------+ + +*************** +Simple tutorial +*************** +.. _visual_prompting_zero_shot_tutorial: + +There are two steps for zero-shot inference: ``learn`` and ``infer``. +``Learn`` extracts reference features from the given reference images and prompts. These reference features are then used to get point candidates on given target images. +The extracted reference features are saved in the model checkpoint (such as `weight.pth`) together with the model. +You can run ``learn`` with the following command: + +.. code-block:: + + $ otx train \ + --train-data-roots \ + --val-data-roots + +``Infer`` predicts masks on given target images. Unlike ``learn``, this stage doesn't need any prompt information. + +.. code-block:: + + $ otx eval \ + --load-weights \ + --test-data-roots
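+
+Conceptually, ``learn`` and ``infer`` can be sketched as below. This is a simplified illustration, not the actual OTX code; the encoder handle, mask pooling, and similarity logic are assumptions based on the description above:
+
+.. code-block:: python
+
+    import torch
+    import torch.nn.functional as F
+
+    @torch.no_grad()
+    def learn(image_encoder, ref_image, ref_mask):
+        """Mask-pool the reference image embedding into one reference feature."""
+        feats = image_encoder(ref_image)                          # (1, C, H, W)
+        mask = F.interpolate(ref_mask, size=feats.shape[-2:])     # align mask to feature grid
+        ref_feat = (feats * mask).sum(dim=(-2, -1)) / mask.sum()  # (1, C) masked average
+        return F.normalize(ref_feat, dim=-1)                      # stored in the checkpoint
+
+    @torch.no_grad()
+    def get_point_candidate(image_encoder, ref_feat, target_image):
+        """Pick the target location most similar to the reference as a positive point."""
+        feats = F.normalize(image_encoder(target_image), dim=1)   # (1, C, H, W)
+        sim = torch.einsum("c,chw->hw", ref_feat[0], feats[0])    # cosine-similarity map
+        idx = int(sim.flatten().argmax())
+        y, x = divmod(idx, sim.shape[-1])
+        return (x, y)  # used as a point prompt for the mask decoder
+
+Because the point candidates come from this similarity search rather than from the user, ``infer`` needs no manual prompt at all.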
+ + +For example, when the positive (green) and negative (red) points are given with the reference image at the ``learn`` stage, you get the basic `SAM `_ prediction result (left). +If you give the same reference image as the target image at the ``infer`` stage, you get the target prediction results (right). + +.. list-table:: + + * - .. figure:: ../../../../../utils/images/vpm_ref_result.png + + - .. figure:: ../../../../../utils/images/vpm_ref_prediction.png + + +You can get target prediction results for other given images, as below. + +.. image:: ../../../../../utils/images/vpm_tgt_prediction.png diff --git a/docs/utils/images/vpm_ref_prediction.png b/docs/utils/images/vpm_ref_prediction.png new file mode 100644 index 0000000000000000000000000000000000000000..04d6bfa008d312dc9237d0af485c18961ee4682d GIT binary patch literal 198185 [base85-encoded PNG payload omitted]
zgy!W(Jk=ouqwUHs2_i1Z?f_!_UzEQws0}PAl7aee?C;o!y{*I8o$RoxtR2w~Y^`r4 zNLmPz7R=QtUQ6de39PYkG)SUMlU|jbuU_Rrk|{-a^cWaivRsg$@-V0f94>U#UuTEryhCzWQ)k3qu8t1o3p-`2~ zs}U`I)9b|`q2u*HV*O486N1D6h5JmIC}%==3M9OLF@?J{YVb<+9Pe1D<|_2VT-SPG zt_N@()ycJ{Of)vvS#N@@+i+^#Jya$Nby1n@cocW;eF8Ubyc@^VFM`f=Kb@)z@o1u~ zV>YCUDqJ+|yb|qJsdb5}xth3C6_+Zy)R#oL6jggUc&KX2Rc^VkR_O)mgqcQU?I`ba zyY)G#`a30ZE;JlC7F3Eew_b0Z4F!@o9cqFO)`7&S5Ho_L;-V0S19Lbqa2m(QZ^EI0 zGgKYQg?M9LDgVLeXg6W9qw^THv>vii9mYfJF;pw3;`cL9;#FZQITN)Q`)mZ+?gBSp zq6=LS9;QUn=;yMZ`wb+@iI@^m7)!utrV>dJAY9K;V%=Z5u$J}<3aZF*$#a$Rs`9EC z&8t;?C74KgsO(og*Hx+IS7*@#5?V@%gb-<39XCq~2+QgbxgvymLZs@ldUSLpar13; zc!w(C%hwC=^Vf^;?Q2C;4taRz`2swAUjfFp5I%%PcnRO95ZL+EsLD*@z`kny@QHjE zwtA%;U%pz2FJE!?#mg1=?8Q>5kwTYTf2If@5iB1(k!O(k@QDJv`$!Hxd}t+(-cW^P zT|E^^0KGZ&m?({7rYeD1h0iPFm@1E8vNVFRqA-RDlEJ(%2673LwRLO&Ca`0w1~1>8 zh4&wmQ5g{=ZxSTh_S|i&arTxB^b802->XCLyA=o#y0fz}+<8kS9=oFmC$0}-Y?3Ye zz&IVW6oL}){Owg9Oqgi!P?ZZ?r$gmbRNWqsY`R zWspEoQeF)Tg)W_H)Hq%pJVupaf(cbFKclzQHnfTd5|=A+`IAnsf=ufM5FYDb;vi$k z+WQ9C!1d;A@2zRT?xsQPX{4i$w_|&RY9!Ku&Ea-zQkieC73*sGC!qYTr7X7L+%-6& z2a?kB>*z>A&`7SzZCKTnz#&j*=_wF_z=w#S;Q*p=R<|>&%5s%iN~X_GOvUx+s&)(q zH{$!d9>9}lUcpnRU&dp{pT^l8H{tq;ow%xd90yWe*dA@ecx5Y^GF|U=r(uv!09v(F9pMUL?iC=}Kkdq(QT7ZZHiNQqM?O2)CJ&4$L?^qusG^a=i4eMi~ zFqgmIg}ekAi_OkplnbFESj|4svmg}Dkwu~EaRSkB~bORftjXV%|~Gjn%a z0{-;u?X){_YVKZ~SoZ*~o_hc{ZGX(>G`V^EgXU}x^jlO`nB2_{YbljL) z1PPsN^Xiyc(<+tvYOE~&H&wEGaXq?wQh4&gN__olA-;L74Bxz7iJ#HFe!T);yjqT* zyg&#%#}@w6**JM!HJY0Ej9#Xxu_G863FGC*^_~*^;>}9@^6gr)U&!9#`kQrxQZ;_| zgED;m13`vhd5K_op#+~kSAzEmlD8ku##;}qz`h+-NKtL{W<@YX^)X9$%vKU4m2~*D z>GGI?WV~1)2@xdq29m){Dkk2i-7__KmC*Q*Ao<|&9Ks|AZ$Ffc*Y3&2vv&|Cx39!i zCyLQf9il1@qpdTByYDE$JA}@=k7VP$N3-zK!&x|TT?|8mgXoefAWZryCQX?PR8G;s zYb5GfsuL-dF~Y-@>5?R{ol@AjtO3btIpb9(X zuR3zuEG(uzM*|+TRP{GVC88m8REj*zhK+_<5x^8ynjh_VUNr~+3o1iq%E1GMQz8m` zc_1-}&%+dJ z`nRMoUe*N7JsqMmwsKw{9%hQ!D}hWX%3;sL!!tT^4f$c?XL-)w!9Y;(mC1rqS+GUh zJm|R4m)`44Q}NpQwB+ZwJ|_I5a@`W}w%_GV1d{+^q2#-R$pGQn$9s`>xe^=a$}bHb zvwkMKs7#Jc-D73Hr#C!AaQJzXvj!3k_IQ#oxq6+H$^BF&kKta;wz%VdTUD!YCDq@+>MM@Dz#les_Ra z0E&ZzLS9zx>-96SJ|TjO2N)fv-t8JXUZegBD9Vv&W=#W$nG_G5j#`&=2AZXFqJAs( z_npMP-m9>=aVOSAH()%V9?wpB7&t8N;W-LA0_$~N+)E%h^`V~UTJ{zbD0Bu=BI-|| z@P!iU0*aPawo6CRV+nLB4#K2YF!7X#dZD{XztG8T7L_fr(ZR$&+W#ftnY*>@a1JEQ?9gWSn|Bf0u^#XiOMIng%^6feU zfnf60YvuTyDn->*pFB+k@zh$}byqP4*|M)>WAnCZdxN%Gr#V}hD z$GR$-U{aAFM5IUvl9DjS3&R-6ug6es$XRB9jl=}@t*gar_h#ecCv#jl?dbx%_jn%O zxIYIk5-`u)o`J{jSc&nCwWzI0VE@4?y#BbtZv}Y&$wGtLdynSQ*5ILg%CPrvD+ao! 
z&{s=nluL=QVWFxnDiaMjs*a4kbo8pW>XCXdsEn8*(flN)R0NR<O?sfZnM%}mp?I(SP0Fj5$mgB(9lvU zNaj9I$Ika7X)CEScx(=}5*kgEm<^a_t6+vUMVqW_z-Xx+<``-&(KNemzIIKwTXA8C zf84MQcj4w_@3Q_Les6SxgDn?8qDriJka%W2)#m}C`8pO%L%%B|cIEgD1jvhc>deb{ z_~?^3yX`vMFuDmR(gQfytdLv_HU*k6QmiT!Lc&uZ8s%2D01}m?y4MC00pRaIVx#tY zj^B*u@_{f9jB^08$UKnD1BKhncs~CvYd*R4T%PtM*LNns%}Y~`w~_XtkPihB?)VM zD1(UuiQuCto87d{`rHuz%;VOY>@L+9U*i}LZpGG?Lzt}JU}{5s1QhdD#g*dQJ*5K{ zqju)Ty>cpqW<$3VBpL(DLRuY!M#sDgvDezJa$Zvy%6Z5fB)To6rKRf&A~*;Vo!Raf z4Z`S;qvgP%p-$8v#Ptagc(iO;MQC^+Q79{APRN4?mr^IKf(wsHDcQb)5K*b@g+Vme zC2-64Yw_{(h4=}b`Ij%2P(f4^0s;K;hXH*1Mis$Qj;~%V!%tr*B21L)$i+{dUW-R= z%R}GBM%;8;h)^iQZ>UCoLs0yN;P@>e^1FA#Tn5d4_invy`xSvAnEaeD`G)p2f%4@m zW%%T!LVW*@5GF=CF&1sbWS{|)bq$!P<+7^LS$UF5gldFJWHgWPpc)y<4Pr1Sh=H|r zXykW2u&xe2xK9wtBlHUKF+uYF6ZupnxmLmT>>U|+@wS!Nb#)0k*EQmyTlm?}@VIA6 z3DQ!0@@z5TQ;2sT%g6hVX5;NAvT)m7RhZr0kDk~Rp9iWF%blomp_WR*&!CSgMcob5 z$N@ zlTG7;2_`-KEWt&pWQb5vm`gK2YE%Ya9S9sgSlYQ=AuuTttIQH02oixnpm8wKx&Wm2 zJF|0mKyq;M-=lM@H@j0XG7E=koUo!nXw>eC+)8{WmRX9i;uvNs8ZllP$7pfXUXPVT zF;Wske_;&mS(*eh)({j996mg{UHPv6+SG`9?No@CULJ(- zuuvA4HpjXR|L#mqQ!3}^=xIE1)zf(Vs%LS}uG{ea$sIV|J&YqQ9oU;lQH?ZWmJsaC z*Kp!&(fe?a86ac=2o-?A#HkYP_d#MwcI}swH}gRv7)Uk9**h5g_aKpR-+u!V>%_P; zWv>4sQwR&#kXVXB_{J-ja(+_8&Fg!jd&xjLY^Ow=>ImC9sO%Q1t= z5hdU?l!}G99;7n4*TIA!(Yz)Kd!3|WQOCs_w?Brv4m?Ge+=Js|*AO1+H{tqFC|}}c zUp$QI@J47@cR!4^01~|>KrSX^Or5y(aw?dT?@?haQ{!Qr&k8}r?FvEJ{3A3C*4U8Q z!G=egFjTyEgb(mA)t9CZ(4K3@bXfnp#bC^S?`M5U~YKq5HVyed>95NXf3>c#h|G;6rxHK5MBA*T+#~CHujZ3(o@)N@L1@D?sXv4 z$h!(#6?Hf(aN#Sd4|N<=wUs89a-bkEa+++Zyoi8drMy%eT|Bm1D#ZZe!dQB*av|z) z=;jY`(6F3{4+;XLc}?66eZr-3A=+lcr0{jiswh%kN5a^pbqFkvVC%jZ-g&MZU%yz4 zuU;<4&k2rSzAaS|pwkb~;aA~XsghUA@!5-o`1F}PYwdsfWEOsKPd=vix8nPE*5j*J zity_FDvdq+yRmO4ZCwhR`x6*yXrR+gVlX>`fvkX8--=p9_#Jm|2;c|zs)J)L zKJk+L0?E6NN|j{e#j|Vh@@*?{_;fKwPBh{1dn@qiQ-%2P^9r|B(JJuM=Lr?4!CBMEWW<1@jD_sR`?XpyBFORb}BWHA;M_ zctCL-Qx)=BoRAk>)E`B8lmP?EKxr2nCGC6;_&f7iPzB(yssyW~lwFVfVU?X*!)M5H zDBYT9t5}@^7r}$pRzPqRx+J^?WHhBA)j?~@=|Uq-FmWL9b1YtXOG<{OK^e6aR9sF( z`5CDasTh?W4_eMfK+!md$3*!6(MA5J^gus#j!9nGr z+3e^{+*GzS0whkyZ)09rSjw|ZnS+YIG>CW#MDS2g^cX=Aqw4VC;fJ%VwQW7r+4^3k z&*;Rq)}1&@fZVt1`?zI#56<+B;L5f%_BXU*N4N#RS8h6Pb)MN3X|T~%GP@hvu2 zh=xSb0FG{G6eZ(T*(K9(DG5|-%&iM!DE^*?^Y2A0bBy;{^Y4eOF{1fLG!Bq@`>Lj2 z6(h~reO7s5gMg}xTsbv9GaBfz)&ogE>P4diUlHSXPFNL};Ns7ZqA*y+#XYcUI!dmkaSTLgbfk))E2& zNDY@X%W+VV(Aj_aQVD+YTs}T|DhD4uo<)#k;8nK#$7Z{5<&_Bwi~anSLVWXT34TEZ zBUSS&sS}<K+>?b@?pTcz*A`;;^|g5JeyS1f|M7EW`23|xeExDZfmwl{ zJX>M~0Y9QLdF!Ekym4PHUb}BCuD!JyQ!_p24~*04D|h0R2oos%K2Wseb}18=D{*-f z%bj@YL?Ce{RpPb@Bo;bV>9`aNUn?;`>?P;*OlgM8&Kx#c*s#%il*7}|JIbL*p{P@% zlJn~8(#!WN+@-nK1&vlVi2PDtg{`z*)m>61Hq9=VnonL}ar>z{xXr>_1ciX%#@ZrC z1Q$=8SYE~UOObFN)sJ!{8YH(&AX)p}9F%{r5=pjTTGvGQz()CCMv!EyCUkiS6&EV8 zUX7yjG$~DxtzcDWx$u=Ao{D?w!dDxM%@oI4Di2NUv^e+SfnweS51Mr-RQQJL>QpF@ zXdVp7!{dWHs6@8m+P+a7OSNNvq8U5Gjo4V5pqh&CPZ>mX8J!g!jpatp-YEK`fb0sA%0ErB{&A za3`Gv#=-!LPN`Vs6%~db`VvSi=Rr_di`0XSfFiR{7J(tNO07k&o2f<`R|*{J6&@u> zB2**ljjp~RwWvG423-@)c=3s9eDg}NCBuLHc8Jcto{qlG@)@$9zfp;=>Fhsyz5t&- zm5mRiM4rsXD^KU(`ujr|JJ^r$gG0FO-VlEDjC!r-;wvhbpYc84zD9*ZIQ*Po`329T za|smRzEOiOe^7>>*}h8ric03!1eQVOogjWi;Dg6<@jgv`THbho zFt|4xuby3tGuP$dy6@-WRc`x$isd6-yI>;w^qCTT{B$uscw9;*A3uC3k18hzFQ4W2 zKD!dP-kgPzZr*d7uV#ecP>4(8boph%GZyZ0fEjlXAuKdciSll)(o5>Znbd?o{K|X| zMG1ME6NSe0VNJeOrZK%#$kE{`R4Nocvl>dp6bjF;(SOyLSCYUVD z?Fp(4syhJf@lI#D+iNeucq&khoBpn^4Pu!=;6^2oC|qsS_9WswYUwbE^oF z@8zQ4pDSz#=*o*U0gan7v&@?ty~yT8_vS>WX*?t;1cjUY@vj^-q(&Ctp?rr6SNY1r z1BeHV#qc=CY^2^cn#RX+BlfBvh%R`fvKRXr25>OZik-m(HdaM3TM@-bk%n5(^q$@X zR~Ll{j<|z}U|_&tYnfK{UsxrN6+EP3e2_TD?k%MnXmawt`k04<2MAX(OvT~#DKU_k 
zGVwv;gGHe-g|wV$+hUlU1F*|nACvl39Y|)oNwCzfBuHN3NHi|j%q_TX=4JxrHrsZ3{rxyG_W;gpdKh;de1_`e zDeEDx`NlO&x(yj3g`&PDR4E%8ci00a6;ghI@*)ap$zl7Z&{h~_7u55*)Gvr2xkS}p zQL2bWgNAC<)!9j^K`~qbMDtN=SZb*r4S}tENk}0=g~MpU%QW^@gdp(&5}F5zU$vz? ziNahJ7X@jd`LI`LnFC2rF_) zD{gxzgb(PHzoL5i@`VDbiXwddatW1+&c{~&Yg9L{SKymB2@tB4-@X%|Y3YT--m9lY z@!R)e_~j47`0BMP>%90=+K*)~l;9Iy^T+%QjnVbV(^>fN=}i3SiA+3mZzi^!sK$m9 zwRrfBT>OwQdFR1wyiI%afo!~fA0bGP{D2_2@>DMNoGQe#_vBeFzrV3ew@aAu{#wX+zZDWBq3 zak&Bxu3M;!a3Mte(3kZ`;&P#1ib7qg@{%%9xT|k|^kEfoND-Sl;WIi&SPb*Ln$Jc< z*lP?gl?-d%6YK2AZJpLhQK4c&LU1sZB6T952ntdsR4laST>j@YN^lO9iZ{HxUm9#l zcph&`#?H~pbGxDMl^by*6uSXC9vqv z8K!errPX{=T>vp~(D>(9*{&%M8zqi;!Q)bOlv@bD=(R!QGPb6d za2u^s`w1KiX&FF#Ye1!EzVjDITyorjL`%2MZOhmarh?M4%G>{sAaUD#kO)LRNK~~Y zQxaZbE>l9>9->MMu4scM+_E9)ZQ5R!GjW9pUe07072HxPll(Co=y6BaHP}CV6OK>a zifcAdd8jma-Cb51e8YN;9(+3$$UQi*?tUDb(a6CM;_ib_ z4pFHb#-`?7eBcJ?#MN&^&f3e5%!4Gt#z=(MrSj!r>xZJwiZMP1R+CP%mQF60N&vED zq>)OcMjA3Y3?>OaKN_+na0MYx72*bR3?pc2M6dlBGX)k`;Z@Cb4UMANG6fKUM15Cu zj^5I78_`u`Ben6*naWUJf&dUC3?w~NCEZ+CXiGiQT}aCZiQlnMio=u#)l4@*qnwEi zaiYA4!V(0E`h!@Kd<~TdTa|vELm;uSvIqzrljeIgmX?L9)<}T}JQ@bY9WxIRJKkA3 z(~X$hkrCtbA2oCn62wJanbs`L2T*F2o9sllXy#JgWeUSmU)s2X0b#uqP^@mv)+^#K0zZTw6E;FUYqQb}as^?R~t0?7)j+f#}9u_SIfLs&eRfp-avAJN_>G~RxY z=Xp4v=W^9vuiTx5=WiE8R^h1|m*KiIYth;kU_+F?<@|C)uc$|Bb|;@9mFP|q3`)pr zh?OY})vUv4-5iE%s8Ez3w{Vxh;fK7GOBq5p)r01ASDzCLeJKP+sJKMEaww|sa$p%$ z4#g7lZYX<|mMe6s=TOri3`+UXG!Uirlvnbf=S;Jq)LGbrj)HFf$I|>yrTO0?B+L}T za_efeNs$O9%A432UGx7#&rFT<&Z`wE6_-0%Fe!iQ*`9qtA&SncwlZGV(NN%U&Umr8 zN-dT8YB=v!Ld1>8yBH*U#%r8U2^u6ur}PP<;JlZ)v@E>$a)kWjB@F3&NA zLIe^YDBNc1MF6=hF<*-7gQSw%WpeKRw7aH83=(qsF0|$Pkx-RLndlhqtMWkN%m)a! zFM^1`Wgwvnsa!&J#C_KDo+g;s^<37#dy}F{k=pAaynE@aH71uU2M((=n5KRvtr||H z=`gl;ox$PJJ8VEl>tnM1J}Ut}v;H1}MG1KBo4u1TAy8)S#&w(SCrqSFp1_&eTX3-N zYWO1uYvLwN;H1G0x2E=*s;IqGof9=eutH1DDhchn%bWR3jPUpBM%jfi%TwtYNqK{O zYMIJW>&N9p8VM1BM1>oIh)N5UBT>glRc=*Y?1tshw7d119h2&$)+)dJ5@MQQ!e>+! 
zg??3`Ag9q6x=Y5;UovR*`>k0mX6*`Z@egXf)BS-KxvrL=awJ^>1&=o~g(CH#YOBSH z#Fg&~2(AyvLU>BUn<%-ij)cmE2q@P1&;yA~+bo2osw}FIwlz{5Nw05-QzAM>5Yga` z-8l{D$w{yw6+?P$1g)z)6|$rjp-by*cEY*~*|0jV4&!rSy!s?nhoB*tyrNu5A-;Zz zYU24EeDO?{!Qf}Fl;P)Z*5X?_^>5zrj3D{h>p{XKh|gZBrCO-O=P#7w{b$Q@-MwKv z{XhXeeLMp{ekvQEJxxXMWIkSet`c7oMt^uWKv;zErw;=7^M@h)<>M&+@(Jyu2>$d@ z6o33Efq(rlf!`A-zj~)0KU0}7ukUlSQhfGei4C*z>9gu3FLRw71(f%n$i)YbuEH&M zlwtj~G2C%$30}Tq4PLr^HKCG$*UqlNnd=IXXymiFG>l{-id{#l@xMtdf2$e?ujv61U zr)&b<DwX0X7(vL^ol5bCqj$-6f~4e%D*g#$=6Ef9We`#S zkOg?i)WUNuyEt2wZ3KvyL?=WHBwpo}*8P4U3lmIvCTLJ~xN)%jv9MH`Bjur`K;k+O zE(VF#Wf~DTx^{rhq6sDE2T(+47(C8%AaM|^K`FmO8NY)Quum@I(A zLR_{@YDExP0Ev`?4-z^1YPN2j3UP|T)_Lue^(%GHdkODe-rp{RhhXA{QW2yHlLF1Oco^H# zXRvSZ`#3&+8^NKR$pe;zS2A8zT&HJl_mc4USpxpd#s_fc0V$IwEuV6@|0E9d9LIs) z6VN12Zj7p9*w%Ugy_K`p1HXQy=AM@ka#T%;)x8KVZRhV0L&*h#dcq*5vDDm%2X06m z0mL6;&Bj?%qeNCW}^M_ao5hN<8P}1E%VleUgoX7+c{`LkEHYN-t{GGc9 z5;=hdh_s=Ns-j&LRQzMAm%8#G^L90!v{lg9yRnpyFN&BrU5nFNY71B-#xwaR(9~BH0b-%T3ba=*f;!nS}XS zK{PI_qqD0;EgM$VnyHXh@=w)>B%AU0U1fv^6~~JN2o=cJFXZE^=kxISGuebd27yw5 zZ+}olh^Wen0HJ;RMlF8vrgA7D{QRvjw*~Pv)yZds$(Jvd;-x3caQ5DEyzyW*K7Ev} z{YNwL&eOTpS@DnW1qhHZ{`PSkfBPgxc!crSPa=d#J^sw~KXdz^4J0vwB#Pg>8@909 zSFaS~3xebeDkH%}{aP&K^}Kq^=MyH*KB9e~J|vH=!A-YSV*IKm+;np}9zUCnyKl_L z#+`LG)Qjd*Y3F@uUS5xutT>t*TQRvQh#OAj;_;hT;~9eEi5phn(d(At?$gU~@MtFD zTlp-tG$FJmj41DCg7Ao6Q3K795~HsS6DZNkLpC5|uM*z`O0PSUTQQN`C3 zw~slPP?-oO8m!S?n>um5;a!4WAW;RDMl_c>2y~b95)v-lYJE_+ZaI`P^UvWNa<=A#=K&HRJc1S;s8mG^TYtf2Ngmr`m0B9MT_Eu%n$k9b zL`x|Wmr!TxSP63hgzwW}iVhksT;;-78vlw*gNU}dFxLE#CUmTIbf)SH5?Iz~^NC0~ zl=AxgaN9qaR227@Da_(^kW*_};5gT~ou>26!-XwrgNZ@JDFk0FxTSOa^WiM13MvFy zP`Qj8wzi8#36m}xC0KBgDsdoL3=PzTbm|CNonL^_RK4z62oVn?zA_0D zAnIchU)yJ)E*q1}R0;1le^UY{gw9w1kqQw&K7T3;pFFV^A8`H4SE)u`S0xqU@xzdrN_T(#b^yQm zQ53&^H-=wQHT<00)WPtJm&@>z=Sy++{wh3rcP`$0U=3b*APcWOQ(=``fBh(E0QvVH zH+Yu7zwz~-KMWEegvlof{E7BQDwBUDaDMwPKl7~`3%#i?i#jlV`KtQ0RM_h;UM|6B zFK94GDj**wPbq<}>asOBcru8t$%GBo7>`f|W=7FRxb|m-(4QT`Ku*1__pYo%`|>an zwNVTYS7Lf+8RiZYVRBnBnm5#-c}onNj)idTZAF;gSdTHPFq%u#N|{d zmsB8eMKzy;MmlgheKt_KOH@_m14U)SG9}|D3?2fBL4+{r5=bmjPDd}GNTDcMPKaof z;;s_Hgz!+9O8uI;sg$jM6Sqq>5736nxU_yK?io~nCn*>~%Brpk1*#z`8h(yJMdM~E z2O>B~k+@{LuSQ(X#H|Z1%8@8{G9L<~seD)<(RQa+dg;d(fuu3Bn=MC8Mx+jhb>5%Q zwdR$y4$zrvn$-FJA__ftAh8l(FQnxrnv!!+X|VDkPKDG+gqsUQYGWaUrRiAx z*E%Lhr#ioHsj9DNa-4Lgdaco2RJpFK0`%;$qD+uCh=XVN!94 zmNGgUGr>eq5kOpcOCWJDp~E(ScuGVNG36j7;f155Ab71l3~UfbI&dZ3UC7IA{|AWB z3`QPEtlx=uj8iz;?!YDUfo20lUXijyygE)QZ=&H;q8>=1YcwvG`j-gY$~nmhlY&vg z8eP1fW6^!f*_bE508s5wpUpenhGFu51UXYa-7jSu10z0cr|y-yG( zw^`0aAkidG4klMonOun-Z3nGoR>SMkkQ#np6E|E+gmCA*zBpzfxWsBHSOS6y(pAhBEtkCP%%`D90) z=8zCb+UTGao)SQ~PF0~%TWy5iJjJ9{p`{8ew)_Z?&Ri-G!GtDd(wR*)lcBJcOR8%e zCBdX!V4*r`UZv0$VIgP`2F(PE675nZN}6j(6NR#Dj4de?0fr!HTN5{kG}DwL(Y`d* zNN-L92J({_EFes%NV@4TJJyDgS{bs?mPY8+RJ5A%w)8yOKUE<6ydb)V<9PYO0(|{k z1|gD#&!|E^eS9t6(D3gsxg@*|b3*m;t9K#RoH`n0k~yUB_cW`1eoh@$a9;@qd2Wi2w6hGyeT2N&M~8C;>xd@-Y?4 zCrPT4B>wbKgdkDkT~%bY`1bX3tMbzM)ze->t$h8OMh`9`NZj!BKYgx<>V!a1r^%<) zha?xz5Mo2gFk<|_r>Q^&vg!>U8l!8tfY6`~=0?y*m~^itObCI%k{~iy)FZ2+0p$~2 z*mWw57aq*BvgeN;S%drT%EQP&jExzBhpMD$MJ<|F))FQ)NGz@5>l$=v#Nwg`QwZI~ zbl`lx`pT(FsukXv;c^DUHM3AZlrDvzRB2^Uaf-w@KeVMXUT2!!Q3Wa8mhb2iNUS_v zmWHNkR*AX>s8w~B`Y72{%mNCJ?^h^I9R>?E<*>qM0u|Md!e%s6^w&4UOP zi2!0Cq1v!8mgP*i--=<}F$!}jIj^NaV*N-e`4FlTn5txgOX)26Csd0sKqn@(P`@f= zP;f0qr$8D65yi4)gpgHh8Azl|=m4yx`F9`@MCc&>(RVHPVMbsGB%T>Wy!ARdCF_k& zhhD>$crBf&!Gx{PDz!AdR;N=?Qc`S<8AKG;qMEY!I31ZvQVEiBb(C9V^B_^kgy)}6 z!Yj<>EmbDG7$m-9pSLTx&x46e#{2mcbY_dQH58pE%iMVwllSr zF^ELgFcR5ge0Her)W9J`TyBIQ;nD|)%6$cnzXypzT>^>9C)4V&u6`nd3Dt;zphTwX 
z_ciLSl!xChB+Y$l@mVi)FP!B%8oIoPAkr!$P@1VkQd-Y!MhoGf(Rx+FsnD0sD>WjJ zIA}N((oR75d6ITZ#tR}b0w%ryk?cnF5F-7#NetyB=pYk>N}OsWf>t&JHFtJ437 ziwgc13NH zFp1cBUELW$w5}u!E~-FHRvf$bgzkA-7qwT-HV3+{O2F)A{o?RfjBH*kzq(yYviu`8=yKqiM;Bw22hUrV%mio^gyvkrwyv=bsWW8o?}GdHZe zLRJEaQzmXKET=pK4hIlH#X1rSAadSxBG?i-Q#cuRr=QvzgP9eDp zwod%%U#)M0DvACMB;^8$g|}R;M2bY8sq0krmSAE)(KXn$(5ZV4-Kw}~^EnQhbuKjX zUJE4rJ`N&E%sZHzTg~M{S(doxaeAMhisl>mvPvCAd(8Q| zPKz%2u0bBdRwCR<2|I-8*dA5Ozy$88_p6Y%9-5m^+9<6 zr#C%>+x9($dyhVg8@Jqxqr+!RiKrY{6F_NB6Tw6s8OH+a?Q@kIvFfc0qBO1(DnUX| zm1Y{zI7;}*3?gfk>rq)VZP7gT$;@GBzW0Wl8NMFlv%!1B-%Sa1%Zq4Obi%~he+3d% zlPO=qmU*dKjKdbz@>Pghan_f_E8|tmP6|XI>EvtebFdLiT+7d@vS`*LJwu%i-Q-f% z8=cC;pZeBSVY$4BmG*L7@aW8TiTMtK!eHT9J5`E6qOxACyAT+U=XxKV-T>h-KqbrEr>tRr!{1QwPa!sFW1q`suUz_?*!D`~^41i7jbAeXbBcc_tfgJe-O30}0+! zf+UL|$&X^BFoyAxB*u#47%%2JVKS5#K~F{iEmS0lj3|yBh~ZOJs=Zvm^Ayv!K10$Qm2=jc(L$whmZL8|gu_lNfDiQ%|sx*q}vIxdX6BsDzumpOq zN`%#cv1*!1T_BmFk7a{sZ7Um4DKuraqB*mT@MB{ppJ1S4mkAhM1Wdb1cyl_b4AN*L zGz1it^|tfaPQA|WG!t051RCxaR8%OErdn>(^GTp|7xq$#2p+T|^p zFR`$dL4q)mG7&@w80AHLuxP)OOB1&VCIX2ny)?3kDUzDCF*+CZCf3xr{SFQW4lSiN z*m67vB08z0O)nZXhL+R__gn9DIx@e!S9uZTUi@teX}BqGy@a}qPSmLs4=9(@snS7O zM?yN}S|zpVKy8-3C3L7>9>Y~)2_%ArVB(yIDHJos#a+dg<{USO7)X3&;?#-Mh^yH0 zawb$JO0-)iNM674B=azlVxdEKNqDWx!7nI=`N}I#p*WZ+9t>doVqUx z+i>ZsxCAP9ep5sCS)MY|lGm~T85-}6LT>7~sHA(0PC06`EUH(xCi>EEqH)VQEzG4! z#ik;=us(GhJG!sIvC-?TYU}id2dzZ-`i*CC-G;kxjV8%bRo8mg175?>-?sNj+r*vp`eWwyQ*Ck^+ef zW0^9@Qm%s_5hRqnNgMYo{MDZ6IvYBmxXP^#CG1s^r7A5Y+6fW^hi5)u z%(zeRNfRP6CC-%#>GU9?UhAnXq2|H0LmYYC%m zrS4G(LsSW)ydM*#Nm{~Yb)6_lV1(QIxWD;)l-@qHmV+{3QmfPoB!fhmYsr zM-OM=jr%k4;@M0*fBS0Ock>#IOofriiJ*xPYFbuJRa3{`AZQ?&_6DAuDUV^Kh_K3U zLbsFv8!kO66*fgOg_isQD)AH=)-O~b;ZFvF+;gZ)uN8f3$JGkD#WhdXOeovZMtFMSa;d3f=+@=n-Me0qj{wIF9 zuqz+#Ru!Yh=u!`O>mBbZxdapIOQLmdGnjbrkcx4@=|+f+Wv58iLHPex`K4z9h-NdCIY;h6LU8avQpt6{OxU1daPdvs+;*o_9AKnGlvlBii~@;wyvxZr zvks5ir_Z(Pa!ScvAGa;xaa1TlRgS5}PvI&A52Wt|xHU{wHz$hC6V$|7z?{ zAH{(*L1L9$$FZ;XYV7Vg$@6bvBcc@*R3sJWD_26rx3q=INou5~5czwMC{HGcsBcTz1wjLeI&ifK0CegW zrXm=+bNqz5OR}3%Ay_(T?L4-He|m+ke3(d?2q=2(K%x+rbNqsU8%!~2$@Yb^-6rep zu6`qFu4}G!sSX|Q23urE;mAmXgHL_r2)jt znsEAb&;~{P`AY@(8J+Ccw6EBD|5DEPg&chGTsA&`W)0qZaut5U?Z13WiS!VD`)&}w zf4?4o_%MM#d_?u}A=L+g@_Rw#gD781oy1JB{E`m;7wQR5ko@+oD!fG)O-~0AUs6H9 z2eD~G5br%jki1=re|@JKe}1&f%R*i z?-y^Xvt*SGy`rA->M{R|A5bxAX!=(*U9aFlh`g-1Q?yi=OG+gJXYVe;Kz#_U%R?4! 
z8m1x{E744g4VWs|u=IjSoFIvyYh^8(3Yu`=O$z0e@iWWu%_}9YUyUlpp3ldRpU$<; zi|XL`!kwAWu=Kayl7;>b&F>OK!{yazS?NMsV?|NSRKze>rICy^YH<|(*Hcf}@K#~+Z))FREv(1D_BVQ+1HPVkZa$hrAsDNAed_|VUY*?Vgs%A^h zt5MohehFu>NSLNjcL}q2eTDK)lVK4%vetDBC|1DbcYWCz^mIm}(!-V_?&Fy zqoEy`OCG|`&Xc%u>;{9zbs9C8;80ocH41eRD(b|jJc@=>QK;*-Jx}56fhW2BR>I^e zLc~pqwXfIBJh-p-gejBh=w|*oI=l+5IDhk214)Ds@j;>iDPsgiJad>vcw`J3XcmTl z(L5|ty(%Z>J)#SjcziwZTNngg}t$P)N&iA_f<3&r+yM z$2bKdg=BqEbRI6dEjN-Rc&I+s)}wV*5N)fp44V>(T~dpRD-zhTD~9)9sKmE)h(CK# z4v{dSqy6^PVmjnvd;QgmxpcbOTozcXU%8Oqzwi2U{OOZMS`+^0*}s0$fIoih%-|xW z@@^ErekV-lTTk^6q>3oV(~nePv@?#z<#mXBw~DGLgsZNs!soA);xF%4;V&Ol7x@LB1kkE-?k=9NbU=7z6QAt{Bvv<2 ziD=3Tf`rS&$^_yolZdZKa6L(gM4|bgq*Qu})q!yY?S+a4wNVkYQ>93$u%W}}R_dgQ z&u1eWHiD2wZqP75Y7kkYCx?v}KF?M;NK@%@mlE*=i4?k$@mgwJFy&RGQruWuf{1b_ zU4(}6CjyF7DGGIIdf(VeuR~(NrIqI7RYkL)NCXg9*RL;cvm560p@9HtuyMAiUV^ij z+_DijJ+KKczP}0Ielv%^{dNof_Pd?<>#sNC>o3Oezzc(zm>EQUmRgdoyjQsq#koz1 z2p}{KV`4*@T&g^X2NbRgD(8%qWq?tb$pOMagAPj0-#{V-qW#Wvj6Z+3>y+p^9Garx z`^vn$i4=;RM{30bi4ydJiSMvYop=gmp`VEZ%X|eFoxMwn3o=@2o3~xdMPaW6H6n1x z+)Rsti5~(x2SncabZ%cE$)rO39`I5t76Rn)nz2z~EH|u*tkXh^s^)T|2*@7q(R3-P~noSS5X?kbx!?lFTb%e>O**o~!DU++`9>k419>;BaAER=) z9Y=>w8c6&hRcy9J&1=$i80%tN`7CwuH%Rh#CrDNjB&!vA^A(Ad(*RAwz_67=h^WJ* z_DiM2Glx7Iu<)I7Up5r|qR^Hg;>-<=VtFzGMccGr<=|;55(9}~VSh4Ow$?k)MAbZs>RRf{EuH9!BAxc-7D*mysQc}->pSWUL3m)#_;LO z<@oFSrTE*2HH1hIfBPtaKfh0vLi^MEb=KEJ4*b_|5*V)+5gLRDukq)vLP9T6(OCmetKHdPRn(HFe_kDn>RJCAB0 z$85YvW%A_Nwb-#XkL|B!fdKEjM0J+_XYWz1(8}=gCra4jcXba zUC}@zFjf&5s|1X=vsH0IB+3RiZCM;qT3|^C)fWc{i7qP;QG&dc4W1@GmyLu)qrgEE zRNC@8xKCyEEl6=+s~HSKKN;J6Sh|SZZ3AL1z zPitJdLJg*>aF+U<9ZsFrMql>AG|QAY=*N~^^n(LCz@Y==#n4o+3cviQn=P3b9(;kV zxArcZTSF~kYZ=p-*`!l+ur3$Er5qw1p&;UviIVDalp3~V0VDkCVy{&h{}8CVdA~th0gqQo(mJH5Gw_iveEH;ZNSq=yh|EQozyF=rLtfG#X2hT z{!5XBl!&K7Nm5x3R&T&`WG{B4PvP{;ZMc5(12$ZW`k82gtn23PAW*1EX73_M?#8j{ zyK!{pUR=NJQQW!jaa_Og4#MP=>;2ww2zzP!l{=v+hoZUA6%JK}Sx_oNii4(Uh1C~E zRcEg9Or^~Ih-D2Skv(iV66Hz~*;Fcoi35p}?JksM3WUl-ReS~wEj>kY4oJ?gWh-mg zLR*slF4^90`4WpO^N%QyxH&@v0|yILUA1JW3X9t{q>7gMV=7N#xe}@rfy7jXmqe!t z2rX85l{7F26oQCCTMBd8*jqea?-P&|eh^5MGwIE3G=K~jG+?M8i5`NaWp%`cKM7n+ zXMLfwvVRJpGg*&E?=HdTFXiH^7xM58LGtY@1=h0v#cM^@!v6J}WoF+JC_jH)Lze{b zuOCG4=Z|9e>!%I)x1XkHE%@tCn(-G8B!BsF6D?&5<##;xH&hQAc3y)#{^sopy#Hbe zc1<;4q$rBv%sM*OY6LE*MEQ5BP;x~Ka~ooK_ciJxJ_G}J5cq#`UKAC|}p2(zSP$gvI^@sEE%KZg+ zg%1Asm9-cO#W0jZU=)T4k9q^hNGZ2f#xYzS$52fi{S{H(e@zLV#HpPj{Nl}W{2Sq> zPLqFCZ~ON{_*Z_%-@jXnU-LWKsKqaq;)5p(@YX{)c=_%eymt2*Tys+?S{fo~ToFWX zX1!H~O_w*?%#NBSc)C1-abCkR1`~V5WJ8+_JVQ*FRDWQ#Z`zb(cJR{%+fj= z?q5UPq%))Z-Kjh?BUGDV#FlF)pfDOU+MxO4)5Qa5%S}@m2p9wjl}b}4)k#(xl}Vcc zMj&Y>I8t1;&;*kfKF6vg)UZD)c~`itEjxwA)d@tFMhPu8c(Rm(QLlP+WYjz=LvFf- zN=+l|=8@Ou>lhy*3U6slE{&{h$@W6FBm$Q=S?4c>wfxYQpy43W?-Yr`TjkT3J8~Q! 
zeti=E&wrl5|NWN}__tqf!=I==e*cqh{N}?}{PIT)_~z{hzIr`~&tI*@Pv0oU?(OXe zQ^}}>99~UZrNISTp(-sU-<3acU~zd8r${^nL8s_o@^@;)x5Xf_eGV9|KZvI?XilZj z`T79yO(84|Fi4QNZ3>ks#3ePteL7xhh378kxffvKL*!gLSB)qfwE%*1K_Xj#h;Iuj zMPO0bi`xw*PHFh&!=#d}d<`{k;V(kO6((rA2NUH@q(0Oy!=~#MOr%B#6f?qvze__# zFZ!$JFcH~>U0r8zX67ue-$XS+WuiREwd)8Hg}c_>jnnItGdWA3+=VLzlkJb;`#T=u zad+StVX{9hn5dGACXi@J+t{!J?R4hqBpF!RhTw9IAJ$2*@R|e>f+dzQfEa-h%b)`B zESWQDrtp`l`ZTYJKmUpAILp>Kp;3MDLY`!ChgtzeGkbOv4x`T$iKj+9m}p)35L@z3 zswBE^%8lrcXnB=PzE+QQI(}CMtm$%PPK5{#ZtN_n48nz~M3C`wA`T|X&G2}FM*9sQ zGN}=sSK+LITmmHD^%?2PjJt&T<#njLh)#(u>e%H$8(S-Qeh5v`5bnCA2tT3I{ff@_ zXD{U$NYug?M1J*V2_YhwRIue=hF`u}WqA>ed-dm!q6A6O14#;h`>6xTU#Lc;QvO2w z^QTlLA0_ac_Y~Hu4Ac1mjO7K;zM=+!3oB9i-6~Z5QzfeSpPC&E z;FUE73--EHU%Qn_VmBCIf`sXiu)qnK=L#NJ)a zIC^sm$IjC3Y{Zc}8gS%P0^?I{xbb9+_oWj5{&5{aQcuMcp<+@MSqOi4w+6rcVT}!s z|MjaC`1HAAy!&`Q-nc&(uiTxD$M4I;#+d+G39jz70txRyc>`vv8ZlEvn3TsbQLOM< z$UxF~c{QRJS0Z|86=IiFBYt_6S@e=hgfFTZ6`fDizrCIk~YNvoVPAUH_)05PX8%6GSu4^=kFky1SU~&(x+xiIZ z*!#Fua~&N#NpKu?eNAZdQ0Y31O-;MmvKgT&B23IWsKS&7kt&fw>2V5$szrqY>WnCu zs1J#T$5Dqz4Sga&IAFNyEK?pTxUjHRfC@x?i=<4PI??by`rLm0DbsA(El#MbYD+`K zD__zzj zteg@t#bOG^mG`y_Dgp_Q?IMi&@{%;yN2Ga0$e8(#abepm%fy7kq|W%%f^Y$}pm{QNa;d!qth5<;&&osa$d zVi>JUVzMxTnZg)G+S0iB_Bg)eb^hyH1^B}o`S|T?Ir#OXARIuudYk8@t>1}eWz<++a^$IODiQ(2gGU3ENh6r%8pcKAdPV)vR?V6y->42M!|322tu@~brl*O98Z&y(Qap~4 zod@vbJ4f)BKOHAPuE+oV?>7)4NAahxrU{WYgUB~;2l3S#b@<}7T733O4SxJm6+V2n z3?Dq3i)}mT#PU=s=0Yl#E1?5Z-w}fc&2lDOYINRtl|iTN=QZR&Y;r0(N^_QUpi(NP zI^^Kx{0#zf^nT)8m0M1gIGE^saa#%``i#1zE>EM+GMES~9!N~3@U?-*wmEZZh1Y2C zSTKVKRa52VH2<|Ze*YK`1O^3uR}Ut>$}mN;$ebeaLRR)Z!a@*{@pz3SY<)_oO6E(H z1rrY<+GiaXEs@XrZS%Y+d?cssdcRvIQ2rj)-(rvdW=`5}sIgNeY4kqenvZvz!_DG%d z9L1K@9!q>n4GAVvB=eyzLPV8iVVX*r^;$qlW{tB^Gs)+sm9SU%ib}%+N$L46vF=7@ zplP%NZ@e|_mm<+%o*jilR3u|mBxy^yE5s$GVI{t7;dSs2D%D{<*0o(Q;UClz?v_+n zI7*7b&z(33C_H7k5Cer%7cTiuMPdN)V4|d_2WLhx|AxV0>A%J9i3(b|3>xkpP#ql-wBhyC@=C6AtL+O^*sNxmvmQB z{x?6Y#c$p$!>``R#?7beF;SPmL@u3ZNetuV5%fhH&^p?TT}Nv0@U4Y-?V)1)@bOZ- z{a88Pd?FwBJyM1P*T=E>Kr^-;iP?y}Pkw(bp1UIhPu;Q>M^2U!IOX`{}D~sr`}P zSCMRm~D4 z>!2AKTlnlMN7BG&Gx`5x?5)GJy3(xg%uIXQl8}TD9Ex&xcXy?XDoQA%aCe6U2*f+? 
z?gk`*2#^3Ff#4P(B*d-LK0Pz#Wv9Dn)_!9CPC;OD8luNAck+cA9G(?~bt3gq67v`>= zjqUHv#V4m1;M0?f@bQtkIK8(U$KI~Pf!DI}{-#8J*|FI3LKJpA7m2r?4yO_cB}78- z>izzB@Sb64tjENJ~5)KX=6%k`EvtbSb!T7GAHnkb{| znlU#MCQC4_)YX|xDwyU#qCkAn9E{7EW<^_y+eG(~t4nHCN{My_6>MLEMFDv#6va(a z2Gm$VVk|*o5V1H2&EP>`2pR;6+0V{59Ti}lB8lK4se-F3f@BC+Tv8nzc;-hI6r@T# zR~JnxMQ*KJsSsa0Y+b1@|~AJYkvscCtbl$3$- z;fY*rrlMn13Ld#-IQBf+-zLnxPYuQXXZ>;Dxj^h!d_*x5!bGl{6I+x$m59@d5xkjd z5IMP(6X@1tK9^|i-j$wvY)d@B6Ga#WVeiv}v2H^&D(do3o0*O&H7QgC*;sykB3`h) z;D_S9XNNn0JwKd(GXlpp`kRV5^85%KetHm|eQ-D?v~u$FOGZ)vuj|xo1f>;X^yFN; z`D7T*yc$f1Q2D$}kPrmNUroSaPWpSF4aJ_Pg7Drm%J7OZpuG2t_JSXbMN`vIADn}l zqC7lwR~$}m;dx$D*Wm;zj5wZ)?|&0vN_BShnL&8w@o?;VIgudA!e_hl2$LL~dn*ec z@5sQ1+fuD)@&kSs$M`)Q*c^v<`M-8P6^_>*3c|~G`{Ci+2cWYgj_NQKwf?D=9X3iJ z38OL!ra~e}$_7OtyH6N02$7sCqmkb)8ill?t7A;bln%*2RZuya;zpw-c?=q2n^6~8 zN5xQs+EBHTFSWf#YNFau71_?QGRoo;O8?Csq7C5ksX(YkXqkkDVke4~tZa+w)kHrkGjpr`lTYFPogQyh+oAb;qr7sN_8t`R49Un z(uHf`HtrgUwEp=B8yuDtH`h1IO@bagEV$02+M&2!Tv?_-Y+|$wE;muSIul<&UXYRc-~``e7b^fOJMWru zP;o(bDVWl*1}q%43O7%`9kt7Lngul%aLBmRX}GQ z>N;ys+E#Qt^y#nYUdOUVL> zR3!-niZ(6Ih$J<8;~%gYqWPa`?dv`>3AiiSv$Xu}}o+^Y(hfbc*v zEFXn5WoG5{*Gf%oQyfUVLPP;WL3st^t@E#!)~ow`T-h;sR2v0@l=AD;NFkq>=`|`` z%;=kfbl=jUsf13d)r%+~TQVdC9Z{K>l$ecaX$6>> zS&Zo!MVOM3gNX_0m=F<qco-% z(;AcU`s0Mi8_@(p63%Rsx=JJzc7(H4vbD6kCwy^j7k`VrkFZ35+Y5J zHE0O0L~U3FY9ne<9o1^xwTlBAEV!*KEu{cw5F}ZG3a?qI5T*U*^10lhf(p%dG#3}I zaRuL%t=JG=LuFEdG%AwhtJ0}xRMW!i(cD~GIAv0;C=EFU@s}nb@>kI`DiqF%83Q>k z{L1)VsO$+5si&+#Tzn6Y#6?ls9A`KIq82GbtGGg|L_tnBZhm+!jvtwWvj=D6*zU17 z_+}0EzmkJ}{8Hb0F`59;R+=`!gNO;-zwxH=6+`ITOJhXu=uvIxciaOcILhOp7pPQT)NO)eW0*a5_@w~sd1eIexi<_;ZjHg6 zcLw2YPTa>g4#UY!fjCac9HF}Lc0W8q037B7eURrkz;o>WGPr=iAV{P*q^x;88c581MT?4) z43$dBF;LWoRHBB8C+z^>Kp($q&M{Y{A+=wo!6b|GnA}`>f{E^5oo=qKM5>b*wITXt zG{;E3JK+FTwKGsxlE1dwY1=$QF4;*>nGoZhn7E$2bVsH`xiZ8Ah~7Ajab{U z0?Vo=V`j@NEWB+lUU_FCJ~=%d-+w#@e>yh}fB1MJzCSx2U!Cg0=O;RGWPdZRy{{F; z6*UO&o5d9|--q5yk1FY@k`xFL z1Bt*wlbcWzwle7i4uc23XZM&2NU2gdsVDJqxjd^PI?$Ol7YiEJ5GFTZ6@jv7v^oUe zj>WCFVPP}D(!2r7s9XdTJ*WO$QYM1Q!ul0fS2Cr@WpYg}^u4e|7s!MP}b*q+gk2 zz(}O(NFYQKE{&raNuVlGZ3q`~{Zo)nD6#NlP+Fz}^?5tfkEMM}w?(W$D@^)TCvi-j5Jb<-9;*)$>WH z9Uh0`evyPo6c+llPz z9A%3(E$A}TKC53I!c39$PbR%wuPsaQ9omTb(`xYg^Lg0wQYvBL{Db-;cRm%y6^Zhz z!?1%gPg3u70_4?)0llFOtcpy=YZ=Wx6KshU* zOvl$JCgZciU3?#{*uT3Gt8T4FW^NUMtG!x$N=E?Ud$2?WbALJ4mfn-O;rN_D7Q??$ z7M+6c>SYznRlgLWD_(R!N#b(_{&iMs(wR96^GB}6ina|{*+C^jRWiHjcbMCHCzhxk zz4nH0*?^^^Z^S$*lSOT}W6k)xp_s|Sk;@5_IWC*a)QL0QpO81*_P$n2l_^qs0b!V{7MNDL8Q=>i6Ek$L{c9v zi%TlQ#Y8OTVXDNJGHeROfkV)c0&(>uT+wkLW)dJ8d})1CkiylRTviE6ujD{XBurAM zNHVEDtQ88?%K0`Z>a(S8zDWd2BEiDjSF2tj0m+xep=eMN+9EU1osfme$vK#onvWS& zBQrA#F(V@%(^L6aauz1Vr(t|#97Yd~K<3bNBsb<@!TJb1`=CGGd1g3{yf_@kpC67x zPYuO;k6ne=9_okt?h3|@zYE9w8)7kPVFoIy_`WkT&^$HM_H94+dMM7m$w~iBg5dRd zgMqa=vE2?6aY9C@98+-I++^WY7*ruI1`{}O)<*uLw=!v5^{ZXV_9Q|i1*f;Ej)mvo z`HpXmB}}4laFYrG2@6%^Q+@1x*0o95PoSu_d?2ABQTO6Uuf~-52}mE2fsRtD zn}_7uilQ1x#K&)?(K4(HvD(_7(R(LMK6oXJ>dPM+d7ihabWRa+XST)=AhGyBTLZrw zj$`}`hp4L5Iz{{0?|8)DTBkg6`v^>ENI|XIuLMYqP>B#GjUmYx>92iHVo)|P5hFv2 z(4O3eaXC|M^S9140wk#wtx_K;ZRkkj!Yp+(+7bzngl58~35{{hsEt;$$67Az)yt}w zFsUwe8Wl;Z!rt1p2qFVqTa~Q-8B`_&4pnoUwz&Rnyv0w#ei4lbF3^pIvH@#uC`HwjS|pb? 
zASJ96ar^?p`=lY56L{b+lMqBz5=3aIb$QUw;}P=nIDTL%BR)62bROr@ zcMI^zZe_b<;KUm#*7D&92gasN`RJZkf{5Te1X5YJIE61oSTG?xtfHOJ2<0SxUY&UE zt#kzZECX|wO~mJ4ZUFvr1ODf`Yw(|+EhI>0;Sc;yzCSY+-y9#0Pv0MfGrMbXW>+na zy;Fw=AFo7FC1G-<3ih1pFysHQj4y%2REb=J-jNXIl&h-`C-6RsnaJIx ziCm303wd!@qk@XABXfpz4PM!H6IOTLW&!xw&9}K=d)v)ePLQbXWZ|eAFsJDzEN#CX zYrF5pYA=W&*`p|6h}jpVDOJmd=1LPPIEsmgqepCyVaFG}y5 z3RkK`u@cX`ApC_O(NQszNUqdMP!Up3B~oe-k!wrYTc$6$mn4W{6{`f{Zp zix5#}R@xP63Gd=0*@TLA_)EP)=ZGd$;?WY8iU|oBm_%SqNy#ESatRYz9%iKRNSm5O z*hD6vB{mh4M#thVstPqaJZx^I5FC6y5Qm=+z#+voo*jlmPY=P7rv~8AZPta2^G0%*m!R^&b=MQi8$Hf2?D|C?P;9+Q)#^J1rq~`#(>gi351s;om*^kC|?uL zagvG8cIV&|#Ub9#;5kyL65{b86~l*$OKj&k?OY1*OVLE)&?W`c1F1e#)$VWb*!NsG z_B|WnVk1ul;+@C+@b<$)u!7jG6z%Tg{R_o>!S#PV4QtD2Iu&GKY5z~lQQA| z(B@ial$AwjoZKA3#qluQdtV41A$;C>HW0^N^rH>u_c9o}A02=A$U-A zv`c-Ya6y*V&SNWD2$Ci$mWJ3CR7W?XEUXTBBg&9EAlG&Q&7krqR`bV@CY1Oj7Ka*;p$`un>&64i|44y)n{ zrh*r53DUA_aLw9SUPyzn>8>Hzc;`@TqHVluxWzwSxN|t3yM4H|4tVsI0k~z|P&7}8 zMp#G+hW$7OBmN`8_JNnV?R(vZQw}6tZBm73qbn67#{7z_WxpCMTR#q`&n&`!|9%br z_Sq7Adty93-dm4TJ1cSWt#W*@qm+|o1wMLrBtCq<6(i)uE!a0)=UXC`imhMqB}6!V3{HG!H?X#K#rR#+~yYF_7Fd{~_EmaUIq+bz?<& z4VGl*U`}NN)z}>TjUf5^XS4AA(MEi;zl0zu!k2ps@Ws3N_?#-_(|5A*(aubq;YE7l z^j`1BsM~g6jf_ zVj+TqU}0*61FHul0tQuyz+tWCWqe-Ug5@g6)ilmP7@j`zeQA6k~zh z@o^cLm?V&7Q;1f1h_{_)N%!bE$Eq*F}@CR7w} zrr;DOXTjv;YlH|__Qy7b;P@s^CkGQwfq}o+Ak#QjMHZ4=yPM zg6WiYO?)lMU~-s>;NZq!Yw_~Vlf$t4akX~|wjjH6Y58Nf9g=M2HrV`^a zyU?9K4dZjCU`%?K0i!LY1MO*?pXACSVA`of+Ed$kzXL7FooGnxKn)>M7EYxcq=G-y z?`dlVF761DXsUpetGReld!JD4Z6Xs)9Q#$|T3!T)Cd{x-Q_HS4d#w5h5;b zLgRB%&g!yI7gCH-eBVvcRj3PBMSGd;aiT0nU87PYmdQn^q?=;lv0o||N=b<4wXVwh z5kCsThyesiP@gEiQEl{I>8jWVmLg?P1!|izvEe#DJWfzNanm5{g8A4DgYn4qLvY`Q z0l4VHani zc4Qm2?OBSye7gbv`Sp5y_u(vj!inzY5YtdUkm(k4gUVcLV{!-p*9zPF_28cHz!8pvwhY0XjdUV*p|cZB@@Tq$i|*+ zoJbayB9a$O$S(;e{XZGOVJXXmhv4GkB|eZSyP1}x&^+6P|Bf zVqpVeQZ~mRGTE0J>|9>cF)?o{8sgiK+FxnGd|zC6s6U(QdO$_e0}@~N-(Je3$KAyh zleX9uK(v_A%5G-mVh-YN6b~=43>H_A$ic?J<%$a(L>x?L1`b#CuKVhws=Xcr87UPF zDoK99Ljm+8!J|*2DUuW_5Ch0nY9p_>Ne;5O+Dq@3jg%{0bG~G*@RBZ1MuHDOQZEW@ z#{4GMXYO{yuc8t0%P5o#PC{pN8oH@UCMRWLDpiSKG9!&Zk!vd<8=VEkSanS{c5u=? zM1UN4A%fQ=!Ld2g;u#;j8jlldk;41x(0hzfQPBOcGOH92e{2A5y0es&agyu z5+)rH8R&>9L{~~9#%7Ph_`Jy&pCk1_rD4JIQ5c;}6(aS)V_R}NT2sfMHGM2vs8E_x z#-Nthcv)lOYeYRkr60tAO?!N#O(% zJ)i*TX_V}(8K{^Vhk^+l7-P8dYT;@sKN)cmF&KJv0tWp&5q=j%!vEjHda01}5HXO* z)UtvDUYk-yU7G6Bcjbn+C~iXgc?wd?o3Lx|Lj3Qyi}06Ers1#W2(@$5@aMA=@%^bzd~>uO zU+gW%x!w63&>7a#Z2!xNTxG@Ljt8@l%GGI*(t-IuU0SkJBt5FcyYB;yDHJ`<_YqBS zSZ^++2CEYOs(i%C#U+ra6TxMfgh?(}In@S}wzTP(TYC+bwMd!VLM3t|<~31;H2;q9 zxWxkSiyE(`GTA`5@IKYcwd3!?nz4MoewFQEqN;ZbzMHuK{G@{E7?m`Ji<=VH`GD`s z)gQSGK)JXg1QP>^ud3ZlAW^)8Ao+zX7XDn^6!=%#&W@v#$D?F;5sHRWjZpIUs1i?! z*kf*AlHOf_rAA8q-2LqSl=>V<30#qx>*}%uPGmF!Buy@=tEe=50ZUcJTXj4KrS_dj z>6^&`n2v&qWZ1QBei`ESi9dLsMuXRY?ja z#-(9$Vg{xp^V*fGD=rfqgvjb^v+>rmQW^xw#wZ*mG^Cc5{;KSyQ?GH-=VYy>iN{{1 zI07f`IlBSNC#7o!KTul4dGGYUG4cWz~q0 zO5(taepD5~R08pYhy%$dZ!1=jjE|^9&Tc0>2$Hkg2@t~Mv=qV{J`mY^()$VJdDOjl z5Z-*SKQ`ZSHD0-M0Jhvc0GsaVizn{xj|cA_j{EKjz>-^I(KT*!6?am7RpKR2T~~bFjRm z1S`s`u(Y%ibExnpCS?#H$!Jr(PGla&r#50z{$z9)&&HVSDd;316f_^5*kW;zmL$2Y zct2@07cT@!Y6nIUBn?y{H3W~*ug{GdQwlrSuZ z^wSDd`>_}Cwo*S8w@5?KFH|P~__tNTRe-zdRq)=XBusfob^e%nY2{*Mbjsa)R8Rfe+qH z!%6M!tTxx%)Z&^Dc|FZqHmKtEq*dOg;e+itR3%mT^gtWFK0XfL5ggy09FMP$wsGZN zhqJp%2%;=1h6GNy;dtP|a?Be&8dFmXFexq(W+!-&;tK zWZVBf%8B9qjWO8!Yy>vbM%AVxv`>a9A+N`nt0Uq6@Sq`>`0l&f6aH_S0`;+%=UJ>o zd&;-~yvybiNTf;>H&N$cxw*7yPixv_%&J;}B~5FwV)S|}9ep!qj=UK&XbYQfq%ygV zpjk(CvJUf`Z?IU(>alm>+OAs(la-jEIulbPzAAXaMA==Fcz<;2II86`D`r*gks#tM zh1XD$t10nR36Gv4vG(y)EOAscB|)_qmp2Xb`M=bOxP|)f4FZE#L!t~Wr$FS! 
z%0Y<~2$hI|MSHaKaSc4Vpq%^4!NZISL;-g7FUrWOPQRVM1&wCMFOj@u}!0L?+ah z;JTY~uv;xq)DlJYAX_xSYwr)O!nHEj#H~dS6|}OgEHLgsa`>eX9D8vXwm#4w`Aykq zo=|`ts>#?IO2rV*>zryr8BkQ;az#mvXs1KlSL9Vr)USl%=!^b1L?!US%YitzJ%$S| zsu@AZAmRX``i>9ZOrTPbI*6b`3ZXg(A%v7p?9X%f;qY^V@y?S2u=&A3n7K9qt;@N{ zTa%1yZi~S^4}{_A$AYj`n^AF`sTI?XXM?f*nIOFNv_IZ{VmMxXa3~(TcPO5FaG1r1 z#=9gi0MFp;{uERAojaXGxkEJDC zgya-ra#}GaXEtJL;S@|No&o0$;SL}^_3=Z1TmTV%0pZ#|f`dPbrrHtJlEmo$T%3vOsw%v= zxd9*SF2)Hej$^MS(~@wUM>UMIJrgK`FUm!68p9W7S%&2jW3Ty zd1)*jyF1)6QA`$ZjmC$s$8vBa8B{(YOca3BfUvYe4pO-d)Wb;oPVas)2v0l^fZC1( zgj`8jYMWWPk$R^C+r;gg%zY3se z{H%nCvSBosk8;Ji_xVV?^>`?@JvkiXThkHIH-{h*Ae?z1F(o1wmk%ZuOR;zgpZ6VA z4ez>XPLQuSRKTGe)IM#>~oPSlWDz?Xakgu%^}Cfa!HoCpS@< zTu)`P1`8Y3VqW7-SlB9<+<{e{H}dh7m{~TL1N1yZyv!~IT2jW@p7M(tS5q}D!`zx>Xfu##ThaoQjwm#s2qGnfh}QvF z*;&Q362e3G<;KdqD%DbUErvm82pj@P>J?6fs3JIfK)S8oltJTy_b!$qrK63E)Zk9t zQdQg)*M}1YSD1oG^d&Sei9lc^M6?-~sVsgzHKmkm*#9Syh`%%*BLmXV6`hRk=p=L{ zWMM{U5gvOe6$e$w_p(~Vb8_dktF3HRz3!Rf7;@|Ax>gI2fc7Ri{W_J=)^G#J2d@O+3PH1Y+}J zL-EAJLvZ8Wk+|;m6g>GzB=$TX!1p2`xI#y5NcH{;Vc17gTJU??x8!M8u=ws1YK1Zc zFF!B@58dyNt&fes`vk^*ODWdhhEeelBHBdjX+OOA*dQ!en1I=(1z1&7ifbxsu(q}y zYwMbDZEX`)RJUSbMHgn5O~#bs=@^$g8Dq01V2olP@x0EW>v(KLdtwthlA6((+Kvta zrX_708h8y>Cv>7TY7`1XYYC7_LItynHLVz}6dy2OFR zZIR%zUsDJ0+T{XCf%u|;dQ&Oop@PXsXvahKS1c47qDkYDBKBTnor&`C@!Z z{Y|C1NW4M{ISGkNFOx1;ypd+D{psYl7kcxR#lj2|!`E9C{N_T0RnIkD*&{PS1?i7sFVj@|$fECde+5Elp0F~TxroC;A_R;~ zsCM&W-uCct+;LL?GV(Lw&j~x^7YVN6BfnDviY9YS>ZpQK6!`4Wtez;iO)Rw1M8vi-mT-B!~klc`{~KEXT6u zYpvEqf%oaPHyTW4HQbCvqX>}3b(q(%9&?-Y{LNTB<~CeI@GPodfmteAq#~J=Pa{YK zk;&>L%++6K=449uiXJ5**OpfAf`}F>s$`d|tH7@k6ANY;JeH1HgZU#@V0P6a%qW|0 zyE%?d8iP{*QrkUFR^sRCNj!+?=-pOfp#emJbV5YXNR{iVF9(_#6$mB129%_~TBWEm zIG5m2VBM>j&mNG26#gDsX*!T_0AA*SL=%OQ3S^ov92l*im>pHUudGL@eU7=GL?Qjk zcrJdD(VCcpSyKz}!ehD)A~>;Y*FjZeQ-N@Sp#_c>8p^0TxmCt%h|eG6>yB|@bwsOj zuG9sBS9$#`SQmw5%LDNIEq$@&j?1y-&VJba=n!iyq5!(=m|38K=je+gaP;|MI6?(+ z;OPN4@Whq4eqAVT{~Z<4rXYO!W;o8h5s8mp3pcQQ$m8i(g7Cp+f5Kxp6_Cz75IbKO zf+wGez*EnLV>1=TYfpz_JHfF3g#a8^x8Rq8tZH71KovOdSFt1C_kND&_g^GPRH#Tu zyvx5)oynf311;O@-N%RGrHA}+|NQ}Y?NRM@!oPnu)EWQA3MHQ$g4Z4%jOlaJF{8K; zs|$;9ZACQ|$w*vN*Mg-r<1x2l8fKJCM_0yJv?q_on3Ps@5*RHp_52N5EDmB>S!wOA zMubYFIkgKT6FN{yfE06rF9@um0;#mFxfz4hWj&YhP-=ZPZ}a--Wu7V#NW9%Zd#i1E zcfx;@U{&Qs0ktTg5Rj{@B&goSPJ-1bm}-aMkW1C=OCtBJREeOXtS&)Ds)QA+&UyesdTMp0;Ak|@<*M49S)iRJ zphR#r7Wz|83~{Bnc6~CoYkN?x=-+uJ9Pd6Kh4(kb`u1}U!$Dp6wzqT=j&Wc}u}G<$ zA}j<9%go>;t8G1PYfq|$#tPJnP_!}q2OdFK;Bc>RIlc=YaJ zXq_I9$l>XPF#)2w7P+hllb#O3f=CZYWFAPIatQx5RYhPes>hUL{f0O^b8jf#cqG{V z$L=S!^(en14vbw-24MT6O2rS^)mYiO0n4<1 z$*5Z}n;@Crd@EscBNjAXN4wsX$`Zk(Q!u#+iyBs8R=FzRy}-L_OlDz{4<=)>rgG3! 
z@p|I>CN_|#F zWm4vWWf%t>2VZ92G^G06R%skq<`cdu1?hx`z@b2-OrUVVcr|=fri;Krm<-h5bt^yZ z%bg^ET#-R#Czk~kxgbK*XYnQr?fiHiB<`~bkXoEMl}}Z|aTomKaHI~&z@%9P*z!z_ z)jVh~4T0hVRb;C@yk%4=hLK`p$-@y^2o@!sQuv5yn(`%etP{wD`x?^C@0^g!%;>S}CzWT5TWasRr3 zm@*>@#c`>q%1FcS*7E$D199%P0DQD%1kP*;AVdOjdXqZx@;uKE!hucxc<)szjIC5f zO8uqEImxl5c!pZFX!pXyYL!COWO1BL0#q0dZ;s@`P-(*2U6DYcx{xw??>X(JNJa9D z+Nfw}$Pw7f_xu9KSSgT%As!sNy{x z$$e?O_EY+%T0M#Cr)q;sFgCRX({j5ojz`r~g$)bAMWwL>N&Z~Qwb|At#oCelDv=)~ z(_j)rRTA)1-vsMH#4c**x^e*FB+Lt57rs(5H7GPdV*5zKbMPqLm*J9p^;7S9>~BsT`m}IV6?xN&;8v zuAa+e=i)hC{DtS1@&D-mnp;ch zzdje2(vyD(6VFs<64NJ#De4lp3tZvj>%<(61Ve@m-i+vJeaF zR$%d{^_bs8MN)sW!9*Zg*nB}e<_V6{L|Pc0|+UC*tm$Yyh8O~_Nr`q>tLnP0mMGs+j3&8}L^Rbw~G z0th{-j#64V;ZZh%rr7^Ps;pDSNrkqdwWHnm?CS9yYn#zVTPkqbdWb@dy<#4EH^mxhbE^5p01S)~0e z+CZWLU%&r|N7=|My!coQC)@~A7d;Ia-$*A!6bMfzMAE2EQgCdGYkYWUV>HJ@lx1=q z;KX}~*YImxl-zY!AU59JAMZUnz<{yu$)R{(4IiKOqtX~*$M>HchP{vTIZnJU6EL@5 zI|6H_1)y_O7&6mRkv${@WmiU{VPH5~2SuPRIu&!f6Y=Oh5gfb2@d4Gx$rlG<*VC$` zABt@o!*ObR0zQ?SST!mZgCPXpl=7x=EFY5xLcPFLB)FAIiYAxcak-KC+z~#0@8?(!%VWQkunUm0!)opI9uFUbYZZ2jf(k56NrOwn2bkN#T$Dlc73~CbF z36N$3NIn6Q=T8OVSI+N7JI*Q-OOTNJiU8^F?1vB$VC2qn3kzi>x)@+@i1e6LE3cJA zrRDn(CV@&JmLgGVa6Kx*>p4~`k)ys`goWaCQpo~{?PbDawp_7%Olj^?(^@168dj{R z#guADuF{T*sZ`^sNVy`}D&K2DDljptlj^J)2_bQEZB=1n{v1rWyAAjf5A=v#~g!TH`_foXQBld5M;E}3; z#{*os9N-0Yz44dyAj#uvU5I)(=O+D^ie`&d(B@gkhIE#UY5O_muEYFJy;uQ-GbJ50SPeEBwowe&J`b`WFKU6$!6VjX4{GKZ(GIOLB0{oJ5WTsbZ%@lsoE|lu~+yjBQ_NEX#bN3+ZCpeD05Qt+NLvf5N|D&>vE;ZOPx}M*&5h!7f&4aWNf$?d-j#GPD#y>{;a?0jg5r4a9Y zXaMfMJsi`g*JF0|6wIyQJXksp@Fb;{>3JW-nHviQZBz zd6Az_uZ*(~DCrfYjFUd8oJi6+$;ij2e))2J6!QyGFsYDVpjNE>qAqtJ@nSGekvM4C z-tC&eF6QLQi7Pg|5cw^6ShOM<58NJ%jSmdN>kp5>HY%03qyKS?D+aO`-*4{tm;99!=nhF9(xf~S5r1gozNKwd>Ef-mR)r83ZG5D48M?FABZ z*@gQ|ZoF^;B?5zf5{tmTiHMBNLR2m%&jPN-3kwjNnTv40Bne+1vhu%E?a=+6?I z{WKoK|05Ru|IYXR;|QuC%3l7BV9^5--_bLBPH-Vadf*{<;qfH%nevfh%6m-y`>E= zZdzwBS=w?lR(0G;nDBLtQYQ1ABFQ6A1eKl?;Tag0GsU`(#&d7LDvp^YvVVNrgNl!4I z08wz>_n3n8QZ)jP0Hg+unH*rY2^K-3xQGJmDg6{+mnwG^B1c~pND^_7YGl7w z=}!*k7#fOe*F|E+@<=>;$1p--nC$`b!7Ee=3aV?;<^(Pm)A5(X36o&k(eU8Y!w8dp zxOqc3CgdkzVpt5O$Hih!aw6uXCSz_|3g)DyVn$>VI!iNg+YPaJ=lKZieK8z&K9q&& zUDa4Vt^j+UiNTleBw0$mihO0C?orWVrd80ZPURGp$p=bg5jd12V69ma`M9qDQf_Od z3|q`c*=K6-7=i=OX{+90=8k*!(Ls2Ju-JWnKfHQJAFNtE0=KXC!!rcRUAKf|=CYBP z*ggl-D&!7bfGJ$0bn)8hNF8fRM8FuAGZABR2$7trv?+XSBA=UpRw|JCgf>*gG@~?p zBnk--DUfUeBtxmZ1W1zN9sQ_CeX$P#BfUQXqSA_i`34aM*X73Y5=lHaXOI0qVB%DX zTwF1hCZ5PSRwW!ofpsVe9%-?X>QKTYOnaPEpnxjcgNfSvsM6h1gKg2%8zgEM#Mkg> zL3JJ#v{#mwU?O#*0>yNy_LyID!J`%_B{4M|m~%0`d=YIi#^uenv@%r`YqEFo4PE&B zBOSSd(ke<>Cy7@|eTcGo{G#${+V@=rHN}JpX{@GRysyU-I3Y@{=sd2DM3WQ;4!^u` zWvgi7v#b>!f5qp1N#iO!gy)FKE6#+b!$JY-{6P2^TJTwOmc^1B9Nk_LE?*3IOF84 z?}Xojg8;u{e!rT8qLsB_7maVFsw^J4IpmU4FEfi_Ig^sIjh=XJxyk+&NIch-fkUn~ zelPkPsTt3`rK81DbkyJQeA-Vwwl5)KAfXW?*6N1lxwupjpUUeYdvF;l!$zSqVaz?(+I0k96%YIXd{u!6`=NwAIE`TIJ}grs9e-VG+<`wTuk5u(3#whhKP|U zS6qYusUR3sd{rR}C`P$|k(pv9f`%86SEDc}hg^@F3y>?an-qxT<|ud}biA-S6G?O+c{vgLp9`S!2*9nYLNKO13Ay3vm^~{7JD(Fk!teoC{n~cc zT8MDcK4EFSzFHGP=CJk?d1fFUyT>21+R`yIEe-Ptjs=++Sdf)T%ffsf=cZ<2YE3cj zyEy|VUXR3QZ^z)=_Bed{W*D}<7?0cT$;P{c+84Xx@%4MT_~!jWEA;*B9Thz)X-2)c zlw#~^vrbtI)PX|@CZ<%Bc}Cbtu_y+ry-SX445V@jzyU&W?_*bE&qIp!T!pvqxe`0? 
zy#)7Oe>EDbl2BMtin^9Abl1z>xsVEXj`exdH8VbU653KaF)DF1+SA5z(J{_|GA@TI zB$sN0$M%e|XijWJO=LaFLTk)*RN!BZT)$E(5H6kvXdcW#^3|FbvpD}{b1rnsL+&dF z5WPkLbg=JesY;tac=q*uSuo<7RT0L zQqf#YD_uY;^NTHBgi)!J`9&1+&AJKR+)q>tf`F~2bepE^6G6mN9v(D2py;T&6q#IH z30G?M>T_{PfpG8`JUEf+f0_MCK#@x(-K~ay!3p=ri5U8u3`MY6w?B-#3`1(P(eO-UdTpoz}$qA^ci9NSij>KcQP*Mds2i}N{Z_1MzX6LIl+DHKvuKv>1wXCWHF@mj;1^7E ztcxDSNjce?*v+b1jPCsDC<&-Q?qKD<7owK(zLl%Tk>Ta2@~0f9@~93hMwQfu;6aF# z`2b=t(fxoD-sXGvLBs;`{sfCaVk$*$E!CND1*f#%tF?8m)QEZ&Wg|^#oTfySed88k z29FIvw~9E;HeOQYJE zL^Yy3v>GKrRh+CVETC;IN_efwRi(IxQy+?dMf1VL z1BPWE4&-$$MM6_Y>)tT&TBv9*ogk(E7DZKYFsi-xd`vG}%7G3@rOf~U|MW>jK~%U3 zQ_5GOGHMJzSSH_?K9Gv0bkR$iyTK972}O_TO{ZL)XP}UuF0rq^EKyf3t`Y-^2NRi7 zCS1j;?S-n8J)rnNqHeX(m**q$(p*luDG2y^oXZ?j4a>hpSTW>~U&kO~NCJ}MG7w*s zgQ&V3L{w)ZCX=h;Fshh-u^7S+IQXZr@cTDT{QuUgaMD)o8k{tr0?`1qN&1&=)u-D6 zi;MsCe$K(fJ9_HneA0mfjWgYMAQ8Ca;tKZMTLu$vx5XdA#7*ih#=%vp@8XKQB!QER zE*!yx?}6`G?i_Ws?frkf&!zw4`g-+(h|g`Nz`M?GkM;VWQbrCYzV~FVH)Wa$Cc3So z2NV8=f`%y*{hrD+aUk(}a|t$yS8*T>EQ0oaRx#k@g4vi`y9(25)}gatHO7@(i@A+A zVNuI129PDq>#?ZuI@)@yY`>`oCUYuzeG(!9iF$N-EmgXyR>pGzsOG9m+kYy}w>xht zN;$~$h89u2JF5(>LrqvU6-cpJg;@zIgGx=6R1zFjdXC3Rf~2AcB!r4jjaZs6)r{1L zT2HvPDjalIrz4MpO560R4N3+l48=v{cGH+~O8z`B{ux9JBvSi67njtC`%NS4_tft# zWfI8SQoLh!^F_I|4!`E+O8iVO2d03K_ zizWHFSXNks~v995oV66!D9~>;Ox!>Dv>1DTT7;j_HFU}ACcIxDI8lj#S#_- z$CeU6@-o6SFJkTMVR|*Ubc9Ppq-DalBr} z>1elQgbPhDX%j%=N26MkaG3T7siX?g*5swg9ad!Z8g9?@T(^HZfuVguw5Z7#4LGJ^I#Y-G4JQGj^kzDNr%KL6dPyt42B^wEsa1pNbUuA7gE}V~<6@*CT za?Gy22D56`VtnBeN~Do|b4o?khtmg^pI3SCSsWM{oY>^sO}4=zpva^;6cZ6dG6%a@ ziKkM|!^EvvJ(wsy62Zw$>L;3yC-f^vRG)lG_ykV;t`JK#EF)CM^5bwq!MT=({d*WK z)Lc!&|2=}YovTR$O;z_Je(d1UQ~ScdciCCqWbd1NHSyZSEOP*HyXU1@WI3d-aVK&p!9&mVB*Y!i3Q#*Ae+SRN>Fz& z3G?0$|EC8N&-A_(hjE!&z03ni@5g=suO67tTz1*{qXP-mi4@}{TucZimbs2h0K;VBWfSdxoappYx8H%NMbV$XA&agvDEDwLDLg}fObPvCzm_#TxJ{pNNYf>?$wG~uzz!$P2d)fP^+I;qjBiPKr1lZ^;myAa@%msn;nR@>{QH3%f)g6 zWMxSOt(qVy$D+J4%g8sQP^f4n92q z`=98CcOSYO8}GaVcdQ&LDdqM*i*N(=*k}fQ$oQv6&i!ixFE-WrO2nRFW<|4!? z5eE@HCRNgzJ<%%Dn-bel9jOecT9gD+fp~7L;l zMpbwnL0Dp$TLOtxNv=}Y35;w5hl7ac>TeW21BuG|aM&(LOEnAMJ!%OG&%t53# zNP17eGEYg!ogfRpEE(~``58FrBybXmzCwX-A6%$X!pwYdk(50569QKCHm(ql#)~NO*IIeF_aL}A@cBT<^M`O%`w!6Id0asM z2aoywr)M5CJivHhv0%JjqrX$a1Q!!gij7c}IG2{o#0m`!MvuMu|S23&5EC_vsB?_ZG=h3%~)K|>$7|=RgG)x zI7yR1)&xw*nuzY~Nd(Ib3(|L_jYr|I5-O1b?YQsi41wx2a1(y22p%{r& zii?v7B2|J1Rfr%`;a|YpeBZvfNa0}D#IaxqA)>)|wQCJ;E-enMbgoX5uj0y6E?@(M z4AU;ua&m>yp=_b6@frR=0^CMIk3D9dj3_ z#j+_*yaLsrW9aeY&NE+@tlbihr4jy0j!v~xA*v8>F{?bsYj===SARK&VAof#vy!%)`ynX-W zc=?V#xbeDSm^L*6^~DjWNY25mye6!w9FN7-v#CJjs-KJQf*F>@*G-i%A=d{I0z-`! 
zr98}?mEFyS$au7-cA;JhB)Sn529WAr04XCVijYNE2oyO(N{}lX%*O}~ZAF{OW2UmP zdbzPQp{uKbj;dJqU~&Oaq)H4TTr3J89Os@}>tDII{_!!wM7tTvc>QR8i05^fJG9hb zqIBRoDyy1sj)RaI6j4=K+Z1JK5fXxh?PtQpkOJ*yQYE~$UAC7~CH7i^L>s|rtLPq( zOf6eVcq}JOuEEUm<+d+`zWnip^H3f>njb2i6O^QrKG1n}A_&MWW!u_vbuG}uQ&I^g z7ALv7kb_G5gFAOuN`GZC5F&g`D_3QQS-eCmH#;iLG{;g+!}vc168ZcE6IaXPl!-r~ za2^&87#H4m=Jo&bpwW}o>p{c@ryHa;+51*~8l+l65@{&FW?d|?z; z+}Vnn?iOUk4IO*HRgQU?z~QscrXdN*n@~O7ccQm)|#l5j%!fi zcWV>3GfnCda((auioR4AnpPzIpZvc{gLM`UZ7&>37`VUl0mIkU*V1WChMOA}r;`X(%IyWSNoQkhJZYl|QeK%`10 z=1%2fvn)X0kv0Lvex)cNL<%_Im4aIqRBN)nDyWQBjB104&y{5$kvmJ7TY`r`LVy@V z^34=rw|bGmf=I5D_yf|BeYI*$GC2U1)+yI6l?W#Yb60UZI3*&8^frwlxhv#~usEThC};_n~;_;Q@H-p1#<0 z>s5GieLp;RT^~HSz7HmiO~LfsTrADa$FlrVEGn+S+6DF4@oFKycsB!I?9RfcyVV&x z+gkXjK14mb&Z-jx70Kz><$?>x>6Zg=`1v7t{fQx1e?us`=VxH1+PL2kh!^h|gg5RP zj5qHa%Ht3`d;35bd6nuDJz8j}q}M8h;3qAKvyLqPak>?+7h0e6Obc ztz0l!f$8N-FqIUaq5?4f7@aw@_{CLNti(0KqjcgVB?aM>Nftjus;LsKngtl8`bveQ znEDV%@{lA&LbxRHu>@uPSVvbW5(0!PH@UYgW^x(fbeZ z!a=2A_k~L1Ut14E{)wFji8tZXc>WPTO+e$=(b%;%F?0MStj(Y1zmfbfB$mkMzpk$KO1=KiJ!76J#)7feO!>R<>H zpIWi=@EpntQ#%#8B$XPhJ>a#cNoxNJth* zlY*|D-Vm{Iz-#G;Fwyh={}F~E7lm`;h(WDiDyDK}J~Knx@#eYR6A2M*&pSCW3tdrZ zXz@=#&7f%HTo#G+fl26`kbzt72*alP2ipGb?>^igyYBCYx9;wTH*dcRuiSDKHs9C> z*IW~d*_G*-m!5|?b+x$Zx_az>qX3`3n~5*p$+21x?LG2|wVl`gBiam0jZKrywRP&X zD17j07*$6wjwrVA^kD3H>M|^8i$LjsSma(Bjm*I*NEnfU$f2Bk{j&+WRE$=~xN-nCj_Xv%jw>-4YVkX%tQ1o@FHXGzJx=eZdz#(_8yPXN!W}4$5 z`7+LX0?Fu-ZcDkGTf4^GThl8DmC}WT$vjgguD%@1t@Q|FC=KL=!3R3)C>JRAS@hxPdTw`=eh zg5*!1%*Xd<=is|jQ}E@HPJHry1J1r%ijA9!&^EIeq5hc&@WIahuMK|1F9;EX3D2(> ziCkN>-ipK^qR+|CD`n!vO*~gp2rnp^ZhP(~DGqnr_c}AaR+GQXUegN-K1g_G)$Rt9 z-pBK|?eBCR!A)w(-ka}z@qY&EdEe`Mg6{-L`p`NQg>;}iVl3)nxzbCYiN>Uf$n&d0 z4hLh2e>qn})mB|z6IO@zhSz~6|aqprmq!K5cnGTCZQ7B{S>nx1K($~TY{TI@nAFoC2ppp+(HNPQ@4OE4iw z1Q356X*|lsRVFp!PmoBFSdd-CiE4c!HIjjBr9c`;vMk7|luqZa=%q*q5C;;D4NVRl z{81eEiG31W-HExz(va7m%7*7sOZu>j-SugUrl08gmCMrAmDsTtR7{zeT6d~E?zhpX z^Gn9~m<&ve&%%U+OpJ@m#F(gbw1lLfYG@*IuZ%&$#bFrwlQ>k>rs9(Vwz9#tMq2FS_~sA`lplPC>gVY`RGT3v>mQBm--ILem!U}c zRTzr;#-J@Q0W;#`F+VMl@Jja8s-#hAWnpGk1?CiYVP5ro&RdJkg*B1zu#KgpItr#6 zP~33_#uFwkpsvEgZj4SFkNSiT3!+zqDOI-GQh5t`edha>6BL997d`?-wl6CylPh<{ zMg);O#X)#|DH|(AslMu))t4)DUpz$0)Ir3VO9S`dp`!zer$np+w1GtKPw#dwsPK3m zA_;a4sVqP-Q|(kJHB9x=&=6eIUOEx0$J~y&b!(_bmRc=| zfuv}z{Ww!f=VLFPo|3MP77 zDZo-LQ3|vZB&u0)bt^IsJoV$cG{Z8u)R9=LZ?<+Xy$9ycmQt4~ zHxO-5@|-&Nh>IeT-%y7=2bSXh{^Um$qAt6yaEZ&+miz9!G!9rH&kq*_JPDEY+u~OL4oE2 z$$9hc`;JmFdcDs@<|cdxk@Fz&U~wTddYe_&>uC3V|KHw|&+PpqBbd*JaM2J>B@%x% zrN3VT3Pajy9jHy1il)?AXinj(FMBR+Hmbs#&7WMvi&*~IYC@!vtD=z+^=M6KF~9Yc ziWQhbn2f8q0b|S7Tl{2plYl~NzQJN9OIp@p`REO{Go#`m^QsnLdhtw)mrUX+Q*BYS zDlJ!qF$cX=i3UUkCkpw7D*~0Jp1ILqf_a%tt6mHFkNl!~O(D%o?O zep*>qr9jzQu4aU4gp-Nt0zALB)mv#&;CCp1M04QBaNx)B@uVxLJOq(`sRT)yJ5T=% zb8kg(MQJM8l1#K9QVO~cAl}gfi4P{{fkJgc3*+RGd`S$l`o{CT34E`yTscM~=9ipc zxX96Y{Voc{fJ@^sZB8QIeI@|MpBsuJ&ke^>+M%b1;lPuFZIi3r5A?%!DwA!uUx90u z48f%7nb`7dI=*=~317XFNr0#Zgb*<`;@n#1+9FJ}&4L=7%Dr`FTM|xfjlf9-(3P%B zMRM?|!Pxoa<(S_Tg2KL0DC`r7hT(~r9G8KGS=m@#kcZ`k1w0mDNnSn{=M`d6aRU}t zO~!(nMVMK&43jlKSqx;lxk@dI&uT*Cz9K{j7c;7pG1*+$q;;VozQw0N>QKsSP?KsA z6-d5cDFIR9vtr~DA~LzIq(lT0#Yo(agf5V-IEWcRa^A8D5h)V6wq&jzES|r+)Q4bl zek%bbD>-oJnz5|~Jar=1rxdJUVnE>np4X6>U}7pni8oP}eB;ak zBIU9x39O(xsWzbKSU{L$4azrHuwo@L#YfaW#aWgeogycYI2Ka+#5qOMoHqvRCfv=H z-Yo_aHOLd$rxeaM_tun>d3n$}{ZmxM;kxnB#l+C3s!qdy<5*#&zNC7W)fh3y>r5rVs@|6NffEObnM5s38 z#xj5e%NrBP%7`J!Ev~0O(YJzGy*3$u`4S|dx{*j;=|zvlXo8;h~4+~qawKi z(^kab!AFv*G?MY9+MDdoB0#e6`JQasz{=ZJ_tRZDR3z%gooXQY$o4Tw!Ut3&%EUUp zQTxXG;mDKy@aR25(N++RrV&Y08o5|mQi5yC%Ww^CRe2>=mDOTJX&si8w_s`QbSxaX 
z6f-NF0_iT8ZR&FZAu_ShZQCq}$bIFM$#jh4T-HUXsM|qF%DIp!^(l}7|1v6! zGQy#R&?x3@2a$X#4w-J7`%1y|K}zXW5)tRMeu4<0LDOx!FNml>o2tY>B1O_S9Z6Cp zSELglD&$V#k=KBMM9>gGl6*kv1rl9D1`{b0nNugq=u@&y)UOE<&UeZtbs+Iz;@v(E z5(g9KArVZ}8Y+V@DH@@SF2cm8PGs5~%hH)mmH4u?WP^O>LB(8M{JfV5Btf`nWZEdK z>-rtmjk^PLs#kDT#{rqAJt35@PeoEP*WfZXdnzShBR^!C0mDHO_V=YVqISE*{nO2t?@ciF)PcMmFI{Ej2I0t)BqCyWw0Hg%F(JB!BD$ zf2tEdDuyB0_eg*2ytglIyg3+mKM;dsuf^ia-5L0DkG7pANOq?Y9BKHB3Pb?Ww!EKc z3kL&9Iw6vVvjmCu^-)@{{?9S(U-Ik_?0Mn}tePKz?%Z4~D=Npj+B&SOX~5d*MqEo< zUDJwHbrZ0xem3UUEyA=4rSZz;K8x^h2|*KSw%5ObWV$JnZc`y@X(9zO22BZVsEyKI z=e4{Z36LPUu*y*2CwRz3C6z&lC>eyRM3ZR&EnljH&nZxyu3C_*755c1w0KZLk%Ngr zMAhsZn~ITm5Rq%k+*^c6!j%pjf{2bDL}a}mcWT6$)fJgiq4hw-%hr?+N)2wy_DW>a~k3nT+FeVbUhv|zPL$x?bSuX_dGmA$Av zdd#&|oix(geQ)T#3(H$?#H@HHVXeTui!XwT-EY72__K z|4(}@xb$BEBmN2vFjqICK&xC}a*ag_Ac}`@VpDG|0Ym#eCD3>9`~M;#Phke%=nt0$#f1*^8Xfth_pOB{lZB6 z@uRW$`{y(9pP$V(Me_ZrarpLV1HO8{44-l!oqa041X(wMNhEZGxzuppHpx@Lcb4x&qz-GsY9w! z7}}1~D1stk5}MLyqBeda>JlazM5?34P>m2Qyx)*C9Zjh-P!inC-;_#ID|GY!YSF=S z5>RnmaH#K4p?@_MOFNnfp5}P%aox$uU=GFBFq2an#1B*Dc!=B{z6 z5t*)SXT5=Pf&DlP1OF`&1(iA2@>mqkZVkpME`&~PiNXgjhvAsojmX9I_&{vCZvbw+ zBNPumsJ-8V@X3xieDz)dzJ9+HU%V@~ma5Xz@#)TVe6lOk_ICf+Vj~%ZNIE{;=C-0f z{dych6^i5L)*`5$?u%z{AB2VNIapa)i|gx~u%V$9*VT{4y83pkY3#f$`C>qwZWgdpWA zs*7Qz=H8P#Sb=!;sk3c)1rEX_lOR#tMDY`~o3~1LTK=FMB=(6%h(J;rRf@$US7QBy z+p%HleKt6!mo1@moM-e`A&-2{QYMm$4O~4Y5fVW^bpb%RxD;ekWqa~f>LF9;+*t#3 zbXDyhTohDSFD-$@xwhouQnr>X!%VFqA@CPeHI#DMi6yAXD@R>k3DR;ZuxL#Vj=funKb`5sUp|_MKU0lp z+uLss*Wj!D#rSMbj%zTtEfL3Gi@|}{67b~K25fw*3kTor!io1M;pqDl@y@Oe-2B{l zw9V^4a$K>CFR1yV2ND9KcU?&kKUdH%XuslxmRf^lziY+5_b1@9Gt=ZeeyK4WX?fT z@>FCGt>*8SfpETuaGpDozoo^+besQQ_qBk?8Ol{rSSxD6o6(log|WGF(OJA2t%cWM zZ25JVrVX;{Z@}!jb(l|8qD`{q*RA2=hu5pMOPPwf)r&E&W-02z1(IBsWyK4=Yu`?M1VyjB(87~xYQ|Gz<^+k%O%d0p4K zzBLYXtxFO6Tx6aykp=!Z90UI&68TM8*z$BVK6*VIAHET9rb_Z-FR3NU5WM$Tf2_YN z9(O;Oh=b4h<17`$CvTuLO4#l2FFUM`yhGKO~E!Gnr*Ee@yee*c1ZJvr%%?q%I3SqB)#k?QC{AQ(stCJqY!3?`;l3?2?37bp`QX;Li$mE3mu z{B9H{Dv^z-Kr!zZsxKE6x7(*(>O|MQTwMypOO^BjNlrkP^)XsGdOdEQem~ZXy#q7L zmvT^2DpDd!iKs7)^0Frt&BM67nWzYD;y_8|MI1*!r1Q(oM}`_T($q#gW3XB%m)Y&a zzH({#(tJ6vdRC^qt;b@dQaG77pg54|^{$7L++F-TtzdmFF;gR2g)23f|6lJx=H0ga zKL`hH&=uf8q_^2XbAxUKUzb%-g@w0I#q)1Z#@qY5uxD>8Uf$M#yPqn<^DpJ%*zQt% zeYhUq9c{!nhX|1aT#4;1#ODObCzSNsyZ!W@BAhwej_*F4hCiK~jX!-hAAkCE5&rP; zJbZV02EIAojdO?F@btzO)O6M$gz_*zDZrcnoFef-L`G#4$nT|cR6QQvI1ZnDFv-sO zm(Q2b7UA1d6L9W8JwDl2fp>QnW76y*L~(KpR!=gi5l+^Mnb>VliSX#fR$R|40pkZS z;qNK;nL7N&UR8jkL6rYtohXeQi!uVDhAN{rX)-FJ$C=`g@=z>fWa4DPW10omYp6~} zrp&Xe*NX6+*EURq(5qnqpxF6e$#419qRPXFw?x%1BfM)uSb8 z48|4CM`zJ0v=?5BNtM@eVYA3w7n3xR$iAid}(`5hFSA3nWqz z1i`REQzTWv9!Qk#D<$F}V%6+^K35lkqGP$AK$44+Avspjt2XfplBZm$fvy40>z{*N zTa+frb-`l{CjiT|kt@R7Ts}3T>z*Kq`HkD}J>&93WL`1y9L;t8u`eVsda`BIVG2t z`eO-Dn+Xw~=irl9)PgF<*22YH*z_qb73&4a1O?5?~hCs z4^A~Vma~}-CWX4KILH)htT;NQlL|zQ39C^VqSdwBG}SgaYPUe=t`bB%kO&?Q9G>M` z@oJ%UVes0T$bFSccxaB3nJVd}LQ<$g1QKUbB%XWA%i3~kL?F?zw>pvahDe;nP~_^; zVni8PaR!kn14&FTm8kUFL4=Qm@wrG}>`vEd0>P&${G1`h zD4;qiQC63KIe}6}Sn&HGKx9%TQX|?TLHq3pBuP}(=Gw|0k%6w1F<8hS@VW`N8BFHa zuEO}N$rzJ57UMD}akWSC6CzVe7GP5GTy$hiBEc#RCh?c^O$?Nektyyr)JK zH?e+0{2RH$1O=-dF@UI*gxq5Sj_OO)mRumovdP9lMDY@r8D<4{T)FGEES#%v_qgvd zf=21WmKnuC=0U{Niw#KCqr@Yvv9&pY0!oJ=-hr z>??)X^F|)d?sWSle5|xapM6A#e7>(7fBa}B{`0HV`1?1j@Sk5V#a}<4k3W7q3*ViZ zj4zIK;G?|_IP*>|K6$SaZ@pE8sq+dEF*uDYP(nnLfS@Rt5F&p3yv^gvv2AB3zW-zv z{`$pYJLliNT!=q>G?nKYg%5WZ z!KE>EHr2-rs*ky7%UwX&Oha?V9MlmoB~%yr0V9z)qyh;%zbfG^fY0|}YLoXZbI*=m z(uH=8D?%o(qmsZ{j7n%nS01mqf|ZzDy$-Xhmt!J<(Uma{qZ8YyT-te!POw^mZcaG! 
zs+U;iRzp++iiWy9*IjePT#O`0YJ$t5;JP4UibNn$fZYI+kBZ@W=F+Mdo{MtcR(D@1 z5bepX{n}%H8)E}I?$QJ#b8^sL4XIaUAej?{CWcr}2rlDBb{-snUX(xncu%fAarm7R<4M}{!L*xqn$t1 z>G!1|9DZgfc0D};o1crdod{3A5@JDe1BtHr-5L1my&QbKuMl6^qAJ&7Bj?`AG$nF& zdz$T3svSx{*rM*dp_cA@`1zqY@N|Fddhjx=SR9GPEiG8zIuEPcR%8Ci)z;2^l42l) zNO#d(&NcIRe-7_Ub;ymS$7zI!wKK_9FRU&!#En9AL><&jv6K)g4XQ#(02e}BSu2pP z0J_{+g|q@H(?Z@aM>lqGhd7nPgs_Zf2QW zRQc*Nm!x33K$7K`iqRzb3{rl1%Uawt?Ov?!z6(?NgSBxGwPu{ zznOz0uVvuFH`B2FwJbDu6(WE#Hh?g(V7nSj{yG(PZAEzP%|`tG;~50Va{SMCtMOk1 z$)7$_{YoP~dZ*C#sz35-jBV!i?#4hYUX+GdzbtDnt^mC$5CT>45zo%20L%FObMlVl zcb(9`1o8b#kr&*C^4KYa$7IyWJ(V^C6*1#c9otPCk8;ALJi3!W(O%SiO~NEp6Euyf z^Bhe0HzN}#@%b^x^{+>se;v|?lp*GdEbC+(F83sV({TQ#uH}moGdzg&GQKAAGM9i6 z&xM6nNlo!>=qgxvt8^|aHhgE1>75>n+zf) zG~2Lha5h0wgIa<_ibD`_wIejSw+NA{5qYQ-L}=xTjSxfy95m^KNc^Soh#>@GImoqZ zT<##Q$W&Lrm0G%BqU;>Y%y9!#1Jt>+l(tFZ?;ZL+-)O|t4Wg+&0hi3u|A>x6@+_e`R1(6FuVlfoC(S8(z#Q1cqTp5QQPX}3SWdF0n z@ZOVyp{9suwnpIemT;>^KgsL<)YeFx+mVFNcM2w{`23wLd`V?)E#R#siriU(NE#uM zWP7C?-yBAW1XAS;!TZnjvu$m!S(}APZPT!zeieVu)tFJS%(Ad170u#NrmdCdxyLx~ zOyuKoVQHsK1=GhWh_3xWVnH1`$h7;(nwSX}@A>n}gg}up;dSC!0v8|g z3cAZZDiuN#Fm&{8|1*$8`aoi@<+T-MZIh@%2#`>!5U+KUx^Ads^?c-@#+BSjeq^Ghy$3Fw(Q`#{;y~_sZ zHJvwE;C)WzB6Oy7VN`4jSAX4@%oW1avPGC$$^l)x0Aq8Viz{8t7zqn0j|2{UnVLG< z_PR8?O^YQc0Jd>3`_BU|9ms; zCj8IuufyNISb{&CosMr#jKvoRoAJrJl{oWOK90YUi6dK*ap>hloOwMCPduH9g1k%w z{4$A?Kpgyjl7P&TGCc886TUh#nde!I|NeF*{!WPenNa!W=xBVrrx+&*k)s@-2RBDx z-^MV$mq4t%E)%hlId(2D;2zA0%L}$!=9Uj2A&O1(%}2uEDrEYPLQ(iw0%IadBgbK6 z@=VlIfd~+_ghEaH1OlXs>Z1!4(GyTZwNV{E8Pzcp(3CQrK$(i#cs@qh6os@PXE@<8 zxZD;5p@KWj_FXZx=v-R1nHGO@*#$7+Z|s3Y$rq+hcnxSZuTHAXiDNLG*PpiUT|&h% zw`u{pIiZZr7;7MD=fomqGP8U>77`@QvCR%7wr7Y7lGlY)qc*UNz#vF0sD2(If=8ua zt_!g9^->|31V{qG5q}8*&?kusgzx3*4D%fqabOh;&gWpt;bRUaDRO1_+!hum@j#;0 z-)|EMBdQQpvR_Gv^woC09MGH)tUBEn8<7HW?pK0<0PyZRka+3r0*F%~J#H-TwFVB| zzYru|0R93aRQ&%v3d0AaqqZgiD`v-I$)Xs%_)s7|cr5~FUW=g$i8dvoK)g&HdOzJM zh-45ZnU<}kRNs%@Rt0-1PH#)KJw&wK_0dfsIQk;t^ulmF|7Zm6x+M?u7EH(N_6?X{ zyAIRJmr-#nBq-$RnB!nk;C925>vCe@Yzw91y*G#!1na}@)TA`v)zW=h0rL7d`HEX3NI5EkxuAxOLqzFvIfd~C!i5eE`3 zBzy|QVkSOI(Q`fV66fL)Jdz9|DV7SZ{%Wzj#yp7pE4u(Fylw;zxu>F4l*s#GzlcH@ ze^-l-_yD4cb}L-$Q6LT?1`;lKY|ANQCNN7DOi=fnbY{RtxFRCuL<>ZV=bV4_YA zR3^&kO1zZw>kulEyr67!CbXg}X*4EtU@mQ1gH;_j*Y(^9>}^ z$`@mD$pXsCZc`#&O0RTu%t zV*Kx4Z^C~QB!By64gN%sd{0I4&FNYA>{vI>?j3~>c9!Gp+c|jml{iehCLZkzW3XX^ zKUUwCfL*)0@L%7qGl={jD_&fKzkIp?zdt=0Umxwj=lkmM@$NF5+Ma`BuSrED;t59Q&RRz^e7UP@{4Xs6FBdl3=Mbe!mtUQTnbFNS_SEU0saC0hP!Y z(S#yS{-p#+W$a|s6CgG5lTb!zRPk{sk*e5>r=UJ{E=H!zMirkgkLW~M zcsud}8j(4y2FU|TY#(=}3oB*WWuo!7;Cr!p7X|!f`n|X^QboS|9r>G@8u46PT0po| zaK*>8P*62*&X5v}$()3FRZB3xda1=iW|qyx#B9POeH<4(o#;v%gBhiBu()9rTH;3& zB+9hPK^YZE@t{o929;a3meP8andP#xoC=Z2l~p_>3t0q6GQlB$#Bq?S2bN+aGPOx5 z^Djo}h!PYFCR8Qbqus$oD^X1v&TSFblXXKiLQ}SuAdf`L;TKI3a0cywRK-gf>m4IPsLsR{FuD|D&xIxBp*@az0{gwpD~Qx* z5lqa@#qY8-pb}-YVt&U3QYHgk0A7o#_}@yA5F`b`xfq+&j_!;x=t^nB%#zty(s&IP zQ+6)m>Te2v*zszNmp={DO6Ovl3KaR6_RgMAFdMZo9i}|wI#MTK0mZqo2oHh7=aQ28 z5Ja3Jk$cL$Hd^XJDuWYHM4xO#^vg$h{{lo_na{t?CVh1tDH2UK&ef%gciW7~XO`|N zw-!}MZ-{sxacQ?)!3iWkiNT;>reW&L(Kz?vO5kq-$xZkh70F+|Sb;x%x)i_vXaT> zy72jdW_=gO%}N+QRiE@>v8pFz+} zpgNh1nuIB+k{Tgcih^5_HLMn?gGv!kC2AXI@%Ih+4I$=JM5c)>?kDdMRa&M^|bm zrWH=d5`si3#ii|LQ(dXcdTK>{ZY+SsY7suwVzjEBL~^=A}SN-;?m&K9@om|QuTVg0;gOB${($uyx?RmEdoS= z`_wBE5zYzVd@x+;x(*6GAn`#$%EOC`co4A)_}*p^`Df<6<{zI^-#T|Y0z)qfMtV^u zUVb(MU+g3-1Pn8QCB=Yp`n7nRemxOqxADGJuBX~o7wDFvO^ou@dgpBDk5jR<>M+4lm;`P;rD?bz_S0w#{#7cb&ThlUAFlF&k@g-+nk7UmTxAkhBsc6$X-%ZzSW`E1_7vd?@@wQ!#TXVe#=bz+Y~{|NYaAR3q2n 
zZB>1|x>c6gQ6QYiTuF`07RSwzJYoSaHQ)qNh*)s)fjtcH>jQWY#0gpU1BYq)pXcb8 z-P!(2^3J(0$mh?#E3YoTBrlAdl*@f%az4>3r|NlqjdD0ACb6fAboc|FKMIBDIsTNw zz?vci(jfRMEF$|lfP?`$I`4#l>&qy7fVXTScEagEg8eLzZw!v8c#j4VoGX&1S6Ck6 z)}>yL6R+Ry`~K(UHmwqj{Zq?JbeZcR#vN9VEkq0`d#XyRo~xCbEzQ!nvqM@l4@qav znDiIT!I99FEXjC$S*B~KMVVo?Gd{*xHnPmNS_#(Vve6dVXp!TYoW@6=wlU$DL0y-% zoRVdf?DdybgQxI;H&IPUP07LiN32HHj%GNKMg<80L0Hg%5F$DyR5sU06Q+bng0R^R zhyWA`iVYLtWLmI}%WX)D+nE-{5os%*L2N=SEJ7S(Z^?|E{zF+^3Syg;iFj}rJQ&Zs z{5oB)$5@PG$C;Fmc~RITt;4zWJUC8|kL>F~;$aeXCgxUnJVAo@jv(0vkg%}MT95m( z4|5!;5MeNn&MPb!;8n@sC{SQ{N~QuJ1c?jz*kYZj8~1zwIjE*H9NDo7F#dKM0`oD?l`_)2$Fex z&78xZ^ZLkLEu3%I)(M!D=pZ}ShF_A)!}sL!=o|9V{Cjd~$U1(stSx*=vEgHb4fmv{4^faRCpD3D+N{eGxGe*X1* z`PZ-I%0GWOOMd#L?J_wZlReLtOU8rwQlHi)bBU93ewbC9Uxf>K4Z0^c#%{^wfzxuX zV?s_fAC%SlPB~takYnmZnq;x6319sUG6jf?6;w*gmSV|yC|4Z=%S03$8z~ABKM!r> z0DP^f3@m zkMx=Q-(;h6IVrSoK7&aLNFwL(AF&+diJd%Z78hVrhYzX*VA7L6Du>IN#WgPjCG#>C zqcg!r7~Ub4c@PSV&E`U^?1aw58ZENc)=75ss4d{HYF9?A0HY0s0UN$^!(|iFlhrHj zJ6j+N`pl{#Kzuea}P;|ejD@m3?gxj>R z-GgNNCw55|zUH)6lLprH?t?N1Vu77O+%FRXd>ASh+&w&O8iGV`k`~T^MAHK>k_y%} z{ph(Ub0y|x*jpyk3c--{aJWukW8)$uZ}a>5>#%E8XNBwFmuc;m23cxamTUO>zBKr< ztaMzFiP)kH6b#EzfMNMd9s?d4NRpV zf%g4Hv1De}$^6Lz`5$lY(C+2G^LdV#2%ylRd|@Y`k|sa-n_X}!0L$NII;WBir;=m( z%0BtoS90ZNU)~3JaNLt@`QaBbh;0g2lVLt`+>y6!>aj4 z&UYX2`t=_9JEoj7S-3vzt1v;DQJCntH9fbDQ;1)_?BFr}e7)3eYm%n4c4^N(Bz*;A z(pxYgUHOx87(khVbDyoVcZoH&7V$C0HY4UltN@-_U34V|6L$1?4X%VkW3K@w=VYpe zReFy|M|uYWeUr2S2y7S9w1p0Y0Es&pNSXkWmaX*)k`|VmsB3Ysa3GCtS&l|*0d&R-PI!TP1T5<_O;L8Z17-9cHg4CN(T!oRycTw_;_YzB>_kf&nz3C z^Xowp9%r&+Ft5ji`%#v~6=rjJd7yBf=hokuAldPWbnP;P^}B;|4A!g);9>I1>K-(I zjJAmIVAd2wSU_B7fgbxH80oSL=z5XZ$Adxv0tFyq+I*mdh;t!c3LhTNw4X4y;T;D_ z>@A{Rms9v)o%8`le+Vyb!vg|L=uQq59F|M{m*w&xhts`@pnqCMD`#b_VhRwMml=R$ zu6kNdcb%21a3B`}oQ^$%l7qn8xU)^p_g@7}Uezwy}m`h z`PNSP?mIi>`|s|MAH26i{sH>oU+r|+UpD^9kN?(dkNnf$X33Afm@Pl}>)rC5chcoM zZ*P-pcXmsxvfn+n-u&SVffkB&r$7GQ3iUi^fL6R&BYEB45bUf|Z z-E#5pX}L4KCbvgU$kl`6a+4Cd}$G;IcKOUDD_ zzH&i6ln?yaruSm!je;a90~h673DQ)EFgs2`;$ecRPZPhh!UXZ9{F%5^!NOk7AmR(u2@$%LR?0(y0KsuAkng~{j$MsVv$Urn zO~9fJ^DTfBYj4rTurDrVeYJC+T}_gJs-LUY_{8`~P;eX<8yKugyQ440x?mXc03@7x zN5UA$sT1H3KtanzeC)I1%;6YBqEk(WBYsbC{l84i^28B5f z0wlF&RVWSWma*1J~vJ!CMIUXJoK=TzYf*;Y<$8 zM8%j)l#j|>?JS^iMXrr9`|65RJ=dbWdoqjb>d`xL86def_J+K?#4N5?EVP~# zwwBd=+U8R-+jK(aV9OT>8LVdm#d*xn;G74GC#CG!dIQNP3?yExLdjO>yPv_Ithq{- z&Sc7)FK?GGy}m=f@!1`K$`1L~+uP+kpW80qe%l<&_uorbeed0L00-cBCqusVxeWQ* zXVc`%uWgmj+}kFdhmq!!0Lg>w(tp3SWw+tp=H&dL^K$;c89CFnBqtN&veI}+mTNm? 
zDc%BrGy)*?07$(|a}>S;j(brlhxTv?X8?%-M23;q@gxOj?YbyW-1Gt+{7559W zl%VzSz%@jddCvPtub00I(lv1S>3Wdpd2ug#ZbHO?BvqO=($z6_07wQdj{jc0r3o-; zk=BeZ>B>2**<1&TrsPQZyo^@C#RDd@be}8UU7QCz2$GKUR)7N6 z4QMbcs|C{I01g7gKw{#$8l10`(0H;^LBhtGWq@hfGcnES2!gxDKp)S1fjj2uK%lTT zm+faDAYgo~i-3f}u~DG#9F~@WBy=SkfCM13!uH*kwWS3pT9C3aK)iyWj(LL#W$^GY zQHXfRcsIgj>>sRm)1dnkx)8b(!i4LQ7Ry*HTZ`#ne<$`L%7E{)uhgMnIdC6H+slI_ z>0t#A%+fL0L$(KxV*tF~oL(I%@zS9eWVZfJ#8{tcu2;Jhh(C3L{<{3 za_R6*z~ogqQZ|e8R!A9sc-m}ssd5njd12-)xi$MaIoCA@Z;r?T`WW!F{sx&-O{$lVy?>uad#3T)BER zL*9O6i~QZEpO>!y5MQO(qwpI$R9|_0JHW9`zVzyL`Qoe3%V%ERA{VZtOGAILWNj+3 zZs{2u@KV?9sFTU+BeK*uCQA*&a;#xM7HfNCkpm`GHpxO|qs##yv!!)1Ra`5h`7t@1 zQz5;(iY4|Gg2+d6tX&KPkCz5F7jPJiQk1l>?$2Nx+0Ogcb=<~aY+~0XRA)|?oa}d_QCl*Kr~=S=A$498WUpw4+IkKr^ke8Bte4^LGU)1I7QDL zkzFg~ej9~|tF9i3%XV%9m}}e$U_@(gSq(1dPMkC0vuCpd=8Q1kzWnKGsb%3i0)NY{ zF6qe~ltV=mGEh9L8UmYO$|^!-1sy8BFO;)Oom=#Tccr%K+g)LSPt3ynCQ5 zdnyEp9Txy3l4cMixi4avxe=dE2#;vi76-Np9LdgyGb9IJI2(b33NMNx1iTC$1`Y=h zFYK$?Om3U*i?n=z_aG^JoX6X|Wsik0;;{il)ZO4%ANYd@X%D9e+f)RHo}23&SfUTf zKR`F13=x|eiAw>4N^SjmdElyCIsBpwmrUrm2E%~JMCq_hlpK{Yz+@8cM2F@byNj>j z+tQNWr-Mr+(!1~le?_jcykz)Qd2#OZa+%$ybY4^E!J1lhA;$m_!sS@|71dJPMSz9F z^`6B(XJi;(UA5afCHIjs0K|i&SlIo(4ci11KG0k;ZjM@s*sk}JUKL8AX8^^ zWbN8kx%5!9k zt+HC}KvE8n6j#g4{7gSs8#ZVnfDa%~E9TC_6XPPLW?9h}X8!`edyK31!#s zp^_TBO)l6X(Ax5Pka$Jc+q~Z&Qy#DIh>DQGbws7t#q06s<2=^ky0yBrJ_q})3?x)4 zNO*2@PU}G8U=o0Y^EijP7yP%iHrJy?cuy;&YEzwNaWylGYfrx%$m2L6V{%~sB%H~d zxf1P{>$uD}(3ub<9BuKWf@Pv=Mh@&f006)x>_p(+VGe|#P4=2tukQ+4IJU)J{5|M9M$oR=^VVv~w zj_HM2WwsnDnziNQzMUhr%p@smEA5dCVOJr(aN7#FEr@R<%}xq}KZSY9Lj__$XJA(# zM1W(*xMOvl$(9W?MJ^3yi>Wvt2**+ZRO|r-AkpDunR}fDH!8+V0svx*KF~D(}j$;@k+q2r_ zcw$m6_RY%q&Jj7&d_Yb%w8=_svmCE(l;x@hZQC+mV&j+}&5a>=z>&M^$qbt8DR|#w zWg6%#XhsxV3+L}IgPmPl>biAuLD~X%vQhyMIFcaX^BDjdaCyugIWZ3GS-hIb$9K;mGM^>N=O#+)F?hDOBG5e|$Ku~~S+66EWBwS`TSnZ}Nseis&fzz1@fIuA? 
z0GL7p(+ofa&>=WD_2nh1G=K=#Pyt6$tnrQU$TE}MF?zTrZsP;SLod;X zX~!JMJt${-FUYN_&&ZhrH)S3cda`^}dj+1Z9G97@30bULkW-!Kv;owGgSVtVZ$kFt zi?(`m9W3=pxi)fFs{!Ahe^*|ZeoI@tFEleti!cF1I_O5$Ahy1}pwjt{OR|tSFJm<; z(w4<6u2|qon6;$=*aiM|VxL`q21fuuuxphx9Wpmhc08CTJ3o>yJF(1B4iz45+;=Yz z5e6uZU|IOdBB|L_0()O7U4TVTdb#vwR7elhxvLB>T(Q(YS14tV=gGd0=Sb$CXD2}< z3K9)68iY;$T*=D}xD34E`eY?m5|od4%N=Q8@Vd4dp<=`Q&q?$e)kbglF)6Ao1G*kOaVR@QBK6L2Elf8lKv z3+L-M#}qf;zFiXv9q_LXd<|vHexJ(WGO&xId<( zeIPz$J>E_A$12gbHI=ZLv$eBwdGG~5;x@i?PRUGcS|Ku4IxHhaL$chwqGL+W#HR2i zd{S0A)}(%Wx8xxRb?)htbNyGf%apZ|*R|mN>gZ?SHrC)w&TCJ>%K!!o(}3H(_QuS+-O1dLP*BtCladjb6zf z0Lq>ylB%cSY@XdO)td?-TQ2)lt`t1HSF-taqMZX;dl8cd$@+8LU#TF$al!mX3WGGy zRk5i;judyxa?_xkXg=g>mN82U5P^;NWZ6vhzo;BXxP55GZ}xRCInraXWsGAmDRZ*Ljdw8vctu3Wfk0sp5WKSoZsG zs1E@WDhVVWARZbY1SGg8mRV)toE$NgWwYfE+Fvhbx(r zBV`LRSTZGjIS27&pK#e&jat6Z3}DcGu(w|CL>jl68zD$qu@4&{de=etw6kzLjZ;7* zS`I>hz=d=`1Py`2uH*@)_FVuCpwX3SAnBx2$-w?t*U4o-g5@L3)=KOmP)vl0mYq0| zbY!+k6hPENGi=ylTJv1~RC&>l;h@$W)X4Txta;l=o6sK$pgbOrr*@C|Uz}=_@d}sG~ zGtuQB8;fEZx-OZ%ep$Cg9V^4M;9LHI?dgP9*BJfYeW@Wl+QnR%Z zJKAKtaz<8KPQaZ^%VfobobFlEaZ8xR#q>DJ+U21a<>K%wS{3-^#dqbx;k$CYog*f4 zkgF@uwE!jri1rvv5h#g^GTV4o`irKd^qIQAm9UW_^OH5`>I)nO>klg*#0?-}o;I^~ zIB!5l$nw`nb;qf zgS*QWifmJ*fqgFUt- zDcrUZG@=UAfTH`Q$o~fZ_wmqJ|KGFUPCqOe#8kSJ8#`T!<4mdeID_8QFW8p5Ow zU+XOZMFKGC%sK#9!ojH~q+i>Fz^%}Q03vh|%%);C7OV4Wm0q?;cTU6#(#?TrwOm4k z&`>c8iv{a+9_A*3DV+tgv2YA~|7A8-f=)$U2q1!KPiCu%P_UX>F8d4X=t#7T!~w*z zxmb2$`>8w0Oi0ttW~qFZRfC-)ff(a_+)J)iLin<670`EF}z&CLCDQ@0D z#KdEqB$+9f=4Q(dUL%TcUfF(!}sLQ!sq4o z?Avmt|F-rJywb_B6AUQF;bu4h7Gc7yEiSLHFg~#+Q+4c%>#*#9qDpcA5^YoBvbp%e zSztP!$ME1r$3Gigcd zlwLTJ{@g)0fF1;Y1ZH&<1_rti0z<3wYP%3t=OsY6J<Ao->06=i3 zIxx?6AWW+c?uNCqxULIu=yusy6tlEC)nxz_tjWcF;8N6W1OimBo!!p2;ykT@2%*EW zliIEIRvnn}ie(&#T^=BQYVz|w3+jF#VVrw^kOac=p8^S+W-w0fc))=q{n1RUD8fMo z5)YGLo>Lr{3X-UzAfO}pQ?7)%?|wL62oZ`mhiM8(ya*B2y$(7*ju`e5DhHgET?jUeep@Ixel4AZVX-EtbPgjrmyQaI1XgHrt;A%Lc;?<*f<@(qga(C%H&F)(1x+be#SL8$wbyeqe zG(|d*_B8_yVFJ)Bw4T@Mj&(b`@zq@ph`=$y%2K{cuy*FhmyrQL15y~6JuFgX@UZ^8 ztf8AtHG9E{`!TTktg5K)2N5rSep?Q&N0qu9kq^oW4amOE5!4vWK4$6q?V;#O^7J?D z+|vZux65p00?=rXxzbvhE2|AbQq~~T0LeH&GMHT<4bK(AVc{AHJ)bVGF93+ix_L2> zusnqz(Pxe8v*Qd3_WW($uMg$-wY2$ry+@tgQJ4J&(gkZc8NRr`U#|F!( z`$J4$uUYSx&&B_~m|s_G%m3!~4ESG6f(Mlf5HJ3BaSc)VYo;@1N00ottKHfJh#Zgu zIeiH92?T$9#lwkc_a=aauf|JR%XHSrvhGc+Rk{NSv#$UK%g(awD+7raAcEk|DIE^e zW;Pa`O#<6}7M4!^yk=nmIuzXnod&bQG9b9e{cx$mgkuCKP#$4Vou#txQFAJ6nV$U85)!Gh-k!C;qet*X4j|bJy#Yuff#QS6 zVBn_=$QG0}>&VHL89*6$uug-kSL!nM(|~6C+th6f(%^iqjv!dGwOS75wg4v03XBbZOK3~nX7?<^SWybvhny#l7H{0 zg5Mg)d6*Ci?+X&Q%>cq_D7|i^lk-rK#GY%A?(81v%j(oJ3v(gXM^=*-q#HQsKAK(s zR-CdNgykh#7NP)g~2*T(gO#=`JO#k zjyMu^B?=S5!=@}RVcCj81>5OL5^2rS1Q!+G%*Kaw7KpouIU4L0AlA^(pb^HbsDe0_ z1d`AYC>R_(eB9G@8Vfm%% zW01Hhf=cjJujz4}orCKb^b4OTmDXLgayYME#tN;$>$Uo{LTv;Ry_blBwA_6LQy2IH7^El(BoWy* zNa119?pOSJCrw+=>#mRY5$J<~B=~Jz8a|6F2_}D1p}Xd^;PEPH*x4?fySotNTcwkM zpAcc25CDRMOL_Of7U|l^P$9y0A%sY6SfEaD&~Y#;OPzfiDhB6NFBhZ?L}OC)4=V6JjHb%0wl=_ zL83(zfB?mqXFZuv1|B`9p3Bl#Ifx{K#9+dz+3a0o`vV#G0f~b`00s()+jd@51SD9t z>>t}6Jx<|)AYL4Rgg}U-t^0-hC&|Nu0iD7_ru1RceK>fS=tvy!H26bqTJn@#gU{A7 zn6M)>{wVfC@gcDXV(mPVwaIX9K2YvMAeAai;B>Gb^JXWiCION)Io@(ors4}SRz4~H zu-MEuZNXR3c;&coT#Si@$ zK#4ydmH(F&3YUUUvg-sa12VY;44jPW;gTef1leQ37oxrV3ke~lq1-(OLXP$2}| zx**`Yd4&LftMwr9+f+z{0P(V8qWk)E-StQA$L`a9({SqNqvCJwj)xQ^Nc)3>gb-ob zA3KvRm)PbOY0vCKAZ{}kfp?8Spv&+*^;#jiL4;XQTCEq`n3bg1bgu+DKh_1stf#TGG zbu1rggIdyBr1IGq-b=<*1aZQ`2XDqF??${7BsPr#!w3H8y6AcZ0pWpNXR%GCii@T!$AO2@9+U13QLUUzPY# z7-+)>tw5o0eVp=Hl+xYcm@w}U+wh=!a{A3|cC6^A}l(oT^<%OxY0SC=)& zaoL^oW4XAO{ZfqIvuS%w`g3aKXnu{1?T-T}aRUmR$i)5{87-)mLs^v)*IHYwS;l)p 
z8k(lfdsz*%=yG^&JHG`?2IJ`ZB>8Jjk-yK>bx~=sN1#{wHBS%_t08H{{-?i%!(`7HcBhoA`O?nwXJsEH%a2HfB*JlAH%+`Xu3sGkR#}cf=az|!D+IP3%eiGW) zxn@hf#xMqF`%nfr6e#vFYn?Ls@(}|_806P$9Y~xAk$hGnQizy5JWQ4&Ls>UjjEz1% zGA-|57&Dk<&Vkj~#wY4X9^EYk*l5{CVUQ182ZO(E3%C2AZ%zY28`~^so4f;wrsI!Y zFFRJj;rC=twk6OTodHIn37L zOCEwGIe;+BD+-K_<${w7Ru=qB7BG=!bU&TiIv*&LkBRQ@(r`iApL6%e^YBN<%Ym1q z;+YERNU!0^%6R|)1`SC>K~$E3oR}QVt&*|)n2tp<28|ZP0hpL{XW--EQwXYBKEiuM zI!rI300a?vutfLs&ym+<+f$D7*PNoz34}oL%b{?MNSh!qXA@cPL4x0l^80yth_K^C z50d~S_?-!Ae%QXDd;A7cex~wh)&VvFtv@o3j!FsxzS~!r}=ix%I zZ&zlk^kub4Uv@$cKz&&WgU22KWjA0#H$q`uPZmG}s93oP=i8*4+Joz7wO@dw757rV zty$vFvrxST@27QXqPZXxKoORWB!h(eC?HY*;=UWd{P@+Llq<0!Q*#rgoSZ1&L)XQM@Svf#QuC9HL-QN213hAFo+iD(<`9eMIHq zqf^Krfb&5f3=H@_50A@yu-=gpHj1XL|*%#iz^td0yTZ z0Ez7cQu)&eMx|qN3a;clKys%0oDOz%w0KzN0T0&JVxK?9npS18VOgf)b21s9mot4= zWxVD%f?7SC2|I;6s3W&s9K0h}MqZO^W3TH#S_H{V{gN!U!kHw_0xDJ&_*grpkhOTH zGr1(Qji+R&d{*jrbg0Wvm^cUGrQje;nB7INJWjYUyUS{QS$mW)TWdW?Fh#K8RKHqX zP)Q9C_Y*;*fhzd=C6E~SEKr(|rYa0%JT`$9csTLtjI@2T^V=dS@9>lG&aO;tQk zbPTCxOb%P12Ux3;4I2V_>hl4iF6Vja#b~A|52_ zKnMx9j|Yg=$TE#o$6}w3d~&2LTW~NC=TWW@iB+ z3KBSyZos5Vr!6VV-v@042&U>v z0(gY7EUonb@mdEGLWFLFZUXR7F*_>)3{xsp=uYgtq1YojLBhW1sEt9wtSuyD*;D2~ zD65f0C@_#3L;@IK-mIT=#i7-FZf40h5{djGXK`FRLAJ5v?4h?VKz&uF7O=P8J%D112}5 zH+MveKN*vXC#z+&a$3$Gydjr|?#hkHH?%g_V(VF%tXY)BmQ!*Zj^uc!X&Gr!1d8^@ z1;ETUoRCqZ-II3&UpEa3lK>(SM`EsopfR^X(UE8&JYHO1PGVJuDZb#L{MVxbMFU!L zU|25;ZiI+;HTpG-;Gr;b+rvOECYnW!~=!v7`zD+ zbzp8^y-r@APM!34r{ErW&jt<;4;#yb!aSZ!x(0r~&F!y1FcCJF)8KV+*$UP<)HmKK!h|1bn6h2ZcCEFQ aDgOr!45v3g517>e0000Px#1ZP1_K>z@;j|==^1poj532;bRa{vGjVE_ORVF9Q=r)dBH|D{PpK~#8N-2HcS zC0kbJiypXCQOBhBMrov!_uG5#owVh>PbpI=DO2gaJ-&{2b;_Jm1{#{iLlqvH!ZdVa z%fM|seW5Q8`n~~l_cQIreBU=$grqza^fQ0FH{Ks}MC^#zv19KYF@NiubFQ_X$fPn- z%9o@_E0m;Csz`68SFNYqi`P9;DORLh#J*xtN`)ehE#N&xyyo%cO|y=9%y4|UP|*E7 z5^J77*}e#WKCrl+|yX0~zZ)@8#} z&JC+qb4dM@-E!&^T~Y_D{mw2q@v%-h@f{s<{5#rVZBp~?EpqhRVBg-Vb_DA*ZmapW zHoOO)u{|uUyOYwgGmi6laXuH8ZmIitx72<|r?hVKOXy^uR6Hv(k-RE1iSsfOJ14X8 z6`4(}=86LnJmovqTn$1lzUF=^f5lO}xs27Jep{|l}c`w!!|Z*9l7*gxEV2-~;XM~^## z*R+57JjbO5*V?|rCE?>~iPz;NSznUGsglHM3lch(7T=+$cn?OzeIP83y+P^T8xrUK zh_vnS$f@t_kh+hx8vA&wepfD=o@y7zZijgHImNr*B_1qWpJ|t4-*yV$nO6Up9mcl9 z_${{XtFe|x)Xe)DaesIY=frcQAl(Nt(zQPe%Zc|`uSDvGB#i$v+dM2o?g<%k4NG5F zpY*f=Dy?Yzw9wF?YRXEnF)O9!lH}?#l0FfW6fAL!7J-E&Q4^MUO-N!#!)j5y4j&0i z=x|6vhk_C~7=Zc3f50c6{eE%n^NR!jaoY|*?mMrvZ1+gVPA`5}k2F5nB`3e51=cJl zzP(9~;rkrJcj2-I%j4fxFQ-1I>o))DP21qIB&PKZR?cg zCtKyz|I#QY@OvMJo&5HCssDJ3v^?D*9XO_QhXYH8v~R-__eIw(w{-1x;~Holymm<^ z&ew`_)qiI*o}~ud2Yl{3q#f_;+U3K&9v0WZkaX?wN(YYTc{+E}Jkq_(i~A=aZtQm* z2;h79r4z@s?R4Y$aO1agJ!Z|@U|2S7gCD^)HQ_g+e`$QW8|Hu?aY~Esb4ueg*bdt` zmP_o{?Kr;anQokq+v%V1UihXKJde#dj@!9x!RNJX=j)Fc&SS6h^O|`KuB#Q+hU@0i zUT??u?7;WzgmqxOUB8?8K0KzK|Lqk|(x5u?IIzO47AGGU}U`XBS_UFFyOj z^4Z%zD6g%2RvwK%kXw~Aaw#<|Yq5%)_T?mXG$b8Qxg~*zVb+E@**>|E z>6OcgvRsT6Wj&IU3xR~(Esn`M%P+~htFOr`6Bp&#>Xh6s4#>?+k6cOcxQv_&BxTVZ zmzl1pj5J38m8c|+rtu(_C2^t`*0X6Rt=A`eBz3YXNxrV_l{mJ=j#t#8$IEJw<0Xk4 zGZwBXO7KV#4^Ccu2XnBjcn{Dr;@+ReO_&nLzNDIKUs~KS=iW5dvl2Lp�e}Oyi6s z>c%8|tWQEmd*BetcnIMn4;LldFec-n%W`4hb-6e9X?ZmBwmh18Lmo}j-jw@O@5t@x z_toysyf4q6`B2`z{Jrw}%7=1i_yxIAxg!@#H{?R$s+`YXl?%n|vY0$C#pVHV!2xt_ z_v4|4qpGb)u3=D;b^Vg6ACz3vh?H8#C0pNz{c-8uj+@D{F3Y;%kTz{|e>cu)?4!rf z@SwsRShwx*{df;xMsvXFb#L>EXHQsqJ4R(bc3!4}%QEPgfs-AQV$*>1vPqXVRN6-Io%-;5;{U*R2p&vI_;3o2HYM?6St&P_r5{dwpuHk}trh(5 z8FB3ejCO~`xhJw|yCZNeaF#nm*k^6qON-+8C>(UeyayIPQISFSSs4jjkUsaaR2|FG z=USCP-@1&1u1cwM4Inuyr3snwj?0vL3^pp$x*V5D*O-hsM`XBbM2294ooF<)4@$x#U%ACYM7uw)vBC0{=znc6;y@jgD1!~K>6d@^wKaq;4NdGNj5`0e@s$Bq>w 
zd#Vrb>6hH8ek=#1&^V~SceQ;8pVub?9sT%hS{04NJ{j)llM&b`UXSASPVN`=s2`FNLP66dHPAJ(Ab+7V%u?B!d4vfQC}|P!jigS*p%S848`2 zUgrefH=yt5`*^NAtk_t^cN>s?{GJ1jQK@2C#C^u+A&Yw|*T`j6GN-DNQlr19;CHqR z_dy)KB7OpPvLZ=rOJEs4$=7Ck%rbKf)?o=;n;r{GVtW#PhwszZG_LD;__{~Zb-j{O zgWsZI8%?*LvbMqadgEAJYZ{ho?3Yr@Ain>gezy$1TdEGe4}OpP3(fpJ@jt`Lc(zLL z8TkhM|CVLy@xArD{2luAx8Qmj@7L?Ju7mrs4R|l^mmHR9>p1$@B<{Z?UMF!}65F!y zK?F`I*&|OJ`L=`N-U;f%QT5`6zBzJN-oO4OdHd3L!x4T`ZuXs*OW6rI7pce@e-_|K zO8QtpT0Vgm&nKJ`-0PM8hLD^F1kUux8;-5SL8u; zQf_9efK5r(V+C0YXXUIfDf7;#OmqZgurVO{8lU*ka&hCPp#<;9Lf!&^94GT{AN1IgQUeD?{ET$>N6NaUEzGb)p4f3@w^x zJi4^EbwI(<%(ln({nN(d(DKB0zFFJS+3TX<2~H0VJ~k$uvMR>>9`K zFe)R?QJM6NsZDss0oO68%`r(gOh~d}QsVWK604hl0veUz2>|5eh(u~fB-1o5rM5}@ zF5~$AL)v%>4GYgcKoSr!Ac;xjXigi3IXIgF?uRm*O0{i31_6rU zjvhdwtU&I^dVgmxKtW)@I(l)8F|Jozd*GCM;FNmctl{kOzot(XBziQ3|0M>HMbQWi zixm++tbpy7zkzRw0Myar*oI>g z03|JsecT_xe)^;YLBs93#P3Fj3@6RcPS#nVfu*pn5P>D|jKq09T$^ot9n)yp1C_T$`n_(?WA*(lTP*w4?iuY=#)z|XwaZLla!{V0#&HQB&3Ff-R} zu9N5EHUft#s*tS66CJybOZHe)I-Yi;m6wO}T9TI+UY1Xuen;*M-jK`rX<3W+%h_O2 z7CdR0=!i-3s83p-f}HzK$U9gkWLo;LPX-#pGSV8AvG%BpcSL2lB_h3bKAG-L$eq%J zJnET|+l67dlBy^`R)T3c<4eh`BPPSGL8;XGBn5fx-{q9(5hxQpcALE*8$p z+2n$Zc_#29Rq?0ci^CCcNrz%u?4c5IkDc*y)@t{WHb$HkTlVD8`&P}VR+6e+GnfhV0ash&#m*vjn z+j4*MReg}7?|`lv*ntC+5i*)2M1t5`&J;)*MJAZIcA;45KlnwkxKWH z^m*2#-?O3sQG-eu!g9cOUQ%t#;yE!d?h~iQb9`QW$EGEGVpy-4kQj4M0S;4u#H38& z-XuU~{gYVYyEvw$)G;kXt{IuY>*?Se4F4HaTDEyg;tkUhs>AtU(Yh(XV;l_?z!a*a zg7=N${~W^qS%gwy>zb|C81HK+5oI9XaaUXQyKI-1#hC&HKT_%k}0XT91QeM@Gu{@k(9*sph!5r5KEdl}x zC5ihdR+E;{kp#{W2gq^?wK8Br6+&fG0#JCJIRXim$GuhoX!-z@{_auf=@`cS#a6ay znbRRA;0$AMrV#)ly2S_#`Va*NOhE$R(3ukyF#sfLg9Q7GL*{k^9x4?o5LG7F&i8W3 z|BkAJjys9>(81g4B#qxdL89-WBR4Q1WazZ@Gw8fw1O`DuAE;)jEp9VV0Wk3$)BKJK z4D)^k6t9El;b&pHen)(s?&oW)$5fq|_n6;++qmCc1HS`7qzVc5W&19V6jM+n&b=pO zusMy^zh6TMqmF*LTwIjf)eCYpKQC*sJ~CC%UIfOSjb z$GW5iOV+Fm2^28OnQ<;-1nGv~=8k5tZacyByk{LUAyCOUJ0lf zwqgcxBaWytiJlmM^R5CWWhfq~6TpNaiqYUjS*0?8`nU^t+#3f_CTOn#Ca=p)Diemg z0F?W)P$ny1l$XxDFE@vtl?$covYNjr=L?shQm)Fy$}KsUy9Q@FCJ{K}5?b(t$3*y? 
zOhr~@GQ1*FVY4j2DLZ$C6&yBDx)_Su!7vxM{bP`@mC8Q_60UPU*X_{R7LdT4aMGbe z34nt+1U&R;5i$f3w_0)OIy}&5VdtC2q}V#4*IR5Il>z6x47%pgq8x|w9D>ut?|Co- zN0LV?wxj^5S};kS1pML1wt~d5%Pc<%l5VurjoC0Uii8b|QpbYyy4GOlr0QIPElMAj z{mwHAmy%;e!lxF+cVbcewWq~V@iED(SsGq`j8AEFw4_Hk`QczC?IKUE| zLjezT2H!PX2M3A$3`>;SrX|rZBhgwwgclQ<6)O5-Sghj9han14}b@ZSz} zO-U3A&UYXVI42Yop(9zSk|egp026=&-^;r%0zjwKnf7-M;yH#g?jC>{fQ&F?MVLT= z40raZGNJP5hf3*fgHwj}v{eCXDiE~p8ySj1D;G+FA*?6>5;+)?@WF@#0g=EyG#2&- z#J2|^+3goEj36OIXjCNxN!KRL0gnS07*ZfQfVDj$E&S$ z7@ea6!g7|hC|_IK7~LQ&;tCW325JMZqf{sx>*g49oN>tNboI4?ka6O0&K4->q-{l_ zs)WbG_+E9^FmsGCZcFf*2>~9DB^8M(lMQ2o%!Kj?79K+&sFL7$`MLNGR6Tk={XBdx zE(wYaU>I2Nz5LFqvS0?T=2(6YLNMPjsQHuopQ;sv=7&C7*EpPY-2$+&9-Hzac-MwUh3c+>!qkflP5S?l3jd6EQd6C{L6mm0%eNpV_XuJG{z+<3!KCvfgq4<9Y#Gm3b~8R}w~3oX{b zMJSUOXFrW&UzCfb zo3aKavX%o-idUdauE{c#%7}Mf`nsoO#J409A%Fz%n1Cvo1W;yT=cS5$z5_|=WDaAe zUmQDw082<6juQ=rO&Ay;Jg!t8S1q<}g_3>TrrF1t{a&c16dVv+j%1d%P^GHh<8zjp|Thjt9Ae~O(TE^e$&HQJp2VXxe`Fa@Ky;e zWadZ+5_~@>4r9!h02sRh(n)9%GI*_qeccReVVfF265##bHZMKht5WGYBRyS!O4poJ zX`OS@=U9{;=c>e-R>XT^2>>BX79?+7t8mJ2=5WRZJpVa3?)1s5B#)&f4kZvh1m6IFgb#$(6e4>9upquG zzprmYkr;?Df98>H-e1o+x0qY6J#sXS|F5K>C_;pxsuGn2R0H!S3KL$7rHBfRWo=04 zb)A4hAy5v60k@cBYXKA-OQSNO@%=fR+gJtL`ZPC0nDhZE40mM#FgjFqq;#5eu64#y z5+ugC(s>#X7`RZi5FTdTHXAH#)EZ|?#cbe1Q>V>yZfxVZZJ?CUAU4ifojtB2iR(~c zSf(J*W2sWGq>2GZs7_R2!1$T~$wP_en%Gb?xgZ-PcK*e@*Qgf_d0E#Mqw2E0D?iIN z{ocHG+jP58PZ>0jn0PNSwAH!&u#B~*(el8JgNHm@KOn{Wy!1CmWUM196Pd75ZP*YXaN%{6BFWk93+mt z%%vpsINxEan||Dc;|3-t`tVtV2^0@=Cjd(7)DV=(x-6Gpk{e@h$;0WlS6DG`cW8z)8JNqeldFAu+%E||FckCs(RJtbT36lIJz~m~F$~9TZUy+&U3P3_f ze8#{85Sfmy$xQq_9P})p28Bh3vzMU{#?Vdp!v+aehfy9jNVWpuTiHj!v2}gRF;G5U zI8MgWbGYd#u~WFGSP-D<11;`y#}vS^qzY;l;9H2V%N%Sfx(b*qpmmB?BOOg6-iP;6 zd1j$fdjJttB+Xkv;sYoGFr!SIR2>QuqeyI!s4{`sAR$yjaQ>=D90bW(faJ6SuD5*( zOJh_hxy}{w*PSsisa=v#!@NWq=f!_&Nj!MX{6_p38YOV-bnpQJ1g+*MoP6X^3_ywC zo@a$t7A?zR3D%B@vt|aifOFzpr)DJymD1ZaE8|`?5L{3s-X)m?V0$|zBzP!gfh3?t zP{(W13V>19s7!jtWXi`J%&0_HZ0>#+=JcyG7OL~AJW@ekxEMi@Ss6~ zSS^1l$Q1J#H7SW5j^WxP57D+)yKg2(AbBloudz?}Qpf3srL@+ufTW z!E9PIeO2mMQb7{J z^$>LF<4}Tt218y11);?Ih#*mQVu7Z&V?YKR45LyJ<33|piT9|s!s*bd9=J5l-l!Gh zKxtdoH3Ws@RGnzJi{UL8j?wp_6|ca+{c9hq5W#B$2RdhlzPODBs1Ou5Hvyuspu;Cj z2n?*JnD@XnQ-u&Dw!`N>lS6?IXSfT-P#$w7dMw{dkXUdripBm81|a-S<}>hHaKB|q ze5S5jpQYa&*PFILQY1)PpFSuswBVpB zI-h13s}qhXgde4(ttC1Tst;8s3KT9k;iBt+O%qg0qZ*(Aa5Q2YfkTKGm~_b?Zu)oE z-<1!pyf3fKUXvGw=j7Sys646+%KdV`+%5FU?R=ly%n!=-;<(%_&&kEyyo}@FVRA{U zvuFwvTZt4T$RyZ{h{H+;c-NP>~d`X~@fjy{^by^qh=-1SV7@ zY~8bRN8{#JB36^Ja6nEt&`z`#4M;YDU_kQ!iFwsQ(CADVeq(ZxN#)+o85x4o81tV6 z{Fuj?2QW?pe5*1azn~yFoxG$r6*vQ@(Ff<5z<0>&!$7&7ha>4}CPZiqkFnB@VJbiV zC+0v12RK>+1E5flKpkkvYYRv;;ZCD!z`c-fo(9-X10+jQ>0Fdz%Q&14TgI$f=|`)0 zL<-m+Z6Zj{032r}SdZVgc|n2=1}yPYlbUSxGSqY+ivOJzM{$Y3S%zVu{V_OQINyUQ zsE<*A1Zw2?g1C+`?}GEx&PlduT85m{G6+S&T+t}h<|q_exn%_BOKCnN24`7rfy(eq z%6xcU=7KYtlbIqwaQ_n|Q&v)c9Li)2kQsFjD?kR@djR1csQ?~Dw(uKS{9qijtv(V^ zA5HcrJ#$S{chXAu|GnVY4y-+}AY-s1PRocs|*BH!XB^3Lt(@UOV4sKHGdAK8NR|@$+E% z88rSjycT_3#e1kocub#`FKe>>)W;7@-kQMsBMl$BUjhCA`6;ZF?NiT0yK z2?8WGKx|{xmLIJ{-yxH5XP$)SRFIeoE-eKHEH;#h=1Ghqv8%bd80Olal|W4utG$Qq?7MQGFqwK4KzUW}j=wIqC*C$N zfik&8n4I~nynOC6IPMj>Sh*>yxyy1M5LqwZkafN;T$P#dvJAWD0Fxyd3!H_sKBM6f z1<76m5-zDoXbw7A{8x1Tn@Z$c+2hIhZ++YrC88iPITHsQc;rY15ABdtyJplWPKQ@y zmWm1RmW2mbFRnpTsq*iWaxhp|H$Hv?M^B({YphBQfon%ku0FVrR zZ-x}@-T!K6V`N)I6s{<*4gT}VWPPL81n}V>CnNO zWVG%4t=9@6Yy&V%h-ecU3KE92(AeO%kAlO5uLuG-XMBbNf!9Ol&2^aOL>Rhi#5H1{ z4GrCo>$SmTUSm6thx*9lJ(_!hv4W4kC38r&5@LR413vuzSgLZ->)?3|NO(O2iK-o~ z2qTyWgne~N34r9t6Z^!s3yxriSIL`i9>s@L@j5IGA2VSo+ZeX8tpzs%TVSkbX6-Sd 
z!dQGe91g+J4ktwGphJSS!?>Mr*$NM;7zGZD=k=q-vznfe*JjSks}pAdk_CBja8_Oz zm^MJ^8^ij9yf8Q|&-G8r-SV(pPL^ajkd~>g6j~}-4OxUXav`kNVo2+wAYlpcCP~QqVpL9H9@?8HI zBsTXE7IX}>O}N;{nFRi(BJn`^Xr)#2phAr;>uCUQF1{*ru{D`sg&9=SIG`~dKBs+7 z77`a^HgXQiWd=9?fP#<>6je5OIP#ioXQJKYN(d6)21pDXwt~bclC2=&Isk&>2~#RF zKNM-IVN9}6CFzDy*bpA>Ufj!h4UK9)rCKI}$7RsHEZL3~@z$M@MAM@5bk0epWnLon z3sUKrg~P^s(SV6Sp%WmiZi*iPs9-Tbg?W+yly9hZ9OqaBNS4F}h2w>yi8P#+c+&#* z;ogC|8S|Z$v+2wDZ?DMN%mq10U}f*3~P062Q(PYQji#cbUX#Wfajb5p|O$5CDPKfPGG)HEFHtVnIM0-$kK#G^RNe zmJY{{XW&39($fYwwGZN4RGDyyfD(-=Maz!ipnKZ!dF=!6fpnPs919$3ycg)W>1geK zD1ZU>+3LXt$wv6cQVMAp9X5@x&G{IrqLc2$HX9@af;ooU6g1dosNxU;&rBK5jEA$ACtU<$0ml z*|D)mknDV7HzcQ9ojF@*xuzlfh=bxekd)?+D2S~gs}{5_JQhd_P%-Qvm%xX!EYUbC zJjhIVvcnqIUAO^R%|obAov?gZ4WH45*KN4TXndWjiOTuJpgiaqm4}sKdDJ^9&jTbc z49&_*!!z>o@HF5uBhU6t%AMkntjCM8zkgU2Xs-%&FX-cuSsN z`mDUN@}WGMeN(OuJOn^)113-<~{|!i43a%$x9` zrQ`o7L>_-_6v^f`8z!5_f7A?6+<<0WDy_q4Nsr;d9MSUEQO~?g2G0N_s{r3wnGOLg zfXWn>Q=xM*4TwyK&SIZwg=bGy;Uw^X;eKK#MYf?+1u*~E}Xtoj|K)AHS z;aoPMp-MkMKf&s*IR1O>tO$orRbl5&s7P4Ip%6Ki#&Z?LeGru}VC^TYVJW~eU7Lj} zj%k@P3k8^@FF}p4%8NjuX=0vEm91?!Q^Ja%p;fzh{|*8wRY?IjY@HJz+KCTKhJq9b z1OuHu*0t&iW*}gIlEt|REN%JY7zGK8c?Y)eH6LNUhfdw}{U9KW87FVl2~`sJ*^0y{ z2D49{y=AH*^jfX+=r)3eP-2+Sz(~)l-;I#7KFd0v0)+4YFsMS9YsuF4>vr>7;P|>e zX~KV8NR;J?r=Qp2sjxZTD=5Gk#S@i zA%qXJb0>c6r!iR=J(ff35(;2n6z7F=f>UC6jHV8Y6+f)@*ohl-PYB8)BMaVwtVgqQ zC6SjK*&ex59*_rs%A@MIJPW%IHF6`@C+o42ENLH?q!jDYXpsXX0Ekv;*#NQ352wVi zmV!h(C1%CTp6)QW=1DA&988H9$8!h`t>R*`o%s?f5(PaH zN-mRt4KXWGd++4%3%vvzHr7(F9AbCq30wnh}XY!idnS4`j0U}f=cc$Kj zGWkGWKmSE}{`5O?ec*vyWQePDRV%refWLtCx!AJS%bT063Xo>-dmE5=_S%XhEZsW{ zNa*|@hleq*8i(^etsv=XAC*BT6Ue7!CbFVst%L?) zGVWVK3w%+-ToX_zqh5CUn}7ouz&(Ti1yG{8N}b4PM?{8b2$C#7;yn=A1PS0^tBx)- zT(oLyD@ZmVgJW1GjHL&^OZZq1AObZ4b&>BlD}{D;JRAp5Sc*FVNX$sJdrpSki!$t6 zm1Nrr6vP?HwgCXGXC!or{XE8{4-Sf@vS}@O#zO~Z5jzYuqPdco_POxy$LF!f`l&_n z10bOWz`uJ<#v_+xDRWI$i)a)SF3EBl3M9D#h@9Irye3fQp-N_gbJ{Ou*g2-9zU&La z#CwItH?7n`**Nrc?;9QJk(D<7x!1%d1r{<1mgeSkRLK22! 
zGqqJ6>ZM4KJoUs5{J91sd;pmsVtHn^aaf&j$9Au(k5;M$+n#pm1IdnWSv>HCCPKtm z9_oa~Y8%s=6F)wT9jXW&RyDBpU1QaN28|Cjhr8g=7y-1T!+=1f1~&p)bHx*38Ey*6 zOjiu5BO$Ayl&nXxauI-`oyYo;FC!CON$G7uYYPufxMs7&*G{tgX{tmrP+qh&AhMZg zSBL-}PJ+ZTvoB*3?{=~s;Lzkdb0q*tH&l!%3pPe&5`c4xp_R+zd!!b>V@;2N2|)sI z`443A`H-`WTj6VhTo zl@4rUXsd(=pShdfj&W5=<<=p(YmPrF+2bDwzASJ3uhM7lIJ2G z&=8!R8!c=XK8vNVIxGwBi)`DnRK4pmm$@mcrCX{#ID)})`huLn|ECavok^{3tS`ow zWj=gbraY4})IEgX6)kHxIQElZrBbPx@o+d^O-ARSLNby+nU>sfIAjG#OfrNB70027 z<~^c^a6N?GG53Fh8=_FaF(Lg@}g@J|@^<+MAuZ5deUu z@IXt|^dT`2*|4XeJOGbYg$UuX2@um|fXhw|gZa_$Nu6Z%o(apuj%5@cEPEzEoPd}E z*Wto{%zPS4i1~XIn^=Wb6n0)zbplI3y|5#tmxTgQhwNNg1{|~>3g5##4D%-n5;}jV zA?zn`*s*bdQ0W-b-&*q&xE_TDoN%JfB-6E}j%@}MMvpe1&4J#1Rc9OrBCTZoZ%IAxKz=!2JY@ zhP-f{c0qu-CR0kx91-D3xbVBu$7_y+$@ZtHNS<^{Gg=9Zo3r~r!&`;sQOPw9s#9id zJ|9d?(o!|x!SF+Iq;RneYZ-tTGe8;Dbyk4o)y#UfVF(vIphOEpolv#VSuhmEp$7;t zmd4S-fRuAj46PS8uCG)3nnd@xC0FBjKhK>yy$6R+R-H&%+W!>mv;16T*9K zh%iT?RazTLgh_T&t>xZoOtf!-2yGJ}P$X8xmcFJsu@s8#gR|y-yPC^=5Z?_-Bnqby zB}|}Dn0z;iggFxc?EoNLKY|uIU{ZZqlj{%X0F=1BkuIPFc403!IF1|&EJui2M| zo%n)B(5gLxd*x^b_gYmFP$bFvaY;8Y;XWnhwkZoB3o_t>LUPQ?03f)SyD5{I=b%bf zCEv9osrIw@kH-|+>>$hXQ>_?+qsimJW4^?N&+Oh2)Zd=r-jWp^E^K9UgJR518Rl z+{P@B5F&(!LPX7kxC}_T0f=sC-v9}VcQ(xi3Hy|pWPCTy?U%@rw8W2RRfTw<>YzF?iD~?Q zFymwy@-q38eyEc{rx{SSgoXo4lhX_hG2EsQF{X)nIAP755DJ##<+^d+Mh(!}YX}PW zimd?Dp{rBp`wUq49)%YjJt2VC+|SokIyRupdyE;?LIpwP!#oI82CfzBNkR_3mI{R6 zxD!_Yn~v%b#S0Psvq8Pn&h|on@}2?q@4pdBxEtSeS*U=m=a6oM(liuS;CRp z2B)h1M{Kjzi1r~dW=eno6Pg7Iu5Wa5Ll-edZd(8>N|AfbKhkk;eYrABpQ zhq??@IDjB4M8qO!{U3vU%7d zu4f*Nrb)>)&q}UkLDG#=l4+WjVh4M;FXFnEaNSUNE#oqmx+&|!Z^~fkij-Yzl69<0 zxNZUuWkpLZwL%ErtI`H%(>X4^P*?0&ISPfioV_fUdT-0c@^x9wuFE-qgTvquAWP{r z&4+N?X@FyvxsWhN-JX(RIITX$>|3i)J&f@i;}9k#mhm!t1?53lP4**a4^!m{A_4LMsM!LJMo9R=|RZSq}gpegMP=aCq^%dhnZ@p-?<< z=vczEQtKI)f}|VZZ~!74)xrTYFk$s4;b5Lw0}`V|3`p#p36t;4mzb(9suz|G+dbyH zsCujrD#POuC=FJ2DFEQu)$wb;6!RM~UsP@?Nx88o#d`QQsKzuP%X|=XJ}hAl;8}&edP+^$Y{9J_yuK|GK zc}(ubz=g2UVO!W>CrD~pv_eqv^VxZm*Tmn^z?8Wm!cebAmxPOfBd?Vu@*6{>Ji}x= zt9R%C_Mvrx58{A;jN|82Nmy!wilTlHk_eEoK$D|tf%>4@FvlBs6tJY_8UPP$&tRR2 zOR5oC8s|vS>Hw75J^%!*LXO}IfS92QDo`k8@xgi1LA7lp=1ql{CjV{24QLy;Ylk=v zC9xGIharifLNu&(ka-a*63YM$ml~YE8Y{OoEXBAAThZTV^k+}hOB#sGKryeN@X(njy#%uR~}3;%*7IDZwNJFjXa1>?xhsRn?q<22L=^EYQ?&^3of)wqlXF3OGZw`8^N z1sRUskfQghgz6^oUuJcr+9V!ww(16;2IpgIa=v&2@VFzFs!$-68?u7`W;uIF&gEzq zin?2*()4;x^_(4g>)a_8l`SL_=A7{1TuN z!IB{@&5f|%2#ldE8zxjDooMiNJZ;p73-IvlaN!sij&W_kq~mES4WmYEMZzSzZAO)t z{0U)V3_!7tZ)Nz)vZjx<<34q0(!JEe1}&UF9Iojl52eQ6s=18ctfHm`2z+h*eg!l{ zsNSf;g8R|X*yDxbfkLS(0Mz*1wSsF{dOC)*Y`6ebLa0!k6yaoh0Qde*s1yzk3Akuk zEkj6f%tpyDluxj58$)1l zi!lQs+tJ(SGzK0cFgcntZUbm%;^cb8&b24Zsa@6il+zUEH z%C;x=Kt4h);^AkwiOF^j53EqIK(aMNRiLTL06;YSWvLB<#DWj|573wg$Z+^;H4c3Y zkN_a*Izj}_0+t|16e2JV15blO;h0%0mvk~*qDgtDCgxc}+=d&r4N|!ktrnIcYpiv{ zAggx-GzUVE7$suoMKY#R3+jPNgk~UOgCwErn;?N20hF8@DrFNe8z9*Qr%WSEtkU4k zWfE$K3HTmWB!MFwi;FN}zJ&c-s8BeLQd)oGnFN)|OLBAaJ$X3)zTBUIGSLthE4p5n zyOXcW?a8;GTmX}q_vG0n!sLq^3HbHmbsgk!Ex!&2$zfG4!(m;}oJssx1rT9B2Yhe9 zkcAWd=998>*TpM~(o?m<$|I>5QK1)8Dr?Gb68uvOrCG54<^vjs%|jJ^Fl3@SzTI+ zwP~!URDI-6Bo!Q4fWzcMpuUa)DpV;O29UsMs_Gyt022*e0U(isFbgD1sxud2@*zfv z_!TBFZey|?+u8ZhK?Q zWj3^E`r3k&F>YhHj({`A81Ud+43FV+?CVv?DP)XdGNCsDh8@;7JBk*eNH|AIIn^sq z?0e#f#E#~bbUd&G%5d1~a@&}$b5vGVWNDZSrXec}GEB%5Ae+X})zId;4UtoWxEKPY zA5`Ty`cxsdL5=8qBZLXZW2yosy$TZ!^Woha+8A=7OU*bmQ}ty7#fAzMi&LEl%a={5 zFLNSkH8Ug%Ev5V2fC6n(jo45DGzbqXSF$;DiUA1|?3ynjEOt|suzCo#8$dye-!|^+ z-ks2pm!AOP&?!ea4j4xZCRkV+T!1qLY!2k{$BfHt{Hm<0P-o^bqj4VmZkxX8w%QRWm-9XGUeI57+Jn z=$wEb)dl;K7&T$SSC4~a3pBPG0YY#v4*_S*kRnYx0b&~y+HCo7l-qtdSBA2f|DYlv 
zOojoMY9~vB+0ejmaRksmnAA>;@nakrIIsB~R{L>ibb=;uAgUec@@$FM=OtSUrHK|b zLt8prIa-r67uL0p43(2!k5MOVt*&O)z+N8!!On-whd^cE+3ThX!IG6(G={Y}c&lfpTfCeA&pwV^P@qVu3I{4cX*o+w1AnDj; z4cb`uOq-7BGarY%X2K6G*%h>8SBh*kU64gc?zsqCl5;ZTn$oJ8LNj4upur*2A6Ff; zA$<5Cekd!1rC}^65P*ZZ5rTx{dMQMh8-W^O(lf404#LPK)d|ap;q*C+9$G=1>%s|1 z?a&r5oEdJ`PTbTsRNARlo&jujqJ;}E*x@W4S4-hx8TPrk1X#GVLBh$cHuEK10vu|5 zZH7(R3~yn%7m5W=*`d&YV^)i(DNGbHCj7-*iVSC^R(RnUhQjvZH-UNf!SMTV z5->9v7?d8a*>^CHb*d_U2Y`}xs3Z}9GF`_8$G8;RCUua>A=j+t4me)|tGIYSvDY@k zScC>Av*H{i+81UYE1tqoBWy^ZCA=@B!(p%@DGxQ0g*sss4wJ5K+#jLC99fp5;{q;J zu?jq+NLm5J6ds0U+~14g1sUlWkP3T8*A=vL7Q+6K-Ussh{G0e6UX|OUFUZyYd)RhcLBcW6RyfEee*0DY-YZ!c0g@-o0i4)b zne;)SIVW@gL54&s?W_#qsJl#__o7t|^+A9T9u_#LK5XT|1bPu5QAKi+uM-=J#6SZ| zn{yjgiBr`Gk99zeaA>9WrvQadKuy3DBv2LvLnD?PErWSb6W-c1zTP|* z_OT9y3(x7pHE~*k419TWjlHs76gIz_uyyo98?-= zCUj-@Q_=US8EEL|@f_B9>|+#8Y9@E0hINM8xHKTK6iKili-+P3xj*r|+#A0SyCe5T zZ_E8rEQfE(&AxTHR#}m&rDa*qFUfLZR>$_KK;p5_NcNPaH~PxO0%h{@nNQ2Dk!Q8D;(0U%n1H{4hJl8; z*b6>%1`UBR2_C89_b6M&d_@mnK`UEU!f#v@*WQvi_Z09uXJPm)V2)i-WV`TN>;go0 z;XCdGNMT(&5&(M|mVzpbVaflYpm~RU(*$mQCJb>iGuC?|kN+1+_yh|N;u^jpG}u{? z{fP8`hH*RpTQ8hj;uvT0%;KTu%zFfh8HAFB1FYT&9Ed=1;dcf|jB+x0k~RfNRnEY% zPy6SjR9{qxlp8t7A0ATt-t0%h@?QdkN%w5?oTM9PB-OxP?;LD$S|-95s?+6^6gcP$TT)LzvJwuq=-wZ0LklMW}NoAk1r;4TX&%N*KNZ zRC2g~*`LIA^mO_*BzVkb_=`X>a54MKsycL(VXP;xeP}7Ndxa@-WJT*GMSQ z2o5&>2x?w4^ECt+XFl!5{Zmc@BSWo%V-QK=Z1aF9Vb*F$*;9jj_* zMZ1ql5Zi*<&m|9-6!FK6$!r|Xzvp=!d+WjcrvZ~s8JNs+6yZ1I&e*GRi$H-Y(Twsul-DJk3yOB+<-%4t^`1V(p<}5hLgJ{(~)KU?K^kGq2OXLEVm`p+P85%hE_bb zZ;R?Wm+jj^umF@Fme25YSUT~#Ya3htaOT?scpb*ENj*pBc9y;R@t?$@z6S9h4nS=r z6&mdMPDl_IG{S;;6;9LZ(ta*p?H^zo3t`>o-9v}LF_|)W5ZFHe|2;s$l4HU`tF)+A zVWw1_of5gvk81)*@GvfA0FuChlui{TXAM7IfilWAjY_(S)mk$;&j`oP%3#^k1ptIE z%VO@fT&zAYqYCugF`+7eLczg4AXs06>RT&a)peF%Gxss!n~`D1nDpE5UaY2y)5KseM78qz+ z()%RVVT6kV;Be9`pm0g0(!qR(stf}Mmil%BG{Joy{fzK_hQk;hGYNJZB8?ww-!KA( zU{a9mbcz?MhxwQYC%uA`)G3|YM&P*F?*obe3YY~P)lNEdDwIKNH~)oB=%og*v_YBB ztQ<-e|4omEya09l4+L}sfaMVS(IWtSJ%M7>kD1z*c^c+tU~HTyG+3tHFN0mf8t&3C zCX^^ULKbk0nI4znFJn2V8(SF|=f0VISJ+^if&;JZ@xW}gLAY$F4#MLk`+}JMEoLYe z0~BuKakjG2bKx^gF@UMuVi;JjiRWi-}%YPgLbXtXEb;WjX81$-E;eL#?>EwM5J+=e6O% z5SE$&h7A?_n1s#&42Wz|AV!5)5Yc(pZIIAP+RsE`qChDas92_BX`xl(2V9s~Hz6%k z>T6VqQ6nD>Z`lyhv9$mO7tI0+=NwVnn8)2fWGhVA5z%x`qyZWhI0%c!LR@>`SQREl ztq?+9C=i9ou|9l<9)P5%l~o!pVl@|)N%OeOC9cW2-WTP@#5?k6;e87xSZA1vgFQ~X zYC0`Wzi;}YoVH=|DX5kg;P`HuDlQKA2#~NBJRKmF34d#a31HE_Jq`d6B$nxGdz%FZ zg-1l!J78Vg(fTJawnH_*xUcP*6fBAT0P2pgxc6lMk`erG{Ti}jxQdE{ofge-El`wj z_5=U{!wM~(10Jsl743}=0JCFXm;(wPPvhRnN)oMdg2YxNCYLV3T<(8PF0=Dz>4uf> z05I}b^mpViHG~S4$C>nbnU62aWN<---B2HJ7=0YFgaAQHn4uNsK1|gR;K9nPQ-Im2 zEF2Z{9!Y>CjcZFwAX`k+YQDg%zZ(}Dtx z0BHjhXbfR_cRKJ{Za~c|xf(C7*KOt<0)W_uggFi-&zX!*9}VKW_$711i_h~w`S3Hk zq#NgHW!URG3_zN!kQUGHc%}<*>Q+d(p^n(cjLQHZ$XQ<5Vy1H^Ncv#9?1VZ2R0tS$ z~%Y|rHX~y@Y*EfO^8lIc2q%lt&jqkG~C8(#FC%I zek7gv4>d>PJ6sQ7RONh>!;x4PDayrY$ylr;>#>Slh?Zq7oR@Qfteo*?WY&?8p_Uk0 zQfM72Ff54BxDK9zvdfC{(4j1mDn*pFljNH$?&L&TWQhpa#;!1)>rG3UUJhfpd3 z8zid&u!UZVUM)hHvLm~2bZe%M+Y-M6V0>=r{^)29_8JKJ; z6T*bx3LNHe_W+5SNxu6460PK74|uF|7QXq!by?|oQEpB^jQ}R>Yr+Klqv_Y>(aali zA5ghH^^V-IN`oIRd`@0j`J%jh_I;?9=j2j_Fo9!)17srJ45@NmhoNV9l6jGiXP}q> ziuUceM!=&3a3KtgLLnFk5`YlLbU?*)DoF610g^62q-$GHI-bV+pN5)wIsr>6R2+Lb z0oVj!GNg)x>cW76@L))i`7X2E+-4MshPnu2>~paL+2I)OfduY}lmdnM5L2ZEkWrP` z=fXMIah9P@s+0|oB-driKMw^^lq?)hfvvAr#?t3x&~sj9leYnn=b${Emn+qK7AOD? 
zst*ey9DVsh`4*Ne=e+`eT#%W_lEQ;i+j72e=02)$98?|#AW$Don=$wjLsxiiG>nB7 zFM&}aNNSS`6(-VEmDDjG0>}WG1*#G%4T6M;bA^Zn7C?nL5R+WD%_P>NRz()edub6k zK>~z>G13{bw3p?)OfCoaKzZ!*0v-U#VVc({5*sAD;N;=3>BKvj?*KsT$|~kR+&j7@ zeb^%d4M7=i3CU!8P^LP9GT9c8kw%{skGaLSlWGIULP=z5LipZMe6Jv2u!!UIS#w8k8XV;g{i<7lq!02tIv1mI&o5l-bxby8`;|Is!K zNH76zhr0M5u@Z~U+=dC38ggW(Pk;qb830rU6etzxqY)}i1WE?Ke+<8;0W!x^V<*(! z4*vg8J}~BJ2o=tYqUF2T&k|wwyk|iLd(0CkT0YEbFaSq$4=`JOXr6Z zH#;|ChobC|mxi>oe+kqIAz~>I?f9t28913Gj?qvv_Zu~0`nOmL1jgz#9ij#M2@s={ z=*J0?0r)ew0ZG^HW3mziAVL{g4Hx8mq%7ydfJO-Gw1~0QP)yNvX;%_ANL2b7 zBAWDMr<;#LWD_J7OtzSUM3d{ZO?W&G5Ev8*!&){(HWZ0v3KInhNB9Lu01wmo(1660 z`@#SaJEY~_00^OB>@kqQ!5f3qgCgPJjGTg3=Xc-FdEE_62oW1D8!!Ptx>(7@*IeIi z=6mP7?>0y_awxl^_`DS0!C~)vC33u9)d}Ikalg!*@(j)8Ab}@jHgQ!}Dlf>5aj22` z&&Z>>_ca0kEFhxl1Yo&6^|mJH@6KZ1;%DWhbDxtJ7vGUPqtDBwp4-|{@d6y8CgI@_ zIbPRNat*DwL2>O2nMy5H7Pw9VV;jR&0qFoN)Cdb$=QBoqbZ#eDSS4k`TY4;jq%e6Z zp~}R$w}S6ChVM~^lP5$r%Z0a^sR$!T0+taToL_`F60;uSJQ+0{5tx<&Xjvp+D-r_| z&5OY7Q5bAR!iR7vy)N^SGg4&?8?CNk=afta76IyYxd`QPZTKZw?}hpRDEK=u6u7De z1ws&G+e+@5oXK34`Phn#`Q~MSm0tjeaZDz3!q%r&UJ)Mb4A%hV0bnRJ>a$p8wRKwr zFgP|`5x^iws6GgiVy(5E6if5Nc6X~HnoUzoSDkO2pZ`24wPJl>ww+o7d)mSFcP8Gsr zI73?=049CZCxeYaneGhAe0N9|x+8Mh8IeU-L>3$onFUY?lRVT(XpaNub>TY&q+A=3 z^2vx4kB1Z{k^K${?uJ?c7`pfwfJi44h!2nm?r}A{l9b=ZmPz%{VKi{ULh znQa`mfrUDLHKQo_nxL_jhHcz#=S((}>D0Lr`aXcp@;Z)$n4CgC|h{LFiDkD$>o*N5^+D3g1WpVU@1 zJ1VZ_*5Mcl6EooBLVQ(kTqm3{`gl@w?^$3PJ?VU7_(0jlf_Siy9z9eRSD`8{LR~<8l%U)=N&u7P z%qLNSV4ZX4v*PP?d_^V#r)AJJg_bQ_p1l7|>z1Jk9XYnS$0{kcjEUQZthIz1QHW4+ zU|9<#LKRZSJVzWY^SBgGBuuDF!(9YL48Sm!hQeSK7CRVbU_3sL{VC=^xKBe_%!Py{ z0mvo}<28=w;EYkcHlZxPmiAJCxOZ^qcTRQff>Ln7!Mh|1XvCmEVgLx02*DEGN0s0d zKO8l~Q=QC>03xn!PDvi}N?(0ICR&3s16a&=hZP`8?x>vg#$?$S$1)C>gk`+hCq2iY zL=HM6bqGrhefcF1%fga}+!EdA0JOR#w5wZT;&__FBXub_BKxs_Kl3SW9Pg9f`k+*5 z{gOIN$N?re&a>U2&99$h0858XhtuOwHYOR*|BZPQHUe0N&h2pQtnMOAw8{%W zVIDYjBP}o0*vo&(y&fYeL(QM-waz>$OvK}De zRMZ^iX1IGuMjTX4tU6;^tx+%xBM~fFsDezxCKTxCH91chC_oHQIJTEPG>hg+tmEnU zd5j5**_C4^N1~NkSem4}*=DcX5Mf7g8&u58a9Ef$u8GyVZFbLg&rye5?YS)1`>xBa z>Ls~WT#yUNL0O4bF!vTa93yd637)<^hv) zmFIPc`Df<=leu>_0k1HzW?Q^7`37L}DZu2@@@T=p<5E$>3v4N|svQPLT@x|@cxVNX!h;S80HFhF zLM&j!S($Y>q{($1gb_=f?GO%*L#nf!l^q7z*MksYzYiwE zeLK+F-$j_XC9)UJngD_04p9N@21Kl|78B_XyoV~Ke8MjyO+lH2Ur>m2g|D$4-L zia!BM${ANoX4(QWPzT3z)F~N&gkdmR3aTOnh-8kqq;T8^g#x8=f}t?xR|pO#VCDcw zTmYOozUQQ0h8q1cQ11myOdcl0kl2ncd_RV{2p*4y&kFTvz=ZuxnBd+5llC$EkDQTm zTq^uep$_^vb0YswD2v{fEth?mJDy?PzW2-sgbjV5`>&H#f<62@4?QH7D(Xi zRi)7BGv7gIsM25|l*zRa9JX39p(!3)QXtqv!kFP!*s&Jp)%O_XVGOU;%;#x-MHS3| zwC!YQt0GUNTT}AF>??9};JRGS&B%p#ubc~VMBSt;debuR=J4z>sh$c*;g}D9JX$HZ zi9GC-1P5TMazHJDqZW!nZ7VSBbtnzXHuq71n7P(B&E!ME8%o47Dv@JF8TQW0!?{=F z)7QTspT724dGq`m^1}Q>xzWEalim@D)?_x5>VSfQ$m5EHITKEJ#jw|7Ac2`V+D$pH zhO2P=R@+FlTT7FyH@TF52oe+0GBBYM0Yog2v;!pVaMT?;StHN1De0B~@~b{N6ITnN8Eix5_GSLAGRP3A+3GU1t$ zp>Bpj>^M5ZAPiYx$qs~DlIJYl?Eyr30FVkHqK1~K9m=X>PjrT{Od{P*rn6HaVN%fk zAjUY%Nm|2Hrr(Dd;>600FrwJ<5>bdi3Gf)t4%6aio+Nw3C;g`gk${W?B2#TanQafs zLRVN82@+RC&bT9T)&oW2O~B%^;Yjp-{M_Qt9mgNT5nMP^J!+*=qXy zRH#k>6wU!tZ7HkuLX9$P$&mzDKFz6$GbeHYO272;vv7V^3K|7#=9TA=D-4M-)J1Ty zx@(}zG*ooBdzKjE{ZuB*b7*L+jbSK)#f;`_Iv2v==#BHYz+r{6n0%*7F%ZChKtx|- zojDO3Bu2FmQYO?zGYNOI&4j*iPAHr_8fR@#vpV{3swE>2Mjyz<{Jg9|g{+3NvK+|B zqKCPVgv`5=GSzWHGON5~;hOyKL5%&JJ=37^gV0knzRmYS(n0-|~xbx5DgWKON?_PXY zURiur?hjv-D}@g0u`&&em3 zKPwNXUs0H7XGKDUP=V2zo=vag0UCmXCJ2~c2nlD)?An2A-%i7|Zf63V>k;fT)Nel>SALB7UeWYliSTIsf|aTpW4KAXKP3yBq( z^e>=Q$dPvK{vIqBWvi4eOFY2MCUM@wejjj5T6qQ4(F66dX?&f-ekRqe9LOmq(Nm_p zm$?xQUD22aFm@R5ZjD16XJgeLd3-_%yl!5z=@JOou}u zNSMp0S|KkTx&~Iky7pz*&u<_-GB*{3KhyuJIrOw0r>fj0Hy@Vv0f|8RRlmx 
z?{`AQtIA|j=2JHSljlv&WC6;Ap)SppK$Yka^k(GXd($QXf1k=^>%^SP6G3BaRt2&*?>!A!X-C0=WdO$tRLR-wRloyEHh4xjRdFBGN0k#rQGF01Xfd*tXsWI%00zLJ zp)2M(jPhVvXip>5NJBw-sY)8TJ-cCA!duJIUIP+VVQJ5FT{5g?Ktd=mna&P~rZ+mv zcM}Q{j@)aiv+S@ImRtu=(rQ#KQ5?$=d_w?85KG4E*;CyszTIxXfPFyVRBOC4+78F= zj^R0v%W^O&XZ&a|;`KyFSo-P-6qkf{Q;Fa`?47=?TdS?gfX{SiRL($UoC~D1KxZYG zRy*fULxDhfK#efGHQ!CZgf#h1&@eR49LYefQ*wv8CAy~@5OGNrKp2AxnZhwNR(g%M zT1tbVt5ZHzAJvmyDIIe|!E{UJfI|lByfOuVOtu-23^sTbHaur!FJQ8>3k}~+@eqJe zSlOcishspnwHE5-R2csa)L>JuHkznH2$QNp1TcZwL-^QJ^fDQ)VN5PIBx;R+c^hrOJF68Wap&YNt^AmmIN~L*lT1Q9nRZg~Da8d4{{#3!eEC-g^U7 zCsZc*TvZL~(5=uCj1HZBI=HTC1g3cmsEY(aLZDa>vCV!j6@`}jT2NpIL!F}p;9$;K zl?b-k>SF^Y+(ww`XAkOpBs$e@_;|Zq&5mlt)-p?b{aIOpo%UsA-jkL&S6U~H%AI6Y zj}sv221ICPrbLc!#l$9u`{v+v3JijT6us}C*dRW$;5*kkl1}p^jMr%RS3-jib=lX z9I9BX+zGa`6D0E`VSqRcm}r>mFvsO$0-i&XFx17;U?>w-b1?z0{Y*ZigFQZ&eHW_3 zEIl2bnQ&tHU7SVR9b;xaK?zJbHBrm^1+%3J~T-7|zN-f!HA| z0}>|FH+rg{NLWh5RBAD-l>;CM5>wJ^Iuz0v&SHp)ii7KEE%gOVG|XlCim+b^`;jCx zHxkrk3`!)tj~xw7Z*(e);<12E5O>BK*9qdzM>4V&N#og10V+wr1sr1 zS}gTtUL<|kC$o;2oDV1EVk{$<5(QY%TIS_aEHCRgANQ|@0G0s2!tl5Ukb)|iY!3kp zUgWJu)+s0a`d>BIWN?$F_C%q@Uk~`>tb;}Sy zG7aERUmD&K#hws*eWrJS^0kA#-r6H>hoI^kL zXwHQ9!#M7XNuA!Hznf}>c@3%%Km_-PLWEUUgba)jQ6p%ePg11MVq6Y*j)? zSfHTN*nkF>0LF&$*eL4-M6ezLG?>4zhk$3*Quh);7XLjqfwY+=u~&frZ^UQHPeuaji0HOu#dDl5ZkRuFD!=a$^DjIsI7;W8I&57a(~$=jE|OI|wjo?IV#C>-2zHMcJ33m2hK%wUg9HV>ke=EJqMKW$*q@ej;QL15O+ zV2NCBdny2!gaPsrKw>KrK*3TMboxf2SkM3{>|s)Lh!bm@8beli4<|L^m|C_mwB_Pt zRtgfn#|sr2!Gq2|1Cx@kACw9IvRv%B2SsvI<|0;abLR+Jf+oD6@X(~WXR2a(AWlq%xtFTmg)KY=4bD_zF5E^#5E=^&=iY*Os zF}ZGcE;KMdD2p1mf`OG@(*OrUWK-Bj zh!8mA3~f=7)O%$BYNWr`qgKWB6b?D0bfjBGp+J~(VS;}a?;ppx2qvng>=Czy&ipJ1 z-q9ubV^A%v22#8Rf@`8BAR`S7i3O#vF>S(9Ot>phSdqv6CKC!1jvQS^|!6co9TTC0vh|EnCXAba4ANqC`fpW zWd=?rj7CUpln4VR3L1ro32$+q=||!*ITH8&ld=*jLD~Wu-ITWxmADg-zqHZzD86_U z&Y7VDPPOZh)~B6t6fQUyk3Qf&v;sNVmECbr!&QU_A)&Sjl`xhB$Yux&>i`E;2O$#R z)VPN?jUXwsjKVR#BH#7!2jq+Q|Al<|%ID;@g*)?gi}m0p z7zm3^kQn0ti`{?<_nD+RU|}7vAfZyAgU9O)gR?ib6(R-zkJ$!9HqBNXo5%BfR3{8` z@%(0=fsCzI5@^i>80-PhV$(q0wybj$(Q7X#kaMRiHUcX2D7S*3PmSj%3YRW$F%tO zC-Hw17zPxkYRdwN8n@f~jDlf!%e|jv(?*e)9Lc6pWtg$G!V)~lPKLN=phE45x175H zBJ9&DF1(jhtNIV%Sf~*i6^eFnWXUhiN1*Xstcu|zTE(VeDYlHsm~UAYlb2-FJ*{~T z_75;amGo=*W!V~bywp^Im36|XQaw8l5+nuT@a^nGNV7W;5*%hsCTm+Ad#F=o7`4Gk zrIWLeLB5kOgw6#%Im z{7VLkCm{3nD!nJYQaI|8G?YtZcef;HL6LpAy!T_2eBTIL0HeJprmR-a3t0Mi7=fT+p72X?T_o&SDZnSCAu~| z!T^aevu^hb(bpD0G?`9-XwP&r)CtF%(lMtjqvK?i#odYL<@0yHM?SdmMS1hg^YX&* zg4`|i;-<|&N+)E|9hY%{uXr+oYb7u&FxaMmFxzdAaH+4cUro2e;NWSSAh95#VJI60 zTy9Os@h(UIa{T4BiJoI9jc#cF%YO|xs8+T@#)k$6K=Nc5 z3IPy2HiX}m`4+E+pr|@H`Bj8d-JU22McZstCy+bjTy@6F1)efZyOkN_4O zDn(&(2#Ul4iGhd**TBM$5FYFy8N&6J(E#C;2%I*0&@m_DzIi;<185oI!3HpjP#Fb) zqHwCD#u@b3fsp%HiB+Z=At2NM5_S}1egg`F<-A(fyHS0`-sc?iDh72C+X!FfPsSB4 zc8M>+z#i(QlYk3Md#kGg+0siLW~#M}+MqHq`4FQx5=Q`wqakbflMs#zQhK+XrEcyAK#%>oz&xlWj*%bP~1q$>)j%gGX8LP$ZCgc#=HGa7{xXZQ^2pwk#3 zLWgcQT*vWxw3Vg3P$Ts|4Qo{a5vq~WVL)+jx8x4GWCZGi<;3#9b*m6U5d027ai$&!cHk(D!8E&?iZT~I;*&}ef&1^_1JPb#%h9Ym8I7d7$D zTuBFQ9B`SG9zdlG&=4jYeL}3Tmw^gEq9HD-TLQ&Eg~QpigqR&2S?x#FOQ4uxSXlL! 
z!F_=L8TSG2!G5h0WPXHD8IykYTjFQnoD9X4nmE-h6W(Uv z$duU0lJ+&RLslj<#@9R#RgT@eo$xSlF+J@GAER2Bm!V=|vb}k`(}3jQ6T77dQ1sT( zKxLT54)HpB+^@Yv9XniF8N-1QITT6gXx1#%tTC@NF}}qJ6a|J^SE$$k0X$eWMPL}^ zVZvByhuGSNlcXKyw73~zhnThE1$k%fRe9&^BYA1`jNB{tp(T`)3*j_wjJPKDcr&E% zfN?S;=0lut$}|I!Euc_{uy;8_ON532iOwg&&=>b{eFG*00USS6!X`*)_PPZJu3HeX z)yr1nF?N}-0SnJmBOK|O13q$|leRZ^_dGS610mQt3(FJQ?KDOIfxK+*BI01_8~;!*_@P>7hh zYW%|Kel=9K-K!ueHglH3Nj(=Q$KsIm{LDT&1U!fZ0D^-ga*EpANx%Ux(U}JswjxLv z!eTxI@E}Au3=o%89?Wr!II8mexd(D}jr4Oy8Ehm51MqaoEs)(Tzm-cAD^1P3j_Fcx0}AgoME9}Y;811%mfod^k% z%waE_Y(z_N*8*uM3WlJHaP)a&00~1;=R-+Z_C@8i0}$!-$!xnn0Ey~`hVvOX73ET#!z<;Xbked4rLzh}vKplF!DmJDu%Z_B zoCaLx9ATO523X+OC-EFlGV}y>!ZP5Y#(?xemGqu)Nfv4(u)R}admPdSVhG%iPi{|4jR8FJW@CUm_U_OjsYZS5HsPw=;n9Kz;X(c1xHN%#u!wP`5b`cxK9el z15lr7Rjek|h5N>;>@xjM02HiqN@bSjGSo$olmU%0Kth;k*)X;lvs4OIFhEk~?2TGJ z45$Du{p~$6(9Rz79KbUzS>8Wrw3N`8;d}c!IbN6 zrxXa1r=Hj$uI(O)9Pr|Xgd>I}4*MjupAW8EICrv{fTjyAO^>!p?K}v_y&@=BqDwOn zF|bgx!c|6f&=H!@mFazM*%oNn^M^2G!@eH&5GNeLcc6Pf?hGu;OCt+%zgU&4@wA)| z#x&kLgBx%V@-%(aExtXRp$`s*PMJmrObO$0!ZjGjo4O*C!9`V{R31#Kv-2VQkeFn@WcF z0YV-q5Y9=%v9}oNGGQ`?w>XrF>CEUq6qmq3{O(vcvp~A>d!_K1_&ip>oZ!QX7T|F% zp)ddzE$MCM0DUa&1xOkIX3oe6&=gJ@MZz#v`fymTjjYQbe(m4N4}b0lv@(l{ZzfUg z5S50kU^98QAL}4{qXQzlShYE8Ymel#mO-q+9@m)}d4`V*h$H zqxqM0z~Ta+aUq<+=Vs*+wqFLY__>!E{z~@9db|u3M1>H8laI)}D=M?jh(cwCV1e_W z08~a>19-kYQiRe7?dp)w&Q2-fSs!Zf$t3nowfS{9#=a-@0Vo(R_PZnvMN-0X1PPVL z9L~wm)`BykBXrDB6}7RNEGWYO5c4+WV_wNo`RwmfD056U4(wUs&?-8Lumu#rgvQq# zS4%tg5g3~1Fu4v@B{YV<2ox;sG1yF>6snXSY-5!dfTBaOl%%Q(C8s6e95<||xheyl zBdSmc6Dpw+=k&4qlTZP%Y?vbwXX=@7XGn|vOQ`hA3K=^rm1Yll?cB(zdI=nJy!KcJ zEC`;Bz9M*SfMedPDn!)@LBh~jpR@oZMdnDJ1W4LG;ga^J%v6~)uCp48^Po3!RxFKM8U$UE2@qS<*}(a9*4&kv!G!QuxO9i%TitfY`Ba0kRG{! z8~BVXD6^2?H)#SW{&7~&S?PPIRFX{_@@w2V6dk)TFxJ_4$BA=g$|&hAlWb~ z1}v!@2pTx~$H1@!5ZuloR9M2>MVM?T6We$!0c630aMJhMAb|lKX807dPV)i2krO=t z2_ekixfj2$8GfD62 zapU(P5U57nfQW0CU(LDG47XzETC~Co$C%j>J@|Z=E=@0YuW;h59wtkh@KAA{a{wfB zEQxhEedaizNI0*zNwS;1A?$^o=d6qX4i!cL$1-2>-t9L;{_Foue(R^dDpSERiSG+2 zGzbVP4Nah*V9#``boZ!HB&t5F3M?kmIm8KvK;ipLe-Kt-S*A&Ks1fYXa>hdd#Pl7( zwwe&m8N#`O00%&VMqt9S*nX%H4)?x;CA(;m*9K(K6*C}-WaN@s8V2VdN@!zj%@>g~ zUNp+`3AwtOmz%Hj%DZ2R%Fq8$r~K*|J=>tqEP$nG1FDgr(h%Dkd zPJ7}q#}16_djiEU)e+F=d@LS+I+pF8ZP!Kv%?~j^!*HjVLeY!)D?wFD3e?h=QFt~=gN;{ec(m8J^30y0#%}=!EfmVSowhOpALXx-GB8<#`e#E%s;48`IN&Nstci8ctdg3GIF!~-?r z+D#>5MhP}U$>TMS;Sedz#jqG;10;COk%u`{#Q<7xc>p2}MUrGkKfolE&UaLOwNbm3J&Hv&ikXX<_*c3GayS0 zSIy_-v!9R2zxt^z`Q}$T<#&JFDZlX}PWiPTbjYv%K)3wL_jk&#|3HiU`1g0qwTD@m zq&jK!0w{ht3!t2Jh2$($#R@=hfl2v94$#QS%}haVwZ57r0dW?@jS zX3WUrW=yWAobe@O$(z)i=K|CT6X~oXW7L7wR%0ztCulhKo^{?2Rud_S*=AQOrUn~nh>$WVd;I{5(9t&J3GV;khq=z zIYGw00!(lY&JdZYEx>WJ%5Mx%V91NrfrQD73^`|HpnC%Q%w$&z4t8p!VVy&slq}=C z?ph|ye2S_S=1@$Jc~%WpX_`ZUQL(hJaYJ>?;*G5Iw)6@+%4*f2HPV7zhMY%3i9OzP zoGJk@N#UARby&tZTL=(>Lji*IxB`XXV1JadK8KtDOZ%a~Y_&qALbJ<%IZ8kq?y(|= zNZJ0xUZ@bCI$$-ckBFK7+%^M|HnjGcEDZt<8zsIBZ&?sgHG(A*=Z}S|Hb7zlLr1mU zqCC)or+MIf?S3EH3mr~Y4X-)8Nr1Bn9VG?~zAdx}%= z!iliQGpu970FDjAYqnzXofIkx7!&F;6HyT)Mk%O+w~UV842xptIt)Y@(jrJW&|^1q zA^Xz+h6!^Ss2G?qoMixmZPxj>!poEfQyt-R@O}dtKnB;u?HrYuVXhE<4+2GdziUbG zrg8e)oGKH-^t(1!_ZH?E_bKi)S>9_z@9t(zPxkgyW}mx zWc($$R52q5n|ukZWK_z0d8V`R#bsdnjnP|^KNep-O$@jhc zJ@PmI``^gl{Mlc~|MACvF8}v0|Aa`XB>&_34f*>&{xkVM{lTBe|M6GC13sL9rA16*CIdq&${H&dQ8R~ zA(?A-$wH@BPIvia$r03W*A+nIW~L-}@)cNF?iMO?uT+Kg$=zaA?iBjvc5z5Say{24 zml9>opPa)rGEZ|BP@zIHBL@d9i^#mgC=~5C(!`1`suB*l;*k;*Na?6cdY~#8&Wi8u zQUExf?2y2YPRT>T5HNjEG0d;@*ZTD}RY(aC${uz=S$0Wi7nMjSAjHz)PCyy2cXDK5 zECJ~FQK%EN$V;tMAWXuu1EcA4GC+emF+Jl=nXqjPeU+e0%*Kukr8meY6TcS2kL}F=9?rrl>M*w1kx+Y{ z6DR>bR5mhMI 
zPMDY>OAJU1Of02fIeO+yYA!_%LZH!lGu7e6l#ramc`d(0q*7YP+C zHFr|JE{n+vct9rbzc6Qm|3@v4#s+77EQw=xN?M)@z@Qd(Msc0IcL0qO9QA;eBy5my zo*AlJCfFI$Vn=eD zOaKs7AWX6oE+Kut35CU^42bm9hG9V+u0+SbJLHzg9<;#mP;){k9i7*j{x*KVB~wh4 zdn2;y3BiJD=Uh-CZ9X|ujLQe_1>_q)*($&NV@~i4zFzxu%@`Qm%sa=ILqu~UATY4*r0)XB0dEY}iwxt#?(pgtay2Vnhj zztjr@NQ%8$X?eFaAa{y`ax32lkW}Szx*``?H5Q1-IVh!Nf5IpfkI8{90VvGP5GWj? zezrR(lO62Q?v;U49s`dWkA}1IP#;tktnNx5Z~_o6$>8_lGS86MfiB7J@6zxUkBI>| z!5tmq+14)3r=dWgV!Yd-NOpBY-8s}aKS~J426TA)POSSR$2r_NnlFG-X&aZS0%aUP zG2t#wqQycI_B&y?OB3=ITvU}nbx@^Hov2y?K=M=)aQ>{sVu^2w^Y=5{YUS^Ae)-lC zlnO^sV8|=2CBr=}I7iFoye9=TmY6f)_+2b*FW{Wal@K0;2_a!XQB}B8Jo{sC z1Zh0zIRFIEfEkcbk?`TqsZo&x;0O#vs6NnwX1)VX!46fKK@_>p-sgk|*SXDt37oDO z(!_Kqq`CG%nc#R<4cRJb6CRI)qy->pW4wMFLmdDHD~&c_VjK1mBnlP_E*x}_c@sEo zRR|j}vCVSwFgvu>$pkz=;=%Whp{1RuAJox&Rh>Y&Y^o942o&>PHC?AV!9Kj^dk7+1 ziKrsd>o5?}v8|v?Oqi>rV{@6D37k3mmjEI@Dm*F^IQ4APn9Rg3%h~)xxzPKP+#G#b z`;*)UOxQt@)msmkL*dXWGjGf7=}*axsrTf;;%DTIi(iu0&wl{W7?^A%;5n>H<)+%1 z^g14@F>$l^Iw!l@0VqtHT~a3DMgm8h_!H;&h=#$tIe!&N4k&b)_#4Z)E;8div~ls9oG zpbACzAB#%yH~|utY8{p*0g~e!g54*{Lmmn5<-m##g-Gj@ojRLfD;il;C#*&~;|$9= zcR-e%emM&zvefRD8FxhPUrWd@LWO+oM?2-W0FvMSiWlaV-}$mbzJ}%3e<~nf`>CM( z&QEydHvyJk|Dg`~jUQ-|ul(~)xwICR{wBYSopi|zoceixTyAFyfJIpzl=|h_%CJ1@ zfmH_OAwY7s0F?rDaeOp>w^Oz99T>?FyORG7(ltHO&@@s`+ssf{qH8g&9iQUFMb?8^jb=FMs#!91oFy-!&Y9sW^8j*UAU^;S z(ua^SX0{T;+Ft~AEN@DDHMC`ix7ZRj$#pu20COK2y4nPY4G0Zgah+;I&Ac`}(qVY5 z$FYjpu7?@~f*9JhCw}6wh9J{R`I=IAj8#p8Y0W*jrL9hXg4S;Mygzr&RqacCmz;DC6 zioKuv?9b58!T0BD9Uzj4{9Sktz{A`Lhf`rMc$Noi&V;!V1&I|pWSGl>$(j6pS?hgH zZU83ipm=|lz2E^2?XdWkj?;C2<}JX4vn{?O4^DqpUOoTa^4i+x6(*N@Zpj5E;I%xM zFv08c4OxOR;Sk*n4H6n#`mTCjA$N?a^ zV;sUHs|n2vTKycpod7B0n8Hc+NM|qgw8Zwp3HP0s-~WLh6Iot@66q94CFOs7_A~O~ z>t7Q8PR?x})`1o|9V-*&9NL7S(4;$GLv@&;PK>cnh=#GCP_!pIlj;BoA(CV%@8O^- zmpt|r0iT}Qs8mmdwLHJKHYBBEoT8S~;ckM&gnB!5Zu4eB2vAo9CE=?6Ic0El0{0f2ntXCknueEp}x^0l9&I&sKv z{9v2>$`3cln-9BW1nOYmh(o3tJz4~FH(!Jb=>?Vz)S1Ld3&-EdGe2d<5+PYy{= z2SI0HfWmN=WppBhAs|5|(guhaGXU9Y1||$|0U}$>mX36kP5^iNQ)XK{a=O(mb8Q}($t30F=Tq`40La(A+zp60 z<#%Df_j76a!(SPZ|MeFO^39(M!(#HypN%U}z6NzdW%64;)-AvLqYd)KH=HsOiA&FZ zr;NjyQ<2;&mgG^n3hR^S01(3DQ3de8_WQ+(hPxCb#R0jM>yaB-(}__#Fvjw7A;LZ_ z8SQ^UJBM?!Li3C(Bxl@FmOJuBOv{N!oaBzwufb?kp|e+ z`xP$CqfosBcXiLn`;D8>; z$|ypBEk&vimh{>szS?tLL1Gf^CQrgvqiJ25GG72hjnm!|BpGcLGi0SogAjoO)d{9H z;9@QJBorv74+sE4=S@{WXAX$iCA>QK1RMt^FY4H4LU2s3Qcan#99~mV*&tyR5$&TO zAw)F1#e4}B3487{WCRCKXO7RLGGV@i%7jUH6Y8=ZI}_rB2mxXsvS}7b)CdqJ=HUeK z{WkL@7DVt{Fbrlu6NAdx3K3)axdaqGS78RoYGoI%0ZS(3IX^u+EOJN{Gyl7p$ApT6 zp&<_Vm~9x6iST(@%ov#59C=A$a)$|c*u&X(6eN#km@|1@;c|BdFgg7>d2Ri>pm6y#!r{ZBafBh*PG z1CS)Oe+#R^mOW7`Z^9*E!eQ&#`<_G7yQNgaYA&DjLxBu6no0OZny_pN0uDhf3noaC zP!;I|4w(P|sCalTRX79*2aALvp{glz07&MaIH?vaf%kCemCRwMP8nQ*A{lBh15b{( z0YEJgsDwNe0F*B)yQ~h3%%PN8$0T*Cs{N=qDO&^>z0gl~RMT(jvFF+yW zpb83-AwU3+435hJ9s~=4l7Z#0uZY*om2fy8maX$%W)FFGwk-ozm1d67MS!rA!BBBe{+xp~3_!E40i+Qv`?w z6~co?px7$JHaKH@v|g$b&WW*+4A;Dd31x6z?hea&v~L4Us8U#h3I)PxV+j)NB&RU3 zm5EU&3~RCUx1GkE310KF*b$PmB-$WRm|)!`;BBLGr~1H>j-3upmw5dcP~i9pnXRzd z%k5M!013Z0Kz5#j^Q~ifbe|!S)1#eRZ)%GcwJa0RM>IRe~6Bm)m#j9nc-BZ zV3>?2OsGy$bptXUT$6?LZ8=wZpd$d^n|f0oaC+XkcjP&!j%Q~%eJ_;>!(UV;@5#OS zPs?i;z9es7`~qO}DwN3`Q^i%_w7pm5V&#Th?768!wD&pYw6W0o6iab~un_(qf&^-n z`5as$!*B`|fP~i^Izou_O89ucM4>>UCr4o;5`hJ4`V>M=C{l%prAF)??OTlv3p>xp zPP7CGLt(6YXW3*13V|Ukst>9V0!2XrV^~WegLNk1+0P_-Fe;epUx* z4DIwv;$T=V4_uHR{_qFo&dd#o?C}8{CY(ixn4au30>J=;{Ywfut*%L^D@0&Ss8&&-kz}0H}DOSlsg4Uv|rH{kT(p=f^taE8p87%j0bl+X+Rmze^_I&@Y8!ax0yd zoAHF)O=T1$51~*VmV04Ly!Qbj6}g-5k=t1)k~BL=7PQaF#b{n_9S~s&v8t1BPF8$z 
zIfw7F?1lLvR&`fI&Uk_Vi5IHCWfJX&sS-G}3Y3j8n@`38TOC~T7$@CwL6vZRc&Ehoo64^Eey9>`FCX^+PysDV z=3tN$P&54iOSwJ)#|BqfGr^l%JIfTacvmg8B; z0U(@!D_57t>x@K?rr^}`!Wka307(YhX$l!w2C6!X*E~KC`}n6Q?8aUx6uZng4mIfJ=ht_Vwo=iv1wY_MU}!(7=U6A{76;8ktmv;hT1iXq!A|> zGRi{p9L8+*v1tGZl?Z#a!xF5xp(;_3Y`%}s2|>xI8e#b+mw<`sSZMMjOtR}7>2R)g zj%2e3I+X}ZT=UmrzuqMT8+n1jZm#H ztd-pC$cTNc+|uLmJ`+yEZ(u=30mAE2rNkw#lfdI|$1-XB-kP9hXvoa6XcP&7PLSmA zAEakJNO1EUtJtyo3($0~9pCr&j;^1&E07&2v z0CETbIZTMas6>trr~(PYBFBdSl>vngp~zzhMdtE2(yw+x+98dL3iBW)sm|W(#t0Is7F8_Al9D<~@KCX&6%aJ86B2}p&YzBRspGfJ z0!ogbW#>6`kVbqi!(GRjK&J}9aXQQiAi|u9Ce{sn%sy6f`E{t1{1H|saV)8T&BA>a5ZAJj%z z{?}hf%D;vEy=AO7k0%P;-=ugf3)*$>HI{OcL{{a;AP@Be&SzW&n@`5l1gx4+yC zBS<=-(CTD$vQ+{*T@u;WE|tR$Iny1G8}XFfPG{v_wjj?{`sMkaL50cvQa{v4pOyu) zvg=kBiiBY;_OOrIoh6NP*WU6_CbYF+LRS6wF8Ds@6eJM?5B+q&ITUg*RF2!bogdN9qJ$=M@^fcR5R=suSFERKY2Ls|XNr(8qGK=?GkeLeUPBnxwDdb&o0{R*vbg zDprzxGdIE|6bwNEb&|pNVbzv`aihq1q^V?xFPi!v2CCu9E8O1=0o00l77(3Ty_qQUvv<-H0JI0wyzz$x0pmhd%U z!Z4TagA?zDGthhm9jFzKQpEu%5CV3GL#@zKATV6TY96z_Qvm@mV7*OY0+8r?4V3ge z7G#Wi!8V=;pKF_{6kLxD680rwpAu6F{CIzpjgE}=atlZlA{IzA^aazQRrIw1l7Wzc z6k$U6Fr-Gnwe!2l^2UYF%l)aB z0F#?gBxX{q3knjzrRTP+0xlz-IRGOIMM98sSa~!m01-bSVu6IP3$nL<&460)Xcgcy zYKo=AR8y=-qfxo)(DSNF_@8eok#7kSG%#okZ5c%}q)I~*=?rP%!DF8hR%#I_wo#oh zfyuG6m^`O)Avm&lAelGOK^E2Fv!fv$egwcEpiF76Nv0Df+)e{jI421J!t!2%gxiej zV80Pl;>-PJ&_`41TRs`qv7a(Wyl4e`U|vb;VAs%c zU-O0KLNKbP{YIQVnF+<^!L5w^>o5D{_kYSEfAH0c{LLS|FaPnc|E2u>U;UK)*T4U| zeEAQ4P-cE)R(|yN|AqXQ|M*S$+kgKR`MbaRIr*zUcwc_+7s~S6Ki)0B`(+nYNvnM7 z?QW^2gA(I#EYGw{U|WZd(t9x&m%G`V+%M$iVX-Waphg~Gd9P5^iZ1poxs@$JjTGf- zf<53dezFobIk%X%oLJ?}4Q?|Y6D#`)g?5*k4QKrT5su%t%@>YP%5V=o;w zp)G=h3WT|f2@ZkMVjMg>Eanb$%MjGY#YjSKq_T27k%wjFMj|EGVo87u&^Q+8TA*YmS{P>YNq;RzAm+@BF8r4fC=5DiDC}dr zr=z&%qY_aif`$ly5I4^T0 zrdo@66T(D$yjvjQL|E*QsB^aOeWFGpM+gi95Cf5d=08~1uhZdjf~u1}5ji{-G5^EM#jIqVRZCgBMNI)6BOlRq(_(L4(6F_3V%1tb8f z32_;a*nLx24Q4|~->>I2=iX3AmI`6QT|XbM?J$=U=VFx;6Y$z$@euxp{bt5UR&^03 z99ks_m<&13$aLfiU~*6HjD1p`od-zfKdlOcgE~ITya`mvz3DeJDSwmO0h1S(KQFJY zeh&Lyk*n1^3KJ9Rx~7#~7fV>rUzTy-f`&d>wdLHK!FAz*B0vs9X{aJ$sH+P2_2_ml zfZ;s^)uN%UehFC?fI8v5qa95F4!c5&g%<`GHb_i3i$LLiE?G95Lo1A-EwUlq@1yt4C!?c(~3TQ2;}b@_#V_v`ZC|LcE}|L~_@kZ=BM5=x|7 zzWL=Y`I+zUl#AyAQaKfn!U2blf7ShDyZE?3rMxjCxsd_`(kvT!+_+V4H7f_{S;IQJ4%i>Qz3bza@+&eNVSOn zXrEgm2cr_$&ymT45<0|b6GBiRAt(_VwjB&&zh&%fyWItl_~5K7!ilx=jdb8S_~SGz zWi8Wy36u!#-4vh^$994w4NDzkSk{DQ*>Iw|rV@`GiYTZ!^=}sEW8$63ctVBGMt|p^ z^mRa~04N0}3GU3Fz zYEt5&6Jz2XPRxxKys8p7JiEi815T0valk}t3!o;W$DmTCg`xGWrUa+p6G0iLZ4x?3n#0;gv zj(uhnffS&^|09q8wkrKy^D-G)N2B1rmH z+?ae$prvpy#Om-)?w)<{0o{>W3PMeyNn77B!B0Q)lbS6ZAfN)^Qis37x zJ_s4+M>y^ljv+YsK5REi#H@1&6qfn+)yDu5F43Yr5!C7|ow4v4hjc&P4FgD^x+43% zGTs_e2fN}8%6Wg#IAb{Fi{Y4DjwR$uJSmq02{~8J$m?&X<#&GGCx7^JewbJOL^yN`mS1U+ z@B4I@ESAF3f7~zC8ix)m(*0zow0*owyxY3a>UYZ}T*Do}>mC4dH_LEW91c4!H{rCe z!C_yC#GwEpa!DZq<1qDv2tc^Xa2!Wu9yOX33XOzu}Y3GVXh=|2=IdXVJ>E(9mjPS5q}4CO4eZ0hA)EjnK*#lD zt2m!((>dgQ0Ih!~`=fO0;F=t%IR%wjIuVebx*+a}fW!~^CAi-&-n{{UA}^f1lTJNd zo0k+CKk?%k_|&w9yb^#$0*#(H4d6(?k_r^d*!dPv(aNp{=20>L4Z~?+@#Fl#{ZUEb zGpIl~H$2ryFBFhYvenLMgiVioCgwThye8jInyM}iwyHhb36gzJ90Lv6`OAd0XbfvH zA7X<<^CBlJQfM2K`OG!6M&Ff}Rz8#$7T=PG`$DmO0{cNCW7bXeCeS)oCZ7=-jhcF3iBk-5+oY# zdRy+#aO&PS<<=yi0>$#&nGfZY=RTv#O52y-NB;39U7KW|Z8=c_bQum)^Zif3={!3+ob%lrl3&UB}IsgPpq_-9js^KJ6 z3}ZO}5l$cLL<`po2jtZuJ6OUxQ0JA!u7Iq#0&*TOSr3HaoB@$&0v1=0TnQv)wUU*$ z-%ZQc{-t032r&7hUkJz_e>E(B@{3;i&wu=){Q95&yd=Mrmrp$Wgv7s?lVADMpO*jh zN3Y2@eySZHaRMX``TCEy$S?g+lRUf$C+b9Nyw)XyC!Lbo*CmdpI;7(hUDEYrmjvK2 z`;WV16@a@DjmXV-L~g_)avcifS`^n7f(i))aIHbP=m+=!zze>BtN|da4zH{LBFpT3 
z@9@cNn@c9@yJf1$2_+JMg=D79E2F1aEymAhNGTu_P!HoRKA0H-e*wSAX-7a7x_tl< z)JvmV2BBU~0}gjmS$R+>$paPxl&V-~6R zfX~7zC%jr6rXcBz0wx^FKA_<0tMy18iX_T$yS8;ma7U*EcXdHQc4?=^z>W^_ZEJ^h zNb-PFMH2nMdx zxds&&8%CHYNGyBCRwRT9&48s9=c5rUCfRL(VJi`J>VRFxZhQunldi+LTfndZ!>X&z zz9@u@0mb8Rp#daNCY&!`LtS*B3KHu));aVu@cE{C%a|EG*fPAA9T?pVXR)&)O_T0v z?fmIx2#lbCB8Z*nl}h`RjQLh%wRm4RbjtImKPAth;qYkc4SoG!4hqE@ql-%AHeoUa zWpd_o^4i)5^4#oea<%ui0)%}{7}_F46eifl!5*`O$v(hje->t7LUrP&Lea1mE6e~E ztv=&a!%!zDhH>pZdLJ-s#l9qj#ACS-fQ9hj@HnP(tqE`O7$)Q?#rr!(;m8dfjG94f}%4B;&!vs_*rcVi$Fov}>SHe6A6YEei9Q~I-U`UHF(b0ej z8yg@e2ox$7?59!zc(i&9P$^Q4)J3$9NbZPFV(cHX%Z(N+=UsPe>$SfzBC}3Thn>)Q zG?+NGh|MchICBOG`Vfp$`bORy) z%8xh7mpx#;!E zdAC>AoE|yX?Uu8!GhI$uY6m3RoY;nA{Q=EaFn@75mX~u3Gqw6;u@k@vv#&``&U)iA z(-DwSwEjmLygGF*o$*Y2So@hUx59kKrBGBZFh}GG$T>jiEY!^fcR=o@Sy9G5FTL_? zc|aZ(`{Zt}D7UkDRVLR`WmyMI&iYd_+Z~fpD35AgKuR1<;3&gZZv0PBI!!@M>Q4eB zlbsQ3ZWEtW0TPbi713euJ0!BZOJaNRyFkq(_IK;Le>=wlYsWT^Om?MYAby)cvXH-k48oBGhe zwuX^E{)uKOHjd&U8<1?nAZTOCfE6svg)|S#L}XPi4?QQ(F1;r&uYM>mo%<|WpD)SH z{xt)UR23?uBrD;9EC=(lyYEw-Xf2ShkWIC_)! zFn}O1H2DsvX`4|fcBso7Ym(;#hW*+q?^Vd)^)s6=;jsirH)rZQz@)$VTmuqTei0r7 zjipkoe2L~vXgHtdOb8EEA;#KuiEC1j5GeLG+W-N=KmlMtVCPo0f@Q;0kwBg3e(Sx2 z2#gBFo{WnXW*e1UHc$u={-%Hh^DZ__SRPE6FhAn4LX7-Byif@Yd$Ch#p?O3`yvte| ze1G~4t=@WM0mRH-@+nm&oD-hY`QDj+S7Gw#^k>k(_?$d9|At&2xF;NTevJT8MFMrg z*QKkn5L-cOW&n)`G$sHN&jEsifCEGT5U(l}j@QO(EdeOK5;;B!pbXZ>jdOT1WfJ8^PEHrh=cx{&q)^thV(Qc_?Sw7q$y@xtwq17wb z0Kc2bq})tpxju@M^rdVBdfi7PO#%+7eLaf z&`2I|Nai3b%bXe(0u~|{X*cVbiiW@A>o)^S^QZG>9cZm=(b$yyCoB$)A)n(I3LgB-2##= zAupU;VWl8}?W7SXga@2Cz6blH=+N|li3xep`5T}pRBRv+05Cdx8yp5C1|DV`Awt-| zwC{=aJ7C|2;kD^oVm{wMgs?HngrTi2m^n8~uZ{Yku@soO68&}k{Gql&ElT~Gph4Fqo(1-j|I>AY&(z4=bAB(Kj3p}l5u}coWOgA}Y z@>G{Bw6IkV#|?)`n6O2D8S3PsCngsvN%{1Z zo!VM#Q!N-s>f+XRek~HWIVUq^)mW&gHLxslz0ufU6+voBViy@m~57xQN*JQ7z(OuAn%;h9Pr$Pu*sW{h{QzlVvXhNR^QTi@7*}p1A{;%co3iEfU zC^Cb%iW+U8mUDGA-t6Ss5mrZLxampf1{7DMU| z`w%;=Ti8fVVWqwsi#1uQkOXF_LS`x{0W0bWkr)Pxq9*gCL=xwMBp*N62O*Ta7e)~m zG_^f(fpvHEhfV1yx0bTFTxzhZF83khyR|F;@0o!|kkA6}+b>kftRSp>=+3nGTfBq- zP)cz-zr1^6&+zry_xNV*JKFbnImZREeIHHvU0kdxas3x1M?g_?$9#f8)%H>^h3{#h zR;+|j@f452M5f=v<6pBQwIZNsvFG3FD+dP@0mQko1c|`MJTWLS<2Tw6zfQPw=c1yZ z^3#ej={5RU{ST!|983fd{m&f498~n)a>c3MM7K*mbTuWeNKr6xD$xbsHEFTd+Lt5+ zli~@?G!Q1egvkgu5`$l1zn`jP;A;ZpwFTh!`(Dt#z;^FzY;?cI{_wYWH1#{&8-9tc zwgZ>V6)DE0S~lmi}NqYN&&zjS^m zfbPdWoEo9&HcgL7mB<7SnI2O;N#u;?jVNnNu&AfpcRBZhhF|S2h%^W!R3Wvey&yZE zw|IyJ+p|}9Mo^Q}?KCZeFwU@8@Fi3cmA@Fty0&f)) zASI}JzZ5l}lv!{+{duLqLq&%gT$SO5Y^*q9i|1lhJyy7R6HGKBcP3EjiVm+AV&GC9 zhNw)&bBi%u7_gxAf>c3aCDt1&@lRjp;D7#4SMk68rvm)1|G5zVpZ_rz|KI;p82`Wj zOBVmj|5!$dVLsv-!1}>M_@6+#94aQQX zVlh#yhHJ1AuH|Q=@wV=-hGfz9veD5FR!4 zgh(8lQ7Q_VD&HdsEE1k-QLhx|`SKdVss?i!##K;`f$L@HxD+Hv6dO{eSQ58VP1cTO zKOrb6kWMH?Yq3F?C}wjj9ydkuu%*|4ayQdPh-9f$(%7$83w-_zVd4+pnxYaME{s}T zisB{NvtF9|!Pa zi8e*NUCZ-W8C*3yhjmnqi9N;*lZIn;=C&A6f;O`G-1r&|VwLm(dA*JH# zOF{+{=f(;_bJeRqszBv?r55ifAv{V6k&1T%=Dt#ot+;jft+EckYJsA`Rc;15cfS`a zG46q6ITXigB#q_jl#S1wQ)5Fd&;f)} z{Ob=l@PGTaoA|%~4=PN&iT~ICa25aae|rJ{kN^G}0g_AOV+6_n^xs{?|M4F`$BTOf zm}m-Pq_BceDyPB-W1TzvO)mPIH4Rv&ZD{eYOJH4DPmwx;g;1%gG03=@5CMnEL{JGw zuuNO5qQW2u?X}7*GZ;}hR5wt`)DuE=Sgnp(!J@|Xs@h#JnJaTPqd{1Cr4~e2>Tln5 z{(XGyopclK5Dd3zYGbdKEDEIW)HPr$5+O)J*pJm)q2&F>P6DM1_Zs>7wCY{bIB3Y= zAkmE7WE(bOEe<3VF^m*dqxWVN+Ak3Z)`rE!M+A}{K5p@mOH?bL7NYLMT-1MDV6#rr z1W4=kB>Kw-xOtkvKxi7>Wh3Y=9kj;=%lj}8=t5snJ9-I}9$Gu0(!^bViktd`It$-a zXWR zpCu@=iG}J!Dwl&UWPTtnoPHNzn=bPo@QxUhCCVz{c;zBI*KEKXw(@y&rpCgcuVd8+JHuO>_QXv9}rDao@7*you;-Vh> zM423t*kc&0-2i()guk>Aym!@$i%MjNs${GG8*4i+HL}<947d88TDuj+QVvJH!-L81 
[GIT binary patch payload (base85-encoded binary data) omitted: not human-readable]
z0ZYPyxv?x3TcychZIo7P+RI#7A@~=9gdM4a5V27U88=`nOM5T)UORv|XdG`WnKLhy z!NEWRXY8QinL)+2JExoh3DyS{Nj69dej7kHA2r3oEO$d-6Ol)iuQzwK+ zFn^Z=iP@*>BWSGFQp0`U#dD!?OrZ=&&OwprK>3$ARHn7oN+?VK7glM(7<_L8K$yOn zb(^){g%m7Sy~PZ_R_DliUyj!vga@Yv4~u;;#}?O-q%^|AfydN{K}75JaI@@DzZhSX z$Nh&W!Nc*#mo?4&d~{OgqT^5~vpSmMBL?2b-f2(v;fh(=Y~GVMQy=j`e3Zk{ALZ@h z-{tlEUuCEJ4U`BwBtDkaW~dQBV;LY>NCG7Fo4UNxbRY|jP&d_!GG5UoGZhJ$sRTqS znDPS7t|%t0*Py`Qq+NnP1qqC$0H%%UD;t-s?x%7%@gvj-U^4Ma9`^0XLEDtPrl(Xm zPe+44S*v2`=<#8MPgY1@j`dBL12E>wLV!nD7AvAUj4;PQVS4;lp6jGb8fm|E&78Af z;vBFV_w)AkEa=>saPba4CRY3K+PNBb# zYuXsrQ~SrY`?HVT%wKoJ_?#5F-m}6w1IVXl$0?B5`6Qq*FZTxy<=x^3`Sb3d3Hy?aLXmY}2TU#!CU~d5<~zW9!nJx0 zEDVNr4|;#N=K_UEy~ayA?)GTYer;Y0V^EvP7@OgQJE0_c@3umju+K@SOqBM^d}Kn^ z$#~f?3RSD}c=(OHMj`0o&>NYr-;y=}iq%)DKnM}45LRumqv4f$IDhyA_y?=PViR5} z5I};}SOAB%!t#`b83Dn7d9|u0-?07?rU4^d>fm$elQ;k$eGdnmVoNNx%Hm*osy^5% z>s&&&MNY%_pm57t6Iwa^Q>oX{kj4=UwdXk{lW5VYl%2Nb756dF_?+y*s5MdE-$WAj)- zL_v~SkCJO{r`+qhr-Aovs1o*eUyd(8nan6iIKR`fqkT)B;^m)H!NoJr# zX3DE&rmR}V;E-FcRiJdk))NL38e8YEjx~M-4yk%lcDnX)?w3$S&v5;lI+EU2eTR0q z+o-P7egtdL7$8C=5{6m`!HK}pbCkrwN|YQ+C4RaH4%JFN1`RV;(#|+UuE$iBf%g&6 z=ztmU4u2bhgN{i5osf*=Rmf;Q49;r!ZiRH;D3jEM5{Z0MAbCt*|8pB7Uh9bIun3im z*)e_Ne&*C{*}ER^I%t|o0wB@qCGP8etb933p-!scDC^Ew!l_ik*;HsJ$Px}QtOIfx zNHWKS>3Mh^folnFSM%WNfcq&#?3y`$?dPlpER&9+KH2NtlUFmZu&S#+r&Y#Fdu6JU z02u&Ox@EF_P$t8ZGM`wNgWl)zaPW=HK&4ROuqiJgLPgSgg8*T5cb)nM#ycE5tSK%O z2aT<*pg3H7Lt)_{L6Erhx@?xZ95;X{RG>z9{40%W@rw-_LuD&0_DH8Oy@~Y?snwQQ zG-fK-!Y%wsDGH_5PXtQj3o3j#RyhA0yc?{#s6u?0XbS_@{e2zPkBwC2 z@Rl>x^H3bGVl@Zz;b2Uwa=+_=ykg+J@0AQz%mFB?0Lce=KF)F8 zzlS3Ei#7>nFWvQ)CxFEx00gRpov{E84Y~s=%TOa!CX2Ai__~bXBbzKu$TWK#!$u0j zlD<-^RWyu|FyPKsI{cuE@Uph&Hp_f`OtxE><)Dk58yD5-Y{Xk-y|zJ{wywokwL~St zPJmHBBrHpnA(<}^$y8B=3_u~U*BF!`G89vKB9an!THO-Vy-GU!e8aVQ8G;oLBexpA3y8d?%D#|O%0jpGs1c8 zclKM`$3G_f$>iVzYP!WC4JJ_l*_RKCOg8xk`NP)lrSA$dXu(?yVYw9%{u!BiY1}5baK2a(~pW~y(|C#{_?mshyS|PAYzlz{{ zZcs(S=D=(%RSk&H$v5)*hAL@>bM6KZ#>@I;Iy5X}P$eV9y)shLhiSh|g{Nh^?LNMz z=Q19i(|la501*3!XqI3=qyfjuBuG4Cxv;d)#|47KswdQ&FcW)N{8al!VZ*Gh8`Vd0O@j+T$(pX3OM1j*U| zb{c;!;~+^%Uxo6xiWdPTf(ADI#XQF+Va%kFLH0|gP?$D_!)dZ~z^W&wS4(>e4Ojmu8kzPx2Cu5qZ6$=}NdsFGJRf0CWfSF+K@aowR#S^yNP zko0}6?plH>aY|*O@t*b|9xdyTi4s<8MP&dVcLPfB9KedfITvWt zEe$AP#;@T2JjdaY+2El9j*nGRbkr6ragNwIa&yYgF}wMo@mhEtEV(&V;yFPcPd{8H zv9z&YD4TC7#4Ht!rlTRe)IL3jUWW&hquW%u>2t7W4Y*-El9?Lx_kBt-)IQ+Fb z0ni{k5}w6j>;PD;@sPzz_9hR@YzfDLFPDMarII=iHS#5^cT5@3StvwsE;IIHVF13^ zgNQljjFZMk!k7k~Xx`_Q5;|Qc>AStcl=AZpYq-ZP4IXcIZp&^Lta}@9-IA^LbttnH z0CY(vYNw^IVo;iMIwT4e$^bE)^Yx!waN3lB=g0z%`B>xi=|^XPv+JbeBP0n+`?%G3 zY_D{9U1KCa1rj>tBJAhE0Nb6pee_&eAmKI-wwZPOJsbwsRl^xhM0#bvYgJxM03LHs zlu?rt$lLP*e+Y~l&nPSWR8t63Zv40H!SVfLvWrI5<6KY zC2Vzt51Go9?+CAl=`=bEy5;rsbNS=id-?6cBl$47CNKNPgWU!!1`f>n~JAetMbJ1zJ-}3~jQcTIz*wV0HR1yCT-IX{C@9=$(neSYChmGN#&^tviGsw8 z)mw}&5hNOHKM9b;;P|WYC)DGQZMhc1i-MBH#X3pyPh}9E@Bm2SG!GuBs|k&va38kb z3V+8xnSXGlPT6|Pfg}qc0D~zMn6})?whTxVCjM0Gea|O#jQ1x4Pjzq}_ITISDK!8| zcwA;fW3rxH)*kP#r@n_XcrIP}<1!hY*HrV*$KK1cvG28~?(^wC$VTf^z{18R;TR=^ zh5>~g6s?+zKv`y+`uL{w7xhZ(^$1E@C=q^%mk3VYUNEb=p`&1)n|dg_@$zPw4>zH- znvjKxDp@Fx$|9|zT9!hDNUbbY)?ix{5Q)faSy(0rk-MeRdbwB!!OHt3UwEtDaey$0 zu$qX~RhBMGoh9Rd$FA&jKLJ2r${~B!k3L6<_^CV}e+mWnOkTmM zy_|X`FD9SBp6U9hV~?@#p*$FTBs*OPGE=uGt+_q;SZi>mxyKPDlWC}uq%1>W&X&bwtRMuK zRHCo})q1{M$0%Wyil*DZJHmTriLkeHK%eis*{AXrO6T>+qC5wnp0xGKLmZc?gkzSh zN2+C^G%6GM9D>)BNvj8ws*`}pm-yIGI=s#MKVtg6;C^{NZAuCU5;|3_sa5K)aP+|z z?QPC)yd7#()yQ31s|*7wOEvSt(G@qE52P!vSCt4=L(`?Wr$!k2pyF^7d=a!7B`CaQa7$K=J1dEKZHM;5_GD&5i-ZOSi(R zF2cjh;l)%b+M>%-D2(rB;y3R4Ga&I}BxWe2;C=q)voi*gGyis4!Z=aoiAq%=l%A?c 
z@NzK#uSp!KKmd^{{8?3JAze{oXt~ak3|@+Bkj>aH1_`VENWQI4RXD2RM*DMFX$45M71zT|yhKgs@1yko5HNWl zqu~W@pI`PBtFtT+%$G6Kk3jpB-~&G!k-q#onJlf784gqgN5JvgXG=md4`8qv?qWIN z5)<|nnJHylq*8`*%B1~ju_R7I+5Coag1a~eM_cuWI{i?^uqpn2yoh!7tBd4t!&a|El?e%iQyo>t{G+^ENUGc5a^3-WYiOI}ax z$-Bv2dHV@SI%PlEqG@KEUx3abL7Nv9CuKaNhJy!<@)q!Hh9lMDbJR3&WDMW8IzF31Cb zWIeqrT_`{>sNM|aK*#Um84fJX1OwK$z*?_jo`!i_myi|~JmD*vF-?2ce8oE+t62dM z%Mi@rdYh~P5EKxr&BIh8tSDJZWj-*~B-=ekD{MFb3M|z5XR}ZCMf3L9bYU z+0G$)Z{Rm5)iAdW$0=dJy%MTKGXq0%I&fHD5v%S}PL zJXVqK;xZkZ#mDzZ9s(##>d5ikEwSS=rpvOK+LVK?hw^OvtvnihEBz(2(pNmDRa~#M zitD|+ocW`UU2=cWs<_r#4rK+m^cm zwou21&tZUF)d;3^hHygK@=2YuAH2?37DPhQl0PKtoloU(@^|ul_K)&g!0Z=Ui@<2h-Cj0TU?BFBaUAKmh>4*-xALD;s zm63`u>Ac$xMcxR5)2HLdv=s%)w(E7$b_2#Kto?ctQ(B$0UJF_h`?Cf>Pq%efzP4uX+S}@2~g6OUI1Gh1zx5; zxy{x^u#EFoM~*@tz86!#{C>T6j_;wq1NO`~wL&7lExsrAZaC9#9>%mMR3X-k*!~BD z1D!FA^Mr&sW*;K9pH(M$`{;g*m6$4FpAxFCPrwj>g9A(uCeGOLkev`+uXZl;9bjK4 z0)r5-sxk)R9Z0B3{OhuSySr8wfM-=cK~kb1QAKj$=NGlplD60aMA!tFRb1L)0zgS! zuGL|98Ehx@IrvpGjJ@2O*esYy9q}i!R6q%;nScQjwyCfD4r)W8;Xq<$aPa}+%t6Dc zlx&drsYmLswa9k!jy&mqEIZBHfXAw=Hg3vVYD)*hTB=#l#Eu-NgsJ7%0EFgSy)s+9 zB=2T^kQW@gWb6ldGxt|{G5u%R?tCq4%^c{8@sWoL5N#6d#YtAuoVO;zleOy-zubzK zj?NP=B3o>zytTYa)`OcVM&Qh^hNKtHWbk%{jNGl1(cBP9l_8lZ06g+bWh}o84yRoD zZOS4q~oxPacl{D6ba( zrak+=U;Bsrxc)c!arI9C!;gT*TL9rHp3eh$Ies87#~53>htl^$dBa4H(}c@&sFW9& zG8rZf`%gZU=c9Y_WMD%MdzNIQVHBm>I^4Gdh3~ZK)H%;qW4s3k0dwdy@8k8B-AAp< z`auWG`)$So+Y}@L%K{0H=Z=|K9|VYR&grtlT323LmXaf~-!U&w2G`})7(g<%B`=4T ze1ixV7Mk~oOp3yRKDA)Apr$c&j+AyD%&f6*Jf_2_i z;~0=QpmAQ!Icskp&s?m;Iwv|v7&uI&SSLgW7TQsWI3qmRn_csc!N+nPt2liXVy(LP zUbp}}ug8Mw4k9p5kvQY9!o0teR3v!sKRfepXC!%neMqe5dYsi;fCgJ^H2@;@AbjVI z8fm-6)?2L4tbtVZngwH-9ft@NHVbAz&suL;oTTzQD3DW7777kiBnAi{5|+{c590_- zK3V=c3MCNHz%d{0K$ zdtHa}mK_8~-^;U!-^t6_zsSRpAFQW)GZYSk?d+(ifp#-OMAbl(icjk<8p~~Un`Z~>t)h^s~jcDav8jX`P*gEeWO&G zFBePvOrcbK#onUUI*O38rn7_t<1g%aYo;Z94<13|2f$Wv{S`B^)AvfA&;1z?`A_*_ zsKJt*7BPMN7>^UN?DmGwZ^YA8&d)91A*O!OK*b?TXA zJWH(t} z4xYu1yo4%w);}+YU6b;lbyyBkJ({_An|*`e^f@@#LV1-;Y17@1_5*2xa;SoGDEpd< z)&8quIJwl#qzsjJ$Xs>5tR{wJqi#U9l3jSFZQjU{DcP#2_lD!GmDO;y!K5T6V^Ai2 zw<@LWatH;G3aAn~G#nfE5rl|6M*}3)3JW@{GcST8Dmz&#;j<};Txgf*r5>rdIv{md zyR_MG9el!kd`(_W|6cC(zm(<1Ej$mvq+(op^ZTSbw+rf}Tl(@lvD|@ahxF#QNpDUo z0K>6+niMXCOvkN8X{X(6ls1kD&&cvK{= zii@Ud1pl3a#I4h8AO;Nsh*K$8ub?rAXv|rm!}y9-X)%K|3m6)R*LA$F07yLJHQ7Aw z4c{_Tr7%!`iote*-AXr zkDX1(Qr$8f+cSCC3uV#@r(eIJYGk>FofPL}Ex9E3I`7GgvA43{^+eiphBe*xtBH4T zrXQh7zL)1Sf07rlz1~+EA6fINx7fdggI+0A2oY`rNLbZ1m)w=MykR{2kbbBdBY}f= zz1&^0KE4Q%6Qxr3U9qIjl}P%0iL_iSk+v&l>5IiE`4wqz@={i3{le;QU6a>f$%n?~ zqdb?j*n-o*b}hwYO6SdMNM3J~`NnOzKlDak%(4^aUv>QLkIR3Qw{sum<-{|<A06b@J&W?Vdk)TV7B-J%l)9lfo&z!pn*Md{x1#$}g*;%Q$<%;sCR$}RRGs@vd~rO8t}^bl@2c0I-0`tC3B4F~D(ErgUJg`%n=39BDF<*6JPcQwZ zMk>!3OybvirRffo$&D^)y4fQu4SV=TKFUGw3)$&7lqIPB$;b?@cT9Q0=tKjD(w4)p@4RNTUM{AVXVze*Rbi(_jg6=hu~uPh9aha& zT8xVTxU9}%O-BoU*Nfvf-)e==vHX8*9^3%nq~%=+MOY(F!`Qdp%$e`Qs!+UgAF%fJNd880-TSX?A~p3Q%YO_8u_ z>*QxAC2@|*0xINMSlD8VX{cMT6DFoS8rbRrN~8hOk#&;%6BBGgh(FxLQffedCPt%0 zPs5-_XlzkoF%hOnaj_481u!vq7(|Z3ofpVx{ zlLapi*9!9^VY<<{1}DJuDQ{&Vv5k^u7pAN7cIu0wCVs0Rc|7)maG)zH5ev8z zB8NKFUhmBZ0Exv**s5!yW?gD8x8cPzr%osCs<#Le%c}&#`eKySE50m{$Tx*j^(_qG zAUv2RyzDmxnv&%9e-I!!8hg18m@xQEXQL$^fGG#lJ2)1rxUAVP90r{-VWJ=@)RJa9 zN{dVBO}RISd!BwT@0Wfj9~OTvuV+5U^RcJ$WN=@e^lr$L?iD%gUXn+hbMlabOtp^5 z{T4XJmQi`oHVL520Vqq@X94@n>OK$Kr*+$Y6H3nUHk7{WWwEkW#|`Pa$zgfRHR$VC zFZq_G5Prh^eOzlS=K%k?IglJ{dhCB4BnHBK$o(?ypg2`EB%A4Zx!=8tX9s063>)Gg zU9sTXW1=bvxEw~ylluPs^TW~S>%c?bNO;vLq z-&t-WoO7#8Rkq4(nAxUNvJ`8O)#?UWt6>M$AV{jTf5}{F6sjbwRi|tcTz!fI7kTf6 
z-j9LgSi?4cL*{7s-PKCyY?IVn?vmuK0jasx2cV3}YSVpr(EmbqJ08ha+kM$+*_XxS zDqu3LF_WJBUg^&3lEIPz87>=?q0)XCDuERNEQLK#EUf-&(O&SH=n>QQ+sy_NRV68a z1WM!@`=>O(_prYj3a1z15~JS$NQzm9k9WTk^Q^w=FYJVh8<5sJ?NA*l0K-7SCc}*c z3gatRVb@aD0t|3rrVeKPWv4_1h_j3%=X`J(rVgDDFm+Vu@a8uzS6>ZR(TO9R3m=yeX;RCddudZuo7Ad)K*&okg!)Xr;H6S zXwD9IIsY?Hp;7RweEbP{P$0J6wIU%%G`P-sT2^ZjB&;jN^9Y}gOBJ4TR67gCq!99y zEk>-47Y-1{u@~4sVX=Nrb~`uZ@$dtAHT6o~%)EmVc_~l&_vK+H6~#PC(=&j^gxqT$ zmV>5#*-!OoegA%Aw;ZJUCnBBKLz23P z=kpzd?pCV1{ohgg#ec>z!a49)S8kIG6{lppv{9zZ>t!xfFH7uy1X!;|6S5YmHIP(R z10+#dC}RT-sHWSj-m1_{!G&;YR>1J8xZJ(zb1-FM#yCK}F#R|?0#luoehcS)J}os@ zyCia{N7A`dc=sR5UiYDF;rm%{+LqPSwrn)-$}*JyRFu_R6M)E&^cD7FItUn!$Y|NH z43`ebKv6H$NV{}Eji?IIwDU|gpMo+mfV99ExNp8%4~Wp%(Xm!j%rid1Y8)HGqZlCJ zAXq5S0uIB)T{2cNBpo?jfJc+0Jb(}?R3fTSbRC-#-=mHmzfP#}2W>sA4id0D8M227?kWAOd1`|^DB zm23hAjaS;GuV@%5g@a;!kVnHGHGRs5mH(9c1MfX0@<`TNpTM5VN;8`VGZ6m(2BpFd zivZ1J-M%#59?~iqoN(s?&21NliA zE3T92G906#P8LE5?Q_H`t!0IXDH4KYhH;p?Ax$}7bE*VRlJOTDm(`w}de6|m*PvOm zR(?zT_KLseH<^&=`DTe;?39k;IoWD|3^j6J)=@xPYuwbT^KRMg&v_V+jj1z*2u;uFt)r9o zU=oafI2fpLSwUhj;WW72eq%8df7#ZVDj_g%T`;TGvbYIBLO-j%mg{iuGTV63`JnI!TO*8&nHVI~HEv~OQ9hNu;+sC%hw-N1oQVX~+?~P^IjNqeS zYy`&M?I>-Peqs6k8S^j&VpUjvt1PdY%j+|8w0bKGBv@wkjRT1zX-94sN{ff`WblRT zKz^^XDR9lajRF@QleyTGY&NXPgU$y~E{8H)F^&6Zk|hA=&Ey9;9C{~DCVnp;SN~I9 z&;40PQ)HEvR&P<6q&ZKJJeGBZ3see}%0l{y3`V!4>S8+{Fk5V~biryjULQeHqE5;H zVj!_*!$ElX#(fzRA#fXSb;@9967ZOp)ig({+mM~sB|vi=AM}7M)^y?CT4b;=iF*m@ zAI$XR)#s{YptuR!yJe?oRQ8)EWDnEb#zEO>7%)rr$yS2BvYCOHL1K=Q$h!D?Sq~c5 zdB)|n7_ClLBOFb!R_4p9WU?S4eK$kWc)r}mn1^#Rr|m%i^GAW9j?Ha9#w^wW2Z$-{ zxP6W@=O76y2a?Qo@0`B*`x{7tFtKanhgAnBH&oFpi**y4Gwop?3N1rh0Li*M>BVx# z6yQuKcLTDm9!NMaR}B;m6S4!e+xelGoTu%x#ID1xAvH-v)$4%Gb(s?H# z19=Gz35VXT!Lh0}fWLs}P2;%k>{r6(N6tKmzk)Qto~O zhINeBuyDpsrX~0e%TLuw=uBEVi>76#>#=ONAILJkla=I}tTt?z;>R)@w$Ns47G<<@ z67SrQbmw+U$87-Q23uiS)s<_~+lFPDH6Mn-(YIV>S|;{xXD@fAZmH20pG@su{&gi3 zNWC=PXqTD`000yUYd7NOkTBCYB8OX zGd&j1jVYbGuS$HF9A^d!r$DUmZR;3oF_6$0A9KfJ+}YCj5GK|vScm!LHDc-)+@V-- zFD&G;0F(uii$6OnOe?J^#1orb{i)WWMpS^p-9{btP4SP>C>g zI6F{so>dJ_jrjH{MEF5dfu(MC%1qt5>;iau-3PMYwJrC%VNetY9ka66!lt5wvQ^(N z>+vpGjHXdK1LV(D>YrTlbs0XG8W}51$#PYfY$STIUx%#Mw#s@9)da^sNvp$KuVUW^ zR-Lf5k(a(Dj1q7dpkOtM<^w}%n9J;mk976}>lH|Lds z2n~)a+o>N}moZ;SIEQ!qem`Z|Ao&y~0mC{4iJG^J^#&4a?4Zqbi@If|c3d`^7x2EU z%VFQ9H9+WHk_R2rvY#H2o%$Zxs>Sn-HR>~*E31}?!YI^QSo@Q-ULi;-3?#p{CZvQ& z&ad#%Lea80eC>s5N#CfI?z@CZt&9{f?FwKGAWavuAClQjaabY3fIIuEv|fSI`3lGU zg$EK=ZNaQrxPejCN#GvceG>d^1W}CdtKw7~-yCZV;&`X0<)HT&3NZI%t9e(pTlZwH zW>!Ya24xm1UtyBom5mmxk1xwacv`Es+HSR~Bc}qO^KS84X)&no`k=7yNwY$PX_y#~ zZPIv4{na!ab%{21pp&YAk4RoiLuE8;GR7+W7h0tb-@}@bW+4m~r{&I)jRXhJy*$Tj1W-yZs0hc1+AxCAy5MtL9Q=f~kw8 zunIe?YU|w3&VlfjMxLoqY9Jj62GXA`wpe`?h0@?3$HDcPLY{x5CT#?Os`1Ba=R+tE zTG?0SQUQye3`yiducjV!XB3C>n%FK z)ziV}vfZ>TOV#s$$don%W}JjoUF-$_uH1bt`#|GnUvksplqY$ zyHy7@k!X`m%_aTQkr_Xs)AQFt9WSpxR*E9aav35H0 zJ7v0NLe|m?vfIHd(`$0rvnr3e0G8HqsJ%Yfgu+{|YEm`A1_-S38-rRK07$xSRBHC# z+S6tDkMIHhnvFByBsd`2mux_S!qZn)1zdNj8qO{*U3Y4v?=E93Q5nt+%V=I$#_|d8 zumXG>Hkw-@19!@G2!NvB5F(71Ie;8B4<@{~z_T#O1YuIE?`~%y3M`$6xZelzh{HFu z?n{3`kF?)tkijDM$y$(=hE3UO0cctu$N~yKlaVPXlL2YF*#b~l5ZzT}X$MBC5OzX@ z3TXyFnz7uQ*DZ6^i?Z2rU*_tzB!R-+-CvaA9t!b|mul)I9gqD?jb_%U#(eF$xcVl> zMi{ssEa}zJ9eZ$HX@D#RMU=YLVg|UR2o_v-6Ts30z;Md=Nj2b93_s%c`Jkis9KSkp zPN_tkb9d`KbNk}|bP#aPKVzPAHi*C}d+Qx|vi5bq_n_s@nH?)JnE1wJXGfu8(D-!O z2MznDaGtHW6s%t30p{P*F-u_dPX-i&3ajr~+ma98b~2Mj{^u7g)$|4aIUGW_PJ`rB z8yf$>!>$A%s5~@20&uXNvi_>I;pX@x;gd0`AQ*Ttc&HU96*ChkQ%G~D%`iZtJyf;D z7MuPWEC>&mdy7?E1`E%0AKWN!md1I#X=}Nqgd|9CV{F~T2RRxVmnTC{o; z+8bMNB^ERQPupwXmnS1H<#6bg^rO^Obp|C!d=RfEKgh#@H#%MkJMd}xl(y%922=>E 
zv$To}FlnLzB&i302%v%wU^#UlW3d%UTW#87|`kDKO)lO^{Yb8QDHaaQ& zc=3UkeN!!6#iO#`jt_S9EzbMBJfC=r`*lgV5l|3ofB@qZ zv^9=#UK*GBGZiS!*`PCJCrTxCIVuCW)hN|gYkmDRoEuvc&1+SORhP_{0T5;E=*Bc0 zRoa=3ErK+q3rfNBu-W1YnJFrjse&>Y&nc6ko5j+7rAV626-)KE#ZvMcw#UbPu{S>M z%Yh`Q01Omn8Apz15d@6IjE)D1zn@ufTW}wL`O|%v#LziYURwqvYR<7bBN&;4Ga~7n zb{UP0%0m5&tToNaR(ekMnrEOy2$4Ymy+hW#Dy^l;xIRw~B1Y%me&4Q;~-)w^{GA_Ox!)&dq`Dc=|YP1TjO`ybhArVQroiAeqU3` zuQ%<=L})@g88X3QXD*a83Ts@??7(auJPT#Qz&*^`K#g!@ zMUHv0)x0MM`2Bw8V?gV%%)~cPsA`4-Mado4Qv&D7cnJ%!GzAG2$;BiJ!0bQLsKNX} zsKznCq`!1fTJE$1AOuO9S|fncc&k|&Zvrqkpj3EGtnPwBV)Dw;uK*J`Pg4?wo|#kk z6^XZ9rvc-Bgg|D0n-7}X$2Si^?s#0s-zhX`Zl3A|PF%Ycl;M3bn3!4KEjJ&8OAs6m z8gAayh*fL3V_3EW{FY6BsYq~s0|`|K3+p^TuLCgj)QL{9J_C|-9BNm`EP&%bixLq) z5(g+0B$$%^tg@mMBs_F%fGeE(AYtM(rUfYmy^23C7WV39ssaxBUx16^aH*{K zY=jCWYJqnK;WL2oEdvo+=Akg7r5`7uk(MkyvI+7=(}p}7ej)pi~S> zl8KsCxi|Pyo^afh>5uYs{G~kV+n0N7DE+2}wWPaI!`MU|(1`ohOqHH;sHnoSDG3H$ z^M3`0h!2o(We%qJccs#Hxm@~g0swG6V<^e8O2jNAlLc5-<2=@>Rie~eE)#hGM{cQ% z-YJsdTLm(Bqd@wu6iX*cz^T(k68pMP3UNM`9!-h(4ms$+UHTF;I(T#PK`=Ow90!0; z_x1O2%Ng~9b(s>kw~sR}({Vb;1wq2~K1kS~fKGt1y{IIwr=_=~U&f;&GFvkui}4W@ zPKISI(JyQ9E?KQ^fzoS`#ZW@#q3UJCUA@~FV+`y5NwCf4;2o8 z1tYxE`Fx-8aWZLSK~HhM-y)xM$;-z0QZQM*s=c&!KF$ zKa$zRx+Jf)YLLAQPL8QkIK*!JyqOkcQK+p)L6Arb_Jfg}R~b^d|v9uzXbz`7trbiLbG z_vh4p6FiPnA1n}9FLLjR&RaFmjy7v zHuiXD^WS>B91U!z&8BvqQNd7cl;dK`zbuo;H$HXZTz=JUoe!N)_AuW01YP@kgJUjsAjj?(>`;hqHdr%@ez{8sVGG@X)Bpmo^86VaXKCtDKmmwHXnX288mb_sJolfE&sud)x?y}BbR8oW> z=c5w8+Agy=&O=oqA^WBek_jKQpiifa|r z0>Cj3bum{KRhuo1NcZ(}DfygEqgZ>AYnlf<`_kVOO8iu@v|cRL(GUl2ThDsh@Le-P zX7CP6x#iLiC)IPUSbDD(OW(C38MqFRK!uFl22}1=$_O0fARXitD-9Qa!F*cgn0+&_ zIDiBldJqU^UX`Eso1-u}3WTG^V{qzV;vjPD@4S8V8ohOHUpiC9E-2gCcZaFDsV^Ts)`H z2`1&UcKG}tSs3CwLDkBpxUTwui3uyd0eGNb5@*V!@mv{zQLc#^Sy*VhYV%DOO7Z-Q zQGhczxPiP39tIW%7oFmL(7bFKpRvLQjmrvBs1kz-<8>%Jo~=`u%-5~UdYZA2CF!`^ zrXww~F-<-E#8~;LHV0m7+Ln>fq$DpfO+B0GLWOt$X}QWgTlG5D`vAVrMJSiu)_bxG zrL)@xm~=c+nCt;YJM9l;tBo+(faCAht_3XICg7J?8#B zgMe;x^9B-i{2BANWgtRx+XM6Nx2i@wxF}e3ov9KQGN=w{$14*bBH2L6hKbD+B&JH- zzLp7?5b;66sxC7WdK4yQIshE2Woh|<+o{hk03`M7koXP$81_tO6_qIu4xwwq?pn~j z{xW1F$j>Hr*38ZzLZE0<;FH#Zi`7^096E~k{Nm0_gG=F#Z>c9CJrVG)w5dN zwb!~Q&&S^CFrw|bL-@#RWG*%@kNaQ9J*bjLW51O*9GZ9H_p;6=z)cS{ojj|t3?ybt zP$nx_w*rtX)NROUcuso;mz}i0B|*aaZ#pCbrTFWZBmj~5hJATF`8#nzIc&j!o>#?M!#GfdthNFmB&uu6aDo|I-RR@=WgLC+% zTpURJZ9ZI_lmA!D!HR%z_xw}lVDkw`u*{B~bgY!}RK3>x+^S$>a9}BpQ=Z=4=O~PRew2PL}J|WV?9}Ah{l6 zIb?8BM|z|hNx~Y~To^uyspl11R6aKw6?DUZ$V|@W$-<8qCtnR4qY95z&xk#g5^5!lQ*xx@qX)@`yJ*PhX|a*U_s+N zkL#V2C4Yqo&4-9@$HRnX^8^X)C`f1yCbq9tVVN)GH4!K}j=Tqw!r$0CNvQI@$-#S+ z_scR*kpLtoIItCm&V@{@1?kyRizRzze)6 z{pC|qeX#}CU4`ogM4SO8@hdGdQ@10BV?WB1$sgo?{~=0ZYqFJ|l8yQS*-R25-GE55 ztX5fvwB>Le95x^VMKTYN%mE@S`OcP9$xLxntD;780Ef%PQu-yH0YKu0mo+DzE7yLW zD_4JJ{aA8;nJ1-R6ae&$XBEO36+pEVNYStIq~I61Qh{yh%MqC@ZIQLaknE->ZRu{lx$9|#-WCCV=|l~RYY{qLT z#Iu8{{1U1j|5q5tikv9Yc{Xsk^FN^EevaqqK_mzgTgDWiFi6q~*mvSULbF1L4-yy8 zWAz?E8afr1B;MP>azLPFNyfv|aKbJ6y)!A}c=?b_MW?0hPAB{T2f2zHNUkNN?M9=H zb23=WUhl(j^5Yt)UrelM%P+PF+ilwiK%h_nlHE?ggogQzmU}Xv+?2tJDQSVSV2oC) zwct}YHcARUstLZR3BD)|UqqwY=mNAxE5}i=Tan4wtaRq}z*jYC5I%jkLz;8Dq&cS( zfNIm0UW5vBe-SLKu4ls__NF&SZw~$_5F9XQ29d&y>Hb;2Ir=*+10)250fXj1611Zr zVbzvZZ`suBIGDJpsSY0~K1>J_QzZ5~I~HRjcHcH^F#FtC8hL{tTWdMX`&F5In;B~N z-_F2+TAGvOR=yZKaDF8G0QjJX z%7(R*BF81!frMr<$N6fGq&Nec(m{LJ0g5eY%^#6+e1IIKj4FgJ zVX7}Q$VBzBJRJRA4#&TjgB~WFT!3mC2S5hBrn+6SR@)AUq-7ZpS@IyV$RKWo^(mMs zjmb0s!ls~8g>2DNA$>PXQ1UH;(_tx-X;cVR2NJL92rzL#p;`YH7^XL2*Z-Zv-4^0q zh@;dvBKvJycoq-k@!$hYcja;4rW|%J%YA_4pm{|08hd4@9_pm7MYa;nu(WJKsjO9} zWg(iD=}MN^Q!-N2Af2}pP(ziH!zwV2fQbL;sPjKgji{49V!`d%#`G-z=c-1JOOU{U 
zGFVNAuKCjNuI1xHV<5Z`?;h1NN8zi*`<=X4C5@M3lD-m=wyPn4qf+6~cC}2JE|y5* zObGzOU@->(Vhkn^&lnE>=RBA77qS*GR^Lppm}7SkvHy@tfmv$Vm?7tv`EW*gYRt=a z>wkF7$$76-ehviq4}u%=>js-zfzZ)D8mcBeD}dd+uXciN>1>zi%>C{!r8ZpMKt*>O<|spizR zq^{1>IeFjcRBfK`jt>h5nhZ!dkM#~N78mi34fBtQWhxdm3wo=QXaBT}DsccY%Z5tO zg35$uzSIXyb`=*3=q#K&n5gQ4<^0m+*z!SQ&3^$B#z-zO*p5F(tFW#TAb`a+4s%Ik znt^&i!K5C*~6GAk?$uvedETqTN^iQAw+Fs;Hm8h^uW(Nhr}*S#1Q zY?&>oG7KILBnA_#2S7MgknpCzWNKvsgmr{v+RDJScjg1<2SG(b$H*pw>rET-eDt+u z{9SM4Xo|~#$-K-)rWGXXP$~x~{cz;OgP!NO*G`F@tW^c`a{RqK9Qt0KjQ>&IFa1Lv zkA4&;bhJ#s%nH0Oi`Z`lFX?o2R_5cYvI58uHp~z_R<$C@YaO_Dsu7MaT`RpML$cHT zSRRdjkiD)4vYwj7H8AUIpRCt1Xxl0)Rn4*zBSacyF`TsOEJCE*Ai`ExY<)Eeh|neq zBHFXOd$hZ4#P4Gr@$V~hXaT5c*q9{ zo4nrdoRfpraoKMgRFLe}cgbEur|dWOX!;aRxy)YiTk#HnrBzmIT4gcTF5@N5(r_^* z`FJ4-lORZJ`cnYlx@`L?Sh)4aALn?Z(^LmagEQ5VJ3bOB6K&AJ)-3#x`2pkOWLlGo zZz@!2F^*Als#N0Nu^BWJ!`B6XM;`u%T&M-6zoDb|fQ@H=6VIF=VRaInxPipM!xRb6 zPX|upHCe0)rvHtP=+rs=7@kdk321h#af0v|3LlCkm;O^?DxJJn~{fw@`IH#?;h&blYJM|ul} zq_22X21-U0BuxCs&W#&Q+p0_mldYCL*=>Iyduk6AC{!g|SYB&BklFaU43v&b>+KG0 zsl_TS_GqUfVHH;sd{y%e#yqSCJ#%_>=l951Xi^J!tSRV1A+d?SXWRr3X~DI%$D9b)5d}f+ShB zb)F!(1SP^eHv|X%Af{nvdI5IqVX6U2RfPh8Q4M3&6+yvxNSuH;MUZe?L{qFV9>O#$ z@v||kuawZY6;l2+d#jdd+=R`9i|`;UO)>}DF*pz?!5cD_LYRa#x0K&n0rbTU>BoVO zg&&4~3|gY%%g+*9p|s;=Iq1A6kNTeACA}wW?3o*1mc`mdZNasYSkghT9%B3R@eeYW z+?LSEI^1`MJm`HQ&j1g?iXxNkf@>z+UOG(L>5yt;y{`y^4atL>GQnhsfJtDk7f>+8{=5aDz& zl$80(gv?deDnPU5QK3%El0U)`detLz0H#WU5IJf-MCkB1 zobpO)OCAkAkq3Rdnr7vp8av<3YQG7pk^R&FfYAkzbSOBOuj_vEu-s?vsdOJ8)9s~J zY11sh8|-IM+ktyYYgXw(d{EZ6hECE4iJ9-@ZJPswZ$AYKxBj?o#~B@E1|pu(sdD^% zI$77;k(N;qfDAt*W|U=f_X1XDKr!5=>cDfn!Dq<~2Aajd@~1g-lb zOvTrvzieFEpduI`XVp6WR0=+-$*bPtxF&5kEx%VMpFw`NjE5&>sbO8?JUu1DfCnJL zsx4I{9e^Id=T(g6R>IA=`6Gq|5hX~#mF6%sjVo_04*O~gD8A$ARY$ZsTxauw+DE_hRcy>gJ~XR7!3TsFpB)Hw;CX<60O8mSX1odBG&|mK8DN1*aq$qV&O#}R zf9&TL4APbagb3unjd;cq89#ztVQ>2B>KS=9^xQgCwd_NctjK)Tyey(L#UMO^vfHw& zRbUSX-%3x>41iaIlFYO`AAKW_hu_Gfk&gh$KQ!h1X8Tiv1zv1cb1kQ~wZyd9@<2OP ztz&+riOqr`$?JEeHE%@Xmr^oR-VX&b513I64ajn=QOg>Zv6wtF1#s8b_A zG#odrbgI zLkB=&IlMNRiY4A68?`O6Ue${G!h3c(BzbVYs*K>U%<((R2152WGhL70X0P`FVZZU0 zEjGgUg3JDItOTA75_O)0$uA5hrW%}C37*cLNf#OSaUD+Wxw>QX*=9o|Xik;zSUd-- zp0xT2|Cu><7{{g8>pl8=cMkiW3?dxDw@g}d+GQ=Z1c$sTyR8ed(J%=W-Y28w&C->_ zAT=9Bu!W8#;o~)!If(EX*!!Z_$nS}97p&v+V81UalgOEbbQKKBBGd!}?R`aqQg@L< zf!69u&u>_1bxEQ6)P(s8!~&`4{i#`HB7eAx`xMq6wL2s&;zNhhou zFzG9RG6_!^Op>e83z)U!bOUnMM3^nPG<}O}7R(%Bw``mg!iq4_&Uyp{3J-qOl?My_ zZ$Z@&go#T!si{zc`>7EgFw8rMsQn~JTosmEm%S|;AlYXA^e9MBkVj!%lRRR*_R~|C z&>VdHd1uZavz90Wk_$gSul0fo4*Vg^QIopDOu*HWJYNM^L``J?5;fn2JdhY9xIPMi z&Fke=Z+Vbra4tAq7QysRBH^7m*Se?M8zRdc| zKa}GW5F$JVKWKgN@j>xJ;0SbLaB0L4CD&{e$n3)ORg2`tLs`-BGG z_dD+E$cbBRPo(x-i&UMilda|*c|P_=4*FinlgU5oAW?_I-^*s}VS3rYnag^u;!{7A;DAjwgRnyDJYc?M-J)B;E}%0eim)49reIM$@hmjfK- zjDyr+T>|i^)jUfxrJPpFba70kphDQOP$7~Zmf>6*IEkt0*(yup9&j$IN?0|*(k5P3 zI)ETdXa*ABIT0jx0Q}T-TuW7#tg+P?oqvMy4*&zoV!JLaTXjtu4`K2~#zeLfDcQmL zeNUAfH1*0}eVgng8f814l&xA?qvrJ5tZ9;s>L$P>rFov^HkG!&jQnyrNBK?&U2a#YnpS=Gm%F9$)}XZJj!AFnj7(Ioq7a4x7`~Ye6uPKN2oxrRT*G?8WEbBmZKn+ekg%EX zHosr&J5#eHgMdLNR7EQkNDF)xjeS&@S-8UusHFy&)mds zk`5@GHoyle=4PV;rH#kLaWu6HV3Jap)M^b1A!5OGz867YI6&CE4-<OqF@tuduN9C~Zu{`W~EL$l3ECL+tsL0k^lV!s? 
zT}duOwLFn0P#}}B72H=9Ua&5$7JD-ERvrv~l-CP?lXq;z)%OlB^)uP&d?MSOkF<5k zx>jpJr8GgMz*xP-ekb$EeHo7}$z)_$W-8NA6->kz2Po>)<_He8gvH7|t{>AMrs{skq!hV%S)!Wz$)pymfphXj~tAk{f3uVktJMOJB^ zAi0Zu@}>TAjgE`4#HzMXt>)a~sDJCRT3M^AF^k1zqpD6@Yi%kh8e}J)mfboIIh&SE zlu)*+6S7?sm#vywSVBko+p1v#$a);3PFA9E8O#l9^VRD=r}O_*g&b$@nvX7LLBl)N z(RFOCMW?9GK%c|W?G7fKniEwgkB{AqAn{@t9!&h|RGu%}$!D7ZB?uB)1}6G{8AQzR zj_bAl^W*XxsQ5M}Gx2$OJNHIDEWDOCQ%~jP=skJbzabAg7ojSrWUGD{D!4<&3zO1u zEv6~|*{8*Zo3(Q(FuePB*A`r7(ByAe&Bbirgh{QWZnSIcf_*W<-_=OjskoG$N&+bL zQgHxNL9D(eE#dQR29w(((pfww!{IfVjc>|w1HKc0fyo~?0g^4M6TpO3T?ETc>pduv z1DzU3U;xV+l*x2_RR+q(q$9Tz0BF+IW9=N;_m%@lqjX@tlOr|eG)qr@yKvl;@z4|s zb@MWfg5+SuxU?7aLoIbn`(41~Heu2vZLA^#lyq2N<_ZH$Sp8LXE}=fiAi{USV4=%| zhZ+?KAx$%osKF;GNU*Q3UH}ZdPYhbq3?L37taUN-LE)QLw`#n^?c=MD;Jky0o6iPA zb|}HBw%oQ)tG9y3;C%5HA%Mi|N5Z6yOyQh37gJ+1UIxz9JXl27IZBmB1*RN#0*V9c zD!wYsfFz9N%C7-|-{9fV$!DZKKOpz9yAL22A4-8XEu*?1JW6l_MnftQGrJ*NJRce! zG?fNhzwq~3g@cz{KRWLtWD4^PNIc_js__CWCs#CW{NdnpS#R9X{v>lyB$h^HR+bX1 z4mgl!qi^JX?@MXDGbm+WMKv(UDy~OE@8odock*`W@A7Qw53<|+N_IM);^jWn4pf`1 z9P6E(6H&4PP+0xNmR;-tKV7>aqkz?Daf3`2SEDozXRTH%(}2nhpfOX%c`6WAWl@nN zJT=1p2{w4tctJ$dZV)0m9{9C#sXmE&{Er;DM%9;jzNY$|DpAL$&xli2n|LK@JU{op zur(K*N94Oo87xT341hLM8kX78N?9lm%VI@D7AwQD6pqMBBnG$;C<&;G1fY^IsMI7e zAD8t=jcj217UnmqYIWTf!NVbA@tZYlTa8o!mK^_vE$w_&!t3#4CZEE^oW8%WZ$1!y z(s<8=DW1(g2Z`r6k2?9x^TfOZ3AS5lonZK9K=D9=YqfoCe{oPQ98hV{YOdrNSbn{q^E2}CZN>k>$bH2)jGZxf`c$&oMb(tX5z~-1ekDeEgSd5hVUCYWT?Wj8c%>+NRkl*;}@tRW#Jf_}!94JsNJ z$r#sL0oMQ$Fb^aO6T$=Ll!zJU4JsLcxNY8i5H7y6KWd-CzyZNvVn$VBAn~fO0>|)G zj{Tmhq!j+~%x9OaYU}K0r=;#owZ=E90FTJG6#xR$QUe@Vu7tW^RaF%0qfi|*Dv$69 z00a;rNZ7ME7o~sgC&20zt;)j3gBRTbs-GI$(v!|q92#WjP4U5eUZP5c>V)!%50Zf3 z{)~Iv{2#gcg6l^I<2IJMKmB9Ps8aZ6cHw1tGW-&tcrBYvTUNCNfUug&s=8)nt!@>n z<%ztU_(4`u_a%I)PHN63Q6k!x=i?vc-oSf69`FH!m~9O z<0S+M1MnQ0bP+aJw=Lt*B^fI2l97UH87qvUWC(=;s7wMBlL{5aK&oY$Kq*TADoi6@ zD^taUNDUxTC1ZI|B6r!;w_FF3sz1k+8+1;0J!s?E!XeY)STzY9pOvK5{wJ#fJ(ys- zItkA)(de8x6l~L#s0`<`V_hZAStgSO6*5yClKC>iBm$KZ1x%^|gIXA&0H|;TKPZ1KVJnZmNnQMB?~ujVy;^vH&=>Ux&*4rB$sFgh6N+ERKT---YA1+4fH% zkulyY?pr~k_swUJZ5fA3!@l+`&F(OE!F9Tx+nvKTXG^o=+W(MkT<^>~AO8c-*zd!t zwCs2mpYV^b=kSihYYRgWEH|vlZ&!XNf86+k{BHF}`C;)LK=MSM5AVpsu6fx@4a)`_ zbcCB^9twD>s9O4Ng|(-BAruL7Y-x{nSnxa<PQsX=|=k#!R40hAPMLeRpZI-!`ZR#zttY*6O&!*if@{=e9_1L8tUX zm5hWYWU_ieI1c@C!$|SE>+H$(3?M?@v)Q01=03bH9qs2_ps!VDwB($c(n#E>Y zFn;1p*L(1=b+(Tx5ynE)?|Fu0r!ovGY*ge_i75{?HU-W=#I{*jAn5cRK!WCAz+`u(isa|3FF0DGya29xNP0egY(P1pFg3-T~tS)0PWt3Bz8LP%D~0NMBx- z##r~Ck98~!@g-&lFN=p{N&|sER1JR^jioN;`I@g=l;>lw@fjeJ>A&KFXWO%6 z3(g??5=uOCb!*x~cNQPbNac(S<~PX*oixBOj*ohxxCSZ(!0;5xBoxYINdoKQGC_b8 zR?8^%9bsoT=5D%Pp{Y*lzAKSRj<)w7c%F2^?)l?+(?JEDp96^j#H;4=%u1hT>df%0 z*r~DMLWOh#SWFB!bjNaZO*5HeIi4eo*?^aIu%(!e*tb$yC97dtRBeq@z+$-)+pvCJ ztHl*z&LG2lTRuo^pUk*y&#)ORQ1HcTk4jvjnKLUw=#at6zDi8&Uw#4Ftri$RpO4wL^KVNE=%m@*V zcYPQ0I)(XFStcL-Kh^uw_@`o-%7i1R?R6dqtE#rr``STqHa4khWF@hxIe?bpOS0Ft zFE7U5%YM&GX}H=ARaPa70K%)u@8#jZTOCo6&4Hgx{$4iPpJ`xSQ>JKVMNSPO3zW}z zQI{ao7wWfVvUW=bN=J0yqT&3g0%8oHAS}k9RCHQcgY^cF(R_}u7nT0oAx+lTb*)^| z=ZmE3n|vw$buOL*!9n#$knowBlTimiNU+500YjAu`7W=lg>?NtUV4a)+SIloj6)l$J}rDtIS4V6fk$tJ60 zt|TJkdF9$0znCETIYHu7NwzZaq2adi9dheFwJgWyIqj@<8N+*TthMQhtJl2Vfda|O~Nr&D!%2QcX28C2KH@ID$ce_)zyA!&YzQk z@VbmuZ^|rSvIv+kON7QtS^yHlq}f_@t>9bYD2+Q>&1K4DhvUe@HrUx1-`s5NvQ{7W zv6H02BrRQco1{A@1%oQdZC8-=6?W?|z=NfOfXO&uGADER?KGe=ST-)5gb9>MYhDl3 zNhj0^Kyt^F4r3(*3CEvje-n<|#QQxOFELp7O2n_?!aNFe&Ns2akj8-3VH+GWRaa`k zYAtSO;M_TLZ#^73tJ-`h_#ja@C_vmc+wRQm=Z@_I#j^}doY{4`-?M7#+`nCcB59D% z&OnhQ&$5YcMB^fiiG)~frRsy(eDRQ2ZAJOZ_(T98?qfa<6i3aK&S>sMrK-^26F{ZT 
z9$?TaES!g>Bwu}S>XvOvHR8j=YZam9{gG-`Lf}02=L={i7kEy=GA)aV1-ajQC@&^{fSTBmiW9X`e>p7&P%RV7p;TCD|$7*rtPlN&Oh+?KJ}vh){p$UqKLYDAz;IE~71PDF-tF@^FN z#>Y;8a8RWF+ZEDvwM@tTXgFIW;S+gM@XNb$i`7|Lg-D3dEXKj=7F8OUx?ar+ih%hb z39NTc!JF!ThU2mqb>&z2Qgf;Rg^fZ<;W}EcmO`zR;2KM1vanociz;P?a49I0sr)j` za~(iaY&Hj*FR`kzc@7a<%rP}8r0+%vl$G^=VT>jSicf%$warXR`rh`R0*S5*?59rA zIYzGcOdY=aE#?&hn9sljj+4(N+qlhtr|JAN<{cyJe9kiP;685NbJ}XyM~`Fc-1!(c zVv@(<$h5qic`tw5`5Qp;zvZuc|9~?2y?kV#eSBXY_sq+FdQi6FZL%6|0Jsw}2Q@NT zSS5pZqLRMAng%#F6t3KT95uTSJHHyP&)vsy&xP+0)8Vt4OJPDs165LSGNy5pawry# z3tx6BC82X262Cbj?Zu1IQw}u|+mM+AJ11_*S|j5m2N{rTwy-tVmSzj)H)?sg0FzFy z_j}tz>~{dzY-yUJ(aJG}1rC9=D?z~Rv1!!n}g?-wLcr?W3$n!Ff z0_1G=QpFR$kQD$l3?BAXr00TK2e*;i$<2uZ%sunvQ4#tXifgd(ZqGG6xjJsAnjN?%?h zU=r5cOb!}m5q#_s8Mw_9t? z(19J@?#zeBvGbm@&Ny+;)CsDy#4?Ys=dj8xWNHFsR^*zUe; z>KUH9frRrso~`5mr1t=0mDYUysyv^3CEu_7AirH>T;#R9oqQnA2iN4VV@mc@{n`n1 zt*TL$*{QV*YNW{2NZ0kS#J__R!uNd#=Xdv!ZFb%)bJyrSlORZ3;e+oTVVdZ3|JKpiB}wD0l%JD3Gxgq*EjuiE&Mr(u3treA7@S zWkV=fPRMw8R;FUJ+OcvTzh^MMyI@c{a(fjj9R+>T0foYZknMMx&2G17qQ|BiO`2LN z#CrrdHPP)wW)kL5+YTf&A0{;B^3rNRICg6_W&Jnkpv|#6fH)_g4Flh>-80|G+p~Aa zIrcZ1<7UIb{WjaEPAqO>RbOTV$(heC8A!hT>`N)-O|jaFoo#UQblP+z8E|+Xi^VDo z49CGDxE!>jFRGQA`2sql%5aLq8Lz{cfyDZV_yDmq0tODNZ=_jAMNI{vlG66&d=7qF zfN6I@uW+P9rjlpR_W9V1jF${b_l-trzEF))a#%VszuCAd&qm(LTKXXX)GU>d>8!4L zIrW1a4u8~ET(9Q-rg0NC^<7TwLw#(^Lj5{Ec54+vt#jNx$E@PIE< z**8qGSEV_j!l$b=%|ql=6xSZX2Oib{o$IrZQ7u83sL^To|FZ)Xjmp90>$>*uceTY> z$*=RI{EJ+(-(dbXxf1z0UlOMaC4I42+AkMN=j9?D466%ZY`sz{^=Fxjsu=Gboe&*= z5Dvb%!iu{VGcH5Lc;=vC5HWKQx${d_JCx%bC0L&g6f2FJlcs91-*X#(>*md2Iv_ab z=}grF_SI{2`+2_$nsdau?9FF`gssVnJcwkQI(F>Cs>rJINtvjgl)X-l9ltFvM|b2| z|Ei81f3JB!bDynOGsRAWEU{y2d96$rSIZcbFWotX()-4}V;eYdEg7?GIeHy> zoxGQzx#xlHt{_67u)2re9KTD(5?E^2n90dnsFI{M39h*|Aa%FLr73?_y31E(v}zlw zeHTC=Oz^!l@5w5g0|O8oK#SF0+L{aVjFap#jS5u>VAi*BvwOECUESs3Xfq?Nd;g(onVGU;$%$Ya@D*N3;W!H~#um6KQo&1x$n*EFHcE5xoIna#2 z3wRl6R3@rOU@Lez36c#MQ_+t{7gc_@U9E(pEtmEymC|tq3gmKyv|g&v4_?Qx__|Q? 
zf0c{>0gl`$5WwU5zvDjwNH}m<4V+7N$*7FiEXZu#yeuW>WwCw^(3_RH_@qo%kIGoM zA5y*pC95V$T#f@cklp~X{U3yiS@1tn=jb`_AWVX$isj#73OE@1v%na;x%-P8{2zHx zy9HA94bJIS72R{U_G>u<*8HK4_WkM;&+2u$|Xgj(4}S zpj~Eb2Jr5V$wAAM>^0c{Xf{e?y)0EERE5lxSasHDUQ`Pze5WeEVzn@oC9A5v<9>23 zw?4SdE%Sc#-t0MLV1j-8f+0)dEa0I)r{8I*ltP)Xze_o*xlSb{cA-t;*ZZaR`jDja zXQVf@CjFsxIB>SsB1`}cd`pA~ZMDe)@f&~%gYX<5g-IO=5mt4%-tUA7TTITzS7bai zDTBp*T9w>i*d@J%Juoa64ah+8pbV4@%V61}NI2|o!=;2YUQ9^snJVG%8S1w(zQ=@-wIdKP(?AS*(7#vUt5UU#FcK^4ZLd5kMv2DKDHXm~CIt+LvUpYn6 zAfJ8y*%y+}kI4Z90C08`6vvsqI05IQU|@9$>@FOWrhH%tB|~NZfl^7hYA90}R1<&z zTPt86<}xysfpto7uW2xrIW54lgNTBJof+|9b8wCIm+K*kcT|;d zJZfeloyE&EQ86Ur#oaQJ-zpQO-LjqDkS9ZLAfsPM^X&mC{+t~g$F&#y0?aM0hDLR~ekrInI0kHWb&UJfQWi z$^GsX+3#GGot7CmpK)1;kH}DYkECuulAnxd{VxAIQztY73jg0AOoEmTlN`AUb)tP) z*!aR*r}p!^I0lppoEMw<=Kd-V?+D*D0)xMI&>%!;ZkgtT!W@@(ey_@lPC0uY&v|F| zwRLxYg?HoYnB@JYQk|kYcRE1N2?mdq`8}5X;~g<>b9UVNBkO!8Z0Awqb-?`fm~#0i z&6E>UwA5aNk}XWjbhuNNYkFlR)@Gep*$=6xTBZuBWCHLQ&0#fZSbDBkLfMu|*%$aW z*~%|?{^QKMrccd#UYYyzL1Hio_yxQ_tUchj&hL=lCtHM9;TM^_4zJ z-JOuu!Z~RxnwLR#R!l%uBsQTen0^Hc0YF&;Kv=ECp?M7^01Ke8-Sz+=v5tzHtp~Ef zKz)m)DVj;FYBkp|zUQH$Zos5RdJ3UR0Fiz`gf>t*Dud+{G8CEyOcrFiW<_RdS7a`+ z0vIi#05*;NN2D{q&#Ja=w*V$x(w5t+P-(u_B&kbD03@#Iq?mt@c?rwDvf+Du|6_j7 zTBfoiHb6o};(@>b;8!;}2T!N1DNMd5Q0Uuyp!gu6x!*XD1h@O^98BE4w!eXc=XPf6 z?01?tHwz>NxRPK$lK=VH=W+)xiBlf`3?$}c%)vNVxXxPEpMz$l5t^%p($)(4FzMJW zwF#G9m6QR6)l`H7p<&?AJ|%n!X$&?I9G|m!uq|851rH($5^D?XjN9zI4k8{%aE)w@ z#zb?6{f||da9o6Ws1y#>$ymu`*#JP&CyQ0naKwl5bnJUsZoDVa?~)Swwpu0k>#5&D zmAse3@!tU^f0u*4x7sG2eMhtf7jteg2;T^a-o*3{9Pj%4eRCaPAX3#FWf^D!N` zh=U)MeZf@WIr!k2H;aze14zbcV+sdd_EiZ!idLwcRXH4aDo;k9K`HIXUi*UVwa&<1 z+qCSrPs>63jO@43CS|{MLPuZRNlzG1Y6oPvj6)5_@%(IDc`aEgNE}S)K%8UFHUf=y z3o4Iw;cODwbgNl7UQPH^1mJ*k{`nnTv)?d4gX$RzLIca3Vx0xv0SB*>qB;0%qr>AM zO>}V1X}f*1e`DL&k&we1Q*}9K?;N1+gK5_9-1?s|9X|NzK3onQM}z!W4m#s3H4<>z zZe3Z>i$Dl!VW>D^Tig zj!H+#f(%90WFoeP?{Q6&qAt~~S<_$uM0>#FTjf0CCamt-YPEr}b_f#&>X~Vx`Mwt5 z7=WL`_dHTIB>lw$fWm4HoKa<{0?`LI2(p3K93f+=H=)XT}HN;v{w*E{!weKsRE(2>X^mO)#5j?V`+A&&qUYOvXzGWV&)h<0yy2ujJA2dz9oCa6KVOU2jE+|Cu}+ z|6ca{-^r87KWS%0c1~oR#1c9(vEx3V!xGXxonLBTK+&2BFVt_zXk=dME~m6j|6P>S zZHx)RBO4&j=wx~QDL9hFx>>o`_gD^k_GG(xTDH=oP*9_?(=;MGsUg^a?4|}W9hB|H zK3K18r!d9(ZLHgZt=9L-RD^ZCOiWjzj)PSwbOh!M{QqASE0n831lPg7KozH|r57LR zdfOJB4}d#*2sMOKV`?6y=5{InrXrxIvhOLFhsu-;fx@X5`yfa>)08eWuBXv)DeQ`| z&N(*Ud?;X9ot$5V?m0M+WPqaYhpY0o=j~!9{0{kT6DAgbk7fYN$*?A@tvVeAR0x(TsRRTVm(>_WaWJuEZuf!a!GtOas)Vp&OOA)VkL2m_TiNe?hL?0-R`Aj;LDn)K z*9?F%Uo|CrO#C?hUbfqxNW;|*TuX(FR7}eA(RcE2@SWTr;viUm)z(~u#zJCUhu$Sj znA~x_6%(D9cFG29GtnWNb?vg5#FSQtbVrT z^P|(q)BnEVCKOTIuDqUqCm)tS%Dct)@@D3hyc~Zl&xZD)h8AVCqFut@mU)c?{QGeL z$(Y_VjvcTdMBKb{V&=%q8Ph?#<)1RfP+S1{`0d%pRtui*nDf)Jj)Ut^3(g1E!LmU@ zJL0&5hHK5(k#+n(_`PuC$>Pror0h5OQVum+`UO)D<;$J_aTm|+j>fC50U~^NIDUy! 
zfEFXc`Ge>1>}a)@1Bt2@4h0nPqLgNfZ2f9rsvFkzK7zBzu!WdI>N30IzqOXPf7 z5;q1Ul`|=gfJt-yjPzA3%XoBIX5i47{(UjArmeONB-_^CB)y|BS+3tSm_Ui_XjRuE zg^3RBfc=(|Yk<-sRL-pS9T_SeltCzy!IB}&59=U!1PRAG8LF6)(dfL4S7HBZs2O~p z96z2-gu8RQrRfR)LKOmt)N!yrsE`=cw?QPP<25BO#xaFTIaecb>=QZx2Tp)0ILs$$ zj6_TN7N0mCCO#~(LE@`|Y-kubKK;D|it~{MCstPxF8;Z*%?F7c+x?dQmB+GHTVB=H zPl3d;g8EM6Qx#&4+~7e__*GRj|7L<}gY!X<1oyRhlH01aJcxLvio`R@LIabXlW=hl z(!W_FD7W$297r^6GY#tqlhC&j$lqCcHu_Q?4ZMO(euNhmFDGQ}B8Sh7FuCKHHWPlp zq>dx+@KSC{#fi8?zl-b3{cQBT+#C2Hk0<`1BP#CoypqM_4y5sx#zxjSeo30+m7s*g zm1uV8tktAE<9u4yYno-fmewNcaR4CRrqi`rT8pe=+h|Eb!Y9jgguDz$Xqjp* zz5^xN@vOIeOzsapllO~10wOT-mlw&d_InF#Rd@`bb%zPXC6yV`k712+54qqL&^PQOI z@VdIc4-@li4kA7T{AKrh?-+r3Zns>9JdOp;vq47Vd~n$v!}qZ~P8LXh`Pr9ns1|H> zz_8ed!GyicwLLUmfK0DVb;>?G3?M;saG?1hF?e9!3zGY0`|=_)zdHgFYbM7V!MvLP z8(Z(kNchM2K*2Gwzm9T-7um9InsFV|+_PiRZu_o08GbE$UC*@oX{LHvrlNB)$MK~B zl!clF+2Ob)BX8w?|7+>-2=I6y z%j{o*mzl;ET&#O#jv$V$Fb|**I76WYiJoi3$Ig#FTZx$E;N#5GQTcju<1$%ZFVhuu zGRtvGIF<>A;|(WeDcS(AGyp1%0V7~w0L&_&vdZz`YXA?-uV9_qz7(mK)@zm8e~6{Z zOu+)q<2H=FXzQ-E$|T_TX#BN&SoyttSo%@k&cBzJ)6W2qJ$clo)U7FBdd-x)La+-=@E`|>zh=WyrrEbF@P4qVrD&ah7J0r!vpBV*qG#wt&HcYH-+ z)sR*oVXI6HIOE$b|2ia<-$W(yT^(SOme|F1X}C2g9fi{}P_YDd zaF{A#jKo@Vu_f0clnYh9f#iW~wzGi?VRBF7m8^1}$39b400o-|SBwK5X8mObk)hC( zi~uAfkr`dzkpmTSxltO=C)66w0U~D!5q9FP(ve>oTu)Mo02~~CoG{^FfK(<)R)1Yg z=(flyC;+xHRo`X~+4(GugV4FtpV47wIdCgHvr5ZPtmr_J4Hf4Xk2CsZ^Opvrpep$k zCT6aR%LD+|*?BlE)`6|Osx1QvQ)7Do@r)309sdj-W)2)d%Z7-9gnI}E5<QPGYhcO3>>~;`AdSsAmV2WHdC^DfYFD~^*FwRheG97tQufO&H^dLu?9;A&eJ_yr7RIez8r8e-n~1ynN5c-pSzz z$727zyqx>1JRJUBHrk$QfSnx@=h-0<0GZd^TLurNO<|mb8G>i)cBHdtLW;hMWvaN` zhpC2TmhvjUDU*&{Q5j)s70o1ECDWzVGQ$xR%WE@0S&jfEu|`0JMz}N@OkgY300>~R ztSY7f>&!St{9tZWD!zoX__^l@a85R+U>|^vfdOAruZ8z^bcIS|Tj-5s@ zRi3Gl_0}Ev?55?`Iy#Vc&wxS9joJkKrB2j;|a&j&r2h>v0`wf&Bu< z2p)@5I#D|6%(jdZ4lHNP9CgN_J10*ItfN!5ylHgO4kVufBxv?L|8!jvFlO(ToNpsGav=>ZESMj{` zRjkNxWKE{4*8vNvl1-?OEx^PBiS{vJr$wttAY`@x6B<Hi&RsoZhdIs_5p-g6E zEHW*_m6I}5IUz%U3g<_{)4JS~-!CoKnxqL3NCOCsgai}^tFx#?7+mL28yt3{?p(Dr zT&$JU2!O%*5JdDALaSXU*zTNUuCc74N5}yHC{4Tv!qQ5c^pdYSxo2Z zP|CtbvDnDObvrUyyCID?d+;BWcvV?}j}jj*KSGZ4(r~dtdhdocwF)x{PZY;wvLvQJ zAxzwuCN^|$;}MmTQh+R110+x*x;|E~)1_z~jt8lJvQ%z)o%dL`9lOvd!_j%!?06uL z$KJ}D#oxz%z8THfCJ8$t~b+do}cslFg^ZTz%J5PFQA?qAR&*!eetq&~w_knxY|Hv%(f6`3VdI~B45>qK` z9&9jS3(ATUVTqmQaKH(PUrI~*&VaP!jzLipCX3QrwkV?!R&${+1~qT3xmGpJ5!20U z=3oU1$1{Oy!8*3$V$5W#73!q*p{#qs_r=7rjyFG9H3yi?%4ld>Mgb7^Ct;J|wp;B0 zND_tu5CGB$wNVdv)Bzp|4d=`x$DL=s>k?Gvc@tblmC1Pe(QrR^xuncqz5<$2)h`4Red>@J9 zJDBLc1``JoA0YJSrd9}`ERf{=s!Usg9F2WAh^9F#U+w~YQ z9o!1PXHeaa9jyAoJR!pEfn(UQ?LqM2=>wrWROyH1ukBCLKMM2iPaxdb7GL40fW$pQ zl@#GTeTBX9xc{lV82>>IdS1(7J%{#O(~e3LFjhseifg->)nIS1&ns!q8J0pkKOF>X z;JG}T_)#8>{Z?Kr{9T^1Z%OM@D3Wb$hRA$AbNB$J*&+mBndPv*%)teJATjgo2t6r4N-skdQ^o_ilMaiB@ zWcGtR9eE`8IdD>HSa#}r0g(>bVAUC+gkv(}?0gxnr6?vTz)w@59OG~@#%)@9W$HW znj`-d8b^)Ki4f6dyx#GG=FSs5-*MOFX94if8>~LdoXaZ!sj^jb5P{)1G%6DoPFbbH zcnL>XtU2Ex$;+*hy3r>sIpfk(IxjUN_=YqH4^_WPn7|AsHgqqi z?5xOm$yPhZ40@!^@7ZdS8GqfkI1yV=HNv4LtVI?e0w|;a05)aVF9-YgZYL0#-b!$(9PI>73)9TQJ&G2U}bDt*D5X3DB%X|X)UigE z>i14LNXD>Z^zziwH5>Kujl_R_xeA| zO5;5Z1}-F_KWN{%D+5xOsK@(gT<SUs%T6*q;ps)-iR7Q=rJ7l%(o;<*PJ)8bPUeEm?uVw&|v8VE&dtLULMrAwMDI4)- z&F!>8B?4QDBmpv}=ZMHiZl%IyQA~_&s3K zlG7=#XWz@8cK<5hFTa!5>b3zVM9kNr`D4SJrSq?F~au_8|yyw4gaT;@wpBT>X zE2DqivY?^Y>6y38|M#DQ#4~m5-n_*cES|~#=wGeYGL?sGa_4iP@a!l=Xu)d;ngdA? 
zVDA45&hvb#O7NRcK%&Pt$hZbP>_1j{5~}1}QmW6_Lz!?esa|{!y1`8e5eUH ze0DCR!>8I{wUxpH#(7p#)dL7rAne6XP&8rQK}12~8Rydq63pu{O~GiZF8n5RGOB)4 z{i*sm`ZYLk&yhQSrlw%<{h0bW4=x510>rBE6+l!NSjXc9&HX-OTt`3Z`BG=uAPJg5 zg!9(3-2=(z0111N8$_&h;9?fO@n)@x$vJfA;GLRKa}UCUhfU4s`2ARhZRhc{x{Jrf zc??Lwb8z079j}NPi33#q)Np(SjnV$oef?!#t3n0m=6!3^+RF{Hnch~#alij9N-z%r zk_G868kVt&Nd?Jl)vRo$w&YR&bCi7F%24H^2HshkT7|@Z4n^~Tf%n;f9|KegW%>+C{ZlaZ#%0q(w&KeFO-MnK^UG2*Kt%1Ac-FY)YQQ9Y zxk5T`P@RNikU+@|$p}?Seh5$rL#Y5P9P1sB&@oR+Yc=>iRhEF&$z*9l#*5<`bLr2C zNYiBoK#TG5=Sv>`^NxZZ+3dV0hXBcosgIg0@bSoFKx9jHo5p3U4iKqjRaS$nL>Ze& z$Z{CKf)b)K8Z8J(+ZFs5zhscc0gG^ zpL!06ypgxl_vP8Zf;?>LmxKCd9YcOCR-@ynFbi?}wK8ooYmo9BUVZz{m!&g+|Bm39;+ zc06w5b1>jAUtHy>Dv6z?v#poj%|nq;T`)%$ zGXOK_PMBCa6{r(@kCy0>L-jsZRl=tD%zZbGf?sEDuQXk0grkRQfOBu4qxa$<1`w(c zi-iyZ1c(}7z^W`N7lnlf45|+W$0ZLgUfhKHGvHoFKw-Y|YP~dEsn^0QGj$N6tYHbN z4fbefjfz%*Q8Cawu$T`E!Pj9P(6H($8vL8?>s5WJzcurHpqatPen+6V{am$|#Z!E! z(EmD!IFMwr|9<}2*SL`iPk9g^Y$|C=z~Es$GAzaHkr+D#bEP zm+j&dHXbk(6gX}XA0m$lbIQcv;XuTPj2rPDj8hw`IH*3b&S;Ed^Z2|rm`OG>C9gg} zBY5zE8eA$=nQ-5s(qZlG{&3)pY_>gx^WT!u(5wd4XRGFQK&*wDS=ntnkSC+>WvA-} zWO$FZY)ap1mxln%lhNOvZ&wK5n1!m*-h4kjfU?_tXSx^tz{3}@YO)2h4r?^ehFY*4LII5h7F z)XHdK3`XZ}Q^I4YfB=a}*R8OmuW(3Z#*gR(^Yx?dD(aT)&OLcN`WgUvArJfR$wBA3 z>@-cvMqRfyF?R8grEo$PLkW$~OcmDvW)bPSUM}IU;3&LisKFSBRfl+vE@MGQ49kFo z>ukHO*JV!4DH9}Du^++PS=bG=cmRkzke9=&a@aX4_tKrRmu!-)>ZFcAzXXuXm4syg z&ntG4trl@Stsdhs?OKB8`F~;=U>r4EtC}v`ISeXv^j3<`>;t27cMjhr+BD;D+_S_r z|Aa%=d%E2AjfO;s()Z0(AIgo>X%;I(-$1MyHJ1PKSp zV!VVIg}2%dWfR6mE`udF79f>ApOj`c>-Bo5TQI#!;xxuYREZEG)zS!nP=PqbVbB0f zE+lXa*k#z&I#@lvrFsAg$Hja-_H{}|V;e+A@eS!I9G3Pw zT{2MAFH@m$*r;||yx;p2PL|`A>`CRfalOZ}$c#K4`>4b5GEPF6+#CELOX>TvfRYdc z>zYahFZ5j9rp$XSw>a$R0#nXo`*ht7O4j32`c19e`JW}KLVS?exZ&Ez0}i49jz9cO zp~TOXp%hf1O?W%5Q>m0o*G*c5^xO&oFk$Jt6IS)ob2}{Ex1!Q!OJ0xf z$g{p#dC=M~`;Bd~lSs*?f+P-*RLeqXR7RoT>T&ORzru0w`^>om=B`PN`#3wg?Vnf% zBHp#C1=s0%?5Ac9oBP<*bKv;Dvw))8d2D;0&WZca*E0{0P8GOg_|U+))q?9_?z~6L zt{vyae`)H{nS+SEKivLFQ{W>oai+({GvNI&%Qs8a;ZrpVlf;EaNnLM~*4%z+FBp@a z(is^JE#Ny_0!-NRojJJ-By1jR%48b=*)o_gnWN_4+SPanzvJ}9dTG7@hptK_;q?uv zQRQJuq}r>>q6(?eR$E$)MQ{KZX*GZZ)_f@`tu)NH03HNM^R)(PqUs<-F4k%*F5O>) z@y@tky`-?N3eYP5#$vhz2*+`uF$I4q6$*V7{FCnk>9btG9rIKr3Xx#XcH73@?gkM1 z4Xebm;6V$_D@45W_^KouBwR<3oYi1EKyvQqm*D&el25@AvOg&|2&hJy`_lT!3rOmD_agbOdw&_@I;5>SV5VSe6r`vK$|T z^~h39t1NQBq;Q=!H=eJIYiqHol4>ZQsPv(f-+Zw|D!zmR`#F<2@|biio~eVQ!!xHx zI2;|1Bx`^D4IQ$#%{g4p0V_ygR{<+RwyUsH9`tU=^Wk-oMRb^%^9Bs-{yznYI%XR0$=?=q+HO7w675Wg z-#F9f=sEEWK%D8Zt%@mljBIe=ylh~hK=CYiZqG8;_;VVL%jdu_))?2hJkQhw3AN#Gbc7)$(2+E}Pe7)3NY648!r6Yex`bwu@bJAZ%m@I2GmrL-t z1a(4HLPcWLTsxY7YY~9yFCK)GZ-jyX3|Jl8WoZV;*Us;XdD zpghz7k&FQ(o^e06)=Ka={4KWw7%kTuJS76dvI9!KpZV^wI7(G=oPLNWON8q`|7T z6ePze5Fa4!V76>P@IavR1`CCVf`n5#c|rj5R2o_P@*;ika6J`>JBIt6LWHK(WCo4Q z!}J~qA5aOlsc}1x@9mdSBRU^=cy670kUWleEWW&Kza9IoO2;&KGW1;Tbw8Kkl5y$H z?U1q30hufxmZ|bl0A)yKV^b)p-vcy0%4XYRslAYv9DFeCce^zg7gNx)itFXvU*zG) z4=5Ee^DjVBzp3@KY#zMQ41hErDoB=@lPk5SBQDM*ccrgvPGaY9%wMpI%YyATWG+5x zIw^d#+LM_nNSJB&zvsvuD2n`F=1I}#mNKIPa0#6#f@&#&6=S{_FK98KQ-tTsB#{6J zexo_DJSV_ovkD{f9aKzV3isV8lcn`CQ=X6+D3954sE@L^2J<o`Z>%Z)+>Rp28m zl9JEucvrn+TIrS7YtBuf!1bQ9bCAfuf=)^uusJJp=x#ff>C8OGY{?Vz^*Q9CK-68- zDFIx1 zSD@rvtjk&O2%28g|CwdM#7#X%??dHN2X1E7L29~>2N4>dnP)+`*u4Aw@gT8fT*pz< zYvb?C+%>zZE&VU`S>Qj(0tx4NEkQK`h|qotB%I>@w3$297JI=n%LT{0h@MVJ9IB+@ zYOA!|?vc*?Vd*Fsm#*SzRVI_MWtpp4RhY0b%ToP1Ai};To3a>RmC^DEX}{G52*7!> zMHT?UYAOd1ZGi#9>p-i;7E)6OoMR3qqC8gC`P!<_@7))BO)=4YCKqF9UPe&#C zO-QS=ny{Tlg<>iN=La~NurC#h&g1t43zbO?V4?s4OjMz;;0ysR9n#{Tq$&i?n>9Q1 zT?P{PD$KL*2U9k2YW+Z*?{*;Zd|Ln{Y-a5GtZ1zar)pl+7S59m5<8Xy3XL(6Ox4yG 
z00{{ctkUv>+rBwPV9p#KVgxW5#dHidDl2s>@@U|N_HLhuuHpXh65!9^sB2G|i);9!Je~ZbyqNv7Y|d`nvMMj>}=Gpd9@;m4*9IP>wEWuT(Hzc)Z>enT+&cIbugM+LIT$z}2L|W-IG>v7A+8?Wu?@;^0o-r1IM+$)~t+#=VY>KNt^PrN$&z+!e+sfky+`<>46G|!v+|vNw391 zl5CB|7zhCZm^8qF(;Bgju%I|}*lr#6CwzDu zJ(hnQSIuRT&EhsR1qmOV{@xyR8IOzoII3H3QNP^pdo0f+ znUvY+w5%jn0Gxfe3tWu3M6C{?8ORf1n?13ya?#pOwMH+7O;N=JzOrT2m;T;E(EK@iT63oG5d6wX}Mf(gK=H2l(uUjl+G%o{z8dVpDL2F-{i^N|8Ymp zXZfDks^>V6`0#Kb$zC=mWR4oY^S1fsw)x9+!q?!uc)g7`l5ix$vdvDCI_zwVtfSPl z7K0jb4FlN6rJRF5aR}OoLXhzx9*>!QZ$ga_CU*dC9y=Q*!E@XB{}nqH7{PTvWxB8D z)M4u2u}+nN2NMMg{x9vf5dgzKHSQNQtP27p%e-rKE#?#yp1J=uXk332`)0x8IFPvY zFsED$Cj4!Irc+_7riyQ38ZTj{?1rn&lDg3@tvP+tQ_LReGl0pwj6_h-t6Gt%=z{bY z4r02U8gXy*h=%YMybQ~vXdsA zIQ^2U3U%;7rC?58K>}zTX9k)8NIZaK!NfrXQ(ceWnDW7IY2Nv)O3R&_J~iaUNE9R| z@FRBA`)QD<^QBXF#&sEG;eg<)iJyjvZ~kx0J~@U5fD8mOu<*^7Y}qh54j=@EGkbv9 z#)qsRf$5)xdt-;iYP@)hb<1)%_*(Y6U&=ya0|1$lnec>+mJG>Q>8SR0rx7X~Ct;~! zAHPdW0UUm3UN1oOT&uSZ#~FD4n>-r+9*Tsqk^@<5c?dgHm=G$=9L#p^O!i2aECVj{ z^*b_9!H$Ye7IfEGiK!C%aD4!LVpJxcI?3L4JVcyRSEr;OHAmEgGWonvD!wk3$hXBB zl#hLf>8WA~eN!Z*U*t;uFQ9yQJgs6QKwv1XehQQ9^Zc|q2aNkW7ADSoM@?f^+vr)S zrpiwICj*w>tr=rrw~~QJCMY6FsL!8tk%nH28`#4W4hqKtJd)yrWx-^pSxaH z_W6&RLD8J>QPX1w*LlwRSW{^D&OVd-N|y-|9|-*4f>6+H$C#i6bw^=0xC(raIi8Iv_Kh4%y1c{j zSDmGC5Gs&sSa&s{)nAq}C8jWG#`;!3qxA+s0)PM>E!P1YH7`yAqY@!Zta^(eAwVqn zuKV0bNgOa?q1Fe2K}4&$V07j|wZQ#cuX8i*1AnH9#C)6s$We2uME9XDRA1@Tj}MQa z6$4P;eRe_Wp!7fc5{ks4{c!qvaqa>Q7+OVi)I6u}gTQnCu+py?grT$c*Bv!F_);3? zX<6F?&fluY2nPd&yJ>a!xM?Fb)@#cvEPH`?r#1{g1cgsrM_r||_s|q1G(JGyztvNWRSKtUKslKB>m4lUaA-XLF>rCtoAY#Juq-EyYYp}*p&}u?SeWR@P04%= z5Q(>IV#;NvYYErF;zWx3nfuB2XYyAvz^Q2o1G3z(Hdi#MFc{st^X; z2_X(-#h&hrgQxBA4tEb5W(@R8jZg{9wZFx zpZe^aBnXnPph(KTf`sA*fWcIVw<9maDUu*m0uZrj00c)(VQ`H7#QH2H;ow0)D4hI< zXsx$scrb*6f`!nr->K5TJ`Nrk(+7nI#0RD4@E#pZ1tQx zg0k3ce*{&s0$|L@SjnjL=Jm^b?Xv8(-`BCUB{Z8 z!!7&!`yjD-{N`wfT^>Uk5VZjIx z&J!vvsxkly{Kh~+bpq9)#<)qN!lV(PYUFXT4}sEvWNT~^ObjT1gV$-tJzVZnhKEjB z=-OomJNM{sJrL=G!g+a*8YhAgoP3x(H~#=8Q0vO=R#n1&Bkb@rS2Zuwky)9I&C7Or zSMGHm!s+kGZ0(XvLxHd-?E+Lpu}_wJfHfbyj%Rc2I1GCMm7Kt zf@iax^N+11m&Qu~62OFz;dG&4Uj`uI*-3Be*ADgY{%~$ zGhuPmEEkdiMEj;Vkm!eR+h|()hl6m{R7dTffy5LC9g8`e3_R4?c=Jar<6!BSy?HuQ zIy7^(I1Yg3oUn7sR3&ukW#3dv$K3{*C{N2=s8O3J&sWr#VV)pi$H{5-dnt@b@2v`{ zK3Swg9Q##PcFf~I;u;3D&OI}a|I0w) zJASSD^31KvnC>4m{8r7DKiQw>#WlG5_y1eY!!oV5s5<@sk@wGc@&7GIcnw^igF+L( z$+}C8`b~D;X@|3iQ%32X)lqcvR0=NsL36#*GpS83_z+<{LtAEHp1*IV(ecAniNGul zQm^|kn9gb}##k5}Z@tMlh^df_QI!Otp-KbCqngq6I3Cs$G}>AV$7uwx>Hwvh^KmIZ z8IgRd3P3?yP$@`UIusuczR`~zaqmKw zl$nEx0!7t@cawmDQz2>~HzA=Ncd$(5L5Qe*LnBPs*~j*G5ONQh&}4E*R%N*d>yAU? 
zbErO3BQrXFI~;pzTc#p&vQ)RBqq1@Qc8}MFW60$^`eYS0H7ps zAYmSF!X7b%1Od^i6aWxXyrVb!5|H?3jz4k&2@PH{9{-8Lki>Dm#C9^gZjADF2Fg|= z+iZA|mr&y}wu@z)kG;2g;gwYEVoz0(e%V!dLxC&_6GCMY>!$&dNp)SmaQKvs{gx0; zVAM5wxpA%0c9O4)>7LI4;0DhPf26s*oGf`VrTxuvqrIa_APET=cwY{UJT5~*x)j?< z1!#0!8BSbsJIbmWU}!69B3%jo{q2^-&E#YaNJ z8l80*cmlxNH>v=HLnKC{6CN%fR9I1aiP}O44|*QS6Ny2Fa{ve%($y;~wOOzymAnv6 zUImCSt$>H;3zfGC5npeC3EN8ug|w|S3T3*IU1WL6m-1_y<#`nv+hG)c z@t4PKcQZ(ayg-2gCufxvjCv@Hxwe_7fJ-w3LIpSk4A%f4KoKSmkbqP3F~@P9?O+lh zp<$PiCphG71SGK3HoT8m2RHf7$NLJW#lKwwv6W-L!l4XEuFc(Yj!urw`8B zy)&CWwq%)OOa5tJU;mpuSo+rP!ZV3RcaABcH$rKUBEZ5)vhE_Cjr6OdcWtWuoOSJD z6&J%h122S_4~ z>tT?>pt_-H1xeBNz;jA&bX~q3MpbF4yf|KY02_KG^^S47)H7k%j!oEl-?(is)P8?O z0VOy3;gR%B2qYZsKlU4d>jh~!8sV{IT(5vXn_D;MO0(a`(Q|u{n3z~sniR2#3Tsz97 z02H=^uh*91ls`$kVU9S#geSh5QZ@8g*BIBp~9VN8;C307lCM5iLEhLkG~remyuy4^|J; zd#x#!7tTpnkMja>P!K&EKFG0sllIxzhCMqAqdxu6t`FX@l@r&jab#YDm}cRst_`ot zW4SSQ3;Nt1&Og)0_*3tW*)g1lp{k#r|C7D=^ap#j`d9mQ<6oNZon3Yk2O5e1#a092iv2l{8+jS0!YFYcmr2_1a>I%JQf!ygy{sZF z9iR7i0i4IIvTqQ7`cb>mGY*)HE6o1-@liFhuQPZh(iqak+el!%IId<#kgzHX_r~ki zw(l=`ai#omf5m;4uMx_i{Ap!_4nW~O#MgO|UU&^$cQ|YZl42jq2o*sZOZhx+l*#j# z?oAp!{vw|gO8os~c$Bkv!= zWpISyF9XEo=`4>njsrZq@gd3HsO5R4V%iYeK-tAqejHmalhs=MFx8w@%^-PKZ6xvK zA4WcV9NDmYI|b9YFQq4vIz&odi1y|31dZaDyf4;Ox%#LXyK(TbuC*r4ECu#Bephj(_UWsi#@;m zgTkl$dF>zeVDTG;OW~;ZJM}N^zM_;mI2eIgP+=7pmbU7OACIjOJ6Z@5~Y``MTk7K)suV*9chPJe=>+|#R+URxB3$A`JqIu(2J|6Q($Ph>4 zGa~Y}UWSt&(SXX6@8CW)K$G`_vXddqhLRx3>%N`5izHqv=e*Z7m(ug~rS)wT_rFs* zC6M5qM69t0>F$O)HLw#RKR*U}-V7Lkg71<_=@}p>U-D<#bRFql6*{L$Om9tV6_4|G z!g2)@oNs5C`0tMEj4m+T2E*HN*5(h*!|<~)-2virdk`G#g%v%qybUDa;Q)zhEcVJG zNPN2vPh?IaNU&r(2*g-eY(Sr2TL>V+IYn4K#VRWaUdAFp0?%R*@K}OJ(l`t+L{(aB zXs7ovt!;WPT+&NXH5VW=+flbF?8WI1rfsBcTt>SGwncW0_6*x@kK9&eC7`gHt7NNM zRW|yRpu~@5Rz(384iZ=t00GWEuDCoA%=Jck0FU z&HCT%$;I#O*36giHtqu)UjQUNp5%7z2|SUnB!WaRfoF0H%T;(f)%J6^m*E5t_A+x3 zEIj~;!-U?*U;z_uv)W5u34jtH$q=FNXUC7ltG4`)?_No>p~#?kJxF8-ld%c|l_7xR zBag&=p62;@Or+=heoUzx8EQWtuiIzf>$lhOSP=pGFBZ9OiYL9@~`(*5T2crL!*VL*=} zdp3Xwg?#~%b{JwR@`mKqAgxGGAZcfm144w_a=pBV=J)>JsQf$NZ^*CJe^-JBu7yjl zPUSo}z4-5s*wleJ7~@5oKRDyw0}Q--APx{tHpL~!fpD}sduj4m5o3()XU=>!NTqw*M8mER59xV(@lz+qO^SW8E!nk^lM2#-#Hq{Cst0f!4nbJ&Cz zQp5VJE#TM%Ea%$cNgd)~%31X|8*ZDh6AU4rs*6a1-*;@(JxNtSoD_HyZk_%tH~NQlD$j^cgo{@LCVNN8wj#AP&jQ_0Zd!WjqOVZvnq zhF|CIc@$7!-r~5lEYu8=!fPo>0NRapR)?ud3@|{tV2~g}gYJf%azaOXZY$xj4uGk0onmp%G!f58> zBhB;0F_9m6VCZD{n4XLtkKs;3jd4B@6(l@5zZf;2)z_8IS0?^`!%n|I*273 z!|7EJ5D6k`Ob>`)TYvyW2#aD&2#$$3S&G!lM)?GgqAE*mA%F4l89jaX&xFxJP_ zmO(FqRangsnE^zmtpSKENC_ka3Pg}(m{e^ZUdfq`GgdoPhZj<{(S4IPxVK_M&_Jrz zVs#OXc7Vh`U;`v&@jpypY=sE59Yi$ONeLpV-h#^2Tic0{VHt!7Jrz!T_#r+lL-a^8 bNT%%H`|SDo+L}#*00000NkvXXu0mjfZ#Oa6 literal 0 HcmV?d00001 diff --git a/docs/utils/images/vpm_tgt_prediction.png b/docs/utils/images/vpm_tgt_prediction.png new file mode 100644 index 0000000000000000000000000000000000000000..c258a6ca07eb52e650f3030f942a9f7b0e9aa413 GIT binary patch literal 302006 zcmce+Wl$Vl)HO;75+DTk!QCN1aCaLlxCMA{9R_!Ih7jB>NC+C-oxwdoa2Q}9$lxwF z?^ku}{(8Tk_s8koyUywE-TPFXT6^sks|i%Z#w5o?LPEk;R+7_3LPDAOcOUl-?cdeW zuEqABc>~r~ltHSQ0v!HxP;8~urIC>8ldzsFQ2+TDZc2t=BqSV*{~2#8B+KoQkX}ob z<)lCRm>oacrZOz~GRFOdyvOczY`k6H>$)|R823j6>gxL=x^9W>(D^%WH}IlR-h?5& z`BsRkoJ=UMl3K&hr=R_(ko|bt?H|lsT#2L{;OBUber7ggv>7)tQdKoEvTTc(&P6<7 zFtv<2yXUO^@8%JwWfW;;_fwm?OUmm1W}ZWxKZg9@m#Di0f5~}Y_e&@?V2vBsV9WMcnWrxz6uJs^vzwa>T*4;KVB|db#sCV0|Z3l=_#hR zcuw9WHA%Lfrv3?SVy-V}w!H=&m)iIE@9_c*yonWCOoo%yNkDebhJYPdL&xb5K}Jth z#_1zke$NrM7+ei$%>- zKz7#3tb|&!k_2Zz6$~D7+TIyXoeAzVnS)U}Jj8thJ=AZ`8jkniGjPe)8y1MG$oNto 
z%+;G5ed)lTa&Wsz(Jf}0a{pGO{RIW$y!E>7Gq-^lEYtBiJ!`jmqL3)R%$Yh1yn4Me zGoIR>xVh~700;T4I^Qh!2>z$&9Gra%wO^%PAL*L#5AYda1?+pKw}HiO)ZuMQ!mroA zNiG6$y7Eh<-yEGrS3UWEQCcDa-SS;u}%p(!!5A1F!2!6ugLWsnp zYnb$%hyu?9C8X9iqQ=n?!{Sit-tcnjo#$)*{GKPCeDPrFd@(RHmAEJKsi+4gyt!ci znl>sDrVgJ7IHtn)@=c;L7}(6e9WjkY41@Lap{p9*;TZ@&OpUy@w>SWa@c5_A=*9Dm zq7!rQ+_wL3-s3X+9!ExA87J!XfGko;XnhFj4(#%gnqkTuisjuC2|o?M*4Vrk$#1{# z6!S(1p7o2n;=(m{isqAcUQ%!_&APEgvBMR8H9$@~wi^w(t#qe>6azI7@ym137ZHoV zC*f`BW$~^Pu_;7%7?wYxzs)qm=hbZ2Q&>?9cj>7>Vk(KCz_$5FLNYcG@HT)0LEMcE z`*7KzPc=k{(MvJ^-$ByRf^S4$qA9b2qq)iEDKU^>hB zF`j(X9Uot~v)pRx6|81>H)^NW9060$;P&bJwmXUFr6X|Owoef9>yyUofb0Ss{8$@& zi_`jgCNCCn1)vghL&2ezfG8&A4bv-X?BG#Jx*xDmt$|6sT~?F$0FOi4!-tI^U{CLn zoY|%vRk`rm<}i5Y`W&=1f+lD~UT{P{>j8xA&{AO?^>+$6A^uu5Z$d#bx!tzEL4XyX zjk6%1XXj2ujZ1OvElk8~2Gt#tS?iHHgnsu*bd#pGCsv>8i7CJJEDZE8P);=vDeg^b zeqtcA=HBDFXYnGTc=EE*70|Q2e14ffef;{>cNu=ZoY{LWCl-`&n|F!_a|6+V{N0et zsTq6n)@di~14P5}2HV_R*SLU;i{7(?C;k=U5w{&llJX}P*@5M&vGFNKN!wmKHy5X? zAZx_Qt|#b$vh`Pi)g$M^%OpHrH5O#i;xO>eB1$kX!T->uhs%y*x@osml&s&H6 z{!j&d80fKhea8zP8+Ka1^*ibN9epQ?+p~Kt`{bgyNa>y1z0GjW^&YH7pPq!jN&Xgs zi|B724k56yb$%}~zT*KxKE z$k%&LB}ka~n+pm*k<<)D&UJ~VANMV8o7${Kd0QQga5=$+4=tjt&E5ug?jhWgA1?OB zake5?5xp?z^iKC%sBhBW+a&};p8we0reOKz=n?ymT76BOF`r}pC(6!l_`P8J#%@*@ z&ery2?RJ)^*+4^vd7n;(`9O+yCnO{G8eY>GrZ^=AbP1l|f703r-+^_p=XMhl<5ZVM z;TBwUywGCzIQDNyQt@?{--zLvexLe?b9wqvm@cL+gVUO7Ur2p9;| z;l`C~&t0|Z>#&Qxn_Zm&vVjwc`Rv<^1#QcVxn1C`wIT3*(_fQ}w)s8p(*jA(XD}|F zgSyiVB&4efn1-jdYjd21L&2-SZ%NZz9IvPEp|N~o5?$)If>7NDyt4JlTnNdA=L?T# zYf$hyR+;_%M){rO73CDDQ$(72n@6CFC-}3GX=Zs4No@gxzDh^@Qc{(u12oDL)D7wg zhyj=~&$7Y7kY-Bh~2T%wEjx0OfR5 z4@UKFSU-B=_2q{+&!y(1JvS?=;J{V_}`xo z?xjLIGa}CtA7C~+9ugZpHxcJ;^9aQ}48$73eB6-(xapP;3x&NBte*CkkeHPQy^;qe z=$~Ctnhy|okLU+eOm9AmP>Cy==Lz+^4i4;Nof?9AB`#SVe4ZEEGbpJaJ5O_>9SeR! zC(c`4$tfZ*&exqgxL`Zo9lo43XZc-r*&Ev?Vy9OfR~O0~ul-Pr`-2_lfrbE&AYzNP z`Lm8I!ymqC1Fk(-wqBbW*sp{FPkak!N!!v7dft?smf6o2r&z782Z3VVlGNtk&3Y0a zo&kxErtsXTO;4&8a4^Mn$iThfMH~Nb&?R(ea|N-7pzi{IwJIEzO*EZ5yYT=1IIY>R z$wjI093=WIcz8E|rT8KF7Kcrt4>S?r8KiOE?8aq6eJRkHpW(9Piv#z2NeIBQd}ers z@}5U#xp8rb1Ux*LFa?MvJPIv2u7O{yqR-EJUaRg6ZhZsP1{|F(rpOk>=i6e-rdu+y zFWaKcZf35cTV6eoPF}*9XYlgFy6-qV9o&!-nplgZnQ=9OuJgMiM)&Bl2t4!Z96OA(=<+xSs07{WW89%ldq{4}0%9UbNZzYsX}sCasmS zjN%7#RFd6f@m>`LvbFWCtS*(0|B8iIPeSYK4~3c=3;L!`8izFQhb+^@H zx75tnq?ws`?#`2ud>^)YR1 zsH}o}G=VB_ewCpn2UvZYE~R_;OE`G;3N_2Kc|5jnW63w}bxavTWQIfr6GzJj10QvQ zh=cNng7I9FNal-u#ogh}#jGIK^bDsysVPi;g-=BRK^T?9=_MX|5|wbGV!kq}xAvwL zhJ%}Nh@3z$+C|xabiocxn0Ar#zi@N2!gsRTSTg%IJrpD^Yzb+2#0v`e-fo-kygqTy z8a$P289bR6<6e~WdO6eim=y>Is?Y`>;s2*Ls`xN@I8wN`mK`~f!ZBv4Q{!SVywP{UT)91l|l@ZhV`{B9U5YqEBkhV31jpUg2gWL5n zzq*6pe6tqZyEGexZ5Ym}w{_YgPr1CK*?w>`f`|hzrdPqj0nQu+8EL&Zbo3f8GtL5u-^$86adCWeS6n-?sF8eu zYi!Fk1#&aNXkf%2w}-lJpt~H2UAn%kxfUbPrfZOF7{+KnbP%`m%*UD2;8__@i)Ut$DTR8ETfqDQ-}zJepC?i$Tx;R z!Ioy1{m|fP@AJT2e~%K~K3}pMxXp0Q6Lrvz3YujUG%&_&HCxaJeA#0z87mSU_TXboH99`ry35O{Bj=3A+%ff%c*Bu@Kwu#V>A+=AFktwc z78#?nB^>Jr*F@7XUGOJi8=vaEWSnm>9B=h7X6H3=pY$v6iD+U>mgyYtK26SY#lrB; zjAg#wR%_`$nqX;mcX&(D7Z6URWkxYq5NkF;)J)2!kkjuU8ZXjk>n2U^J-EjF;GOw0C3Ra`6u#f@6n?$7gIFr}s4!kBqU(3%S zUXEPav9I4L=Ipa>dcZbmG(6?0J&LsMcKv=KOI-do7;|M8ml&!xfM~G*X0^eK#1ESU zlho+LZk%fK+G0-3>OLF+M;nASd_t#o+*P(`!AC9gX0Rs7Q=wdvYQaSh{Np^uEw zV)=YIof1Fs4f)uUmDgf==F3vg+Rp?((w{OL{N|llMkkv=7l8L7n!Tb?I(z&M&4MmxQeF}y2vPMb2||07dvR7j_nf{<+eIv)baye z>G$LBy8pENM~Bc$gg%+)6HSirCJ<|(6Hai@ykOmt4Vo@!5B~58sP|bH5?>J#Pk5kH zF^XIsr7O3^z|wL5>Zyw6UO_f^M7Dr3x>QOR+L`Yzi*`N@A3Zc|*}pV$6We-J75qLH zbD6+Ojy~)w?p+4ELOL&)x+NJ-NmzJ-NlLaULn1) zmRO`bwcc@nc?@(61ds>IZkt#o`m8})hG|;hq=VBW7v-aO2kQv`n1?5q*9Y#`uZK 
z@oW)YSb;tF-)sgmM?t(wrk4OL}M10bYl@hUGsX z@}o*`%f>HF<=YgalJ=d!u;XA`4emXO5Y3Iv{63n+iWYB!^tPpY18PAFS%jfb zEt)m0I*Z}wq%hwraD`%uOJNjqw`bt@Jy@-};>`jQW^9{>!=`*Nmy>7}_pSj$4M`6L z^wseFHx~>#N4>Y6mh6A=(~AV_FaizPXHw=8(+?Ep&xkJLX7F|6izdt(hxkcf;$z|X z***q(bilIM0xTO6JlIEviyr{1TG}2*6W*?TsWs+5cZA;9p29sD9#UUy$l6*`+k23F z7r>DK!7IuCLVqkA2+lC?v@$|8I=Lq8ecb)`KW{7fOe>K0)bbQeoYS17hir>G8YNpu z@T4fXOw*q732_~5fIAQG4APY*$Xh7Vtvi|T?gNgKz+J1Nfm~w#KIQgRxUZtQ1hs=z z10uXYv2t+0#r=At7-wXw;QHxF;6(Y^by0A7g8!7*?*G6>=L+&(I~(kDTU`rfcL}te zk254Wu^X9c9`d%uc7Djjm-z)ilWCa5g=#$j-4buAULvL2PR9~QFTJKWDh`hWhrd{` zKJ!8A#gGbtP#YLF8F(M~-v(66T%9in3634l4q`qwCtXaX zluHyBZm9e!ja}eI2_@Xvgxx{ToaoY-FYauoBi+@K=sv{IJNRsO_?Qu0R7ituQqLqZ zfKz*(g-rLAJW|xpdJy!5GsA@o+NaI3O>Efg)slE8*3qEP$7{{gPU2mz11Sq7$x5pX zodIrHoqis_d9>M-wa6PG2OrB0_IgTFjB2~Ws9Hl4AUi!gquAYoY>YUy=sz>Eb9Gau z>VHuIC$s(dpn5yP39mY9)%8k7ip#(vE`fkcf5Hh?Sv-M&^ISRyWC$fW_r4oo8?>z! zL=sfa;O=*&kFPeseC8jw#21@2@dG$ui+^Ol@X+rw_m7QPJ%ka!g#!>y5MkjcPkSGn zBb`!I@#j-m?y`VMlEtd%r%Pee=~0c_`l=qHVP7YqTb^m~+*yD1fFKCy9UK^r-Yn5M1g$R-O*lN+_=OgdRG@PBstfKYt#(L;}Y6Gu!JqK<_4ghZe8ST zPtlwZ67k{T@cQ!TkmELN|ESa6zT|j@ZF&RaE+Tf7r4stU5J?|i%Npe(So$ z4#R~;={u!;@MuhT_Ac17*j5C%*;HSeq={EbJ%l54IOHmLx}o4ljoYnzth;DlI;=_R=qyD^fbMR*qR z7o$6q{p2-j#wr;)s&8~7>>^{>UPi+QQKqAy)m44L5J@qb!PO0ah+$h$)Vs_a4ivw= z7BeR>9G}*WpTk|T{6GgLYTVgUDi<^>)RIcD6w&?8s)@&cf61;%ZWKKhBZ8TwvJjzP znEu!vqw+aAonENmOLj7pLRhL!%VUyFZZ-fpYf9?8ScOoFf$&Vgmk=%VkH<$F9h8}E z?)_JssD-n3-Px=P1w$c89lL5%=o6}Lrz_h8GT3_W!ysnkB-fs}9P=8)-eU&Ll$non z*3{N{^F}1L&F0hqK|c=@>u#y~BF%`{WIMG!ZMD-9xcz-2l(Z?CkuKQtZAoVMXt-;m zB$w_WzAo@?xP>QMH`-cQ_W;NuWh{ez_0Ibx*X__9+Y7P1Iuwp`_|+^txY8lRAl~Y;{68F z9nmIV7u$g-(LF@EqLwgP%f1lq=LxHcCX5eH+dC=I->2g7S=0Vz`1117IgROAdnUn> zWLLIT!_X|;ORyQ)xx_v&hHE0_p7rg7M_tU38XVX@&}?h(@yQVut(7Ab*3N(#^S!j9 zc!ujewT$&K4>|tBZ1P-q{uPx+xyQ`PO#VV>dIYeM*}E_i`g`^Skh6LxelN=DBtyj+ zY(dYfsmyEqp8>F4I0rfXzY(akS<X)+?~XH+zTgq$pj7`!IGTFIr&a%3<=#W0_d^6ra=(%#`OF|K7qoe6XX zSr4|Jk{Q~QLM>iQC2C9SPI-XuDbh!Y*05R#&N;=Btp1!ts&0S2&2%OA4@@{}VPh(b ze-Hlv8v@wIkt{9OzrJ4HiLSH`@G@sWqr4Hqc(;--&%xKY*Y|5dm!sSx>q7=v8s!InO+w8*7~Y$I;<3D2wwLW8n~7a45C=Y>pqcqvjLf-6Tjv*P&RW&#lS}&* z_+;(2CvHI!xV_aRT^?P1jVhBK!d%P#RkbR9b)NN9yPSwVIpCWVGgD|7Q+}B8TD}Up zWL+ImDC*=>4ja}^n*7Zd`N=l`()O6=E_hweQ+ERb{{u(c+eh@Nx!H}#-$Xfe-|ZOL z=*SOKt+m~(lXX0T1um$#%a%1Rd5$@<4ZQbZj}601e=@horWx1epb@ufX+q-7166Fl z(r&e|d#rftfy^tgt2N=Fig!P0%Jh@}rJ`h@7d>c&R2>F-TP@L^p3B8C z%e(b#q7p##&Ge@qLu(oPT?+$W(OE4(C_yz3)@W0otiG1)>f$2Qb@?q zlq9@pCv(L#=ZAUhEY$(21Opy(Pm|sZ@4<4Z3d8ZXY)Ni|(l@V{p&f4QCZc8}NuogR z9b1+<@Qzz>)tRrUl@eoQAXoS63H859;vc3wGJhfZ8Q)$f)`9P=<)Km&uehrl8`Vbx z(!^gJJOa`;6N$y_1m<};5(oF-ZTAG+l%>36*dC2KW6N|k9I+%< zZ7%InKG4!i7Kpl9*Yr&2<(Xe78;EZ#GG*!g$_M~-+pTrCyAz~mRLMK4a7&UP1Yb6+ zw+CsV?2MN9z6Fm$R7;+APYGjqtR4wE;2 zuT{`r#C*TUHKaKevh7dv6P-RZ=*reZd!VJ5v!3h(W71CDPDWOZz@a2N<~Q0)4fbGi z00(5ebGpHXkh}%!XmrgMYju|^F_OG|EVj$RJwTnI&CCj|t&2BXC@B?EyL($vm{02x zvzhQ?!Tf20RnT@OYpQgZmB}@oVsU7-zxyc?>AI^`JgG;XC(H`?IlN0jq&ro_?6rZT zifRa8o?tvQ)EFFojZt>l-&I%RuLTD*2bv`1P-*aYwY((3yrS(bx-#2kRxFF7sNr6= z?hDlIXA#jA(}?D-niL`K`oWvAT#<)^NrRlFgzG-8Dr~Pp$wY~jCyp1FKTp*Z^$fK* z;;g?bK5$D@1a8uov}`YYd%L_Ul+c8#y8uT$P@d_UIFg8eQY+xj>b4t8HF<#Uyi4rs zZ;1wN=KuiS?1eL#3@0r0o`?2k*%0^+shaY;H zspl_5yp{`M&RdVPN3@0jw}cGvz&`(?klS=OOTSDjYc50^|BjfIU+Dcoe`~-h%0+pr zSo(5I8`q>q=b1Ap+;+@ZDPaf)Vt?o7a2MCAMW_hDeH^e;Bjv%~%#o%q_bzYDNp}Sv z6h^ti!ITy8_9et+Oog>6J#|?Kdq_(WVNPS}LIUzewz40>trm^FON_KtC z;A(Ks%2XMrdFee~<{ny2cPpS#;XgVd(L|sQ8n@@g6+kOV(yvtjx~V zONtpXgCt5*%;I&3xO<&H_m|r(N}iI&QSs5-IFoO(>6 zP&se3Si?8`vBwOkGjhG%GfWuJALJHeLT_x7eIhUWlx;jkF>E{UTbv3&zJd7 zXYgZ(o($z#vh`Kg4kU_oq~YJ`icvk!#iOc`H$?HNPy^JhD@deDF7FSK$T1AURtJ3k 
zUeSFX#@1o^8lc?hi6L8|43J;kq5VE_C_CKACGtIwCFxj>3^96vD{{D&p}DO8wRMbs z9rfcbTD@1Y}LyN#2E?f1+`2zS$5Q=pFBrn;GW8Kg8u>aXX+++%*c(b z^kg_l2oRA0IH=QDF^`46oct0Eu}6D8qC6DSL9W!OtM8zI|MBMHT+fePw#Xv z(g~bY?fn<)!uR7~72a%0xyN24$VLw#qI)T(pxZPi$y}vRaXPZ(@e5Ymx6#erx56bm zUIo-9+7qa=CYA{S-Gxz0G9Q{@o&`PD_?|o3ScS2h>OP`6mK>gF!t3BFg2M^u{NUlv zO=H#qI#zZvUs6~O#FJQEx-qRO+U?63^382t-|5C`>~*|Q>n`RE&PEh=lc>5|5@F4* z_=26ZrSNO1g>z@p-`HC^k>OpPfis~+$w^r7fkEbUVYj`4ItR2(2z7d}=(@Az5OC3r zF`-3dosUZ64#$NS?Ga~=G$O4C4VN&ZOkN8m8&t&{%3B}P)Juk;UHF<_U^bRe?zI&DZQ)23sN*m)6^1ce- zyZieQ#&kkCViNst#%W4F<7?r$3w`N?#qZ1bvC3FA^@MsB zkoWrQzkN}OVhv$2%Lq4>>C%*0FDz_BKC%Yj5N@&Se05cG5(N@Z zO+x?lcP65Lgu?o(KqBXd5au1zSW#3wlQT68;b$BCRs40}v42>OjkNGH=cc0J8Mg|O zv#)}1`s@!XB`&G$Di66Jf&5JNZ2_h-*<6fy`2_^_!f}cG?$fWJDlVr!O>Ne{#GvDS z0nSvHg4#`a7%fObe^l4BfUVn{$g)OVenFoA_dLxYBA7AsHlTZ;vodxvx8wwUe9weg z4X^vqfB+5>WR+%e;g;La6?y5tj=G-6+GF|PJn+R0y}3EB_Y?y2sL`-eg_0eUwZ?Qk zw^*o-2f06@abVTd%lQc8P5xbWfsK=kEMfY^EipTIZ+n~cjCWoJyvzd%?4I(WkLt0M zjH7Wru57$a9BC1sF1npRJ+UC=373S3>B+#`Q=bf!)I@W!5Rlbpn(W*o55hUyN_Co^ z@p_<{OfI*q@&rC%1Wpl$1MW3Om!0aNjJ`ug`5)t%^qHuK6@Y>2=e^+pz2Vgu0kUnqnguy&`C78hiH90pp&o zZP0gr0q|Y5XZ{DIhYJH^sod@T=ywP=Pe79J5uBme8F+E}e|m6X8^ z^anu>93lG_ropgpu6qj~+kEb@aHUoaHX^oLkYP`Z079K_C>EXi%hCmwwO0~< z`x24HOe@&hyiLD79=ho^HK1^U&P}jYv6_Z~$EPgJ2UM!Ko%-z*$l9$gG({z`VDA1YQ+mP*8gOCz$3%*$r%augkzIFJ=Vl`CW`wO}| z)t0{gl!MQhmA#DGAnl8Hu8Rt&5RQpNBlY!G7e8Vz+1T2N9p=w;+)V z1Rx94oDng@O9wxr}V*T4{3%1n55D??z(N$C68c%>?6QM1C-e z#CD*4##K$58zDyd*7Xs&~SU;@F#{ zI!QJUdy~AncBxU!1xuO(?5uitGB2h3Q~KL+_**)^zIuVtv{O+8u?r$v9aCa_-9-h| zB%X=?Z5qsU#$-*?(gvdu)mqQTN(g@0@5^}K9cGn?Y%eiI8Liroba_L%;d?B3bKP}y zO`0?xVOWmWc}f1(ZtgDuhzs)XFQ+?rrGsmGz#?V^@rl;4^dGQk zvq+UVdMl4D6Uz2VxM@Su>(o-GfbtjDu`wk`93}=u!roUPfw`Xb#(n&uu8L|65_)y#4MccyLoGeQX0|B_W5&cx|peD zWvFGxELL&a`73`hpKX_J1%Ol&_XOaTZ52p{uE=F!d_;?X#>Bz==BJFjOeKG3+GgZi zIg{Ez?l-&hg*oq((8q!uPlfeEiMJ9LBHXQOuEH;%97erwa{C9p?TD(hBUX{^EEw|d z<(2l;PXG@1bz!#rZ_5(kslR`g%bI5Jt00>>pa2rg;(Cjv>9<5MW13T`{?RJmd~#T8 zj9F@oX9-v179+eqU*57Y&|H#u(zwz}LU%s>)JqO$I8UiL_=MWro8_+hp-)c%y}d8Y zX3Rx!n8iwWFWEIwzWpa_Bjc|4PmskCJ`Qb)e1U#Nj zQu8!_0d9G>eFvh;>3!Nr>N~4%%cx^6NA)AjfnoP_PSjoJoFWU(2KvQ^*^>sz`bm4C z4mmYl)h=XS3$=so$&*6&9^4S|l_d3?jyl9%_}w8+`&kE0+<6Di_?aNJ-d61fXYcgp zSg)YD>QMJMkwJ%_IOO)Ye`7_Ws_ZLu!lb;a64Fq^g^%QaU^k#NG9p?DbBO^zl&xm? 
z106Y&MME{ACbB^_d|gbdVxHNhxqhWcam&*l#Rkr4sG}9{#^D(EW}_D-x!wDBM(LZu z@1~9y4bReAPO9Y8<4iH>nQpl{G(b(Yc_2<+4-!qZ}OyU1v+}Q0Y`qLEw?OB3pgWpwO2;Z}evp-)XD znUD6c7>rLY!UtpLV`cLsEUBo+!oo7l%`U*C#5A;qCJ0t6@D|?d)9=4AZjB%s%Q+2B z_j@H)hk&rew>D-hCKk-YH1c^7hXpOx9%;n+^pg1)C!EfbZTfX)l2Rl3$u-CJEO}mq z$d>DAjHMpsfu-6fH&ZpdXm{r%)$YVNy?eS~o&j|2izXOn0FBpOgm8I)=mYXMm%&?x zR_)_0J#666F=p-h!11SEaQKNQLVxdzGS^Ua_(z~RKNa&nNWrIv-87Tj2%zy0%mp`g zIO2x*@2kS~+rius%RlnQ*7Z!R5$>1I?&fZ9+}h3l zg+BWIm2D2V@bM6=Uoi>=?U5)CO2+ueI7~}Q!DRUy#>Z!%Gdc}z!X>S7wP=<7u`#|? z_Jdl~M%F8@6XzVXHaegTX&&rVY-F5SgJss>IG53iA@qCijiKMIC9Z<=Um8KQIJ+!u zjg{qIfOPTd@!Fa}@GY82aW0%5^7}x{Y5fl_;oSexF*L`BSqZB~T|}Lj!kKuxeJy5I zy`weOZ6f^Y)f+LfblQVh_UO-?wo*+p)hableVj;yi8!4F1Z z-^$_m;GI!uT`U`aLmHyTm0|PoI{e{Eie@P>BY}pDVw-(;9vuzRXy_xB2psamU0w`( z-EeD{EuLOY!GHhLO#I7>W_*1)5f2Zu*O51F2=JG!_C`!{i2NG`^a@IACK|=D39Nyn zO5)}10Ma#v3>aeEYOeb-R1}o*Is`g{Q1ZPIL}ULHo?OSM@QxUojN+g=9mK$9g`Da$ zwS_G$XSCUOM?J-JSxNlX_MR;+ z1!uB=(h5$4h)K{%BcrtZD_#h~B7_t4TBMeQ`oB!Fk$IOqWjBo zQ4WrRyIm6Ev*WO4wJpxA9fb31hvD415psWo2=CXhYK;rF%V&IRyS?@|xGO@NVp~PG z2<=g7vDGbtYLa+ zwuca~3;QYb1C!=DW@scbek1|3%ZD$bDY6+0n^$30?R!GJ(=aw`Iy$pvp(MB-ac=Qg zxw{y*PZi?oz93vuLUlyf0#95z--tgxn~G1b*WkgaRNOuijqCeEape<#oRAH-y~z)D zLRKz6XM34Tpn_b)?O8|2+jz*gLAW{TjvZwbapz;R-bYb-5BU#Zl2=*(Y? z2_k4F6fV>T#8ZpkL^sD{CwHl_u9s_+!n!P^1|@>&_)&sc$4un8XCp_hpIlj-`k3Rw zzIw^p;FfsLbTo`KdmS=(mLx_xK}e^o*AMI8vB&mhcG$3Z6h4?Y6q9G#VEH@DD&@(M zhh~>vV9$h)M`F!#Tf~eBMZoZIEZ-vg?ukG=KfzKgo;-^o-tB~ITWxS*{a}3b;V9&F z#3MRB9r3cF60-2#!XVkJz46(pQ2AY!dKx{=G>|5*!M}(UGI4spD;~(Y zzuy_(oR84*F7`3ha;=jb8smd&J6&*a{V;Th_^KTz*v_7SCNUfgI5KmuUT*7Rg>Xe= z7s-2;~(|f@0koIAdU(%fPrnzBD()xpi4jv50w2J5F+x z&p|MrE2N&t*H@8kWb|@BwHoV)S3`gB0km&F`y;y2r=ea%%NP+aE%EJoFvdySJ-Kty zo+j_L2=9Eq8hL*rH2aK`ytp(g?B{~fQR(QMU5^`g+VI8gCOo^8hsUSm@$k3>_skRP z+XsAc=TM-wYq)jL7yGt5;H?k6uxpDm&KwNF-HVy{@=i7W_^1Wn+^fZl>t%R!xlA4} z$Hj9+*m9&0W8NQwprSGGc_SXqBa=`wy9+PA7?1z@^+f#fekGorO~Bc`VMv^mtGsNK zcu{a0yiVR`Z&MAPDV_~p|F?r^{F#Ay4{HF8I47_3wd7~k>6`;Ooqb?ao%7HXHx82o z-_u28GgwYeC)U}bVM@_lj7{xEt6ab2b@t(86KnE%Q9w1yf@?6^yI7th+$91s%Q0C4 zXcXMp!s*KEYOWI%^BE5D$lw7PO|@X2nUE~^2zlKioF1-$A}-@PH;g0&H#1WxwvDNZ zUT`hT#EA_CPv2(ig`Lj0CPMhy7F!%xGYI9=f-rWjr)ajJxVhCY)YCZ{E%Jv0O*qdZZN5{Bj5V{!RxG-hs_fR3K|ay|BF!}V%0BsCP<(T&3C zO(+emMvhMrQe2I|nPr4EU{0+Srz_Mn)K;-=@H+9%(!_zhPP_-n?=zFwFx7dU#CxKe z>UcAQpXMe|O!bdth``vCUNkWX5mt#_W~j8U!OYrs(Umh(`|Y)-O-8Ci0UQG|F>`LX z^6A6#d3b)a5l=1@VAV&_$e$C714q*E=V#sc=HWOzzgmWS=W?75GEQv#W5E}eidkp?G%kP6TJoV#b7VM^fJ-$WsBr}HhQA5qJ%+T9*Tv&5veUP z@;N!qK23Cd845hI4Bf7WVkyXw#UT|Lf`4OC$20pZ4k;s}wM76k(Xt(4QRrj@&=@?+ zv5P?R$Z$kO=3vP?&N#JE_Sg;f$e0+3i1C@&x6T9i1mmjB)h{_=&xR4GE)Rt7APao{ z!2{m^;0H@Y0;a7=#*1UFV4u-P2RS^IJDsn>|gta>;;2#K-k3%wm9|CU}Tji z!V;N|>2G@B;P#O?w|lr4?qRsSYZUGY_D_rcpVsb;BA+;n4GP1Qm?-q7BxBKp6g)mS z1jj#dM(2lh7}q+}#5uXWRm`+`_$^p3535CiUkTC$=ZOyKh#8dx%P``bhu-~xGnQu? 
z`(CK-dseLXACe8gyDaviv^OpEG#I>@${7yE~-7uWAg6R@7m}hlyyP?}aJz?XhFU zP_0ZjuxbQOuCv9(tqwT9$qvUq8jAc`qNAL|2g~m}YtoFDyr!|J>$x2DklYIn|H&P$ z&Qa*v+6(-7I{tLO9M8nlYlVUMheIDZ!X-N!&caJpyc?P32Hpo|ooc0sV3uuE(p>Y< znOKhY2+_$S0+28~1QSvUu&AUObF%X=EhQDbafxUNjzWP`I6_`EyuP6en00FEd40WT zZN22D`kjk|Ct|f#QdL9~#%9h!OZFmk<}JqL;w6|YI(}N|Vhy5E!?!1Qi(elndqE4b z-1E^8+lB(cc#@sqSUh2lYld)mDzaVDkR>`ki@`K59)c6!vY6t%IYxT zENWEh)9)>75KXk0cqwu@Jr(ttzUQp!kz32l-Y9U7#+Rf}7y~rh?9s6>9VT#-eLD7eX*2tK2fcAu`*C>b)QM~I;T>M{ zIU0;DhF;y_jQVL&7~v5Qm!BD>U`FsPR8vCTG#YJ)H<%(?n+~0Z&1vY=kt}45AhKm1%^#B&bFSIMpEo`JVa6b4%CgcD4YM&24#K z0V<@NA&b#%_Eq4hT%J<%LSboP@M}UeFN^t?<@YiPN7hT>VkBjg($fGRxX4E9@>6d( zj0nf*o@5cdcDS>feNzN90<7y|h^~mx-nsHMgvBPow>lL|#ju}TKOC1fu{q%|oZm16 zC$^2m;cd3q%4v)utncg*5q-c5_j#hdZ3LFR<&5IVk+`sT6h1rTfKLzF{(_xBjbJJ2|ZJuChhKjBLeLjO|ney4>ir7wd z%s`ZYQnOT;l`=FEnXW&M0NTZa=TPBOBE;~fnCVrRFnWPD8}3M-g8GCWF#J(6@ z-WRTYs4i}I#aR(iGnNY(o+%RpT#Lsds_z|(#jOJ&BAoqjX(#(yIAQXnFxbkq==L)o zF>x$0Fv<+r@5D*~S|(-+XWj;ii79+%I&MGc!gr6V@J|m*@Xt@m@!hQqe11L#k7a+j zdnf>xw!35N2P082)gKOFsc`!_%f7@gidgo2BU)A%yj~3yT>8TMl~9b!oq+epEyvr{ zotU0cfV$umEqS789w@+7V4J}*o#bOKBZy|+%HE;956$42-z!mipP6W}HtDDeZ$w+h zER0E$y;2Ccx9}|;F2FLHDaCK1E29@Ja?Lgg`L>9;ZkBzv(6>ayTY_xzX*#_p*EI#X zg6&+FWaNsW&T&pewv#M*KQ$$s87e%P7JMfTwIIyD0JA4~VBOmzv1Z{YteH0q?UNkP zK1W3JdM6zUa&w=jHWg-{u`}y!u>HMZXy|c3Ky)h3?J~+ZI5>c}4|qti+eOUtC|q9q zDw^hmh=@wW`c;m&xK51p`%c)u%?-~_aB@!wUYz0z#q*OPcq*^)l!3IPzPP>L6$iH4 z;k1x9n~Fap}RrA*2U(n zLGrsv4kCs;3q@q-A;C6D185AIaWR0#Q>^e;LVoxGw9mi8gggdxdeInFkNSvOOwON) zrDH$Fw8|Ce%$zEipNy8Ysj|1_2%iTS8E6#C408XSF9I_2uwqv&KD&^OhbQ83|7bYw z9SOo6!6Vx;aOsrRgMPSmkOx^_IKJ5yWy`8@{l*yl;Yo+KX87)5Bfh#(j8D&};Q84^ zJU^c-D-}=9B;vt|Slm4lh4cGE&^<2&wR1Y~%{SBVKmR-ve|cDm7w2O15RHdqdp>rD zcVMnj@?i$h46Mo`Pxl4KWXXMl%b8`)z?zlg2L#ddcgf?XIahOS^f~3gA+g-jaqHu<6yNeX@>;wN9hM>{Ee1g^2O(aJ zdCG`DM7-(;zu$9Gvjsk4;Q4bJ5&A!{m2G6C85G*5L;GCT$pr zOKXQ9t6TnklhQ@o)1qHOL$|?`9M?M2*?s-UZ;^A1b4*4L?4SrA1KpIW8^1EQ2 z{La!%?t2WNadM$m!+Z8|yAep^BKJoOjn}g6T(3f;Ihp00g7X;RbOy_R3>j$bfi@dk*=L1b1nX6$IkjgzBs<5V7Q9Eru|4?W-* zmjTD{L)qEPN0j%>SNM8Dy$!!+1-LNRVG<#l;Iy)XFZ z!5Dem$U*ip&i(@hOOQg~f(P94tx`9O^K^SYJKgZeP;(T8(`~Ez7oIT{lEfR3fseKRQ#9%X7-lZ+}xFlrF`x{peL(P1-s0xy? zbZZ{|_^b(kxtoj6&RMifgdgg2@5EBT5S+9L1ms=lVJPAw)Iwk z%M)r-X!i+j^Lc8l2}ZsSNti^=-&4S|Jd#2?RzYb*xLI0|hytH-0oycmNDfG}wIsGy!F}kT1=3 z5KrzEnAU)z0ZJubc|Vw$M-Uq3+Dg90eSdi_Uo+D!U%**_Wd5xYth0{NX+9YukbVqb z44@Ue=AbjF19NLQO>Tu^n`K=r)vn9v!Pt)raejxlHV-{3Lixz*;pkoxh)btS@!|$g ztc!401oe%*0oqUDBAdc)cf^G8p|G`2hMP7rWhR7d{#vHNjlX}9+_{H%4^6=AT~qMo zXH)T)2gUfq)dYNZJrRGolYu{8OUCEtqVf26s163$CxWr6Isi`c`8hEdM}Z*tCP{kB zvi=qZbwab6|_TrwX$SravQW=#j@$uuXx zr8=Y{-$O*52G7g@noCk&04lqeEWW4E*j+ zTkQYfWlUb;hpii&#dLF$pEGXlaKMGlBXMHu2;3B5{ak+M*)e~7dNNd2m`-7OdNN2K zyMNFfmkv7P%X6W4!2^=RZg{lE9-kg|$Dgmp;=60nICnk{leTrEck-KP&F@8HvXHL4 zH;p2yc``jFt{qj8jVKH(N0w)S_K!0RbdtQ+2}-i-EG6vTn)!Tl+bg%S(sRoZsw#Y43Pr`Jr}v_q-S1Tra@a zSJUyurDQxkWzh+4kB>3n$~FyQI(?h`&AwX?_q*cczEGUG-U|Hl0{oX3jrjUXG9Did zlxx@r*LHhh?;00)gyr?0R?F(oPBC#yZ1-`x!SkLk3+{#c7*x}qr?TASXVyu58iW&! 
z8@M+FHy8UL`oCguIs@ih%t%YL%M_k&$GDvNXv#s@p zszC%~Q+y}Nf~t^cW}W7H<{3fr0fF-z=cK*>8ne%gen>{poV?Cq7A)PgUf0YfHfiAHN$5Q#$HXcT#b$iA11?vgZg z6{n#kCkd67XcW4KV0>&c7Ubn)c19+qBn$tiRG>4t6)j2QjBHcBN5bvo^lHI9n}##v zEKM^}GWs|@MsROE5F@8E+myjGV%`wj#!}3S3F*VU;N9SLli$tdEsN9CQk`-%yFA@l zg#D-l?Uh2z2g-Z-V+o*5D_<<;dKQ{un#5pNVVoG5g$?iNwAh}!xoWU^NR}Vif`Zxv zy#J9euI&lO?ISU`dn^Wbjz;6+z7Xu(V2`b<2I1AE@E82GC6IH;uC{?Ya1`184(-Xhzv( zn4#UdOkAq=R;-i14Gi;n#PmSiTkq>Kajw5F7!Q0sTJ&H9e02Y(Ug5vjTTHs2Tr({D ziiD5s6Tz=VAz3(}B%lg)iBr*qX~^oTHo*5={V zW;gBgdQ*%s`xWnBJ52PKy=I+p+T~+%`Vn8zO`f>B+XpTYBZQ-sgzB zdtGpQuZKL}Lo|Q~o*(nlK89bO4$=W2&yM-xj{L3;2YYuEmaTHf<-I=goB(`vHW**& zAs7dR1mML{4{X`#kICKfcrzsn^Rja>JF5WGv#P{LwxA`Qz2cZvZW?RhcLvWm+$-0& z80l^~8aRs~r;DLx<{2|nXsnrKYMrzhEc*T5#Cl%BJl3vTHL)i0iWJ) z#RGZl=7~(qT33L`>_WH-kGlNKz%{kK_U8HrZ#e(N15OT6s9)KJ|NgvIr#Z4u_Qjpf zc>i4+IHYC3`KR8RtYM8ADE>{{8#-RFNj_)mCEgbddI@Kf+gPf{19)PS2lD(JJ$@zY zm&}&u>lztx)c=OZH}?m*wFb@k7{B)tOf%s{18DT@uf(a2j~c;AmNjTin~vse!>dmc zug((LDa=GG6Ahm^K@Y-P1>@{{Rv^AJ$(Ge$>Bx3v0F8k&1LGP%BbPIy+$`NRdY`fP zVWeDhv1WoxKVR{n$5*c5&{u8M68?3hXU97+^vUCy{rVl_Ra1rN^Fe zV`NoC)FIDLFz%kOgBUcqMR+_`FdnCzZe*FVy%Mo$X!!33$yM*DhVS?42EP08PLIxE z3k@Co`%m6*_!|%T-VIK#Ct&fi zDtz{|6W>1Qz!$gc@Zwq-o?Xnt)UOx`J<6I zv%?n8kNXMnhT+pwVyKS=XoKN&k$I%bTsLxq15CZxuoCpiXP@e$~Z z4nu8V5Gvh+@n&Wg78Dd=W>zsKWj0`JN;_H-Swr1vh-{5&x1&0`SwwafvdzF5)4eIO zqednvAPGzoG0Qe7vV%Uc2lbIJqyNUOk&&V&*CL|X?3Tdv7LZzo zim`FdRd6QD-({JX2GCs7O*H+mzb+lVgd(?m)Ctv zR`y`adKaAB;DqCA?XhLWD;Pb?g0rWp@zwnvJiT0j`zKOxV}B?v@A4Ay=8n@_?ast_09r^wC7#^t#9!_gdel519oLo;9n+w7y@@oOPq`J70u43@N{v|)A6yI5RSkJ*_yn3yQW z&mz~xFu}8I!iL~BS0CQ_y@B!X{ao?RELQFt1A=IRbzh!nsi}?@LP*u&04-@VFeYt2 z8Zs7&v1Xs7g_tHHds^|ELcmM~@6z<!N49&20QXiQPEmeirxR|< z=ezIY5!kzKn0#-8NBdmySpLS7BY}8&JQUAQ$U4Tq=ZLdM{O~QObe#&qQxWISolbjqLR-N^G^O^4ux=OgEGuEG5^kl%Nb%1094*++Qf!lS88BnUnGrxE z>6#Pw1_>!LfTkGd$rXD;$^J&-6=A}D6aLw&>wmL?bG{Dk?WYE0kq!SnP_C6nUD(f$~E{_^D5z; zx6z%$G5FKaCZapjqYPex19Ccf)AeV9(J=YE-inm#!v+_(x@a#`HW=pZdn6H?R%6|VSu39Z?L{uv zVFSwD#_t%vYds`0UQbW20(S^5!gn}M3h7Ib9KLtEZ5bQLZW!Tq*o zqD`>|(bC7O;BJv?yi>0AvfwHt*%(=;qrLOZ;F*bg5!)I-W6<1*4aJS#XNe-X*)K)K zpBR3B9U8!E*$|ZevO-@CRlyx0f}4Q=ifgOTW&q7gUaMtcIL>Xg)gbwu?W3@J{cx;! 
z-vP(By5oUP;xvW`+}$hR@3FzURes3NPeZ+<7ux-T(UqBq$*Xg5Up~WUCj<5QH-+1e ztr>yz&S+E!e~%51Mo)Acx}#z+HX;(O0l}zo_Qiy#ILyl{!}Q!1bftHqh1og@!s&wd z2GKmsMyZKuH70Rs;4BwOjv0uxH8`E8)Z}yuZ!Omp+y^swW^y`vPgzS^4Y55ifM#;J z^18|G1H?86Vcxv$$2zl2L*<$gEd8DV z7HSkDfad<|K*5%1W-+DYcc0%IL9{fv9;?Ee(3CzGZMkowr|4}=V9zA+Ow-F3U_#zh zu;Dnf%h~sAv~Q7Sr)U7pHA@3%MhPbaU=o9xe%^>hS{Fs;a(@Kdw(R}Ug?bM zvPXY$CP;KygbwrKb3Q*EicgOR;Il(ccz=B`IyV3%|>4py>ZZ`*h{^6z(%4YCb z0F5Qvf_25Q(XUV^fvd(hGOQU?GBT`X$$q0%R9p<8@o-IkSF1_5SMYVMzF!u3of+op z)wwj7W+c5t4NpR$UpbnRr=TJ8Ei`2=LXY-4eM_@VrOtn0Z?0TZ|ODM5GCy zvxHMK(A1lg$4xF5kDe(QPv>-P(e80WEQl3u2_GCQ*D_lxk*~;Aftqy|@p>3y2Zteg zknBOf<=)BQAhjuDEwbeQ$p2@5Ak9nOQ(r{IM`PvoSbTHLL)#{CkN^WT1D@W21XPc_JI(kH+ zYIr837%i3P)2%o+18Br}UyoNF5}G>R)IA2qji96FZ{!;M6}jAsb8ij0Mh%m_)hS1D zo@ke%oF1$Jv^gf4{{652?Eu=llNKP)IR%poXJcxSm_HTU$ zGD~H=2%Sa|+Yw4~Es}b0JCLN34@50=%`wfl#5RZ**MWMf@nrx1r^8yCtiaUQ;P({X{onhjsg5=PDz}VohYd+Uo>#G!%yi^0MSI>-bQdnw zQTVL0o+035M4LU)7(i=H9H-Ik#8L6caLPcwhhea@xlEy*BLKDbN#RL#0)uBmqYypZ z$jD#=d|#Gs{$4J?--YP7dy4G%p%JjWE&@U}J_>D(+_Scw0gcdDOng674AT%X;UjT= z^KiVk$_BHR`)EeXBW*e^#H^;r6Zgb~uUO-Qch|V+M9(J&-0?(6`_Vx!+}|re{KNqd zguD-K8jh}oL9(HzVthmbro<;=LQFh*EYTPj8ivk*0JQi7Vn%uqrscMyD}!UgjjR+- zG^!V1^3@SGf$_5{Fp7G=2`)ns3w~y)FfZygxUz6so2&Hi{wjSl6-}D&1IZo z8Ospc0q|lUG;_1&GJxr?n_sF^wu)^|=ZqX0gH-1qM+xWE8NPndizk;VaPL?=Zc+b@tn{2GXpHcnm~XXfe*8O2++jVR&*%MBhmv z@Kb^K>|~IZ19IXk2ZY?;8Ue}W2agNQwT8;nf1bS$kN zi$x5^W@KSPLK^A2CZ3ID?g=#M#JiSyagU&>wg~we7`Lu~ z*J6YqGEov(E$`n1)FjW8NWza(a&_2x~J_j)`k-P6v#k6;WaB!#K_b5l)hv3=q zAbA}zSi9I8%?{^39*UXsoY48E2iARHhl3kO;nY?eoZexN13TTYa-*2&l}XsO*MjdZ zM&WB66yl2Y2VyZ});LV8>OxE2Bs6pKYl4Vt2FC^C44hT5R8up~@{!_VlxniiS-h+S zSuE>d#u>{nXrh^U)*nE#VqD0V0W=ZUR)MWJw+d}tBwoSxuYKVuq-#Bywg$-=MB^== zA1i{7r5@>mb1fGgYV<%0{}J>+dw%aRQig`1J*iXpdj;AO#;UPyveYT&7c{(&2?g^o zE^C_hL#vM)hsY7>!jC~ZElY#bFIy0jlZYAbc;W22K{&g9B+hQI!I^cVa7?a+?H{~> z#VbZ(;>rY^Ii7$E$J5dIULm5V)Z*ZN3$C6B$CK0Xg6nA9IpAxQ4es#}?x3;r!fp21 z_(UGtX)gkA2=>eCUOb9RSq^W z+lOs_KLFdBVQ!Xi4m=d&;_vF~sL(cVRU4Ri@8fhXBNL570;(-d=*%&KXdU@WbSm#; zo>;T*Y0+GCnWZ0{spC-{J_gCQ2}pHFF#~7}p7jUJxw4(u=TvxKaL=H5qVP4#4M@rCTUkB0V32B?P#iV%*>QTG1dM*jv)g7zlZ?s5bG53WDXLNUy$r2!O_)>lHs;h& zzt2!}-JUuDRWaSjt;@&kW$rls;V7Kh=!`47*ht(RM>e@&+g8 zPXI3M@WI(FUf92S1Ook&;UL!(8(BMwA!eDdlPn`s;@=3MSp#I$<=+dKnUCrHe!Xr4 z(FPoVGk`WAh(_Eh#;v?;#jxO9GfRnOKE~&KZ*ea$gKP2}9;T`8lk0-L@pu5|A+M(g zW3u=8%3i@NG~R{{ia~};HmW0AG>FzH2D^K-U|r1h)RK9aA$#h?{ORaO9;cJ`i~UQH zWRr*_@iNR#VObY@9_pYV4U(JLn`Q)e{o*oa>shSTt%tdtiHPPT>t2|&Ekd3aX z7Chf?i%(DZYtZZ)9#~$C#f`tRAR6(` zeS`Z9xt+a|{C^vSP48?n5%t6UnqAAo*ac|faFtNu^Qd~^^Ldz4y<9LpT`)c#?Xqej zWq++N$A`P)aCLVeF6{Eb`K_)v^pP#zd}{<&Y)Hmuw;J*AbSiE!!(90I#%@=9dLdnW zdJ&$SiN}NEVYqiBSbLn_Ipm8CTYQk2BA)*5Llw6guoAy*G`RPJ|4<83^Yc(OKNA

JHF@52P{o;5$qD19-`lw zbwnR#Xf|1Ls~Kv{PLtPQTL!BqCzl(0hxz*axjb$SqA91FTAmX;nTb~5TaGdDJ*Z2Y zCtm$+Oe|f7$=Z5Jyn5+E^kj3u-!v2llo|mv`(!YHrag?!z*&C)EkOr>8Lb$iw2vwM zqW1CQwQF9p3=A=V7V@(AqE~|8EB=#ZvnquPMFdS#s4#DX+wos zj-Ba|Yi68L#}ns#O+N4AbY|I`RV1cQHTnVc zeiDJ)ie%V&<>9?0`FL?P+vuCLpC?`Hm*R`-yS;F2w~tQxyZnh4PH%O>dkcrd@r^h* z{EaK@|Hcb0!_(lETZ^=*E$CTZh1nZ&uw{P&7Vas<(iIEw?&SCJR{dN|&8|aTa4I4P zg@}MOz-f@NLBuo!eI%?P5^&N;EBP8gH3YLPE6JK!Xx}4Y>+{XuSCA@D&DWKm`@BS8 z^2FK*rumEL4jYnyY>y%|C-tH^YZ02Vm!PL;Ii|42x@?g)C7qN%qpx9Mk#8}QM#Uk+ zDIEn2pg9Xj1(Xb)F-s*!!D$F>mUl7rJC2cU%|4^SXJnf~k{4e%g5@IQ33d3ZA@KQ) zzlv<@NwsDO2^d}Fg7k9_M@mLKQbs2rK1EEbUpQjBGVtP{|62Hmn|une2hmn1b<{@#slP!i$MO|1my3%?C-z&5^ZgV07m)&Sd?m=LkuaQ)>x!jBm$!;h-!cr{%L5RY77rIOLVW`Y7(kQP zu{I!e(OX&a_m~o$=bng8OCoy21l9*cia?dmRSc#Qsbbc|ykft953u52anGNdB;0pv z?j4@LV}RNWqN&J^1j{+&Y_pK>Rf6j1F4U&X(UOm`*|X7|GXp(2(=j%Ef?=jZs`S)3 zT?1&D$P*)-=Ry;mAlOb2jK`Y+v=|{@4iAV#c>5Q@v2?L`c^YZFlhEv|02!G}vm!7VHnPA>-_YeptM<&?a`-df^ud6QsvoxIG%ocLWZ7JQRmF48eX`o7UN2`ifxW zRV5=MEF2To=Ht7Y!8mj-6Ejy&#KZ>nkCE#tQEp|ibcN zN0(6wU@pbHabitz#_K3%REh@5lUTCNat|>%p+ABiXwUCGLEP*9DDut{Y|j&uw?HTL z)drWNL-y5qb<4$UF99d?QE<~(7Y5b}Uq!+QObbp0qt4924opILja-jYLr^i@4{062 z2+fK?M5vg&j09{tT8d9^R0zix;?~t_oIPESN7t+I;#MWTkoDr~Xxuvyg?*cxv30dv zH|vJs%%%}Iym2Vjtg%7k{2)Zj>&MolVd;)q{J&o`;kz5@n&I%|5X(ayv0}MBoc%Hk z!)H&3tG z6D`3u1C`+oXi1-m#;nD9IL6YCsiljQ*EMLKJq6>^dNC%l5h=FuNVQACXs>+1JIgi& z>&}UCKLL3r_L+5>VH=GUwqy{&&1){0Q#Z+1A|BbNiHAP{vO-@DLc}X_U#@MIvZ&yu zpcXS>2yO-~j8lKd-vz?{wLsXtBDX*Bfn8u4)~*s`e9{9iWG{Ge&m#b=o}t~Y2ZwRwiWZ_bsmTrO@*yuJH{;M zvhw;svE6T|)tJ0w3T`phnu#Ww$4l_-DY&-c-72`b+~t}N5_8VKPflk>n%{4NaQjCR zKufo=pem#Uv#OV&D`T>9du>QLy3>0wuXZ^m7m67ajJJt~Z%yb%ZbB={nj`V<+kOnjEj$(!7^Re#|N%~0W?#$i&kI&t*=LtVv{&0o(-%k-ZemO+_z%A zFQBeLH52<>zc;kKR*uN)(U5cTuuqM=;NCFTW(FFAXv{i)*&^4Wm|@pE!Fwa>Q)i(y zcL{onX|5N`{wf;2d=Yv%WmgP#X;8UlnafZ1TExJSFaZtZr#rA;=P#jDw8?15xX z$bNL#IOx8*V<U1!^J{yLw&qe614){6Yhi3I6$^=e6mvp66}d{_AyOHye^h&_Vc@y+YNp7if z8lD{An$(Rpu7vR@E$BwiyeypEIvnRWx!}OZqtG+U7Cr@{6W65R@v#`u@Ie|pzqrj= z_JCp7zRexC4~O8vv2ff!8j8C|f^;|#Cv;xj=YWO=3+%;TI{u9Z9DgD{@>daX{*47r zKNG$16JNLqR>FhhvH9i%;6KmDKRqnP7iXjKxeXEW=Z@hgMDWSgj;!&s&1-yf?KJzBhLk#tN5@&zXXmM8$Z9tuZ~H?N_1(iB6SkIFQ$@W|N1i&SXY8Iou-L9P)elzsO$7LkVjDja+W@ zlooBKK{QzmpwYwsM7+FkV?u2B@eSv8j>L;of%wz4B>eOBI6S)+gLQ{%F>S#VbXSf? zQ|5TR#+n7^>eGpF_9|w^nQsYl*m7mS0k~qEd`^$f0J+KKEY0pa1ph0qKG3JLZ}s5~u#oAY zCJ|F`hYk@?xfh}~svQk!Z=y}a_IM%Si9E$BeM`$e#*5hQkc+d#zeM{YWI3f_G@FcT z))@`9k$sje1e~Q)x?+s}NF$*@c*qS%gVxmFP)rL|Y<9 zrH>O(8o@J?FDH1^Mm3-$r~Ag!!v*IL4M>%JNEF zy)zx(ecp?2@0H=nnOGt3Ak2C{6xmJDNY0N%N}>h+?jdk|NieLb--b9>;w6cg*=)qQ z%g=;R`tQRGk7-870?p`W&>UPex;Gs~G48G{v=PIah6XwUR1Nr}Eu| zdJ}T^;8dSaoN(i0j7qc2q^wp-}d?Y|nh8xrkvEa$}k|Q>_#23}MZ{ z8MDqT!ziwaaU*!9%M{rpUSd3i7=Ocxb0Z^#dm)7i0}Tu|T7zd?%s}%JOslDu)mO?X z%TtJ%?=IFql&&Upzg>a(ALnD$_6)qUB_3rn5;4p( z9rnKpMQVBp9^J3UAD;B!%RBY>{CYV)yIg>$=Q0G($#}%6ou^_%h==0jc6(Gb1R*v# z8mTGKNQsF+w0|Hx?ffz7cYbj2Ov1GH>hS+OFT!_MU_WN41tG7Iqm0JFM%Fa5WI_=1QV=3u4wU^(1h-tb2ha}3s$Z5Mq5lQW~F6denFX*Z%xc>MSF6qW|7lS6Z1{N>%@I+R3pkn zSLXQ?3a95F#hJJ_PN=PiVmuII=9w{dZ=k6bZdXpHsV3GbwoR;?qT3MK@_teODDGLZ zY2e-9byIM2{T1%n6E>KcDPpS0>!Gh$aQDhR6H!0hFAboLia?HIEZUMgFtci@?zgp} zRcN%-VS4c#%oBd+0S?U%v0fiL4vDel$gB^*oQ1Dq$)f&NWCT{H! 
zt-jp_7lcPntsRM7@4k+Jgm^t@HLfcz!_bjiwXBGX#*X@1_}c03HG{y>F`3$T z@cIrH?R};PWS_X<#M)u1y@|=^oUDDE{YV3iYLWdO*tg9d9b zF?n5aZfbZA2r&X^{k(4FcB{eGrNOiQ!!hq){}tX%oEw31BU6Q?hpG`}PhrqJ?A1tY zTeZVP)DQQoGB_LcQDZPo{35f;iSdR=`Cc&2!G9g1E$gG2WdH3&W6}hSE*yh`sqv_o z8;r>6I0U$d!tT`&#HOX-t&jb1af_|^?@>7U(NN(5FU01=V(v=zNA%F@kJ}vXx6jLH z*|2p4s_McpG_)9DRW)c{ScfGaRAcFfm8hLn3g_f}xcoX2K10LNzGfouO*{U0GfR7- zJv|YM`y4)W&EA&4WJp{4_y2idD_a??hJtI_p*p@ zdEC>)xHVXA@Ve|B+(UF3dOu1QZdQ#&h*xJ2tvW`yD`P&ovZsl5Z`VVxX1QgV zc4ct6mSCsb5#wp1-`QTt8a(4cSiIm|H2jEY(d}V6B{b}H!MW(v5cA-tzfZ4m2p0Zo zklw)o;-t_I=D+fmqbvI%pR z&BFM`>1fOrZId9_ju%enA$Xz{XGPO z=#Ip?7{nyXMLc>m7OyPD!4o~W`*b?~>C0~X*zmSASM?>-8fHzie@xZj&F<9RA zA(l7K$IRRc^u(s1+BZ@z)Bv*)vPsAUr!K>28yPDDKuKW9GKg1z#qX`;E8yzSL^H&8 z|1yB;PI0cFH;COz@_gJ|OT7q8xPZMu3=t(H>c*$TkEwC-Y@12@{-=a#4ipDOISDElD@B6_ZE5 zi7^plbehlDqz*0h7{}D_@H$guPe5f@vzWFp<2v|>FQV$xv42Z|PH4HZTQ=rBUfKuk z>fs2i+>(I!hHTi#XW;e|xu!^nvUsw~l1|s3iAWYx?iN^x(kWHgb+8rBo=?C(e>NU} zyjO$IE~et)u~0lb=!eOxEbtD9z$n3?y$D;UpUdy^dSxaB18An;R$Nnn|11EGZzRJx ztO%|drErTVgvZNq@;5!@`u3FP8^24D@A_-OKXGc}o!D0#o2$=Y3-$+oZ-{LJ=PI~O zA|}rDwAw1R#asuz7AG5aGIBgi&=B8^o~$WaN>>|Ei!qilBC@+sXQ@THO_Yl1JRx7J zfzFr3;8~h&G~$NJ#y^NX3qz44W}3mYI1wk|JdqajtbM%XI%MECP>6-sc6eGm#xC;2 z?$v{EZ1rF)Uv3XeRSfJ!oIC!lhYFSu7Ymke4aeulyyUYKbF|w5pPvlCXUF~VRLGaT z%)UNn!I$UR!zu(15BqBq(W~1Xab>HWPUq$6_kF?ovxBaw<*nn=`?m7^xG3LRo3 z+VW?kHD!|QYYdEcYu1?&IAg|HSzq8R*)ck{&{E|ReIgqkATlaGn8rl?_dDlyKC z7Y6c@>{3-AgvsCdq4z*z2`3N5GHoOEy6Q;n!i`)9&E2SoEJ5fi!NwjylRnT~%jyrHaannt zxHYhA72JLJwi;>T+zgiU`|n}h{5^x)jSLviKEZ8@?w8q&TsUB48blOTu7cQ6oyKuU7PyhpGrNP#~S+16F@swJZX{xn9is0Td+`zjLH0PvE zE9MQ{Gk_NII`M8~p!IRO)mRIs`^*1pV9;EJww2daa1-}ptW|7NeEaKR1Tz9?s$)hb ztEs+s^}dOyAMO_eXc?m-1nU;jifNc!{3gcdGDBsIW~hu!9*g<)%Q3Zh9-3m?z#h8{ znipGI5bl+NfRI2`bjD)ep>*6klaJd6!f|bvaQrrBTok>2`r}bJylNQorv<`U_EhQt z2FTRR$*rsKGqWJHH2ox@oP7>!!fVkG-Gs5}y{HMRL8=Xh1H@?nO-s7W%rkO6 z`y!><#3Fid1VT+?9XU8kC-a5MwXOY;#Q5nUxafDb9*B*~#JR2ZxUth2xA(Yddxo2z zxM9Z!gK%xPGoI>TpAcE0cy=OK%Q&A3H?Le5f#${>^hTzlD<~Re$%$xMl7j08j9zH> z#7MJg_;oSTXEzVSt~GY}aFHvTxTA!<5`EIOpoex{;Nl`rTYK z*fiIQdDZa7;h4YZAmUs(-pcF5yxg*y#K~KJmB|X*!untKY&&q zkRej8?Jyl;_V5y+{GL1tSF(#z%A4h)AKIVCUxg;S%kWrG8*?qa~!9%t7N!<&okQ93sp zZ*A_zrCT%c^z+&H=8MVr>`5!GTr9^_%=Z_kFBeNaR7dc1eg z2kUkPA&4!Wf(zmDa*XUJf_uTZ;;ygNQa|^8e;K&8YWMH^b?SD5*EMLyTlriS|K@A; z1<$NY9%H|>{veu>VQzSJ!K!;9>f^`jAy{YfIMwiVvafb$Ohk1^rDB;wz6=e|DzQXa z34(RDR*KQShL$&Y@EwlWp;6lRIA&-xBA96^*RuBFHV=C^Jb=9pwf7l$T=qw6NxZ@5 z0}d>xfp{q45{t%}>9}$@7+;+7$BW~Ec*-f9M>t5pACKks!2$M|b<(P_o~3bucUL@T zrs;8Cos$1>pFM8xcmriqqA+7)KHlFy7E6{-!}#`@Xv&`<5@SUi|&bqMu|>rB{!5JsJx-7nx}K_rLxR0%!|0fJOr5DHcmP znT6J#AQy}fFb4`uFP@8eV~i{njc^MAv(ziUqZK$!N!6KoOWH+FmD)^;1zE(%5K=+RhMJ^>3#S}`@P0OO)lP~>hAG3f_K60=Fj z)`sQQ0Gg47W`L34(OV^7156}o7mDQpK{SJax%$bsZwWB}70~tdjZJ{3*h;=)u$hGx z@Cv6#B_PEy3l$M<=*XOfuB^#uifL8?<^&67s8j}(Yi3!tLlO$yWYg3DniZTvzyeyP zgfb%~j7@|Elu<(n%qY3=A_bW2_ay}Ul88zn3zlw%2|!6EhR`+w5e%SNgJ--YP>mo3 zg}AvGaQg=l*n`7iKS(xNF;1aXg}8Yr65n2kz~^VhXrClu`{B`HSqHswd!HS~Pw+!R zXaXi$;xI!#qp6v-n4H&xuIx!7vL|S{pq31($QA(-*?^MZa^#B0W;vH(t_7G{Lv5OC zlCRcLbF?){3Yw4%8p{w8XcsRltr z9A$f#%Z3!F84J!1@fh8ifTbUW}?Pz<(h2t{>UENq?PHDIRM zXM#W1UyA7qD@N1wa%|dKj|(?i@t4PS`0j3{HXDC*G8PYxh6%=bV(N*O6*2H~41$vo zgbob&dj@(9W7s#vhxir)?kFZFD8B@6zgvo3``WQ>e>K*v%SCH%JlyS*bwhRK64KFL zX(rCCizi%qa%B?m_lVeB-xu0ee9P~fWXw}6Z^1bGA6iXyz$>vL5K~YZREwU>$yy5E zAx5S?svd3Z0~A>+`)!n-Qst9;1=l(9zGd0Rih+(s?2vH5Ylx6;gl41UImZfFGt)Fe zOF4sClBfYRf4L3N0D0)|W$zj!*L6TBa`VEGFQO56SO5RMmI8|mID zh!)a~G)dQ@0-LwyQj8lWI#i5C7|n+4cf1WF#u+GoJwh{f`2GaG6sV_q450BnqA;PU zW>$(dcxD944Z@|Uwg$~~3BHMOnwVfAUiM;QFh6=|9RBf7|70TShxw%ew3it`vmnbZ 
z3dP>p+6cEhYbwSF&u~&-Z`MRCXk3YL=@ay1RX@9wUgU+eV*IpJ96KbKJP@i)#;@%5 z#l@XoxV+sJyEeKZuOnD67>1PCY}~n;g-55;aOYqsZte5KOqlmaKVl3PeWjDlLnW#B3P=cP})_F)@h?LcU1<~?@Phr4_mNw(Q3Rub|v1b z7>`M*r6~7J(tTR7&lA`IK{RXmwt-QTlLssn+6<;y@l8Irit7RV-iLku4#hfg&OKv* z=Q73|1FfTaZq&^M=A;ycmvdsXRnR(`H zWS%id&IZB^p2ZBYAZ{3^JX_RYGa&5C0GbFxVn6hiAmMf4FcFs4;F(o$Gy9C3&H$RL zJnks}#*mB%#1#d=$tNC;zYfs7RkPCmPJaHkUU2+7Pc>H&ulb|VKMIqQb1(@=m8&+o5vA6QGdJOM zQ*0~lmDf{{U>=BZVzwc+DYW}nKc~z8fg)Q3cVFqY@Nb{k7EU+H+d0jwpVwLLp|=#( z6x+WxIbFD&WgrZk8CiWs);&qfouENZUx4?k{W5TP^ z5K)6Eqi1TLu2wd4k8T$I-YeRx7uhZ)@bpPT?c7Y<6#U-U6Nsx@9dLc~NLeFral=p? z_}~qUpXUgdpg6hCjO;}hPHz82D4gF&5)+pNC;vP+J7()4nX_C=POrzIxpNE-ot=R* zHx}Xe#p&38pama%P=NBbOgQ-DiJ1=-j+f{1nl<%$|G~ErJfmK>4i(^iBZxMD!^y$C zH4U{D>#o0*@5$>Xwyn7D3!qtXuG}6B&)RqBm(I~X`r{bq}Xt#;^GL4*a^$lB%!Ik1hb;k(3w?$$#e2?`bdO4mwiqHaQ~nfY`)%JH|>{t zYo{GxAwc#)<#xfk z^1Ch?ZKIS^55>3!YWF@~Hx`2!9-dWR*t`~sQNv`(wdnW@(bLRK z{;3~qhX*3w5{TSl3-W6tky{jofQ$sp`EU&W_|NFvjWGE> z9vO~lNSBo)_oKzHQ?oJ{Y-CIaiT7!T4xE_uv&IjmtlgkIrd>+8WNS>A(xzE^^$r=rBTh2g zJuL_0V^h%-%sONF*{?X6f)|}Z$XtCvG!vW(8Y>ACh;66tV}1hW~pfZ6fxI9uM5Z=a!?=JsRZ0D z1k4fS^-=5_(uJDPO6`4=?U;mow+u}UW$-M^LH?iecOyX$_56|v@tsYE8sOwvW=jHvEj;NBCdI|$f=#~ z4vBboO%$G=6(F8rLs0=NPrSrzKRx1$&kv2jthWOYnO=y_hzd;1>JX#WtqrT2St6)` zv#}zwJI&MS3K7|(jgi)7R*GS+S+dEjGhLd~-b) zpI=PFv(wSIf7o9GXZH{Cq=XZ}Mj)p*0gfZW;QBM3C>Yt1yaqK}l6#8$TO7soxr*4# z>Fvb93*+$3=Z*O1rxp12T7q1Yp*V6N2u0%(;rXh(jy4@<@XRpTx_-l3V&4dwJIlV{ z%zZ)0*ueAmFfNa)DK~$Ii<5&C<09fo&=lHzCs}H&1;3u9BCaQ*Q_M88&^lAbp&_as z<$eWXn2GOLaBx75LtKC9)`&>N4rZoVFcOAaFxn*+g)TA3b&NvVC<|hSh9gw4ufZX? zHUs4v4VU}DaxHrhr?QVZ2=+NK$?^9hOnyqyWW}-Em-o?QPy{j(0#Q&Lg|oZt@Z_L7 z9_)3}z6e(~49ERFu6TCTSEq5YjPt=EKb_FW!5}yGJ7U|uL@fHC8B=>@->R66vFQ|T zMo?VKwq$W)Zl!QVQD7xVQ84?0r!$3WHeS{_M4@$S2SVk;qlmasRt*o zjKTSH**JeZ8Xs&+Mctc|v8H<+-XFUh3(MOvHElH7EW!iB<$5Nsi5KM6WlwA5y-8K#DZnsczd}M#*BAEfLl167^LXq z`Tphp_g;vW{kOd|6??YC;E&g0@s}H!`19pZJU$nI5B8Vf-KC9aZtFpN_7vfC4hA8o zi}>bYSfX%xbTi6As*vw%2G0HsPN%7kA)bWO^+0UE(mlWw-d0X0moqD+|GqU~&fuAF zI)mndZ>!*@vG(f|+(z)MFJLbGPV8%+Si!c@uy}x}7V+&a{B`^CT@z71+%E>u(&Y2X zuwfQj1oE8Y(48e_x^xlR*bYPZy(6IwQ%BFjr2Ls;{y0ovyiVn;kL*NBMm0K?rQ-f} z8=T)_gUuTpF?(eQCN7M^q)E}}Y>GrmiUnSB-8uZk9ge>dZi&c3abq@Si6L9@ZaLmx zU5VvOvQgZc0XO$#1f-Q=>zOIQ|5^T*Rt%2|lg&c?dob-_~VJ-{;Q->#`s4uv_tN z{TtRyG4o-XR-D}VpDMj(V9g^wZt{c`&y@&rl})iM7wiGQj@Ua9S=MZ{r$yR3%)qV$$6o8cHAG&54mF1rbx847GQE^9^QU0 z8yAix<1g1^wVxpe0X#b5gNKK_#b|ru{(g5n+G8hLZZKAD3`Rre7?hWHVocdIv}H^c z?qJ0T4K;h6jTKYfii+?tvgeg*85aj|7#Zb8#ufKL z#)WT*^Kfc-G1j4CszYQC41Oh8`CS8O#JcEr2FU~m#_l!3Ls41c=e^4sZngc)vCbk$DPgRUl!$*!_7CCWFUN>$HE;smmpij3hVx0XSLxs;n zWp4}-9Ur8J;0DG6gult@{=fHE4NpEd4#9=f)t@tPCi>pQxbnFy1LH=YImSXff9a2z zE&tI|I43R?vF&Z7feQ&|*+rrzv`m{4P7okAiO_Bl(>Eb&5~i2T(^G6C!aWJ?$rF&8 z(vI{L5p+@h0x}D1?%qj(y@ z3(h>zvSz86WXvF%bD!8Y1vi02@DZRw&b~oKm@u^l(`OVTzkW2l5=O&Gz~l1E2)GKk zd4f$KS_#;CzYnMca38>}05ziNUP8`1z0xcd5wu+FpHduHD+D|hJ@Vd;OYK1`$KJ=a z2sn$yOfv&5L8p5VoIJg@f|Jv?LS79<%BVP`J8^ncHqx9#q}mEm2?2-c>8z1yWfkSj z$YB37Bfw~YvOlsdhD6`D{tfo~;@bEbpQC|{pZg;&GYQ)^`H9K)!lz=kU GxF=2= z4a1tvIReTiROIxEh?{^8xmK95$TU!rFUQzxsb+9B@_dTTa<2YpwgQuxDb~Q56_^C! 
z0HJMN8bqV;&=V{Zm?A=yd>Pp$Fx6ZO`MxaXx{rWm^<}EaH6Y~cqvR{May@d|2MJb* zS0?}oI0;fSc*ez3t!SGh6HPztuamouAo}-SDDlqHsZkU1W}+^%8Z{vm=t%0+Qtc_l z3x#~gYI!ZqQCWDKyayH`TEVJAIzD_S6?adi;`X6vToc25ahog7iBZ0^*#Ya;x+1$u zF!eJLurEg-sh|eCufK^uJ|2TFE~VqisaQNZA^1KTibsb7@%W${ZtS{l z5sbTsd~xT1H{M(824CBF1K$J5Qcbq8INc{guB${5ua=RC@P5hcdBhVdlav+T=6%Jr ziEqWfJlDuX3swU5?i1RPaz7HWwwZd(b7E4tf05$5DyS6MVw!UtMO=#z%5e|^7E_&L z7mX}iBS@AoG(yWe;>A1{yT_o&B?`F?@_%LkZCHe6lLpJb%U49Fms=|Qy)qCuJVsBi zoPO>pd##sdp0FHyAZ`r;7V#vXrOU8Xq}Qe6{B}p&-Q}RcvoouQVBhlBapa>RxVqg5 zclLVdSnS6KoN?ixE7opHz_NF$F?DJW8mpPHI8!)aB3e?I(IA8y*Q$d7DkJJ;Z!0qc zXB-ZYDVSykDHlmMO|YGc*ij6gafH2)uY3+x@+I*q`C2h9*p8HCQSyy7aZP-S85tz+ zhlp$9n?{4#XCz<6xh%~_ljTn=3(*>xXoB~bLzHL(G;k)^Hu25ouEh7iMe^l=1ox7# z*M0;goOd51_9buajYMOeWoto!YqFLiOe+z5$914lMEAJlahP8GrU=A2dScDY;LenZ zD9)II#z}ct`;n7e3&XHs)ksWQ=!yJh5qaT3uoJ`S{4-8}5`kEriKT0NaAK1UE^U>4 zfI+j3_BgzDIA$;RK;5_yWM_E6AtWBHi(7Hx_GG+xQityz6yqQ772}ULa`56@JRThj z#p6RhIJDCry>FFZS;Jzy)4B)?OT`$Z=Ak2!+1Qa_!*>SHn1R-xg=PlObP?OcsTJo2 zW{p#86UW4EUjXf|4YpNye;@Pxdx-HjI2FwMCALWrUKhdbBZ8ZvJINtevv?=uPVE!i zjnR$TZ!*_Ga4(o2?Vf7%I}_|@=#qRdawaF-H&(j`dn5lF&M4K?;JD)b-!|2b@;=*0cRi(F3@l=|gh zM#UmbD430wm}WgB>q+my>`G3Jl+R9Z-Y$BK`n@u(3zZ$knD%ZMX3R~)#i^%{;C(WHe{v6#EM7>7<*;K8**e11JoxHlC~&xT{aXoAiev1nPC zj*EA@fPYzn|MABu_@_rj_|wf){P}hk{`qk!{`s?dT)$a{`5$H>C@oF2=D^^Yxrl8I zqV;E^^&fzn8vA>TTAr8{>Zff@bpE&PdsPrh|?ZtnRW?CvxyU}&LBB)&Omv%YS8GRQ5rbQbj#Cr zC2VM)DEc=>u73v6LS79=h~S+SB;iBk{V#||&s#B=zs7>jslr7G;c)y-7^-x*i=qv=KzZFXo_z~V|<60Y7Pge!{~rAWO{HofLW3~AZTVB zd>cXXs1XB=wP4*m2sf}EuYJ%g!(#i)bw9sHAz~0SaKmKp<*KeWm(+J%*@Qp%-w2Ii<#Ln%S@6{2H9fC0?W2! zvBk1wrWucCW@mPHzS!OTE9>0*y5*VO`2L9fB5Xxe-1lCCr1x%~%*s=z3O?ft;r^3Q zxZReDj)yw&?N>|iZ(q*C@6NU2qeF!__eL_#ZVkrH*L;wlp8^-r%UbN>$kQ#X+c5&` zs=N7Ev2Aca%Ns}I|;vMWw>De!@ZH<01+PF zB&K;9j715jUUtR4C!G=5E^ExZ3~YVP7tg)XfDMlezGu!xSN#f1&YmwApDuhpRga^y zUn>h+sv{avLgUPKHv)d^E9Byb(Chr*`x`C**5W`Bc!hyx$k-<2k>GTY=C49f zy=poxd@>E6d{~biZ&%=+jb)fSS8kB}9JmOnx&JoG0FU5wgaPbGaPmsCOda$Qe-C%K zI9z}`Sbo>VX@Z^OS&4z18QKW4GoeFwtm{LoQ0$hj;M5&!nr8}5PUebLfgN{i1ft}< zN*$Lh;LKC<&2q~{hMH)SlnN1b<{VH#%&Q1iO^Y(@m1^Zm5m zS342s*PO43L2+p;?s+H#yEc1b^Oh(){cI_guI@zVAQ8|4Rvz|ad{&cZP#C1AOZwtkH zLYSwv1>@wF0PNrDkB2rC;?cWHv3zA4hNdsXoUS#PTQ&ug6H8I%lZ=oXf{{2W8~yoH zv9fgumeoziP^#b~u>v#ik%jQzm*6iS_u;d{8ItTp;>`9SoZyt$m%Z@FL*o%3;#KiI zf+Wqc4_2K8A%0>udUA#^zh*xAGh0#YoeuUKQk)ad8hmU8U#CcyurY7{-r-6#PVO=$ z8jpMAdRL-Sgl3sfzKWbmUm>#bF(`0OK%peljDktAYM>Q53*KE)bf-G$js&C#2Fg7W zQ0^8b-xGy?s#_nj*iiC=EViVyxb8CXu#FzKK^}nN(9e z`M9Q_Z+bd5z7UOfU-ZC{=O*FJ$M3+ldq?5XrPrap#|ICrnSeK*8ih^rIY0b(2JTu@ zje%*BDAz1OU)DTyrwZAo4Jo$SRJ57LwL+@3A7Gw$33Pg;ldMN9^Je0#$2iznlnFGk zE$h@+6KbGoFsUZ-nuI!5uN826Rd`6S`7)2Ovs^wxFisPVSSJx1!+gX} zmWb*gNshH=(oNBhPy*kqiQQf$vKy=1Y9ZdRvsr_MxqQ3uLeQqb7skK#gK_(ufe zHsPO1KM#iUkA>&;o(hE5s93~iM547J5M4tN=$;dajw#`2to27|LI}n<#bDg8Lg93E zG9G)f6klGb!RPO!zaW)k8I?+Wg%N0XG{|8j7dO@B;UNxnNaId&zt4nEf`mB7JPgD z#>TunzfsnNx;Q-flrN6G;;qvZcRc2VSMDBzvf2LdbPg7+W^umt^SY7tGe1naB^q_@ zNjS1E7_aSVz@rcKqG#FyOs-ggzI5Sq_C=9yc{G!6yP zjW8>bCa2RdSM0MIg-NwB&}^aYFw9kG4~uOk)>m-4{M(3oTWp8k7-|H!if&_=6Z16D z*dNLNY7;cmxcNmR+{U{tytO3*!^XTkcKT;e9DD1yg{bfE9~x*Gqrb`Glk!m2-c_a7@p|*9!Z$G(`KMNtsO}Pc__}TLT5}hdP1_%7o3L4 zLCNS2Oh8v)96EyIP+nPq=Ql~Bwm%6U9?Zu32QqMePrP6_45zmS;^HkLN1!?U^$C@nRkR{6Q=Jc&Q1$Kb|8$8-#`T$HCPtO?l}G%`TxE z-->gi+lO&ZJd?*QJSx8V+$7WX5iXm*?IT|f6J4;)$HN?NVO@WR_4}=`jXQqLhwU2F zNQ)7UXBA|MPUUQiYt_>tJt^I&2^0*BPt;_Zkttlxe#UGj&bBLww?}L1mB^c;5HDJm zrWp;iEVq2E(&n_dWWhAeGxE=0095=NklGzRXmmt=E2nJe(e-_dN>sezW^(bdcOao?@ zEkRFGFKPoTRKw3GoTF1YXOu0|b{d?t-IYEaMS+c`9SbYXECbEtf+ip2kS=$x96Yh5 z3!l8#j*s3FeC`ax2itw{@Y8{a%uYi^S0=W-9*>K{-DkE3s&RJm#qoG|;K 
zoNkhJz&6n2-|3jFuVLZQfq5(VSg>njUyZdZFgeelF5hp*-+ri zM?P}g^918Lf^pHsqKOkm$E#_^%Cp4VBK71+faIX2m!#oxZWxl6?azyC7o4w-!|c8= zoZ90rSP#bsyNUNeoZIS))0=5%@;J7e>iPpujK$rnB^Qd2zcZ&03uafLXZAew)~vwf z^qJ^N8B!fjgPa&|h-gH)|H2P;}XO;HN4^WVLqop zPMkB8(`9Yr1_3KeX`B(`+Q*$Z7fcUh-RO8$m@A)ao<59m9X4QTcw>%_!1xG!+uU1I77I{;fv?1vLmbcuC+2t$c zjUUh|6;>Bjgm(y`L^=#IRyt`P4ib+5LMQN9$(*~o-Ts(#O%x`Miigv6QE>YylRLRN zc(m~P7)%OJgnwcvCWc4h4%c{$|8)eMe&mix63+i5;{B)IaQdkaCjEq0Z(S6Ij~&uL z7|0|k^6*lg5H00!SgV6r)} z5U>hu3z!T`Y}=sZF_4(CNfw&E-+y+$xD8QPd#l*y4hoOePA^)BhOj!+hg55|>EilT zBC_w*=~9!mDrE+m68a>GNrLO`$!MP%f|s6AU#+E;hZ}!8nSJ@lM9rr!bfPq;{F~4m!R@E)Rx`uhU zvw04dmbYO}Rsn_*(opCUA@@%Y?i+j<_jWDCVkXn6dFZchz+> z?1OQ7TY&t2AMD%cg3{T7x0@m@oR18WZ3tXsEhq}A#_Fzn@W`|Wv8KKsQ_-Xxlr(3;BA?mCoOkEa^XCHCG zu1ChnXD|kRvy&0%p8+4isxdc=iRNuJ)a07XHPO#C8MBt;;MkjSxV$SACn#c`cfrA@ z#$o6B(RgO{^$4ghEUaCr0|olBS#4l#j3rG+v|lam zxIGc$eiM#K|HPvM9&r9C8-TOYo7^Dxn2J5Y{f+S$E%)>t-U%4(9FMU##9|WLGyF40 z-h0CN7r`iN%D}6V=$+fp&9r^f~sT+9eLGSVYZ7nqL4`4w20 zU4&^#Y3PcGN3okEuvZyQCw?8h&?Lbg9uj1QBdO-qgx?J@h`b{WVYes2``QQ{r*9JK z?;2==Yvp#iUS&4GY^)o+ODNuHqDf+Hn`qZ^6n>Hj?n-TS!G4pCVbzk@RO_Zik-RTz zm`mi#XTjH4b;>>}+_h$-;iPdXYMd3w&#Hmul8H>`bfk_?(XTW4X7yRZts?|C_13Qq z)`@wKtAi1ppMu98_f+%jy{-N@`ic+sKJ9{6?;npyjTmD2e+s}hU z9|Xfok{JJ65>Q`UhP&@>!ob{xm|VUbz1mEexHp{6YLtekW}U)SAe^4%QJ}^dk9VYK zB?>v+N~mp4ci{X=UN`2MNvdrww+(Y9)w)BioX*{P^19)4if!d{KDSM|=(TT_GP)@ro0RimP!3`NOV zD4bh`ldlKk%nojbh3KTdlbd~oLws;tc;wj2?l}C+7|dN4h{s;5!vFc*T>Sgj^YEvO zjrh}rTKvlgE%?ia?fCY5Ej~Y#rb*x7mjJ0iR=+%u*P9`@3K`bxBSUdMY@nIMn%MhJ zT!D8RzXs2LXP`;^cLthjc01(v8UxMz{rop_kcizEjV)PX#$=)*xDI8$g=iFQb-OSS?`DQy8`gxUJepq|Dyn$d({QcJ~|P(W%7RsNJeX4 zIQG2gg^Szed$;-Ey>0&3wlx_~JYJ94iv`;aE72!Obx*2boH)-Aj!)^=ij=C5S`_%O z%?cZb=bB?&^6#g9xAE>cmTkus8)U?~B-z&Hn9=SgskRNYnA>TTi+&f~Mn0!WZg6fw zanIKz^f;%y#tJlIT{)gfHAD3~an2B&lk*+?{@o2S+dzBLLe%&7Pq|+vQg08FTs;z* z!tLB}osu&RrV6b^O|*{K7A&Y-ie;_$V_NBQ#W!0sv?dM6=apl)TlArQ5{a77-Z=58 ztH$Adb+~k}NQVJYuOH#Y)~2!O8j42a(i-g8TZ@m5l;h(5RJ^gt4|lHhLjP1h%)c`l zpIqq0fBk*|zCK=ry_*A(Ke+(a%R2GL&lcnN7klvWJDE7YJ3{h-KpcHla)C{*c=AbS zq@=`XC8cekUCHGJhgXKhzr-jpt+*EqOAy~`nu!Ltjk951=heo!754AaFn=FSG+%0X z!8j|?LT*VDPOm^+M5E}YW>k7-X-=NU{9H}6IMM0ps17VaQ)Hb^=q&QgM=no?Ig`IL z)j-RpNluKjl1p+%R-MJ)!s;{T=+U}Pu-%fEdL`y_@xCfp?$>bmUM)I=dWYKnH<4&; z49Dv)`iPG5#<{I>-NI@!HynFm65f4w0^Zp$4hJ6}jo0oUi?+c$l!P=&Zo&SSbI_YI zP5YH<#aa3^J*wFfXJK5pJln%e)wGXl(mYGiA%129+<|d&x$?TL<5T2HK4+NR+~{`; z;}TkF%BnNn{Jv7VlfzZl^YsxJ4wmK2dR^a1>Z))o2y;yawW#E#JStgy#F5< zXjTPUx&YD^+?hhaoZ!_IQG!5&<$l8}UlXQR2J0(~>;QQKSuzkozZ@YstcN)vlgNaJeR2`>WZDfo=3KXr5_L1%^e!ZhGv)CSlt^GvV>u zV7W=3Ul0Vp3Ez}f`NV;|hHjvtqUV!uiOU6l&$QqOG5K(oL(F-{&Qq0BQERRN_a z@fNamN<*gH(CJQ@LcTc;1C7phs=S_X2PgbQ>dqC(H)V7bQUsvv3&b63&)@K99It`k zBtM#ITujYwgbBWv_SJ-W9CCdS@?2t3ADD`^@B%Ge@07&4H*+o~r0}2Bd;h1POz{RA-w88bK+f%l>HEBaLKh8)ys%`HI+f0CO0W{?~?zi27j| zXuDq6iiqEMT4mr-s17K^?DECh7pX3&5_Q297|fo6HQf(ke#3p}&zpzd?CDyGRuWW? 
zfNNsl9WF%L6@s}-0FXk-qMRT{@7t!Gub9E$Kf9$2h1H5jIK}u0B9)2nqM_zIg z5_Z)l^anOh#QF!_(KEOT^QNrB+&QZ-XZixnZyv;gylTu!$;6bn1oVVQp~fQ^L#g$+ zvuP2Qm-S*!N&$u<6EQiz5Dz>th=09QfzJ-5;{Cl*x+8Y{HDBy|))^14y#se7<_PwM zgt&tvYtVNmc~O{Wdlh0?^9npPcn?-qcVT8)4w{0K5qEo(!K(w;2IIsuF>GU*&+Wbn z!vrjNlGoHMw@frn<}CE9*8V8f{(|3eF*A9jp~xc(6`pa(9UH9GXQ>$}Shda@Z#?CMH=lCB-Y4DlI%ApOAUX}ff@x!x8T>Li zOcD%5-Ik1yNlECNlZ*EcC*rGjQo!j*XSe$4juodZz4g=>?0fVMEM6Cc)}`^NZkUYL z^f@A`*&l(1_@J!yOrQtRoH!XZ(QR^HRO>O0bk}^OI9dG>SbZ`vUB$I+keeN=48bu& z9?NrEXe+)ogn$L(8e@62$ai!+Vn)Rx`qn7jtujfpAz5w4EYEcp%QDVcMW`5;(B5b_ zvxiDFqV%{Y2Z_))Gv=9XplM}_MZPBN$ENy>xM#4${`Wrw1MNB*XyF>hMC0aI1WMe~ zbs}HCV49|ReQ+fPG6u1<`EJauUM-3B9Od-MS+h_P(kj9w8csj<&`m0{QDrt$lrJpt z%}Kj@^zWZMwV$b8*^u~W_V-H2^9i>npr$SYYwz~OQ%`!}`KR3Q=%Zd(y4D|4ruiVN z)E_r{B*0nLk%;U4QR5SZj*wV1`bDDHMb_Dyx#=VPE}ZT-@}&kEg|`vkcEyMh*u<)- zHVwTsUhdacOeKA@Cff4(+9KP*=OYX>N67a>sx#e6KaT z*5q^!7047lR1;dK8mb|%$ZA%(|PAP#BBx*F|BHBpB`diCZ;5Rc+;YD}Q*7TtTw9#3xF@Gup`31EUcPQuos!de1kN#?Tn)5gPQN0lwy-@C z^ICy+@CQ)NsArRlhD&vZPrZ#q`LGKl$WYBA-3Oze2x6NfjsOY-Zk6Pe$A z&IyN}al%_0CSlKGWAN&|Hz2rr{P|+FL=FCMx(J`Y zlZf|svB$AD9(&Rk-ri{j&)va6Aai;n#&p8d{zQsCV zpojgF61z|-TC2z_UocODT!M+U1gp=8op|9p;yV78Fg-oOs?_wcv8v}e_`;kTF{en_ z-z$22Hmchau;p2Ae6ZUemv;r?(iTtbdfo-KQ^GK5Vgx4sC=|Krk$8ENe9ty7oPON{ zTb_@^v?YS;R=F24=b<}=gT1C`kLz}Fd{VF67frf3#-ut;aL%f7tws@Svm!+~UiCZ8 zGn>upH<2Q^ws5XtX?VeT%&0igD6B3O9=EhRjk1wE@4&k7y}d!kU>`)bX)%?l z#I+>Y%JC!lo%-Eugbj1N38#Pd_(2$GWxn!W-xi`+*L;0+7;1%EW)#iS7C`mERj3cD zz<}ImE8FhF(zXXNm~YxMu}cGYS&I65x`00|(_@-n9IM31 ztr2+WnNU2kxddOF>zD5@#>IUJIK4eYH_?x5^1!oCd7!K>3f|)-pZ~ewTq{n8^K=J~ ztFAWqcEkvb8lJa8FwH5P-q%K%p5-IVvyp8XMuz?l>+i7*dN1L4JY%+bvR&WeC?w8nQ~O5PmIEL*BQP&qt^uNQe0Am2R~`JM&Zsz`J6 z38`9%8h>jzk|a__MaNx;tcr)8mde~&M|+~I4JZ=159bAJ_-xB1zWmS2QwVDOlQEc>i=mWK^dxtpPZo>5 zY!P9yXm_c|o`TloKGer{q9U|G`ygeAI7oFK-nF7(re~?;F-e%9G&|HbI2lH0o5VU* zfk{9rFg3ZBpoylyj1yoA`C3BTfYKz|2AEcY&BWURCJi(jl$`EGP%^YBF+&n=4F%;$ zVEzLG?FUkUMguMU*B(f_BNFLjqL3#9(2>xtW#BDQEoca<*5kwrYL;Wo@A-tt6u73xVq8oD#z=DK{L?^5qCNGVVG`#p_qx~U7(`U0 zV(B~|ELj$S#S8o~W12TI=V#!~dx!A&jqa-{73+9&MljAA){Ad9_Ih>D62hwq3M>Mv-;3EmS zD?&2!;4&&+Nm&zYHPMK9lMs`rqvXDyQ7lQqlzXwRbq?m`)u1OP3ptYH`tW$yNK8wp zVP-wP;%km%dN}M-aN?ZT;F}YXmsj0R7dxsK6N1`=dZ++;#HVhvJx{& zm+Lgt2AXIAl_(Z$mwP57Z%i;EBhs+=&Hzb_-Ei~;Nn|A6d7dK28|$A8MoDQLLa&cP z;MMZ}3%+?QK4g3>$_n$)QCE&BQws6!{&;+OCLn+f z-5MZ?O$aXR_r%n;8f17^Yp=$35!W=%S`sItHmVgRfmK>@md>M^tR_`a&E073R#_$* zdz}f+6ZH5NBU4Q@gLjgy*|BDyLr&<@5S%mORa_I>vD$1onn%`dF-O;JJTs#7b&6-2 zV-(khfc2Os$y$3Y#OWk2tz?nlP>~QCX(PzjVBCo8k@&Xbhi0JZ5l%^_Gb~PLg*huz zvd2ZDCAtA~Dwd%`L?1c5F|-!5isxcw$9l}EzEjrpIm+oYOme)+Rm6=n&y*+BI16;d z@P?KNM{iT*5-Vu3-4F-w*aW0j2BE0N2Z<#?a7u{9t**%!e^sPzn6bi~8)Lq|_Cd@| zeu%o!AAwh~vRz&$#|uYYnP5u@$1{dG-f%m`F&pM*ihy0ywH6x}J&(s`Jcwmgy{Pg} z(a+RPaBL^mS7KYj;&Dd^*4d81Q#jn1Xy)(nlAp7CAkst&xj7kyej>P2dqq2mW)V&= za!C+PMNTg;oSr#N1osm33x769f?W|TBmaV(jX%kX(hdNObYDTg= z&X|y_N4*n72k{t_FL|AnW z;T{=_jGS0B&yB&n2mH~pI2+j|!sAI(wHmR9eOc2bsg`S7NVj)&_J&f5k>(DD$XM)`>`aM6s1@((M@&3C-`1DjaK9l(LSRFn; zUW3brOR(qlXl&Z(jtvh^#Dn*_;GqYcvHpH1Jg{ye9=_WJkKXT!73*9P+n#~u)lE2Y zv<#oWorsV3N8qc&$@uoYJpA!YKK^hb17ExyEBq6LGh4jy*7I)guPYEd^F)M&>nrdr zc$Tm$zqLX|LQO3j=hX4SyN-=A#y?83Evze_vr>fyvh|vI-NL-}^Q=_mW*Db(>fiY* z^LKg6&#~#apJk-Qjmttyd^f873PoSVs&Q7}Rf6h>cJyY?!;JD(m|e9N(~1}9iHz!y z8kG1HX{Ab@U_Qeo7pa1!Bw|50%@}Cms@rqfH)*2q3L-DM@c+ z1BRw1;E5+fvFZ6BJn~>58V7~5yd&ZKPeBN9jmN^Jskpq?7niqt+E*x(k)wDOobYqPAoxCpCwmDw8{fZ=8@GU`{Q^VU= z)$*egwV67TYHn_k-%S#2U&G}!hCG+B`JD!uZk7?-JPO8v1A4@p&&_0Bo7)+~y#9~5 z-RYlxI0jnMEy03w!8*BpEPEctN>12?8G`Z7gf28_jxPCf`68_D7K~S~mHTg&7B)0Q 
zw<6cG6#m!A`zN`2oP1u9^8WM01zWWEHCAEY%W-;ITmY?WyEWuoDyp)smiO|(+)Lfs6{aLq=V6RXe6W>_2zwA)0NO-Mwc zd{2(6@Tufd(d0L|&w0{|J)n7y=E*uy5m+Wan*#4U($L=*i+6X1>fjI_<7EHXlP|kV zUckQbF1oRC;5j!ey+0B8UUeu75)Gd`AUL0j_EZiB=tfm|qxK+Y+XdC}+yqk|xA86i zR*6K>@xvP4!nxqvVVFyvZtM6&`S&HzgW#2G88^e^9*Y?jBRKyq#?52Z^)%JgShKiX zUZ;tsh8g=ID#p$6Zf>gSP(X{zNAkJN>E8`HzA3ttyqQA>29nVJvLC9|L*wwaFeom?Hlg_)6kQN83woW^cJcedI>=ru!?R z*5#U5baa;C!Uu!+!$%A8(a8>cbhr|ScBP|pO(AB~t--q9N3pD7Ip!2}qqnFLQ=gfJ zuRiX_UoUs!t0TGi?9E6#w=E7C6`63mAzVcINb~GU6U{-!BKEHYr3pj21}yIKy610V z5Li%vRqLX$^JQP0+vS1FyIgVZH7D$UX*AmEL*a5miXmtMPj{+^0jhwtNM1w0&F7w1 z2}vtBP4IOR@UV#%B{ypUYC;=?40Dh*E)K;4xXRF0w5HC%Gy&$!$~!TuLI7H{2yMLi zgKLF=OHkrnqy)<$0%@$~;@N9fo>}0`bx9Y3&O`bHxxs~8cm#w!z_@G8^3d2Zi756f zLbm*lU?Cb)Ey&6OCgA2uGmS?j+9NVCFSiVf3#&0Rs~-76uEha$Xie%<@@-4$Lql8# z%0ueS?zCG0(nJ_=cbcFaCSVF~0i`^T7cg=mH=wkDX&?2n1U3O_l52`>3hnOz(?Pyh z20>|n$pqU4Yy^ujwSQEl;O@!-D zeYM}A(~rF5D)D1hn|KS>e09P1zDn*d!3Rw=&#R&^M()AeLsK!@Hw|OL3(@s>Kfd|h zTp`@$_^)pk;a@+SfiEsK;E98s7@AX#9PcOu+!%uFf+8$?X%@C!UV;6W=ite=1~Ig% z7Wugu=oqTRCm*!nkDm@CAkolK1m|N!nHcZ2x=R*A+nCi|H;O;d`-z(9xJx( z@3XM4m1xAdkg(mHG~}ju#NJ^hc-1FPMP2F=iN)y3S%$&F<(OKs4AYAjqF0jE+7S7_ zx@4iyB@qRIBG8wn;mu8)c;tuEulwWVW*;1VNyPATUU+e%7g`$<5OI47BCiid*fk-D zoDheK)=aG27>B1{iNl+F67b2JX%dO}gk!IFhv33iUp-EDX0t0k+!u-89nZm+@226x zTfyjWsY1-e9QpUAikPlPZFGlLoMn3!s;Fj@aJ$+>30Lq;18tae&D9P*Hw4Ve6u~wL zS3-NEN$6{11mBuO6WeB*ks(++45rmkV?~;hvYFDwporGt0=LEMzybC})ZU2JhpT&y)ADCxC~vk_mFw}YgbplgUWXMO zk6?P~3hi@B6OAnfV#lVdamFA&Xm$B-gN5UX2P5(vCKhQn zc{UU$x5eZ1HV$nGlVm4Yk8AIHY7$<#dlb6n`l4f1IG%aJ4R39nfI}O`VaeUXxdqM0 zb}B+mR69CTId!iejq#m29H79b6j|>1%ITU=Gl3Rdt7$Hw$F?m_C(af37Tz5_ku0G- z(ln2!;I=lzXq;)aDRC{>SAnfuZar4~6Z3*|d90ga%IP%9$mx<$J2;)qiDhjdr^{6_ zFMLi;9~RuiI}NmvCK`L7{csF4tw57Vy**Tqc&UD8f-P7t_szwOqPZB%o+enYM`KtO zdeeHby!9?D?|2YXiWY0dX-8ros*@+9WvB z^NKI-y5AXbZIOs63qovB0Me(#Vd0ZScz$~op4!oZ1skVe$@6{Ku_p@`cKPD`c0WB< z_~ko^_}#H|{O(8+zI;0dAH5NT^V_`f)(ftP7|gQP2@BT_BhAw65?ohoGtJ;z))t1P z<8AZIq3IcdQSvx3ZmLa95W8M#g#UvRCHy|+@?iZsH@sQx=J@+`T`?w_YIw^?i@76R zr)nmNHZAroMPu?DG-a(qcm8S&7O$2BdnIO7N}^pjA00_Os0pq{nNJZ|p~B5?@+gmK z^Vnvz;F*09^F=$A`j=|^lXUsrG}jVE*V9O2m5R5l!-->(Q0-rW{DfAd*AyYEISxKq zshA`j>iW;Jj$Rjqrmk!pdMgQ^?+e4_oj%yJIRw>n%jEA$mBcv@b+XR4h>qt}%_3QY zI0PhvJx$-xHlNrkjKLuZ{d9e))U4Xo>v|3FwaWAv%)#P%GNLA+{ZbewAp7Z$)ob zKUx#z^Y+QsHP<(~3@<+JgG0|x!rL1sVb9}Zux#l#Y}^oskIzrTw;wOSCns8Q{y;9? 
z+a8PeUJt};&v;_d`T*?QSA+lh^IH7r@)UghP7Y3Q3B>;A+%V_CDB&GXoD|J^YHJ8C z9>~(T^O<;rL}g>r&qON-x2t)s2HG(0&2#d32wZQDhlg`2Jl#^^c1Hr-B(E@9-omP4 z){fx{QwQE1&&_MY&*kg<9oAamcx(l=zMdLWBh4J$4!McV?-NnzQ;w$8*=WpIissDa z=*eG!0nzZ(Pcupup)Yd^>N$JG3N0cisSX0KsCw>y$Vn*>p(_D10LU4fIl2f#uuX1Ki(gM3p;{v>NRg1 ze_43zB~}Q#V*iUi=vrHZq_J6uzC9WFKGpKMv*N4?`83Xi%QHCrS20cS2 zhlj-r#$zQ%j}?r^%I7ew?5>ZyEOiI+T zt#iwl3OHNQ7+#|wU0A&wclJJ}H%ouc9JD6%pdqG1-n>i|*G!=Ouj43Dl1ro{(*lB9 zQ!p@4j?1U&@zIHHTsmBj3kQXycf?}nh9GqJF2}mA2e7hbHRe_gVEIGy@W-!b;g25; z;G@HZIJ+krC$N8Z2@_LD^{Nm8?$U}gJ*w%(30&DizBI~M zZDuOY2udTevjl)vLal)0YX+3VeG~+UV(P%99LY74Z2_me9!m11fyN^r76IGf92VKbKs7zkei#PY8!zuf z+>OD4>3Ed5G7?beny%Fkv&)vEGp<8KcAc7Nvr8A~K!G6<*laA$Y6LbN&X_n6#EuD_%p7lAFL>3(YhJnl$_hloGVn-b*b1?Pfe zbF5Q{nCpRsaa{u}A~q&kgte1ps#5}Qh!e4$DhX)?8WM(3oira+5^b3a(VMdXeezxt z-)(X2g7FFzx@4esup5UqhvWU-!MLLaAAuFK71n--@I3dKb$Va zSMMg{#KB@1dmGI&Gnq(32$)HA3}4sauC=K;CGIJzZ5(q=ax^1% zV4Na5K?qnd{@)sCLb^)4|HwcyCfX0dK)dwb1;kzNuLc@bDhH)CwJ5KzMQpU_0eO#!y$}(>$(+b4_YDnd4_Pyue&z?~8^hu0 zB)sDi4cF1~_Xz)R%BOAGnSN_rV`!k6HVywBuiIf)opX>6xi(5Tqd%t|j|?uwy{!XS zR#=E>iAgASvo^Ps%jNUsl~}f}%I&<42y!};YW-gR>eyq@yJ4Da0SW&t57K4nOAXe5o;wd=Zo$5{6slU9Z12+ zH{x)5XBbZLm=T-S3x9pQF9M$)5H8y7i?iE2aNq?O#7xbFtE|E1D3_f;o2!~<5-#$X zVJFuH>+&^fcn$Kk9W>GelY&_IzGA7tjqf=D_H^%p)Ib#VL1nV8SE732>^)yYc7h^DcI+~)IQRI<>?1?Ez z;|7u>oZ1&jxGqkT&8!Its0k@YO<1i7S);REg?q=u%DPUSAL=mC;^k+n0?Sb9Rff3R z(%}D_2zbkTrgUol4+ka4a4WQes^%YgWnCt73UVm+ljTF8liRaoo8QCSE?CzJwD12Af!eSlWKBvy*2F~7 zgq+S!4WER3mo&j|E2b5&zZEN<1>=#8n3g|7H2i&-RmKgP>7w5U^(bdcLKiv`YSEuo ziz!*n7)Y-{YfKT+L<{%N4Z@@AUGUI-Zdf*V0=$c3vFiRDe0HWApP%W&#WxED%TYM? zs%Xzwys>M86CQlXMGdrn|7tP5JXwcJZ^UYE#O+T^LTX>hfQNxqh!-XeH0;F)qAKT(f7I z_D!?2z0vXU>Qj)<$>ZiKSSQZ^4&%cduR{cy|JMz)RY<%sP>7aI?1X?x#u8cMW3)nr zLBYmz*3%2-qb0HdEfMwTOY6s)uJu^da4!b)=AkvNQ-R4|3SqbKtgh*47AS(6Mw*}8 zfS%VUp|Ua^TX&@4(!p|Ec&h@Z_oUQ;>eH5@HFFvI3YKGP@iI)!nOKOEoWhV4&}M{`RYB3Q99RuXlm3{+H<`i}xAZy+ly^l0O6kE&hf;O`^F8#)LX!Yz#8V>Du&`4X$G)q3*|Q z!FOjur|w#J#0WG1GQo}Eb8h}b2*$(IRAYDx zUnCSZ;_-b0_~86xe0H`G>vuOGyP^o;vW_<;4Ps8sa!KB&$vRVkBuNCL+>=n$(}L~| zgXr7XhVpsUNK8tHGfi(LC!+0E(Sj5AvUM0oq+qW~C|nAlbY= z9L7Mi#J0`pf^&*(hk3@=#lot}UkNv?AyB_Vray!P5Fb%{ffa{`LJ@-UD)Q*@)5 zzS|g9gRaC5ET~z5rOo$g|INwiQ!trF^w}$^ts74~>V|zAobl#!o;dUp2mi34xUU*$ z$6oftsweyr>KcwoSI6Prdo%FKg>HQLUMoI+w-_HCD!>N^GjU;WGA``Tz=bz6adcNC z-h9;q?{1oe6E8dA^lNT7E!^|oo**3B5rLljN|9Gnf*_ALxXL=>=y7NpWyJP(!_w~# z9Y3-%P1dQAn55=5RcO}cH$(BxtHJt}3H68=`FkWBRcLZ$c*)OkEmUq7yfat{&Xx@g zag))Qwh*<{@Y$l_CAntL*%r}Rt&&_fM>nC&w@C0SdUtfJYWHMGnll9V6@CS1ifKfn zd=G;gog79~vG*#2@Xvim!bFp9>uU{p9e-L(kWqT>zD&E^;@<5Gp^4TsxUAF1K3*X_+Q z8{@Lx>yd7GtW{@Bsz+Nmzs+#^Fve|;C&tP1#w2G&igG+R%h)4La2`coxA@&8*7CY0 z+#~s&$Gb*yyKR;mZVyy$m(Z&6P{H~SV+vL@c(z;0oS2 z;KU-t)keUxJRW0wQ!(bcWXznIfD;Fc1=A%sy(13CHwWO|m%Ox(*_OvAV9h;~uxU#! zzB<)}i+fXWW?P7S-5U?zGalZ)>F_B}$E@X^vJPb9Pah59PnWy#`_tw4>`*wm*QCM6 zD^B=au&QRJU=Q=A z^N5(wCH#fh{^v<^UPE-pn^|Klmi}cYv2@tJZ&JB$g(Ou24t!RvCMuS9? 
zV7)f1R&x7HWOE3K@OqeaYAoo61k9X~jnl_U@#iyT_~PAkeDFpD&TbPP-^>a$%Rqa@ z9eZ93M8SdV1e_AiNg8ZoU+Tt4M6r9O zBy;t8l(Q?b3oViLTB$O-WC4~n->b(8+0TPXHXDxf0}5mkeGMznXbMHDITdiV5KL$$ zCa){N$*tiy_F6cOZi&K?-Dx;-pa|RcmSVvRotWNTi@_JC;*THC!KbI{adCf=05=3D zw)l(q^TE*(?ytJwj}2@-5FDCK}%}CE!>D-54)7Y8I-(+t8RgA2lh9(UQ3gJz4Y7E(>Hu zKo#0zg@B`4Q0$Q_pP7j5(NQ9xBXoDFM3RNZ$T}h5W(8(rvcsywSh^HYCj|;+F@WOI+APqb%N56uNr4I`AXR28>f!8A>Uz85~M=7OsD+O=P<)Vu|1pVE=1nfx12lK{v6aHclh>rg+7@Kk+1qHcd1Rtnj=h1W{dW z`1o`O{`|!v{Qg2OK0cI<%Wo#((%yKS-4!M4LWmG>0A6^;8*lAO#UJ0Fj6YwVjBier z;?sjkn)Dre$qiec^}q|;i|{X(I`QSna(sF?51)#7|MW;1F29wFv%BMP;`I<5knfF( z$b>6N)k>`G3N&Jnl_aSyS?JGd!s4PDOiRf?S79;Q7xZDqwguREem1`RYAXKEx3cg` z{M+ZfxNxc%v!AO*YJM?1wz)ROc3Kb2A{iF8hp!a*l7Ke}aMdIR)Kp{*a%jd>p_UZt{$3`Pw z?s+}_oluLr@9n_1XLIm}_sa0)(JXxOW;{OH8-Wk^gy8)hez>?*1kF}=T-f4{%e#Z{ z>6@|mcz-0`m&YG$_rznbi%82VLejWw#V2u_C;!eouaYZ_Gd-TIShk5)FfP|*d7iAV z^Jf``Xo+p^bTNTWlmt4FyY7O|IKgkM;y6;T!y&H=hIKcKLRy})jD|65ob^iSh3y*2H(H;5$S|W~<aD-QI2#9bs zhXno7909dC-Uw|gQ^SL7w?;|*!{5^ux4gN)h>xP(cIumNe zwUt0SIGv9zUgy_okXzNJ%J=eGl!bjAKA>R)Hye`Mm}lP<9`zW;y%F8Y>DK0$;dDE} z7EZsGxDSIfhY0*o475p!$d`Mgz%@nio{R#)e6>$Hs(i8p=NU*JpMdh9a-GUKC3~t? zpf!gH*38@Mea_#eFF_woJpbU00Z}oA8pgK`YU$O>k=Y$lFtq z>B361J~X8-LY*Ynt(l7?VeUb>U#VcWUfZ!W%jZ$)S0w8-P4GzMjE_T=e-S#9d(fWL zi{|()G{v?F-di-7aF@&akTWS6If8c{^$KL;anbNR!d&bxzgzONY^NMV##Q5i$LsOe zGnx48tpq(v%bw2Lo{L9Tb1xD`6QA7R&O(;x_bm4!O{mkIiE~*41n(mgYsEZq&ew(e z%^G0%T@7+#e4^obW0EH!UU06PW5lzCbEDa5jxlJQ8J_2c7`3~u1Jv?@<>=elggi<3 zog8llj;Mww_bb-9u9Dx`f`vG@Hpxa9XuPU+AIb061S`K2>yNMd{u^i=ag`|ZNJp7x zy5xH)NE{uD5R@pMu?!8$v7` zq6gl2&IKFppNOtS9=Q8a(WE;gaY{7z@mGAYXM-EcrU*u8YX8^=o_R%hZ%-nAf4UOi zoG!=bYW#$WMjwaLxo+#oZY&OCn7FG5jYTZZ?4zYzcR`}z3hd_6vXCqpO9 z?t9J$Zq-Hd`vr@lN8IIJ@g5fg&oQxZzae@Aj)%kMaE2KGX5n7L#`hFZG;}6($Y(Jdle1@{J#|Pv6Zsx_tu4L-9f_UF>+G4FKPds3W1|sqgXEjnCt}vL zWSl)(fIprt#pj1oaA{8%&dS<)ax*6(d*H|>HynM@1>2wYMMiIjXn0c{PON6Q6^o9q z&}#B@(eP#?Y?!-k{^r%z@NBmZy0al_*V}n%9Ljt1-P;66~DW z+6#?Ic8A;yi4$@)few(HnL(i)%<7a|(@~I@f(IY=z|QAGaO#~}d~#+AK0MNZi+eM0 zW_uVmz2JdIU&+BoM_EadC=6>~K(l;aZSn1ffCGjJI7x2sfNO++uL@DoS>jiW#+Z7o zCM)vF#`N?WEU)Osn)+U>t!=~V$_DhMREs#s6#yA<=1NkK>s5wyj_D=&GPzbjj&QL_ z#9AUs0=B_Q@)cl8@f9J1-n)Iq0uC7j;up#bR*`LFTs+gdoitqm1x8`$KOxW z<9uPaBuF9>hme~kZiz>zJhvlQUL)>lp0go8`=WVW6N=!Java!Oipxi8ap^!dF6@cX z>e35u#NyoUXu&y2H4uAWaKgb&&Uk-sEWSFDjV}+S;-h`hIJZ3j$6s>A`n$c*xFj7H z--u9C>Ekz(@Zo_Je6TMW=S8$1e=QL2zT_(j*_hv$R)IRb)3R6Xu~ZZYHXudQctS zg68shIDBzF{&cxr4Kx~EAM6U&sXNDoBwu}eG%71aERIVM9EYJmFkM~Oj0bo2;jd>a z@$K1a{O)8vzC4_Y&)$py&9aNzy>R|@SDblu65e}x5{_?j!jTukfUBv0ZwM-8-GuM$n5jd>;nYekCvn1m}hH`uquHA}=zMJkVdX)hEKE`_yX z+u)h^BcBuRyvE%gW0GtU-_cg(i3)8IjxqA{4%2KbkD(-5q-!P9Ed8cIF2rjRYQeaK zsYbcdL~}eQ@v_>CRcBUcg6$yWh#!Q3rbjq$2-MwbCe?)!yq1diuJ+4Du}6khEvL#o zQx{&Z)#bfueYz>e3iFxre17#R3>C~*K4);^c4=^<2oWArlK1N7cn9XqCYihzP=Y-> z3-QU>AzVJ*j!TEDaPB~%n%+aJnlP{TPTbkG9?Ke+VOCKWX5KXq`#-!3fBS4IJ~>u~ zbFv1U+8TrtTfFhaeco_#&w?|1pMBRn8xf9vrzY8hzcb;bFxfyOn6Q>T5d@sb0J)oS1@0v%(A6{_!RegZ7fut+rwlD= zb5N7K5cMLso8kvVY}ZIs3rF{8B^r&6GS3X;O^8B?XtNgK%15|d9N2>q84DUX|E znpX#>Gr1OlO%p;5a~fzr_QJ8ZPFRTg{{H#fzyA$scSNaaM$KOCm4Qlc(XlS6NV+2y znUhkGg^)xqY0|;5|z59;GH)9@>%yM8nTVefm<=Crm|kXalN3YDJTGp(kw+ZKBD` zymKYFjz?Wk31$?}!;H!`m|C_9efbN)NvWNwedtV{jLwwF=#bB_I08el0#9!Xa!DnwL>ENJ?+r07mQ?bY>??K`i!KLVT zayR)s%e}xP)-=gg!wb%p+ehGA!sd6Q-|Zva40$g4-5BKY!^WAV-OXd+a6TrN%Q`@Q z*IsArbx3oK*ca}O5$&$#xFqOTB-ajpr&(?^ys1nPtkXb~|Nk)74bJVyhTn;C!8$qq z`f$Oz_YcBAYlpLr>~1_5{%DBujK0Fb7Fj!+)GQF z@5SuO)y6;*K4;JJT2?c9M5EFt3Jrm=645C42uJXB!kwaXG=AobNs`aam=lZrFM8qK z7d>%Ea`3lg4S4hE@tC{N3%3;Jp`bSv2cB_}aK_H3oN(6)Crk*+gboDynGXU&va$b_ 
zV0>~o9bddFI`&{JF6;^vE$@V)zEn&cBf0!fOpjBoGXJ?BoPHgS&Zbg)^}DI~n`rN^ zPv+vI12MQHymercXzwX`i1JKBN>L5cXSAZ>p%#?g+lr*3GI)#DQ|&Ey)k-wsZVvw8 zgifga4JTgXTcwILH*LH9MFV_~kC5s(2(TMYk;Ci$qO+`cMLewSCL2Y!8yvL&9 zV>+~&zf#?})VJv7`kC`=hV@mNd(|a~!P)`JP3*NbAR|l4$ z)GZZ-lj4zhM+&A*%fPWih4|)V5k3`6GZ)cKuvgfM!4-#|m38{baag~`3#qwnYM?1j z1+O`tr6x~jI|feF6>J-P+hJqd3d=mRF-{$?M?HnlYkoIVWrjwjCI^a883<^*b>x zA(*FrXaC**bpvhf;4(yB<%1kqxXVbsK3PVz2>~aH&`FU0I2~@D$<1@i)hZS4SZhO4 zS-^*~reSgYI?OCzt`4_GjtIku4n&PklO#Guji4aC#v(|F#a-UCpsFgYdaWN{zCQ== zAFjpe9kDp^S^$o{;){1TdEl`p1*orwikJ?<$*lo6D#QQ>{H0(Okgx`NM8Grj?5dZ$!0RHk}7yfv$34b_SgAY%nWB$X1@QcY6!X&u9 zL#`u1Na5{xZa`;)*^6eKEw(M8ZCAt46(8=GL=ukC1UqSbj=Z0pLcBAy;dp)gbks<4 zT^rh{VtaDh5L#ne1Y~I_bx+4o&QvUDya#g{?!)wI0cObxNrL8}ha+s$`URN1O2BpD zb&^zNp;*LM_V_p*8S5ke-msfvg@E&Qr@J|-3e(bRu&}5RD=S;Dx}p)wOIRLUAt1~V z@-+a<_o)zImRh~fNWLRMnPXjTU>ebFcC83ZCfFwQNSAMCEHFY=Y8CrggH{2w)@QfC^C#}QJ7B8x!jV2P3B}&Bn2Nwol=o;#-VAL<+7WL|@`3bzENi|S3+mP> zwtLctba8KvpNtge0!^etC6Nwh$YV9sZW1wfi(vjn9$jJu28o8pIYZ$q?`6)SN*vf6 zhhwh=3%)~fYFjwo+a4}!K)5^>%x@0B&W#iC(qp&d?U&qec~2-l+8d5byMplE>)tr< z+(gWq<_5ROY_zXTz?rRnIuifFju5;j>jg(}zx|>QwmByGnY2%Z%%4}Nx9CXIDqcNmf#9bE_RIO0VPbple1{$&7o-#!~{}M?I zBN2XEk}=B#^W=5%e3*RptDj8AseQ$`bhrT*->JlzJ?S|8dH@C=NJn35JC-*u$C9eq z=wCcfczOl?Rbyh?oW_4K-PoT{RO{Xc=CRKOo+)IA*yZDj7AzwGb6G`evJm2 z2|J;-c%E37#F~|`zSjsJ|5{#Wn+Nu84P>;IUe$*r{Ky8v}CK24#VNGa??m$n92sT^KA{A_>t9^oZc@NgohipN!T7SwH+sQ0$V766XZux@5qo zxC}Gy&ydfgSVVU&K7C7)kX`<=ZhGP5s~&2c?|Nkrq^iTT(MIwj>>KlcO=X+ym>M@W<1eGw}GfN-TV-73KF&M&+7P zJo%*H_BBtO+Zl@UyCQLBM=0Lg7Hoy^_O?Kr7M|X_$s6}R7l(_N2DOF5A3yBJr$>u$ zc2AV(0nzv`cw*;c6X9Q;1E*_49QZaynz!K9W?lIdJpF7RK0C%%4hgs@8tKgI z?%4ED1Tu0ug^y`^u@WU)H^(>}C{;L}nw`8(jPrOGHGtyZ*6)IGGqKN@W;D-ICYp*A z_G2T??WEcy)E2%4%ZhOca=bCm%vzva&ngztp*qFS!SB@Z zso`xMZ?MkC+yo0Z{H`1yrinGf>Bn9_2m>w6IUT{*1Sx;BGM`h4Ie9qCDHTPM$0gqp zhqQ^Q=oZb+qnllc-Dr)hN1yP*;`-HC-nL%upLufcOhs2}zZO^IdE_JhwqRt83r5z2 zV8q`Vr2XUkBrj&LMT5^TLoqQr3;hd2vF*|E*!t)sy!zlotiN+CDyrNu>G~x2d&Qw- zP6X~=aXWfuy5f%bOt_M}$+5z<-lDG-tP93TSsyO%kHY(Jh}PfYi-XUOM@~y3#>@R- zD$tC9#^Hc$&y=&W$A%bjZs+NWd2^i8a5}MOrtJG) zA0t@EMrBYPniGd~x_)KyLR2Ttl6-wYaNdcQm^QRT)Jk4eg}G((@z9J7SU>Ao+%x@I ztR32jrJaKFn!7Qrcrk|Z=gK-fRX@wNq#mtEuky}Bx$t@4v=Z#zUxMEq&B3Q{v+a$n zv62^YaKM`zCSubAV=#06c(|2jBFDWI|Cz+%3OI$k*4K1E`0 zgxh@13N%yjLJqg`b;UY!bZawAIo-xMwR?mPsrg?w&>ot%4*3&eg@7|r>du}Uu?~lu zi*e%U7-URHM7Hi&C!spD9_ad!e;OHp5!=0-%9%bDx!&a> zvJ>@47ms`~LO8~r3;R$DPI8y}omxqnqeM8f!pt9sg<$t@biti#Z%5nWU~G{jc#qr^ zyPt8!whivs{A?I@Z^^{*L)AEcssTISE_Q`+NZZ`f(@ze5oGap0C0m&Q{>7 z69xF_trT3`8>6D&+-^V2Tb2k{uS~f8JV?MsU|tzEKnX%M&In8^l#~ffyVnBA*Am*s zNE~z)-+Aovum*+zZwg3%dsG*7%h>dNGH)wG{ECA4>OrhK*<=HTsr_NAXZQg z12b7jSCeZI*(?Q(*Ig@uayYRj5H;cEmC3b$l1EF;v@U|ycC+J^VA+_M6>P)6lyHzQ zlWUfKTLzk)Tsy?JO}-<8JJqK0Y&gL-!1U800!bE{zTbbgJ+}?+KX%uqloZ(9`PW_Q ziAmb5nu}mbKsgpQuG1!%?eVSXl$cdAA1m4)!1U6kDzIswvAVHRlD)XGLh3iiX|T$a z$uvzg`^wh>u9tiH8V(hZds}c2AVf2HC42OE;ou8CIQ+6dj&2Ub@z;YTf^h5=AH4GT zcyuoE!Mb&xIQqgwoOs0z$6j{9JI{>Az4y8yyfg}~KNbv+jY7)QRP29dqL!6&XKXJg z%{}CT!9|nc>6->;Nu*VXTOwOWVp}`M6u>GHSf%2Yfu-vQfxj-q|B=N0Z(q&AzkfLc zfBc{wp9@)i{AL2)-^+&F0XX@ZtSc{hV&(mA2$~Qt*tJY@lCA^OG|+}0+c;MOW;L3_ zL^EU@B<~Z+t~#b46>);|f>k(hZUO%Fi|P3GYzaPnGYXfs`QXH>?s)qdcl5}6m>8Xi zB>5cK>#9KTTM<%?RdX9~=2#(qcd8n{KUFTmBOhNKNyX=HMd7120&sD=J5Iek0dG7y z3WIaUAf`VOTc48OEARh>T@m=?4S!6(w;7pHL5ev*_h zeh3EI4|ZkBEm-;>#)O7mGYZFc*%O~FZYSNd^YTVW>>3dsDWmJhB*fc_{e(gE1$F1 zHG%NvT0S-bVXm177@eR~wQU~1GRZctlhc*QZ4pkaTi>Gzx>b>;n`;c>ok_NQ?{_&p z;JO$@j!H#!SPL5B28Gk-p)zR!Y7%ElVm*k)*e-Nu4Wd8450lecF~4#?9+>tN9+>?s z?k2BK*@z__k7G{tU3$cRs9-MoMR0ee^%=n}$#{iV1}Y?}PAsm&{w+E9{8%2oIGltJ 
z-w49FtsXe_imPZNXS}sx0v@=3JnHHak>*ye+)PfVQBD($2AWRZ{71Y_(d`i0Hm4io zEO~-(x@bn?Sp_vY-NEZ}CC_V+*N4S6tH}+ga{zZCaTkJgy;_w|SlB88L5?>S3|X_#qwP%$<~gM0sC1nKAUb5Y_E`kE!GN z{uZz*(KTK9zDyD^RyR!*ythR+Ni<+u{&XyFy%!7W*MS2C$nD)}1E`Pgl=mkRPCxe4 zdpmA=E)MOh#b+01w@bSque0hEVUz{7jnYT)CY-bFPYzxG(*M&<2$HzDOsv)kX z_-np6`m#H=Z*ay_FD2pIPiEmS9}nR3BU!j4$@F_$*<;NU`!_gY^ZIe{sm(S9+RqGc zJ4|!O8el5XY}4Ffppn~Yn2ii#Q^V5RHfC+SI-c8)iEqmg|86L*hr^A1mqbVCFaw(L z^7q-?u6UOOTiZ2=9!e9O&>(M2or{{(C8!d-*Co#q{P*h7?Utw}w8yrAQ&ZP;-G@i# zJcISKpTRvdpTX*Z4Orav2xeEW#f;J=n36w7J}dd0M2oc~bgG6gn3RaTiNce&@gQW=bZ00|N{)3%F#qtHU}VL$BY7Mv!#Nzp;5}VQZ8u_?G)|*fcX3XRshcaXvau`JJX2F|Qk5!tum5hli2Z`IuAqbuu@N zbNT&>b&J<&wrNGGt=}yZ&1m>2hmI$|6XUE#vHKko?}pn$G3iHccx>$x7NWktfAYO^ z1mj_XammjF>jvkde`%6Cr|L0Inrr1j)tFbk0@Ebd<0-MO1i7d3XJSRmy_j3G)@XPR z21}o!ga3lAkATN7ec<^kAGrO(TTddHlO<+zj1}iTzYc|yX9^;l;!!u-6S+hFaLY}` zq+4R)BI^T({t#Q9w+WBRnxQAcekI)fE5R@OmJ1}lG z!io7{(eQbmCE62>MtOO{d{idPL7i}XLu8BOQ*9U!UT2lyysD*mXyyhyBsjlcaK3KZ zGg#5{6c#kzhuKxDFs+!Im-1POF6&AkKy^qBa>t~ip{oR&ca-9bcQWMtNXF$o;W+cU z7v6n-A|APK5@MU9;VPK%lYeKHN0DNernqW&!St|(w=nL|@HEeeal_{}zE#6p$2Es5 z$jRfz95;M!@I8!i;d~3@12IkT>fpQ*SVFOJFq^&JhO6i4Z*v;A!g_4=D4Vlr~)6JXvAk{I`PH%ZhUdJ4IdvX$E7!uadx{e7Ce-SM|bq& z-#+QbXOcW#d?QLoIRHmr@xy;Do{=6aZZ+kj}Y9v`d`bwKZz*tuGF4rRmqD` znK&P{3A43lQgc+R5O5==3$YI6&cR)iAHpNEH!3*q6#`y6xB*MrAHm$(b(mSU3{ymK zYh(C~DQFb}E@orWaq-A=lABj%9FGgh3j;?Y#3*q~&B9qmZ4NxTH6 z3TqYF!^yRZY%95rA14X*2w)ml+fJ^{F1216Dzsy@I>oVrWrI@5mu25VxC%^@Tq`&& zVESAYgebwq58=6^!yz_zwI6`~coqh<2ND9jFZpV<;Pxn#LM|wNsO~IFmsg$Hr_P`cdob%?k$4#@riJDOzyK2Z%b@jJJoDL zstwR(EpQ)|gqgD{@t4n=@vomu#-HAA!nfyZ@uei5AMB0PBPwS&?Pd$tD=+O06F(yp zUN>^gy+ytwuuSvJ>{J_EGxTdBy!A+@yeGaQPS_{T=W6p_h25H{eF`e0d(p7?34Hbb zRQ&mJ2fjX*i4S%K;LNKo*!*fV%4aknE+7|4+HhEK>Y9v7zg#sxhNkpl)4@*pcbDSx zqeYT5bEhg9AMcOA<=y@`x5W*IpBabe){ers(sbml?!ftj8TkBg3O<)L@9VchFndiq zGQ%n?1I@%0IXa(Jdie?qJ80gmtHi zcqXnb^UO-5)j%6zoas7XCDQtHigz|1jnQ2zO{OI{20KivO3kh`u}t&Q?uEpo9^9q& zyCzuHVfM}nQLOXGhr8TQ-**G;{nHnbaASz}w=|p{qMXiSG(5V^(dgV5EA+@f#>6Du zG+~dOxs}Vckv*qu_at^=e&rG@Z+`$YD^_UbDJSuErA!p^ ztG5SBk}XN}OI~>GX=k+cd%)$Y7~P2;HqXf2Jcb>JK>tF-4E18_#&+DZxf=Pao8cFb zEnnmC0fTXxX~etsJ`}wB-V}qV$O@$P52E7XnVA3jG`#qB7Y>|g!`(Z35!u-R-&<*h z3q~zYw|U(>md~Gj&Z;zC<@>nd#TE@(fySTHuUU^Nz9sm5{C*uQAYATeCDjz&!PiRy zG$9kU@q?&IS%?bZ^s3ldsBc_@r7zuu2M*7~vU`?dYWZ?3s9KH(r$3EH=02-}`z{gO zD$Y$l9J5PZi8 z?nOXbe6AtM(fk<%?+ z=X7n`EH}I^2{mzVak?JU9>M9v`$S2qj~@K~8)!Vj$p+J=DkVy>93sydoNyQ~S}J2g z0Tn^$i@fnR^a0k&3OM%DbDRp#nGKn*t*FRTQ^L=!DlAn9Z8Z8z2Jto zCEj>)Jnp%368i5iz#q=kO7dHP5BG7HMF37nQhjKX2X;L%5l`JY3O*?ra1w2;1Hs7K zR@iI7@N-+w8_XMSXN)w`3=eDk=1N1&fpzOTY?|9)Fz@)9^)pxePRl&AgGsjOm1H!$ z*(4+G!zBsLKuuJaR-#o~8onlRCOB-MJ+2*7a;9T`-CEJ;OR=Ww0X5Mcn)58~Qw_fn zOFJILf`)rdf?c#gaxu2m;PK9Wl>3*U%rg^3E~!Wn?hLw)+!i69eHxlGTG2DR3wJ&i ziznB+p{6$p*`BqEbv4lJFn4IM<+grpL4mpWFB*=50N%+MWDvX?Vptt5UQgh3`Sg z6*yP@E=e|V?t66*Cj7`1kNyA*v~gP6unL?u)qKy;Y4P>IOF z{M8=V`h*i+d+>I|wI^W0=m@y}gcaeod1hgo&wn0*?9?>8cCZwGIV)QBohW>HG#UTr z>zVlWTn#?h9e~rD-EnM_3l2Z;jDwOp?0bA1(q|>ZV|2J+(JFKyW);uO*BLghB~+K& zVPo8IH}P#A%h%L2w_c-$m!BQ3I2|_ASSVEJTZP7?>B8xYP|mh233Jd8KLu@~v$|6z zV^-Bl%&uICc{MA9)1Sn{g7XJvJ%e>a&tO&W(^%5_5avm)KBIgYrWVe}VBReB=gdG; zXd|*}+VH^BW%&3&B0f5hfJ;2W`HC~1c*GM?m3*H=;VPdC`%~v=%N2{S1Va{@hpPT-e%PC{29TjR*4hO zBQQQPj6rU3I(0m8F87Sl^of#N(l}$ACT@arQ%rbVG_76Sq8nk<@ixX)!-vazU@*>b z{;>z9H_o!q^gsXkzc$d8OkND{pLt3`9ial7WuYWrk}_XovYjjmPOfvRp0kdXH>SY1 z6iXWK!a(*+o!~`Rb#}>Itn3i6UAfvE^&&V0ycP0+Crrpj#HeHu@^J_ZDZ=2|I-J=V zqa(5pzvQcl^PA7PVeeB;c9O3}}}=7FW_MFb}&!TqNK zBpZ~xO4ve~6(;)Fdd_QjSr6Vg)Ppb1wd2ykJRIL1jk0Bx@bFBB2a|6FviV*7elPhQ zW*2LiggKNU;5#W_B$J*}ocz6~a 
z?q_lDjAyZCU?Y}wK8}S=_hXI_@br>}x|7u}qPt5zuj-I06uV_0Z(=gi$0dotq?0R) zvu75%6Km0%Sb^5iR5S%AqtvfR2siIaVA4Pn0ye-TXsx506HR4`5!p!eJLc9)x(ui#floC9~$@A1CUK^+s*)+}6 zJR|uENC`@lT!(2QZHzOMT$>mMrq^!+5h=pphwx|Xvs>Z*&+dxvNJ*smezWYGNp!BO zER^z?XVi1tbA^1DV=#A?PU~V!&!2&n?IPUDmqMEjv*~cwEUkbn39Lg^ShL`Lilf;$ zE6udB>uUST9UEhy@oT0ZnLj-IGZ534igovl$L5E};JNjqu=1{Pi0w>)YkVo3GfNTC zn}*VP5h$M#?e>3Md*8C?*xL z{A?F~_g<9{WjxMo@x@!OOvL2AdK8A$BUSG0jIpsO^%7AYREVaCDs&~ZU`pXMOj)!N zYj;n@nZtSb@a;bHY%-FBbcx>txz98#JS)*E zuH%r%BwK>mCaxVMOpG#7*4^mgB-)r~46EwQPNqlToUdu6io8a`rI_Z9m0gL#BMPP$ z3JF)REzh+-ieTI9Oq*UvY(DBQqR{MOnF=)5pL>7r4YYH|&%xuL-4IJoztwPhuwXt^ z)-qP1BvSau=fPd;97(qLm}ckHqIrt%9vbJdavx<5U}@7@EN;0^?x{uK(GT|JXiexw zrdyGkXRJQ6`zR22S=Ot|nm7Di@3|$b$E%RCte0hUbk@2Dg$Z6Kj!^2eE5^Ej~Hjjt`Gi z=%l|(@_TAmX25MyBwT)CW88>vKM}MM1(HNOF@RI2yYT1FX5rs{Hy?lfau&Y1*ox0j zl;ZN+S$KDQ3^Es#!RN+kgH;vW44a>=kjH9>TVGd>*RPw$j-F^Lv?;y~_RR*HtcA+y z#zZ5x(}2wOC`VoV5UOM6qG;eTy!_U3{O;3f`1({89(YBPommfHcF`)#6W+dW=y5zu z6K&SBST|J!_vELswCy1*sJjcZ%9l%GJx>MqfC%oE=uT9u>BYe`PCo(M+KU^aZc(*<|w^6w~8F-=a_O)*KNGc>6-yVb*-Zky(2D2Q#CGoem& zh;2#q)GQ;P3+^>+UjOc9m@&?j(}`j7w=K3u2yWe_=GPVLCW&S;%^=?Svpnvhcpu5@ z@?4L2SYn$vw=vH(fE-Vp(=ccJkPNg)(J8DvV^s>1YMN#YJ`TMu7-{3;QRtqoCrN0q z75Ws5_FI5iC5yD*DJxXi_G4Moomk%Xfa!B8IA@hgYf`W1_e{8?72uwyqjBHMMcDCb zD2{Cj!+SepadvMC&h1MRd?(`6&S;$65shQpBJt`op1A*>QFwCQE!cej9eDkL+p%Pc zE5;S&ps+F<=l2HdBDs|Q?iG!ylEX4YNaWM{;{{|EOev{q0p}i)!|LLLDnLePnwtmPFZDDBHQfn zM3Sf6)3#~GjcXQ=__<~wFt-T_v-&aRnaOx&rA>pI$s4otZPyJ+lQb zy_Sg&*!JcXZ|r?`EM`r0hpYU(u96RV%6iDpEYG`4d7SvZ65}RsH|E($j8ntY9OsaL z6w&b(#x2dLT<*ZOm7godt&=12*%IR%P{-$H1DqHi*6&)$=HPR|G{d3a4ZgX77B8=n z?`e{Ui*9Gd8M&M~-eHcjvXnXc2n|mz*W;Ul6aQ<8@gT{oBmlB!SWYTU~{LMZxna4=yr3rnmrYAMQCR`Nn#>z7LUtK&X|VzRVy)Bh?d~& zPwT@HJ!ky@2J`2sdDfRXT>wdkyIS6eEMz(tqcD8{Lrdzh>$$ObVPi1b9_U2pta<2~ zIs=tGoruaWg3q`(1yn##K926r)Lm*e8b7}$3g>r6;N0#ooZT6M(|VN37bgTj{Yzv~ znVNw$4+i1%%j0qEMJF71VjP}ZcN2PsM&tH^G9=FF#k$ugrT;B{t#J3{gwp84=Ya#ypyQTQ!rA~bOb{;P6&%&iQbJ4h}6&^nM@c32O zu-K-%Ex}41O|Bg#8v98_3u&aHIH*+sIZsPO*)yqG2)Hw@6J7BgBA(|exz4Ltfwf)t z;h|X@@W7mBarg9RaOV^f@KadQ`XJ`lt;3wk6_`=FNP#&ee>UnO8jw9P8AYx_z!Ouf zWHSO8PHAWeFGFX1B`W-L~54j14QUNb6t z21){J;CF3=R(5H1*Ht8$VIz(AvzK7U4E_+3{Ix%PC&a_Ex)4*>=ir5xld${sbiBDe z4~O=Y;Mm?$9N$|cQGgSyTHRZW{o9kV`0iM^MC8HwpFF`vr5gO5G|ZHceGCa(!utyG zEdpDQdr^3c;N?!0hg^Mbh(>Bb9#%h}f=fHSaZc8Oldrhr&~r|BbK@lJetZI6cwjUd z7W-*qRGqR*%&Ne)V}!7l=YJ=*CA4RX{CzahXew%jo>uA!0bdt|1lMe=eyI*09;v}c zhYE3KM=VZm_C@#VM${+OqR_1fMcyUatCB~xdJ-BinAwAYtUgT5pN5&GvoNo5Ar=m; z!PFJ&u;`h2IC7vA-A~R#!{8J|wiTgdK{DQdy9j^0P>XNR7URn!$@q9*C@$}D#k8qS z$P6f##F*8kIY_rKo+^>7rQVr>WfO{T4hUcZJ&w(66Ghkwwgt1qHVqp?vXVR~t|ctv zjJW1b6%%fn=_<-)ZO}d{f^Qv}E%??6UxIP%zrYGp3&SHtc7ouXJ=Rz%&VeGtwK+q-%xW)q$3g#wnZu_~z^Hw}E#4n4^RTk* zei3o2(Vad;lWLmhwNV}NnNXDFqcXe&6`@Tca1zxF^1Ue$#jDzI;@AvaI$V#_yAt(y z@Ud5UOv@YZK06U}?+nI*2ZHdX@b~$>sknGBA7}Puiue_dzSGN}9h1c@1 z{IN{p{O(Yk-X4hE&wC;;D&OGQ z5w^HCBHEbY_G^6no#5AhCAj|uF>ZvmZKkQgHsXjEH^x|HPBSvwr5r`mAH>!p%kjrA z=HgEuPsgW6D{yjG4DNiq9bE(KF{@}fW|b|)Jp+&8;knP@J`vn^O??Jy`$cefh~RFz z7xU$_nk8#Dlc$;7Bx_iS-t`UG^I`}NKkJVD8{F{b^8q;UVi?|dJ_4_Alyzl#1`=W$ zkmFLIqWTJ6XF^R=O=EaN%v6+HEf@rwNyMLUGr61w4mV$iIXy#xSQV)`@TOIYV*JWLku_MzR`dOr}F6iL_0$P~kM2(-{=m91i5*^y@56 zzlNM1q#W<`BR3rTff#5!o^5YHvZ6fndcnO!jNm>|K7&-jH;wWv;ddGxeBT>m+b~b~ zd@ySox)M4tIi&})xC!2VpP9cOsoyo8N@o*&Wi93G^Vj9 z+7i^I%tvd=R7_58M{ly^Q#muSu;EU@@(L_(T8qadxn`9LO*Gz5n;yn? 
z(X2C;Jz-tLTNB{pn**Ql9C&!93Ab{~w&7ueFEI_E@ph1EnVBU+cyeex?rEBXh502I z%*@0{VzA6Ma@Kc9^+ z-z&hyw@pXkeH%P*_kErS49UaPj83d>UyW7uGH>QqVrFudd{-LULX%PC#@&7lkn{8H z0Gj#Vxoy)VBk_EgbHM+dcxMnTW3t)K*%aG`df{|tnr4VL%XQ5`vEa5cs2cTx(dj9j z=*^m`eBLiyK2Wkq{PcyGmN5&JA$2m|a+S{+Kw}mfHGEZYrD*sz)QBD^@GBQSreX z56rzAQ9ln>kfjRXRBQ{GG96rI_h$EDxNM<_vL1A%PS-4zMYStbY&S%<2pO-?T4;+J z*2snB7SFlp&YXo_5p5LT(`1rwjw(Zyk4z$WiJ+G63%xS~#f4?q^{Nk!zQipLuGlM% z@KftNv2eaWO6vn~XM8ReK9YvZC#T|r{h2uZmOqZX;)XrX+>VlI@n{>0z`-{|aCvVE zF6@ZJIT`${tH7e97k9^~u^!pQoe^>j$K~DOxU@S2v)2|NHZc`GBIeWq7QlM>6ky$} zP5AWvHhguu7M~m|!IeX~cz<6i&dcY0u+Xvs2*Wnl9oaRQG{<{8Jb*b4&32_UZW3uLtoT-wff8 zpY-7OA5YigpFW*w1<>SooG8TJx3e*0T@zk=B?1T5yI|`R6S4Ji7reDj%sRfkc09tS z=D_Wj0$iD(Sa>*fk`Sd3aI=7u4tBA3G2-t?M9i(sPK=Xrk%AI&h?|9&I}$t4li8oI7$qn<+O6&d^@UypbPxvH5y1OQn8&YuOR_5OGQC8 z3Vdc&+BO9#U!%so+16v$O<@T*RV#J~mcUu%9Z;F7gj9=Ew@skOhsj(zrBTnq|;}9i0r+y?g zc`YY=1{#TbZF^t75M8>0HB}3eF05RF*2HcR*i&?S*GeH@u3!yREW@m#1(+oy%vJ2Z z{P}24??<**l{oT=2)i{AAvY(8QSc|oI7mRm*d#Qpt;6Yk#d!Zv87}Ni!?~S8&SDqd zNyg=O^YQ8NN_=ys1h4L`Lif@#q~#Z&dRh$DJnD*9*Nw-nXU1aZv*RoqkGI#4!;4Q% z5;FJ2%$a_e7?n9{^~xctZ87b9n`Q(u18C013=~ghfN&d?80%|!G0-m0>0B{l7P*t0 z<@s?w*{E%a!r<~y+_%OL8`e(37ICDvJvmV`L>cJb^Nienb^=~}!WDyyLy%q`i}2hE zJotJo{`Hey{N_3T?k;eU~ZFHzxEEq2PUZqH{v}=MRx3+;&8bYp){~oMK;Mc zXN4dO7jVS2ha()s=9&6%wQq=|3Ec;PT+$Q)f0T8BdKeB=wxSr@6u zJ69Wr)QV_tNt}lIm{t_aJW#+)G{H9oIJbB)3x<3$f^BEBFl@-zzHOe<%oN?4ZAjNy zU^`>mCSp5q#@h@O*{jv&y{wHCp~CGSeGBJD7d}Mzj{}e*SSF6uvDVBKE|QrHQ^V<| zkUO8#qQrE%#hGRoKh6QV^-JEHG&T-#tb=w7c{^I0^Ee0347bZ;oChrYQz((snL*hQ z)u;<$T-oem{ge#O0SiP>52G= zNXDXQG6QFvuN_-A1AY<#AK{&j``U5*M3->dOnh-}8a_Hwii^7v@WD1uta~^NX^qur z>8?P}6Af7Mayj~+Dna#(LU_2Pz)P;J|4nhI>uSYAyJq6p=^9)*nT`ENi}2vK7R-5e zDtcG7qN}F{4V8IFOiY8fXNquPtYB9hS=L^0a)_D1;w8L4-Zu;5Lo?tiIMOWikzkuW zKm2Xu7R!m_l95xDhY@7AfOJ54z?HZp%;+Tq{DnK?QfWI@Yr=JAVLkiiUJMC%5g?MKufF zi&SV+J2HUAZoQR(WoQ(gFhxXniF}U3sW$aaSso->bOX87!RgL8C#PFDm!m$ec(?BB z>rLR?!RZd{kH$R5t%Z9TzYL=BIkScH+6bT}2tUW&60W#a-A)Hv=FCjtd7gJV*LiM+ z=jmAUb=qxTFwR9X)$k0Q-6r@J=Q`AOutlRQw+ptJT}qrAu9x#D4t9{0onmiY;x(!b zuM@1##~k5-F72w{iGiX4<$>DpW=u`$#EPah+Ld#-<{ssCE}YT1o>?dkw(xv$&=k!k z_4vLoCQL}jjQ(J379QKP-d)D8r)FJtEsw+Guw;0$rl~l`lYbJ6$-hW|+x3xPol`fh zb?R$w|2PH{C!}HWq;$C7m;~P&lhM1pABV3j#;?Eb#qU3B#qX}xxj1kWrJ=h#z> zb(2V!1kP=JYBZ{ad)4wWf_3j}A_J`=sL0AX%d&iP;c+%)o0O`F3f#(_d}pliew_OA z)b9zmMV@9xTwgCqt=Jo`% z%e7>KpkDFgrYADfbUNl03w{N=g+3LUp}4e>LA14)SGG*}V4>n%v7R@G8HFn_a9=Za zZ;r#st+6=%Ry2;k8LH({UVFt8)g{4j|6u|q$aoIOPC|XNADUZz5L+0FaWaQ_e~;Or zfu;@`F+w~zo8UzCHy5piZ{5UCNJ}n6=j>JtKhceW=Nr+oBo~R%ae`Hz6I*i=UuG`j z{~bRr4J)TUfJgcs!HU}Hn4g=Eq0CIoOHae>q$Cu&iq83|!TczUig97e?e-+J&Z)!u zANS%vf4>a>^A9WVuU`-2H=M19t7t;qDMn_s7s<TF3b-2&I z9g5?xdE&rx6R_&;iHJ#vM^$4uw!G+$Gq3w-5z^B#_)otXsD%yBz8!3AYoOL9Is0~? z-aflE0Pk<}#e!#&5fqsLF990`bWCR*_U^90$M5Fg{Em2>ek)wJTpi!+gX5drv3sK% zy5~m2O(sT)Y>el5LFQne#xj5N`C%5eWaKI3ho%*fNeE zSDJa7&I+AngK-{BMjp?p3N~wn#8<|=pUemL%0s4Wwm8p1rg?KPEuj@1@y(c()u*Z7 zr2%!?`koC1*p+kflr zwlxkXwnpK^8-ZHD`^?r*oZA_P_xI=G!z1PR@K6!19!$rTcjNHV>&ck?Xa*Wr#-nY( zAA{Yl80zy7^Tc4UJ7#veqM}KJcvcd|i(}%=6=@+&Zu4N^i}jAUWy^yBF+NW67UJUV z?-J7*Kw|)ofvPbn@ODdsr%Mt%?no4&9wVPc@^=Vq716fQ;bnKB+tM&0E*DW1F{p3x z!K_YC4A1ey!ae5? zM>hN7;EUdP?9uUv^-EB3Zpc@@UyNHnV(&~vnh+9g`7(aW=CV%7S|)#mgi0I%o*E%+iVghzKsKI0%%$nM4fBHTYQc0k3;|A z187&ye~6GD`XEto94i>st(+pZGbWolHQ915^T_G0BEW^y*>Rh}HagbEsTNMR9BVca z2)%*9aMsTYRmYlhEra38=PFcqj(8p^aD>z4+*6?2ydEp#x;JMQ2D$Ykvq!ggu@f(O zz9V&}cJX3e9j;n0Z(WPIrAsw%PEOY>RKfhTj5(MRUWgj6NTlCoIx>-8Gr{5^T`9ta2!I~oWqkYjhygd|! 
zb93M$f-5qr21hT>!M}dB2!Hsv58qsB#&0gv<9C-D@Q2F{_}%4NeD!`APM$5rx;^#i zS<{HR#zG`UCc{G<7|)+XjK(f+f6w1^m<3)Vg1bqymJ!_S)@h6G z*}~@B2TJ9$S*C#jbMih{wOQ-bblv7+ z7rDdm2hH%e1M?2t8_qWwDa<b6bE}gmNq@4!@qf=l;*iqP#_#7Sy>A3ptjNHlRbzBpBdPmUGf>XBSr zJ(Ppbj^yC0_X_Z_91reE#{HYJF=cTv;!@Hv`BuRiw|_ffbF_WO_D~U-sM2jgnD!nTFJCI}z4zXGFO^aGQmLv`CGWk(gFqNb zfFzJWcq9-`IDrsIc;V@$q5XC5-ghV7%zN|C#9RNjzMcEzDM`(}6VWdM&76$*A~ScI zlP7bpz1H__XIh>T>-%>ca1iy~U7F+SwN*G!bC7(#Ph#J;a^jjT&*-PqxLpxagJrcVu~K}u;oL#Y z%kIO%QqHWs5q&xH(Gu5zP2!_(?Y~PW2wK^)P4dSE2ji=RFIJ*u&UW0irw*?^nTk_~ zlknz~2{{k6vfFgBEi;43&zID_D^2>ekRf_}#A6!{Td@@Z(4&;DRZcLcE1q1E_-%%63HnyLAd^ z*))^{tW82Q6|{_Ov-T_V3e&YKhS6BbxH@Zvd_)csX#mPUCU%+pVXe8c1k4XBPBsZx zUK#;PFwe~!!n)Q?+B|e9t53UqtZ814@$yw@j_r^Sx)$39?!;Cx)?zJf;H;c$ut31c zfl23;EJ1%}JLaYqp*t>LGM*I7@*)FLYA~as6jSrd;U^*^`1^@Us==I%F0T&H9Zbf% zM`CeGGB&$P9=u~3vf6VoO$yqyf^6J$n>gc#{P5;Op*psZ86PPUZ$BaSq_Y;|G}<(7 zzx8N1P9F+D?y3@m|C%ks5>dCJ6i+>zj<+9=$B9GXIza!c_xa=aeKYXPzL{9EH3-|xg9KM?W% zK?nYPrdu6@zkJ*$j&ujUI$4F!U(CWO0rkyyWumLJ7JbX3uw`o?_FOv|_gps(cV0gY z*IyfmSxe#(C-0L};4Ary2q!o>J$CN&GL!|?pkPLcX69&dY&a_$$I>VeG2Rr_i0;%m z7|NZC;llY?P`22NpQ1su)fg%o$E<`|XdvhWm<{2yO0-1Rp<9TTk@NPnUNmw#ea8v6 z4t9}Wi3&XCEAM8a%v0VVb%ANX=@PKD^8=LI7MK>O1Se4Bn|18B@31%|0(GT5lCLrV2>m6tnqy&@_h-Gb;@E9HN>$7c*N*H zM@e}Qt>kM0X#5t$et*tf!Se>Kp<1!e{}fQ)iHDEgGWQr0`xLqVQIo_I`ES4OlGX zYw*1iV--s=RI*44`)(u)$#QH9W7+auG7(27xN0^I-4}yb4~7Uq1>?;}1glTR;oYZG z#i=gDM=w|4qhkg5@Yyt+c|ILqyi%ky@jmoK5vmu~VVV^D{#O@k;M#vu2?8(4N6?Q_ zh0FzGY-OUNI37V)WFXu>7vTXp2$>`z{pTXm|AxTUpqbZxu!!T-P${1l4`SekdFbBI ziJry1XlkfKLWJD!=V|KT@?0f%ifIzFtd%&07&)+cD7%OMJR3oiOW-HY=d@U{gbGZL zt3qI41tNZ#h43H5BRf7DyAG!6;NpLfvfvAG7S24IF3xovPCgu^v+NFRlJ7n}%^_gt z_ryYf605ab?9N#*vr-)CLY-xim`}aB1liN;Q6ABQ_RMjNiu1FmYNgS2pvC_v0(s&b=TFK|vCVhK zF-zG}yqp~GmoJ=d17^bMPS7mp%5-E4pJz&0V4Z3$1B73=W>a|6GR25)z9YUDR)+DL zc${kTx?-Q@LOhycnz5=;L|D}_9sh7h9BX#vWEY^GtYP$L4`XZ3Ejq^OrtVv@ylJZj z(a7lws#go{IibKHsspm6tdj3{iInx8uA3l(sezFdv}^85Ap^Cxrh zhY#imUOMp2yR-4T(+&9To7G}f_(~k?-@j3UKfYCnGbeL!>thvYUfL))%SY%>S-Iq5 z*Or+UPl_o`caORI-f3d2xZwFZeoG93ewGeD`HoWs=l(NG zohAP%10O$|f-_HZR>Dxca=;IF-tLd&(qaS(PN*r&v#`vbDE|2j+-49hpS9F8D^TKJ zA$%v_`qCW5eBR6k)W-CnM;xa4rAxHc>3I1fZHY$A)44V+(UzerUCK3aqU$ZE6Z26u zXcMmQlW*FSJy*MUH$>IzWCP@L@;X~creB@o(Ti&G?A~JA8l7sbG;^G4)1~X%INfYB z?;3&aayp&r;O|{H0kj-I6Utsg*Dc{Rc$ z|HjC-U2}L1zW!)PgJz$;n1hdB$kd@&zBpcr-@H+U-@a9kuTHn%%Qsu_(JPgB^Qk0k zzatUx&1DFZ^3LtfY3!V!88JkSAX*^g7pY=tl1|^~Jm-30-INuALFM|1X1a+vt&$x-uxSnIuM_87Unl{z`lu@PxoNEAqJ}4DGgy`-oXDB-88k~3{hlcLJzB77^t)i% zE1zjz6TDNmvlkfGtf+8XlbXPp`sE(|Zn&KL$(N;T#RyxWnX%C3ny#FZCssCZMs37w zj7mP()OD*?gs%||zeIfWMKxDoL5 zg_k12X9qOcf?OUp2?xCBL^IjVQG<+fgDuH}C4&oa@|Q;IiEb>QE>S%Lrdm-YC! zuNUKw@68flI}`6cX=cwpa&HKtXV)TD@HZ!O4whB-V@YwX2G2P5*+_Z;29t8p6p}7} znf;G~UB`%9jhzTCxVGBeH4{kVvk~iAQ;Lx%oX#=lvnCa3g;@Hea%9h_M_G6`+R{dK z%(Dej26HHug_TRSQd}#-1>Z~R*6OfJvy(bRyHlH2ppgw-WV=-`&u*ZDc~Vwn4x%Zp zRW&>-#>nUN*^5N8uwt5AZG1vw9v-Jn2#`BE-b=%C`)qDE*ghBE2IGd`87N&n@OPet z^TwsuBJrnmsxv%Iz&Y3;TbOacLN*H}ShE<_rnnZ3n+`h8+AAGDrEDn%^GDE|DnJv*u|6nM$FdTT!o;{u3lV3O zQ)7pScn{MF3f_1q7%$2-O&ik?d6@|7@5%qJFTuv^yuV>23{!m|Pny4)itG=i=QL^YNS4E5&)P!&h%L zia2P-*KfDr>r+jVUZDblE$7~RA`*A($iVEJ*+@!jKuS+GmM`?j$|b>=J3jz9y(tJ3 z$2^Qp#0g4A6wow?c4e-BF<-v9>3ggLJd1G}eomOk6och#Ihueo z&xx}sPW4x>WMTh9c}Qz4(wZ;r2F>6!1K1SJ4!%fYF`-4M85uy=olDTWdp=rL^`We! 
z7SUH{%X{OPFc!0Cn~=NQEAZk>baV^;KCHp-U(Lr?$CB|6?^omfH}bJ$9mI;;005hs zNklRO zOJ(@P7d;( z;QierF?d%5EL)Pl{dj~1m|i#-iiE|L$ZYMx$8R*_5ASygS2p0QlePHmi3)r!82#i$ zDJPB@`-t`ehi^#3Cog2+%VVi{=eY>1-js}>z%mu(#H{kSycdIAvsSa1cJMWE`)`}_ zJ+;gCjE1q_hGju!Xd!NRtP!W4%frcMv+(*eS$O`53_QI*2@mWHL-}|a;-|>Ba;`D6 zl?jKl9Pu`NJ|2sc?<@Q^tys7{TPru&LX%@prith-3Tj1j(p(G{EYfm$p$5?wW4vsU zXg5}jah%h&;&d-XcSf(^z1|VqPS8Atg1b|$9~GOA*2E5-&5IZ-1VSb#InB;!NQV*gB%U|TfuNx}PDP5AAbvOd*< z-->4KNW5Y%0pp61qwKRqF)w5q+C?J z?7U<5I(J_+wHR?}%`@A#VAF0F`d8rg#e1-C-GkV(Vh?T_+KBZHgIHA1fUdYACb9<41os?loa3E#W%OgHXn{D`188vg-L_bx zgUxX{IR>KD@QP~>&W(QbIMp70cg8q9k6t%8KNsuX#_qlsLIABIxERU5j7Pd?G-`N` zZ^m9D8N%Ddb-cWrNWrG@#a%3)&seszRT?!sF-vSy_nE-iS$?^c6{c07G3tFbl3-cC zO1?Y0X^$0-Vkl=&eRlGBRcNF38rvxPowIT-77foHB;(}ra?$Xj0SBtKV*T}Pc=pj$ zoO&`|{Lfgt`*;YJ-JFf!%o0Qi&a^^~d?a~3I;04_YX|Vcd-L$GUyb6=@73V_m!({r z+bVgPdYGC;@bR-0WL37~p;s2*zy8BU{L7c4;^!9PlV@V^;gK-B_gD~)9hi=Ja}(e% zeyR2(@nF#y^H@iE8`d<9U~@|^)>YPERc$TS^)zGa%5E&*(1F>#^+<`$L73z%V#vew za=*ZlY}gWxnCZnD7pae~ z!rZKCjAj*Jer6#?Gm0>jnuj^jS)%0_+~j+3{wD)y4u&HI8`1nO^36;c?Hc`Z4KB)a zvD`4dNu+$3TCA1e^w-m`E|zr#@@BT6Ml^g^&H|kjVuA2ByK9?ZnrZ05ag7(#PcL2| znyp=Uyhiw3G`y_Y(1qQ#yTniDa4vHM>jvXZf^joJ3H|kAe^U|8XEy%1<#b}*@c7@> z@HFpjZ;VegSHBwz5l!}YpG4rsrPm?hC(&B#oFkUaXtoe837NIih2ms0Ekn{$g@7rV zlP;Emji3`C{w<&q#C#cc8c`Ch>DJ{CusYcSP63Dqs7^*8VY81e9co6lIUd?@-muQ@ zv$AoEh^$T>V?zmeaF-IW4n8d4WbG9JI$XL0ec65Jj4ME0AYW#>W+*r4Hc^V$QYX)A}KT`yHAWq2nuZbO} z9Sp#$2RY!szoZ4=HIB8mZzdkN!yhp{W#Y&R@%}Id5trp*W@QOl7sO!Y>Hw@><%hu~ zBH%mn;3tJdI0;&Q(}LMMfc77o0WrmK_>Ys3J*xmacBkWw!zuXSxg4B%u~3}oVzo0b zTt_niCiz8MJ2?4BiNKSm zm=|q}1%uxRxrTVADd7r%c@wYJ5>j}}@9;fq>j3#CDuBdV6WXx4NgU+T1(Mh1pgy<^ zLs>#h4eL-BISWGtV{G}D z(12^M55lv1XW|tG(KzG&%6pvsb zH3rYzcVIBcJRcoUgsyuQ;OkFD@vpxb!#{o0iNCzpgy&DxqJ4e~qNL1Z0L_SDr=_OH zB<%;W2)HT-bLP*&*Kbyekk7#fPo>~vj?;EDL1!bndv7=*V=CmEcwMXSpTy|kPs%g< zoory;d@K9RVv-j83n>S(3bAK*Bwl|c0dGE@fD;eN_Q5b5KM*GGCvVYDXT0Zh-lp|f%=~w58GhK+Z$>k!n1+&Sc=*|@fsEl1WS7|p+woc*L zXq=@_gJ?p;BgNyGlR5{pVkod{HGsx$U2F#4lQoDz)>LQop*g-y9IhrbMAoB0abB%6 zMy6erjU*w(3~}s9xR!{S*SxW5i1_RP+C-wAXa?)v7$^Cf>lr}%?vH`?#hK5A)1%a} zwtSu`B0KAf6di1wm14Q#WHUYWvJ@Tjh-E_T&rLZHB6;Xth^SpX*=7$93!t5!AmU|; zc{JG%$BAaG>@#-Rw=U+TBRK5(v~TGxkG4S5RF~D1n=6-E20`TA(dL`VnsDI zB{@X-+;R#zojk6*PMgT-=MSKHW8U4T=%!PBp#;#_nk+*c=(JxaA@!0(b+An<6^=oh zF5jD7x#S%Yul&B`2<3`%^O7}UT6x^Tc!caL-wVbK&P@dwk9#}U7VBlfl~_`=V%SXq83cMp4_x+a|@W~sU`24Ldd~v!PU!0nQ&rUSsW6`~@J{66I zZOwT7NSY46!X6^;9*))-tUr0B0$;w_iZ4&r?9E zPFt%ufiex2sncz`cX~|k&vKkWQsMWj3sB%!DOQDw&?XcIw<0&N4Q0`Tn4Ps01I4Q_ z&X#D^Y>BqY4A(#hTgvg}O&hc#e57Qdj(thp-YSlDOKb~f$IrrS(ePbT4h-fG>$)j^ zw(xtSgLmO~!8?0ErCpgNyp)C%I>3T=lh=%qRPfjyIaf4-3u9|Y! 
z=y>C=(?@TTa(ZpkIvs*#AiWFCVU^-j_8>9% zUX1b56=<1r4Q{->3a>mGjW-|Tm|($Jb6Xms^Yan;Lv|SzJu2K56H|`4Hw@wHFNW~f z&lltKH|y~6u{4~1Iu3Q)$|WB=K_$t{Nomd4_sk&v{Ph~0iTcaeity3V7`*pn7~Xy~ z7;hY$fopFLL3S}4sZ1PlLGdS+aA_)fbLPl@8O7$-er#xH!Zp_{!GrIv!Eeqq3+q02hQ;Nu|GNnu+w&h;mkJgrLi`LZPH*YoJpMJ9t|Me0y`bi{G=cZSXx2-a7-&?e)X+duQnO)AvlrBfI=?_f3JQTP%go^n4?p<@dB@ z2**+SX{t`09rCLj1phQ$gn=CQIPDVwZMN-uw&osJ<8w;7p3mQs{}+BqF5>G;u;$tX z9Nrs=7Z1hbwTHzXipB8e7DjDRP(J0O;|1_YQNoaZqu(F`ap$;FcBWIILD>3CO#)}uYK9Rt||7%3dVXwiHu z6w$r3R>ZadbdfmS^9n}Lk=%}11g8>klY+B7xf8wFbJf9SZI*Gcn+2ea+ERs6;Hk*$>8G|!N!f^Yp zWQ3NMB8=m&IAT_fK{0vu>_zkN`;SKPFW;=iA3yBDZ%;-x&i^h_iwT1pTk zdBk+*6nqemVp==RZ3fUT%R>7=3x4;0E51I}f{&ihHlqH?D4cqXGvJ0GqP14a2y?B+ z^m!aYbe`?pa)D*DTRw-eoV!}E*jI_WZw|u4yThb>3dGTS&Gxoy0+AG7iD>y|Y&B#_ z*#2+(+Zs3*V>uds37bfpF$T0H+N2U>POCw2XeSyHhqWczSot!oy;`Worr;}-fOSyP zn$;q*7oaXd3q*2TKDx)V@$v%!IQfu>=R?7G?O*_o z-#=4_OxZ}I$+L-sbCM_N@ze>fd z)Hzt$zge7>C0N@qBwXHuRV6K0R#b~61yxv>SB80Mg=h%QM8dC3xx{O=*4`;gJmx%u z2F``+*`h0ZD#tKkZ7zdog0IW+1oKs>jPAzl)M3mmT!?Y`RutP~;#ApjPT6&vL30M! zdgc2Q?=4ZaXpU(V+&7zAYr*_L-aH)#kz>I$h;ydA&ekd7SaSy7oGAs!6TD}7#zr%k zH@FueF3&}tF%F;0>wI2|ac{G@wix&3`U@$5mPXw!M#q{CH7!w$72R=yeRZ%E<0e13 z`Q~ghfpF_sTO+>NTFvTq8#FT&Wa407Vw~(42k$H&*m`ZYI9F>L*I<6(u<&~W8iI<@ zm)eR|P3ti$sTb`jJ=ofND>ezouWq}}v`&#c!WN73D_3aCb9NC%iw~l1v?W4JP@3{iJ|3a}b9cMTZryq~T!FvO6=beFg`2fp7fx_lK?0R_#K0eur*AExq$$O)5|E-fz(4LQ=t20e&c0J#G3(tR1bM7U0 zv}^KijO69QzzS504^$CYfJGJauzTq)>|1{Td)FPnol9=V_5EwHp=lV4id)eUSB&&4 zQ;m}?oaNp1%ez$?xtwN#Xv8`z(8L!MtW%jjG$Z{)bt&gjAH_+}kLaaKZ`R-AUG_sVyjhoOS`!t=vu5q_^H zzpIm7uNuB0r~;)kONHO_wD%J8l&$(5*rtBhif_TEH-;x_cWOCfCOF1cr-q+3Kt`WP(oB~**(ZL*RJ&8dYoLIY?WKPP#K+XVG)~(bYB%N`M=!tK@lCp8}<2IqhzedNnUf0PW+MTN5$I3*%Gf2*$ zOf4(W*Pv>6C3fsC)J7a99}L9W9r8T+ISBtg1G=I~V=FPdb3Xp^^-}!xtM&NeR2$B` zl#lnGNx~bCMI&uYcw7FL28SjWV&R=Fc<;@2{PuJ+K6xoy$0C3Ganbz`2a69d6MJu; zg4)(3_+OrPP9U5{4Ul$K7KTguvA$&#TW0lR^O|M&{7)Q`A~qvr=TJ+2VWk*fU!^@he*brfIZ{3{y1Cubo>OW3M>DaM7Fm{Dg0`k2 zjFVXq-=^}~VD@YrdrXhJ`{sOZPsBOP-M{-7Xg4jr770I&(3&ffZoGhtmcR(N0EJ*B zfofnzG8O|fmTbL2Yu3hG;-%B=9WZl%OTY?HSwmu*i4&}L92FYHc1>8F2F%zBty2o& z*%1{O&g#bMmdzrrIt9F=SS!TK*}K@!n$xGNgI!@}(H<^UYLbtWFVZ3Z5`%?}UloQ4}W$D^ZZ6w4|G zu(o*-Hn%Osn%aIWEU3d^s)+88Or&1Q+C{!EFHrI~_0r-_pM|#MPHh@W z$9i7z0s;9DngpDzwQh)P5V75W+E8(@g@8+D6eEq}UWm}PT|cyiih$7&FoDJ3&iTP< zfoP8NTzkv~CI8!nGKl~KXwxqWki6{XAsuAWDR31BQW&$@G>roTo3ROvc|#r9 z)?%3Nm4r*P+d{k!`SN|c4mH0;BFg0(aY6wORymwEgn_h9j1|noy4l;%64#B6lwPcA z-G(iFcVcb3;9Fax7;G;PN18R(g9Qsv;9o7z$>0wIXi|pAH=*d}SX~hvO}KYwFkaX< z6EEKvh*u8;X=}7MAB`44%@!#!BBG;l>X9hC^+<#^t=_jk3dNlnB9aA1#FT?IVlw*j zWX!*R0Dt&+7{7n76Q8}3FT|Lxv+utDR07UC5srZyvk?^{1-9VM2D`j*%=-lYI2pyc z#n}H;H9mg12&W#8lV^$q$7_52P#6yH2}F2Bjf*djdEHZ=jmK#Qd&bQ7^)_}r441qc zP*Q`s#R=H7aT-={3`EBKLWCyOAi^o1DYQ&^F6OzHy{{=_SQau+9w$z9%H`SG~;RO-c0VODzPKP>A@?NUsNmD~=nroY2ml=DL zfj%+Ge>zaE2g8P-4M|TF;3nPN$Ksr6?ApbdWxx9YG9TEt=Vx=e zYu;somJ7=1hR>~0e5=X*jC13~>w89UIq8 zLqc~JA}`I6JTLD>gjRfL1@gvAaQAKgIJz$o#|{MJl?Q?an?X47P!QgDI7IElL!o&2 zfiOINj~@nCC17S!DI&gQwm$E&zztY(|J-dJ_q0lpcW+yqy2gFEpQI;9-W(|##`;-{ zu(G-v%Sz>)7T0OeY%Hq~^D_!Cl9rEw#9TB42{tYkeZc#A?!~-d&ABh1g_Q=fP0pu& zA`XZZj%7=Xq_Ru*%zD(s^r16zKIRosbT1S~X|c9MW8mCO7PJP7Wz81Ez2a=OB~oY$ zugA}p?RF8|98YZsgCe-+ip^pWExt`KFMKY%UK>`6D*49cGfR*q&W#4ptX7=hRGR>w z!Sh7Cd)wKZ&cX35{%JuMMO-iev<%Vi$;3JJyW?E5l8iwwi*aicFzs=^sPTEetI>^K z60VkgYH{`=v7AqS_wfC%nI~92vZ9pVM9KhOFTb5LIS)hm!&u$879+yz{i(AtklL!_ zp|N{5$IE4&SU-n7N!ZP6y*k$nnsYYJg@XNsHS16p)2;zxbs`;X7@SBx6|4nL&qw=c zneygqhhp)@BXM}^@kE?@l9qr|PsA(7vjzHd_xPi8Lke!baR#2>7l0FoB6JLR#Nt_8{(9yc5^6QC z#vt1A`nB5flwEQ;u>iYocO=cmti%q{@51$|J(wf>-j_#yU!WD@%}H}mAKipn;dsu% 
zP5oZ(SA@LD;^Z@c=3t$CVlk>5uVy)$=`Mzi7I0gSTP}BxoBazZfEFcqjv$tqU;YNB zRl5rYttpNLm+al;(eMtIS&k}}m9xdJhd+r zQMonp-^D){yo7hQ~-Y9pjDfdVXmi*3=JRbLRkVJAE_$H_o8>Sr2}9ssf+Alp$qBf@pSDb_Zjh z=!uH1JcLZiF#$BmyF=pzc>VDde0Z!JAH9&J6>+B@4#J~%2cdPo;P7%ME+F5~2FF?_%tSg^>7O{d&b-7n`@)Bh~r+dGKn<^^+H;{a-|uWtFAJ;vyxvm!k83KK-5 zhR+XZLPbZe$RPqZ7RNeTSBYyImbt9PwB#( z%(>_njC0I1&gR{mAR0cpMKrwNoHOwTSEJJ31j#wf7dgjhc!N*N=Q;`Hr4uLfa?R|U zigWLDZ@e4h1Tbp`H#%tg_T~R!04)+}zfMGwkZ_^^B%Xl#c^tGqwF6QQSoP%-sK&Ha ziWtQ)9c~rVLgF-!pe9Has08IiW0z3lWSc*2#WsyicZY>|*EES}7STO7t4Eyd4y>pX zAzi&r#8sQtOmFGE9UHrD!?MOr8ayNUaxwu9K0IIco8spP0jD8SJ`hPm#}INdkdlR@ zrTCX4rL70KYX?xYy&oBidy!V%hLq?kB+V#5{FPaV7O@Z^&&3R{V-9I+G0FIsWFUNU z0YW)3L0CCLf=dxTr4V5pK-6h&ZpH8f(JY0bheT}v%G*5W8)8)uIcsCQhxsl9N1TvN zM&2ysl(r*1x)M<$YV;WICC?i9vt$HBl_9#b7;{#HVDtJbas9@tuxOn>3g+Z#f9A2G zPOPk*i>>X;u)b*?mX|eSL9RF|X@%&H%|*q`bfjHDq2vHmK&=k^Sph#DWAKdM)H+le zxDuhCKB)l3{#9s&NT^`GkccrGX}y559L3^X7fj7VrhGRx3AJ4a&*vz6+eGjgFp_*t zEtfYq%{B@BLJ6Q9e&A_Ly2#&yWyP~M@lr&qLoC9|5iEvmSu0@1mx;TWBF5A>o3$8; zSB>AB-#pzS)eAj^*NmrxU~y@!pXH?GpO-V)6PwMEMm@#1)@G&5*6G&oTGg+H%5^#&SS@ zm$QF{UMT|N*O_L#mLJ4O9+3YgWw#+=bB$ZhcpL9!2$Z4q5-_)QroMVJ?#+ml?LC=&=nxWaPwvn)9qR6^Aq*6Z zVW?y=2J%Owyl6tT2(~g2C;3v=FNL zVo-n8`NBj@sjWmvNU0H-%-iyP!=?NSmv_Lf zBN3662yCuF$>MA*-WrA-H%`UA9g}h3wkf#(w#m5d=Ba31BlxbYKg&U(I#-ebL|9^Z)Ubdw@Ka}DelM0x_MYo(1?-D5)7pkU~Y0Q29vTekdTG$ zm@Kq}rXzn6oo3$8%?s}D+TU?_-T8f!N2u{Q(UagqaV5^iX#wv86d%IkEiDpINDn zqB@+KC&{NK-v<986V;`e*n9U(921B7l>-rY?ZHUAekf9hDtN=O;|D|W^ezt15{AIa z8r-nmA4l&E!0|)E{g1`sj};V7_XBKu~qupPsEt9LLZHi^MY~hmN?85ZEc-z z)&IhS$AJnJmfblQpf{^uo$5BhIrV!- zTCYwfpbob3`xu&IT2LEWjS6w9i^QpB@GO5y4zjP#(jXfB3CqQ{Wx3tg0mD6<{%z)B zp4XD!11<`eGW~uBQQzGqdq$y7(04YT4W><{mgQ~5GFzV#%fzv2byK0`t>Kl!9ef-8 zZh~gq<~h_rif_)kE$^0T*1wPWMLbeDp+M>!Y;4~y-(Jf0LiwKJ&n+xph;_5Jp(?al z%KCX&-*KaMpr!MFwgkMN8y#i#DByrPae&UOkAl^}SeA(}vBf zR^rcJkK&)c9K`S6X~37S6=?U-Ge_AHUCNe6!twI`q1d@61m&d}+Q260m)U4(OvS!E z!FcIll(t@ddEX2?zGoU1E)PQROwlLgTftih%Q0e4z9YY_#tF1hjd@0zq7yV8R zPd}Zy(OYj@e6tK9zI9f7FAeXSi)q9AW?Qf2{hWKUVsy>m-?;_ab&Ib-%nu@jfRnTa zi!*nyg~|l-)k4)9tU|y7QUg>gj=f2j;Itqm0khUhz-i43q^1VT0jazQdmpXQmirdY zC%U9H_D%9JP2g-%-8yZt!ammw zw3LN3DQQ>&@t9<+`1mF)-P4W}FL&VMH~aC~sXl!AW;;H3y&f;VRE>krx8wT5L+HMH z47tNYNQkc1pquH&YRqQ%^O*&Kcks;AAh_?@rn`3bHRmxe=Q=*10`qtG;H}qN@WrWm z9Dl6@?Kid~F|JhqC(qaCak`{NN+ye(QH02#GDL|mjgs${d}$Ke6Kb%iv>mH!2eG_D z9Glz?M7^SZz!>ATOy|HpSrl2%7n3Tg&KnX%b)G;?w> z$^to%aEqClGjB)=(s6Bx#_vqAy|_*sri$el65+)z9(1lbMp8#|rx0)ty2Qcm&mR}T zJ&rlDUlUY?a=&5}N&%U3wQ;Tsr-|U6l%v1R8U{X#70JfIHv2|sTQHjZX+deyths5t z2Lp3iQV?7S%aI3;VlrExdEwX-2=^vli(|#FAy&R8ciS5G-864()6|I;v#ozD0Tb8O z3Z~^_Mg2+=m*`Dln~Q-_(YOtmX#-6j>-tf?L@mGVFeW{!(g z7t^U-g>z?=q9<>z;9m;(nlUUaY!C;s09i$K=w2R!y|?<|nY(A;#r+%`O~muTAiVxS zh#DswIB_rlTW^eqUr`CdJzY2*aZ9|(J4)*6z^P|)@y?M%r|T!p2=3F5MTuDVNA0X4 z1W19;X2YI)2+ox(6|=H0&OpR3M8JxZ9@bKi*x?Gq43#0UO3Ih%MS@pi)}AlMYxP-p zFJt!JH19In7|R&fEH=+6pX7J6OprXlJYlc%#K@F=7}oLZIA|Z|MhJ3v-Z1Sl+hsX3hM~Is| zPa$c}VSp_{%DV4*0PXV+KS$K}!;wOVS1=aMR)t>qfObwrm{-8eaJnf2yv^{sV;VT4 z@HV)oBdzaJ_pMWH@b8UzR7@-m6 zh?tx&4!If9LEnkwl!rNv>wIl#*vmk3lSpX!X=+aT%C(ZfAVID ztcwvjDOX#3+uySlU&L#Oyk8xl--A(N${SZ_@1K><^Lf3;glh$#w(ACid}^}I$uK%o zS};~P8~sUz=!(rkXH2>r=vL}4dv50fV^p$M2vfv58l6Rp49~CO>3Bmn2-9o ze0So3-B?EEJ2Bf)ALXyxMv`Z&Kt!sXNNSnCXT&1-C*7X z%O?tNgJ*Z&)MT4-fPuAu3#bKJoZy?S$Ij91`i)(jn^tC)=LOSV7`NPRb-SqmJIj>c z4c5(mvThUSeAkL~DHl8eG@M#tku@Z44#daEz(xX9T`K~+R+Uz8GL9rN^yq$XGj?^QiLb&nvSRU1mT5! 
z;<(-yA{H!MAB30o2a6LOh(~XoiTYK^h`KZxGvli;vL*}<-x**6ZwJG1+!J_`n||#_Q~0b@lwA+G2>{HH%ysh zG3+?W4wk(&yiK2Yj^TIDIV?NG{24&I<+~q1%bHQB`Od|((Q9t&(X((Z7&ZFEVAs{~ zcHhzLG>6BHCgQvD;&A7k+4shvdMq2rXb>$yeDosu?n|+%Ws8*iV;Ia8U#@UIR>`+# z3$udhM7&O5WgjTaAVDkQ`HwP z@y|0kIL+BQACJ}v!CrkJ410FWL`1S^XVJ*auMGHRFDl2O-H~|tKp0*YeSd7m)F6q(~)P@ydAhQ9BN;@$Qq+ILT)c-=S?hAO15%6I>L9z~Ew34d>(f zn8>eFB4N>UYDmrz+EF^VyBe{GwV&&bDSBZ7AcJ;Z@5-jHoAA5{Cz`?UIH|<-E zobLSZvo(CI;Dj8RdU+<&rJT)~UW20GcH#3rDWB)bH|2Qc<2qhBTa_;)uX{B7I{BW< zF|TNWaDTt>I4i+BHDKPJ)~6c2FK-Op!tD*=^{619i_dHVS`~M5NaBg3)4Z#H*ORI%+vz%-s?gJ=}oY&za81@2lA+Y2gIVs4%gaNax- z+sm=Kd5d=C?8%&m4V^;3eRpC_+l?C8<~RywAkxJck#$%0DAFbsDJjrw=8ODQDw?-e z-QEQJuP2;&o~P<;HUQ>fjeMTz)E_?l_}^;t;Se>14c3{bRffl#2oaC^~UF*D<(^) z)GeAy;mW4rMrgawXYNfSKsAUaWX)D!Y^{(W8JU?omA>>6mPSPSV7PQ)xOTgKg+O3i8_S9Y#+jO#9VrHW{pd94_!eVixNw!?GrApb% zVw6gHq3ow9IL+_7;H2PY5SO5}=|+TGV7liSqNWkZ+&2NVd%%UrZ@W%wuoxttFJf(8F`EXDW1f)ji0tpVlpky}Y=T%}z@_*G#dDbh^FtfM{D>en<$-5@w`FmJn?JwdwP%KD zL=)p6JK{Ro!tAeZL+pok5 z!T7v_aSVy*UQo6`Cl(ngS&rK9S?XX@aI-t-;<^pW18i|_IekbRx7i6TD4UU?W4qCb z@XpugH_LnSUL@bf%`8WJ(Hx|Wj-qSN5*$4_ia-B$HU9YNsAzyjoO!VTM;{7DL1z_0 zL~QFo=I7jVqEVd2OwLDoaUar(x}>})HJBlO6~h+SraZ7X^Hi3}Z$w|2hq%mE#C3Ed zX?`CvmJA|$=^(Ne4Dn`K4@qx6X*QsZJYEUT=+W(^~2Z z!91~ED4bq2m7HE^I9<5CSdMW#GzM-=p7S0r#RzH}MB{OrezxhgPU}^|34i)X}*s)&iRcE#wP~OEY58ZT=ltwY4ZLATs!I1^SUn>*Pt0qHIu=) z<$B{_li#g_9ghXcU*HdLaM6}EM*X}95J;3N&SVm@rEEd-w)5-8-!z`175s8Se)<>ymBzi z1lAr1!uGB4@Mp``i%jb?Ek}g+k_wt|_@NxU_Eb6Ee!dDHzFZ}KLJdAWUWt#672y46 zGx63FiFn~aBp%*14Ry`A2)rUkeqYK9=Res0V~n^n{7#N7k+O2v@;$g|a2+<*w_;6c zF;*4jVQykNdXlQKx?vbAE7~xgQ-+bW0*s^=V_sSjD*ZXG5cRorqJ^tf@0lJ3#*%ey z18DqxgLCuS3`&^cZ}`nKkTL@yLmZGPCdd!TKM0_u<7Jjdk{R*~3 z*Xh{A{W+t;=~DI=jbUx; zRvFNx_|({pS9n^Ji4mXHJV7!qbI&!c z)Jy!y5=YqcD(+C2@y-}_%MiNJ=@EtqyukcWUL)>;{X zZM$`tKGcrDo&Zo90qI7&E%_QFP#v(+`28`0)8GS!e4Y-)COP*8LBj zNx_p3#iDyC6`{Y%l?+KyYTn=Z%mauYMIllIKtg;g660qhiruKxA+^^Vu(&}ulhL{D z37FZ;ulF@Xm(e?lPd{3SfB9-5{_(S3{PEpJ{N}YneEeK8-aj0Ijuj;c_RrP_(rfg?oR#-Z!{Zo;36=%i~V5-n2FzLJ)$3z2l zGNqHU;xy}cX!6Z;Of(%kX{FX!b8NeL1*0O& z2XzdzjN znDMw5>3T4)xVCsU;|cIv8}c=#?^xcWb*j^@$Pn?hPzCl-(Gn?uhcGv1E>?1iY$4j3 zh!*tb%)_>U+jT63WsRG)R+@e9X~QDSh6=~glRGTu^+>+y$9QoY#`0=0Dxz|3T0ZIm zQ$@@>Q}%}zqkb$IcilP@&+M9kXZFm*(cRphhD|p{Aig$72t3KbnS)8ip@Tgs6XSB~ zaMxXdc=d7-*&AI$1uRIJO%TyNQBwSh7IV-7}2hr+xo<*)-g_R6pj~1 znw(y=0P`Hjx=*w~?$jd5d;E_EAIzgRfAbwMAA9mJTcZ6~{(odG3OCHfp{Hlzy^{m@ z&6y?m!`5yy{(WBncRNDWOv@DI)X&u7Yc--|+YLi5y!l)u-g&789~`g8 zN3S*D(>I#%o6~Lh-FqGQ-TPwi&c<&~x8Tb+<=AW0c2Fw#XJ+02z+LW00aJrNOmBQ!B>4JCS zzC_NUJ~VmGV4E2BuC+E?Z}IIdx(%JLgKvXzqrGT`=N;{? 
zd~dMpG1WJQ?+vzzdxQOW+ig>EZp>DEE6y)B#&^iyAkNuBh28Hsfc&P8YsJypAUrUJ zxx&?)fp<;o7Qy;3DuQdVsp|%9g|>Fi&6v*?=7M!P*yMUvhI5jNLjP*{pQbCPt(3A@ z()=xDSyXZfa+hXd$F-N^p6e%xO~wt|u0-=H!G57Q-IpYp>mA&(0@|;n3X9hT;NUHj z@aP>g@#JoQJau;fj@}c5WB1I&mYe1MRuzjbFgn-#9=ngqyDY53gS*0U^!{YL{75EF z9L~Z!&*b2JXB@S6pGp)5J07ncip2AKXQFPX0DjX&V>sHtK0md+_daLrWKWlx=tkVJ zIgT}D)mWaNk0m)d7)(e<`mYmEGBXct(Rt{K%|Ty$KKc_2v{i2A<<#ebWiKtj z02;Ax%;XOm^NU*+2=>*vrjNu5GFI}L-a8yCFmGCkV7yrz>wdJRjiV!H3HljCD_Uf* z?(n*qTtN7}K^$xkzc19`7&r!6OYCga1XU<^mkOt|VysxPPRF`HE6AwfiSu&R@Fs|s zE_kPdZ28>fY>Rb^Wy9+R??%rvZ^e+Q~Vr$BgM3s!8E&63a>gGZ7}U% zc0xC6gKJ`uy4=OLHMv&(c!O^j*VOMW#;x%jmL1ga>@CJg+vbYzw!Zy(tpMv1|7>2~ zFqTL@Skp-c{8@p-Hj#nn+4PK%cUS%!sqm0}fGkXx#~uKMD$(H5J9_8nb18OdkIIk`wNK7XYc zpT3lf51&caNrc`z5{LH=$KZ{J!tl^NQ_<3xg20Qj1iz`6-P3_rzu1ib>%ZQF|3UlL z8}RRc-GIM*K7=n%x8m(%l{orvu9Pn^2rg}Kw2Hx@?b=B*f$n6*qZ<%E*SrI46Vr^Z zuJ_iM1AoJCJj+w%bip?(#eEDvsb-B}> zQ#3rg<+3|BE6(Vz&B+|laS!XnN3T*oXMo)J=;U>_EHxM}(Lgy{pIVG_TxFBj48~o2 zi|Mw@=a#dHWi1P|GE7YIFUQ2l^TDF6R`&fb3ZSuzhquu7v{12NwIbUgU2BGH4In*T zF~tlSTX1@rg47mbX3dvk$=5pD7Mv>Xg@`#`*4ox>;%IF`U-qchVl60LAZc4LJ9z+2 zv7I^w8fWEPH|G`|`;5fPE?%4shGg8AJA#s+I)@lcriq?Zfa`CM!|P9^;?$9JoECfg zNE+TgoTQ}3u37IMNx=J0C*y->((wK>99T47027JpZcW4V+zNzUWB_S1z9pdvX#(2F z85LNzdp_QMZyCP$XdHW=?L*1@4ulDLaDY3WOXBi&)+ZPX3a7AOv(?ZaB_O?eF5W$} z2><2lW%!5B`Zaj=)$7Ihhprh z$tazcBAGuG)&6Oy^h-nWlvHF~E(Nya33b*9W*VJ(3IM&2+|!!mD;_t(+ii`;U<^UZ zf!<4lYET=|jCKKAU#1kiLa^-4$rdYY3ck3;8PBeKnL1At+QzvqbDV1vJfmaHKpM%G zq+CgG3NY22mS`MwT)`;-V_svwYLgdTf|v+SE5q1Pd#`P zGX(2aOiyfW=9rK!Ym}^6!u1fZL&WFe+I=_fyA<-}Hm|3hMZ(BbudJN}O zVevjN59SGJqtVCja>Y2%x?H8Kztm=;3tNsrKS?#lOqx-n>pcdk4-9w>$e5 z?{dD2eVRGP8|UP6wg^n&H;{dn4g5X+E?Y(vOe@0tvPD=s`&x0nmI~$vF$10XQ{B>(z_Ge26jpQSTA^Is}y$KX15O$Oyd}*oF$U^RJmR9xXHhU;~D5tuKJ6jD@y%q(UmcPP2IOB$8)SC)AEdtwK&-vTd8OqZD|8K z5tYTB!4jQJSv&bOge|Twlm8l)kEpa#M2p3R6(H)0WCRO88J;!R^Wd2IKB@?*1Nm6A z)eraFcm)pLJQaI)T#ddBa=f$*VUou=W~lj1=N^*(qx^DFy*?X@{jqrM zk$AlIWFk%6l55*7ORd zS+m%;X7O$@Zp>(WV*Nr0pykTDpuRd=yDNV?xK?aC+TCNK**zMZKDvW-#k6z2H9f|6 z>WypXI4*ld^kdqv*Jr4VaTUfd0&W4Cl?on&wTy(@T*%wFJu>Hehql zPVLUg2|_rtHd`WVuv~1gkU_LAlm`|gRlaX&&H{k8(it+3N@z{1lAmTzw z^x3p5aLNGUmffgh#n)tjQ1!mxn&}>Fd+pn8YZm(~3v@^r2j2$c! zf-8Qr(!hGuMRlS%p&xB2gBZvaK9{oC_~&L!G{v~+Gj_k9U%ni@+4ImC*(7`}KDuzU z`sl>?48gcqIUBglEL0t@ysq5NV7d77qTlnU7N~AdQ2a8_c?Z;3VP-I{8s9m`*!gfz z?VhgX?W+Fu4w}Aw`OgQ?=5NKci-Nrf*sYm9*U@&#*8?^&7htLK(p``m;`K77DL~~M z0iOw$8IYb!#Ilz2_~-fC6!awERn6NlSg;VinS+>{J&0wsE5!NQfSQOlw5Rl8ix4nt zu~yB#PQb}N*2{D%R)UnR&^j|mka9)7g3ORd3Q`u<_~ZMn zBBo36*|7|K_;eiJc_JKdKN5<2_XnaPFG-y76uFQ4oO~{IlAV2;Eo{Pm%%;Mzh!KKj z;347S1X**c-B zcSB4E8e`kAq4Nf8?zEdY5K*@qaga&Ycev-HRmYhGq1jlN;4~^x6tStt0o7Xw#X<(Yyd6|6< z#h%9uS$d2_YK>U6rfKFHb4=Er<0kl^Y4$w+rasTv*f#G$9cd+B<52TDatSe#Jw*iX zOcO+FPUsOQiQ^-&RSJV>bf$%T#hGT8F19>u3~xZSf2jaILBYQ1x+RXW35*x016@JF z75p-ARwGs`_^t_(b6JBWc&`vaUKLo1DoHC5tkbFHO;{nKdx7AWGd1;R_F=qu6f2w7i1$&KDnHANik+8HZ2pM+pd;UW}@d**JW-79X6Nhp*3! 
z;dk%#69SqW61I z2oEjA^4%-&-~YN4fBJ9^K7WNZ)p0m-AQ~YR^%`U|*tOR>I3~s~ibil@3m$&5AOG`7t>4 zNVquhp@{FO7oEU+yU)q8#s<)|)roSs>2}Ive1Eo#VEMqZCfI|0*HO`yDGuiKor|%; zmBS=tPZGQa)uKAG4YQJZ(Wled+rYWO{DMkO2D3(6rF5qBpf03Fc)Qea`c#M0MMx9( zrnRZ?xgT*)P8X9kg*iE$_%D;=S(CCoc-6e7fpgb1-Ggh(>vY16BX0JMab~^?C4k13 zm!80x#rQAJ$MJ5f6rw2bz?WjKHP~#bsJm_U%mpP6)Q1M&a2_9oa~aL zm0ZM+!+*Lh`J8!Gjs;5^`J6$sU^z#5i(SJsAG?^Kne*lPOA-(#dLTZt7;(krNK7wA z^o$JE3NG)-+Ur_vkscxWb8Q9=-t3RZc7@~F{gHU}!5Ex)GzM=y7AqEmlZPU)Z?8Wp z$|QgNTpT*>PR4t=oKD;kQ|7<$yC$J3q6V8g7Gq6SGnV8QVnJpeM$>XIJ1iA3`ds!u z5y!?n`i1v$%yPUPjz`B)bjnI{h5jE$Z&=fcZz)G@5RI58-kIOBrwIQ>&O%FKA7-bG zU{3Y|3>2_6k(pRPTbS$Q0_>{0TE62_w2FqS4X70T&RMjbR%ec5Z7av}n4Zc}M1YvrWIv8{gdK`^~s2 z7hP%HI$rs#*xBfeZ^l4+7v>j?2!~&bI^lzQ$uA6|b4=rvv#!?}wW;ApMY~hOk5#TQ z8a}fFa}rBY?4OO8$*D+>%0)&@9^$5^2zH2Nljkj#mA@1})U5_P-siaIy4a8XWjqQ+ zzt4`$#XwR4hEfYKmt%b==Att?3srup$h4RH!9eWaH4Tk5g0+%a*!Dmd{>vww_@^&N@UP!26JLED{^fT|@y9b= z_}%FyeDOvdK76qduRj`(C+`VCcw0R}e__iqu7z8r3`h{ossS`&SpTEJveo(L!?`uj zF_w|k87vzV-)5~Cclca6on;U)F69Dol{c+S_`DvqQ5|TC@53C?@C>4H;s70@!D&^# zsD_h5tVDZKxA1wDax^*IR)lHK5w<#09WUjj#W?v~bUQ1>l+R^dDS5p}w$rYne$P}5 zZ}OZq^0}kqo#7W~-hRB{d0Pf#AoK?u1MTk|K-)OB4Kp|fnj@-|fJI;%k_qX8ppO4`mKAVT%ood71dx9|GY4P4S*_yw>DAj03~<{Oy~Km z$gswA&u`JBKw!%iifz`4@|$OJs%*~k6H<%j*jZW#59Z9(Zn`>liU71XYc851ngn>| z3Nme(qM#I!EzY(T*$kLfixHGH0zw5PYpMmzY${4Jt`uVsEkoWnTfwnKni(_61)MRH z_gn&I{xra};AE?mTzQ@gVL5u}8Tbp ztATNwE;bS4=3T1bmUU@Bxz<=NC|`yFaisf&d^th&0}5o0Sw*=JYIzO)+5O)Ex!asfJHvL%n0=jUv^rW-fOL@-O?EP0eQJ?b12 zvr4cQ53Xt6B@uC(c+or_hoD~y@k~lYMtC~XMbw5}m87-cdR*@1nz@e+im7Ru`+H~O zM=?a>vALzE44dz*|b8GYEax;R2!=i?AC~gt`)+i zab`YkokH9Pm!dkz1kkEPWS0u|+48|Q2lvLdb(F19Oe?lE&pIaODbDqr3#$d%^ov5Y zjCH$ddviK#ti6I~ifwPqySVq595=jfGypl>T;mwe7fh?b=Gt!a9a^lbnSCug63~*dC)k0jxj%9nuq$PG8{ORjkk~0;-lAF@zL=*d~hru?>(I?oF0o)j|;~? z#xV_9i54pSABZ>Zn~s}z`6IKa7@_k1=$yO3CI`Pd|QJO8(jdp%}V01JReW(#(BcmV4BC z*65IFz+CYzrh35OUxR}3Y!d=#vTZQ$h;0Y&Oyj#|9%E%lo?yNrxE_tMooG+($3V_J zBe+XU*AI@D#4g>fF|&lH+3L)zwW+M_unU&cYiPpZ#C(PDImbt;kTk>T2JZ}_phvhSk<^5T`67YOYKkxd$eF4*0pWd@lLBk z>SX&`Z0^5P9PAs^!KRH@v$djF^%@L_7AWwq)qJXfEGM7pwimWESjO4%b#3ymCm%cK zOu!=TJRBzBQanBfj#)30+*-g(ZcRSdf*EvGhC) zCFi1idJ-akYOkO8|MXdT59%Db$N3y<#jcm>`TVcWGjZPp+nh=WCy1tHth^WYzMykm z8#P;d0d;1MqAO>C_+sqJxmdWJt<0BUOgO$fqZbX~wdz!BS1#dp^1BI=7dRDQj(!(@ zC(qXkkFy-$Btg{i)b5tw8AM|@?%XN)NE83T_9Rg*SB#r&i*Xv$G~YTnw-$VX1keiP zf3r0wE4#dLZLsa_UmL8lwYb4I8@4db8`H!x&B4A0!;U!^w)nOfH=Isf8|*8_bzjcC zJOf33rIJ@UE|%alQ_6k8_fU4f4#%=cu$wofR3{qPFy|I*>AORN7M}6OqRHbXpBsKN9H#kLFgXDWnx7|HglP2iE0Rzbn2D~~ zJPn-n#ATs9GE=xFRl7iQT-BH!}ae{Z{zv{-#jWYm=?RiKW-xBXKJ)DByRq*@-hq}k zUI8>}rJpiLZZK~6JOyc2=AzKA0##wnXo&67fO&W3Je_TOq-YG|)bYaUI?HBclW;X# zojH8&;M`!`={+JiH$Hl?ynDg2lnI4fJrb4@LYfrT;2e6w!PBM0jYw| z1u4@!AXRW0kg7Q^fx@gOqYumL*9+;+R|k7AqZ^AW7Gp)@Mzo07Zit?ZRn1#PZ12Ll z&YPX!nSirqgMidbgFjfbL zuihV|GxWXja5&z4EDCRloqQ-5`|k-r!(b``uB=AT^hyLq)*&Fd5dj%uF|`O1atZ!X zss_^{F3ZRMhpX_#sYZPMYN7U@epi5a`teA-^GFaD-ymW)QONasCR0x|2lPLX%r0b+ zwRixpo)D4#=MDI`->=4>&vfH=Z&l;VSMx=%XX1mSiFk)yPaY1#JBR$ywV4!ohj={%EXJS%-?J6j2E-z^1)wqF z+?oE?8OWKGi-PGQhQk`gVQNQDdausN*e8YXtk_l^NW6l9vuPCBBCw|m;25nIF>RZG zs?b&k+Zai?aB99rvt8m90j}GIfGO%Yb_?^Ht&ut#n5J(v#U=Bi0i`u!B6r3G5+6%iRQ$w;W$lUkkAzeLA?3*CO;P3=;l%`UwQ%a@@=1$G0L)~!=#dVSXp!87~bZ%|t(PB?8?9PF-)F7&39 zC;>NyWFqrQ1`))Bh~h8d^zJE25Sk|)oY@+z2@a)Vg6Ftqamizf5h3*89~j{s{3AKe zKpZi^V`6%a#|$wVv&V=X*%za;Y8=+H?3`=OGnh5;OC!bvYg(gh#yl`VFt4=|sn_~! 
z({%gaU2L1@FvLsa|1(`Ujrdjq7E^I8`@}nIu#|wM%p$f+1FBFHF$--eedx&^L7zC) z?TPJZ2&*x|T8NeoG=pd5LcW!PZ?-toR%aB}VztD!V3(~>D8x&JlxqafwIO91K&uKa z6O5P1n%i=YT$42^NAseKX^U&qE!*JR$)6-%$&YMFqj^)(Y+#w^M#y*gt_RRQeg8Ae zyeL$;obTTT&RkA6=E1&VnC9Ty3*!d&*35Cs>*gF|rc%t}U*6^Ae4p;V!M%fhS({cr zynnis`E}szZ49C@aBiLI6^(1P0<1Kk7Mx9+tx~r3-z8jb0%zmG=?heNuTh?FOz4sS zldXAH^Q5+}kbEha3>2Q2GggV?kH+ETku02gIs0&)C6AYR`; z0|)ND3N>qUkdc#z$X_Q5)(l=P{;gR&23-`3yxc-iSCF3OY!g?kd=m}v@Pm!`%SW^EyEiKE=?keib2J_w9x;Oz(>{1ALDCYn zo9PEfV({)0ks{usJ;Ad#9*R<@{iZvDkuW{S!M5O;*p~NU$_JVm3ypy^<#T%u&Ej63 z%^Um5>H2;R-VNpr-nrHx8U!clSLTQ_RfY;FuNz}KFehyQgHEmWpzwNILObe1-InBv zdkSr>3@bLguGqIhb7K_Xtj(rFt$25My^5G8w+r42L?2{a#nvfol|uZU&FQA%%OkqQ zY_QjI)rAy5^90DL+Xd%mJJ{ZQ?{TiVEk>-vk(@LCC@C+s{uX!*4ZY<9LLrg-)Cn^ zHHK0PF_e^xxrsTL6P<;$O9hA2Af9WTXLR=Udg7R7as4+;&*ASnX0KO_JApG3Kx6qq z2ixqkYgGR9ax}!Wqa~>ivr^}xCx+s;6{&-=3C|-Lo6eope-*P4nV`G}rk29dg zS$jip{Jt3&T%L}}IgRM+&BW_RBJs&f1^Du09ln024PP+^aJ&?syj*}!Ud+P>&!yp= zBMEr*!Dy6iC`0(K+yI(2En6I%va;X!@q%41^A4n)jc>hHeHU9+@V^<~MzxZM&uswB z1ktDs=%aH|fs%k~RE0L9HmY66s_V*}t1Z!3Ueiafr;qONxnf*+STW8DZsBoj)q-&i zlF6F`Y*K0(((~Pf9J~y0BKRx4Wwk|(6Xhv-7IHt1A;|AZ%myXFk^EdT78#%2S z_&bk*wq@Zp+6Ua$OlzP_fXA8lT%k>K0O|nMn`9Mun$EO|$6RxOO29gxGfuPtq^1+p zH2w3EP64FH%*)_Q%AZ!K{cV>C0EhF2(J2lX11+r8I1(gN_B zi0kL>55hwSqHy1XnYev#DQ@0TgI#ykWB=Y}+;?9K4&2v>{dZSk=gm3jT2_FNxLQPA zQG^E`t;B}{Bu1`JJ`^HCSAfW_qGAX31)#nr89_hKkc@pEk|*I_k%{c_xp?li1^DN$ z*W=&+at;3Zw@dNOsRn#;T)_Bz8a{kFK_lWEx8==4p?KxN$!M9KgCKdAob|_3c&HhX zOi-$L_85=xcX|HAXVGI8yac8Km!LMJ>utQZDcBr=&1=-;dOF_%P@VaMg8ParWL?E6 zZOc#}*@9UKooFQ}Rb-3vEr8VSTw-McxQZF#TnqS=c-6TU;-#@Rn$ERTgI&eeD&kx# z0gI7*D;#hVsO55Q&SW!vt_E?=0jCwB1R=plP#Rz|kMh|sqySpbMd4D=u&@!Fidphi z0#=;5MqH;TsTwnBmT1psPT|Fxf0~0^VweF5Iy5O)n6*te@113adEfcdinPTcoBa=D z4WK8TW1*RuUj}mLE54gz+pwwYW^5g}Q@e65tl{`s8wKMO+N{M|r`@_bk{Zw)k&VKs z;#~YPPHXAoMDWF3k|EAUx(3l$tHW**Dhx>^>U7f?B(XSnki6<0IHS3FT-FvJDqbhz z#am>1%tMaccE&kV(Q3`4pJV#FypG0wCr!*?SxlbS2FxthCIryD%o44V?u29ZK91?{ z@)@k?H~-Og$2R6V)ZFL0EA}fw>XqIE>|$Fv zPjGF?R|#0n$-A;`0%(S$Ij+oiK7jVY=Q;?w`G2eclV_(1C%Z<>8MC#~c3;yRtlOZP zNq6~P*B(Bnnes51fit_!GC?_=+kBrqPTU*po4|Rxj%U-6+N}eMkCiS#Z(0}TBzNif z8l%MvFehUkDnr}mN(wR&>X(9GDIe(Y z8f?2|fQxgp%@$~xSq0efR0sa>Rt-LVDGwh$mx}kEW~=oW9DgVRbJk@cLdqTYS}&9P z2mdr3ORufR8^_D=@w16|S48tR;xMd8Gu zD7B|R@;C<#_c+wVG&!A^SL_?y*HUPEI9<+R z3l$aHhSSOA#;G=fyHYej;q*dniDo$6nhBgya2sAH?-@>?D7@1VBAWR93nqX@Cr2V`F?QO)Ua{M{z=9p)#y4=CFVBF$ZxAi=0&N-fUX|PVsK^?`)(piaZ z;$Ux4Z1<&iqbaIIClBESQr(%uC=j0D%*>m5cVc7DZCKi{SzD^8IUGM)xdwHyomwGg z_{iZzCy%=3z3<@c9Gl45-)_Qzx;FRp-sW*u!UbHOhV=LhWVaL|ceDtFqa`RV%|*o2 zGz9S)ElcD&53U@3mFu}}#yzDO2bV30^QY&aIXqX#Woir0(Ajlkel8p=Smf{8;F;xS zO;fFI@$6;9s<%m+Y6oJS&*wZ3w-w(~UQ9G`t{F^|V;mAggJ?6#Q6JqV-yBk1b5WQTkV|UI( z!M3a6EzZvkpm{i6@+Eyv(*o_gA3!S!te<(0vFLXO&=lVqC|{2u(eS{n6LtRihqJGAFZt}Lb z*`S%_EK?DF_A!G`YqqQi|7olaO~lq_*;giM;4GH7a*i9E#vy7_7Rp!V;^Er^aco~O zUVktgCm)H_A)a*ReFo5ki%&irisScB!_a6N{H7NnCNUSwZp_7dPekI~=d$tf%VqfJ zSib0j46$^4@GSKKTeT(N^@pO7Hs5sNbannjQ+%q)f93zx|ID?k;k^TB9*wUUm+R#J zc$?8lf@yCf#$|0Q!=u=Rl<|>C1t`@ZS|gg`I(2x4me_XGI~Cxp0JGgSO@Q3h@do3z zWhpDb*31=@wub2b=xw+fxTMO3E3#*p+j0-}u+>DkRcsq@&aYUF zhPWOPd*T#Hh6xi=3;KC7f@Y+mFgFura|_VFH3hfq^uwdOCyB_Oj2G^iga>!czXTAhZs* z*|#8c0tQ;x(9G|f0?8VW8=zX_bsiDmF>_xP)pE|nAe!?#a-8p;K{N`#3=zB~e&P^^ z)$5ojRd$j9f|4Rz1h?(dWkE?(P_mVom>Czvbm<}(>p~r&twhXLDlP$6b1e=$TcMEz zOn!IrnFpLkXqy^qE4C$HIwVZu$h$xSXwN+KtU3n7nZc<;z=BuBHSsC?2B#L=mVgZj z+wBQ>o?u?MX5V!N&19Q1BXYbuIyFqw8biKwEq5pe49@u-+LJp)P%pz+@j{H1m~MWY zKAGKqx-#Yp0oP(6e?GPk+=0#gcVKDbHg%?H#5M&sgXWwCuEZ}72^Z6`i&e*t!802` zji)mvMF>7!;aXs;va~(fX zxd1g`b*K-o7fe8 
z>8j1K&?xv!0L_>oU@NXku&%gP^U9ArX8Gqr7z6FI4?aiWMd8kW6MP9y4b~0L)x0>} z72U46_!iT$;ymdd6WpK8>0C>?@;Ob@T+vN4?^K>q%GeVBa>4gvEUR0Kf$X{H%bbfP zBDz^?&2i2+p#Uq!ILP=lLwm5Y^?KoSif+L@F)zke=H1!j$edgzdCxd_ninMxM*JiR z@u9isUYLqQcTd9ecTK^O+o#~k+a}|o9arI&8?Qv?#spLh=AoiUobn)XtbU%N!!ocH z%GrT&W2QV%e&qJ|Vh|l)h$Z{F@bw7_*ms?LW6IfbTq_#5_QtzhtM6L2 zy@O~KK{aTIYDPmugW$GYIoz~FF$0I2R%d79-k1i=$>}rQZrZgD=9SZBpPX)jXr_fZ z18DivoI!{k+@2jcH`rjhDGv;avTb7{>AhvS!_BBT-Z zQ7z(FZ^w3Vu-CNTs4dYJ)rkqW8@zA8V9`<(i8D?fV%{WvjP7)Jm}ySB$IjmWPnmN+ znp}T2=hDH}wLOQe(SMSHnewjurlw)$lyn6Bk`oj0Tw4ic?xpuT%iI8((caV{43cJD zm5PEXDJYmE*!q>E|0Gs%NL_BOwLvuV9QsVeF)>bDTN8X5j9Z+$Sl8?HUi?4&4F}_N zswbFfsY271XriYIW>la#wga=`+Qg}@)1DvXcRJP#pmA;VJ6oNJhIg>86=aT6&GQxO ztoRmelj9jc(_ong@1}B0yK%~W*aD4OQ+Zl(Zp`I(w+t{wTv(33aJq1ohE}RxHh43b zGlPt)t<29mk;J@#dIj ztZZ0^;rvn6?;MI}e(3_8rF9^0Ov>j{3>J(DpWlit;-fEbzD9KWCNcWx2IK5GMxEc2 zQjLz-ES(@jF)_hEb$n9E!!$SD^0htJrW;&YtXZrOOEe9Tlk1i5-Fv7nkX?=2ubYg+ zy932v4^_YYjYDC=y~4r5$K>dfhk|iv{|vM?v-`K;{^B&`H5B3cJ@LZj5jgQ!3Qj&2 zhc_RO#px&GaQbi@PCXGPX|Z_afk?#FH6TPVtkeM zpmI!Kgc$!kjoTz*Mz|@gS!*>`%D|a8*aA%2NbzDc#m+%(L@Um18J>)Sj0DtHq@uAk3sv1Y7+VvI zhwqEWThGaptua z{O-&G{OQwi{P|2jzIsc3^W`F(c{UC2acs87qH*ex2%Kay(}Vt)y*dw}{^AgvXT~3E z9%tK{g3je4% z$J&l_W88^xrnTmY+g@oC%}YRA z(v@w+xI?})_GPEQHt*R4#7V@OF2}iDJhK!dMf0(wayjPa4T~Ua#hm00!MxzRXd#;8 zx=@J5huKM;xigyVemaM zh^9pB**EuOdh+EtD4EHYVwGB3#lSd8S4~8-sjU)R%Q3brGvlC$h!)(sElN#y&3eIg zgE-M#6XUgF44Qe@U=>MOkUcpEY0Oi?Egsx@8F6i~ZM%6{n-DNu*$ zM0m$`S53ShO>r$46Rr=RmXET&3T(JF3Xk1A8L#e}ffJ$`PCP)t9)UL=5<3*GwcxLb zz5ZYXUK8R@rd>`iLxo&Z zB==;OZRRx%lsoy0rhFrOJu!gh9;aFH9dc3R1rtD{CbrlmMhU z>d%>n4V^b?tJCEz*E-{zt`*F)#W}V72DE1kBlYS+XF>sD$?~r3JDB1=oo0(O%VXxe z|EETdwfA#zVz}2y=l&06TgniQha5uFAiTMU{r&%xIln=!jT9$7`WIq9BM!~~rDJab z3ikP2(`Vu_#k4iU^`3ouzUMa$&JFfiF<|t%;@r_Mrc0;UCbzRyE437{ULRJgGicLE z*1(x!-Eh2}1c*5IV4b>L*5r0#oh?m??YdA?`K|o!Nn{E!EhQWI-7Rm|vvz0!hk{GjmiAu~=ohj}{jz*H>V6pxr#@#DWQ`1zsWc5)_4 z+H-L29T7NoZy=uC8-io^1>=-Dbq@D&-2Ldnlga8-fY|b ziMWDuZ`)WSOv>QZfA2{IwlBU8;c{aoN5{U6j3=5o&jl!(c-n0jsElq~plYO>r0W5t z16&8RsZt=ayNzivATNkb(UpMZdO5C9Zb@%X>d?UoId)1{S`S)co6(m&s53nEWsjmV zv>ELwz1Y-s3pR<^UefXtcFWfs5FYNWli~9-0U>w^Uh#PMUL{Vu1f_{`{K(0WMwGeU8 zq)e18@5XyC*5b3by7A5XefZ{`c6{-AH9meJ7atr=#yd~OX;%(*3uVi;mqmD2^b{lD zYG=kDF93R*`5gh7H7{Tqk?p;2g|;PObG-{l0@N7Od94P{9ARxu@9O|JNt~Sm5loC` zn^t5l`D%@n>@%Qtc2iJWYsK?fgH5oKa2vwP6__S~R-w+dHFobR5#VQWhCa!61f2};&>xB(k`ZpY&K%{p5b2N`A;PFqvmlfMudlS-uU=QPm#rZ#}a)@Noy z0k;L38$eUy(3WU6ubRA^az!qB^A})g%NDF`U4`Yf^DtK2hNkEexhDto^u`nMC74ir z2$9O~u*DniGm*s3Z>Bu@8>aWQWrT}g7t>~cg30fyDY;q<8$9cOw8v?Bjoe3`+q?t* zhl%9N=i@bsZ8g`4wqq*so##wumke=o1B4VECz`==<3KxsaWUfCyW19nXgsD)wxqL0 zn%Hg>A*mP_s|&3_o$Pa68(Jy&E=RH8n_W6g<%`K*XJgylcFC8)@>p@MZOc=pGk=deMNVlz1;$DQTQw^&B#w0xai83&Gd+!#EYdMftHbKCO0c)J z?+&bKyBTAGb&i9mK{V|GvJr#D%TXNMpm<{*4*Ri_zct^+W2QWZU$}hB=p@VtNyoIS zGZFBURB@yve_td#`GY7YuUj*Dofa-!Jac*xB8qB}*3y9N*^NkTX+(5&3&OIR5LnoR zIh*Tn=D93<=mgN-ITDZ4PcncOEuuLLi>{4E)Z`oozhaKKwVWRz&l@;B2bt;VXl%_u z-|9?UcY7F~*gF-+?w=u|eI`yE^jCZRU;s`WpatSpSwC>EKROrZAvhve?`O*cgGcio zJQ(LY@!;R(b6Z{+<2!TT15E?xaz9sOn}C`rCs-aN3TCo&B7w>P%ju@Isq(tzbhb2M z(EKd1%`Td@mYV#|z*$3>5!=e?D!83iXkyi3rE(9QKtS@H;q-~YF&FFJ-MZZ4)@%Un zLK*{&mESII{|27bCgR_g3ls3I*fz#>TBCcUDf5PbZkg+F8eR-?_#$?|p7hw29m+YvRO7^Zq?^jxpj#*Sh_mH0NA< z4`alnV393lWZ$-aB_>TE&D$J3z;rLn)69RS@i$y-+il})bItoZ4mPo_8i_5>gzKfu zi2Mb|y~a3G#e?3%Io~^-z0`seD=>dc3?99G29ECb$8+MhzqmgL$Hb3+eg8CUzA*-= zML7ugxo8jZx%GE`7>~%wqJtZX_2IJK5#JP)aQyE^a!z|_NFbu57_Qtmjpb^vO!8&=J zX7O#y206y>_rEBB7Gpr?W!?ZaWO^}yRDo?o#BHPP+rNUOMXfWu5iLJnrDGywu@DP~@lhraF<9Np)St#_G2-4 z_pxxX5S@X8W07#?%eTbX3XJ2Bz4b^K-h3zo$L|lo!dr6i*gb)GX7>y+KRkQ4Kb{xC 
zdu(3-o|E*0cTU3W##jXYGDlmT*=J#mchHnfl&8J8>=9LEg=FM67%^QvQ^yN}~ zC}RD+rxJA*uD2eJ#EA#P@zQ<)=B}$zS(lF>d5_w<&HMSdCP*+0My$kvaJ~c=75SMUz!1G zDsnEAE}Rdu3un0C=X@kAcvT`4oLLMzcvlQt>|5exn&6R+vB9m2X&XE<0W^bWgLyk8 zF>!47g^0z7dv&(ue!Q+Gyk3ZR42#N_U~bNkk}iYeqwLZp`0h-bi=r77n5)gecVJEX z4Hy=zk5^G(uhBJav|y_aV>@mM$sD6ir%aNjFm3Jyn^|{<;zek zoZgkukM-@>V^hyg725L!j?KgVVTQ&#kCup3Kx8Z{U{m!;+zIWW+SgK z37t&|80bmI+_6jyEz8Edl{uKVHU~o+voTlJgY&b{Qj-Axz#P+pj?XXmcgqA9@7CPl znZxZom-zN@y15SrYd8Dmc};nEb{R=dPZNF>jQf>=Lol$rmTh%bI>XM!B|_ZcbQ?6c z!84{S?k5OsCxE7A0_Pe$7u?qfua|Kc1`Z@HdB}rr55`?SckypI-Rv8O+D^2=fDgw- zx^Mz$I#{@3)8N({=bEl-rcW#n&TkVjZn@py*}=JN6U&Nqo2FPdMsByqjnNUe*BTw4 zJtZGK89i88w;J<{$1pp-RrNaq=*0T`GGe_NjZy|IZ`!2ep)IN3C>ox#>I=tn+{0Q1 z&ex$oe<2EHR%_n1II?-wYe8qyc6fvsb-Ko z;gU?`Or{1mPBr;lwYwNGPmZTX*Y28vb8@=HIoHHF%L8`hB9Awo#qZSb-dL}qeiy%w zJ&-uduHsoSZn@p!o0{H~19o3=E_`me-S9gnK}c}|Xy5%Y(8@G`W?GG+wEATn+ERxoG?v$>Zh07091a ztog}c+12n9185VCTAf41D7MAoWSiqJQ`?i%8B_VYPa?2kcoU{wG!x+>xWj)Kg$ViB z+z$Ieq>6BxZnyQgScHhpNO}3(kNA;ea-O}1N#i+^=_FHdJ3_W;;nqYXYPuZfzBw*L zAZykZhw->%5Oco0-kcxvvjnt6wPH*_Igmbpo|Hb!6(G*dnUDUg5zI*)Lg~x~6ih7_ z(5}L&mg@w#E76xdhCv}*T7NG4V=uz2!~rRg@-X9~P)xlj2vaT!6bry)HGf@Cl6Q2q zh^oZw7%bT~1H10kxcS~p+_ygrFWr9yUcdh;ymsG}cy-?ucxCUEcuDNoJy+t{ zU02}o+b%}Na5mak$71jGm*cUUe~l+@y&OkwzZ^&JyaG?j`uZC#M_xgg{7#JLca6=& zq#s8kX?8X4*qeZ79!kQpue(iG$hG2}{mo)qE*)~=*w>iIcxoW``$DLJT^ zmWQ&bIVhc+g_6nHD48NA`^92~lk<>sbuQ8_%SOBu2yjoo zjQWYeccc{9G>helW-v`G^LV5b_-e8rDTOw#ixTk{BZYUAY=_JDiT_0s>O$%;BAA|+ zJ%XX^Q7kN5j?tpU=o5T*rp!f|e*?0wEWrGdWs2{SlGW(T85dlSE6#heM#TXi6QUkQ z-n43YhD7ah6(|-Ud2l9!;_^*te)27ef7*1l5Y4M5&zk(pV+GTTu()A8whisU`i?E) z01RVsaSO(>%g`2{ipU>^>A4ms6R~A5!Lr30C+9O;{+x}eiDqzUF-G%FbIm-@B+sF#dVGDd44)iJ#mPrPaBQC+jvWZX8;^wH)T6;T`H(-}c+elO z3qQYn-!vTAbrq)1E|U9t>kMPuCzhrv4*3jzYLlcay6U1>T=nBbTyaSvF271_QW7qo zmW<2%l5pu%*}gbY&WUo)=e4%XG35{OZFt?57r|;%b?su`;@kWETx^Q@w!Mj+gV7^p2 z-C{lmMN@K-C!C%xWlX}w;+RPOA%8Q-X}R2TyTQAY?qFU`aUG!?9w7h0#eA6J{O#A? 
zbrAL4UH->^|34A-ec^Hs)-BGBh0`?MJ#KZn_r5o;TdZ@R{O)o*Ihj0aC_9|2PThmk4ez&qD5{M!ys1#Cy)Asgh2d8xAL@7YfG7@doQ@$X2YUqTniv zb>Voyb(wIxH`a^A3I*#qSLGn}vP{Ht_H@Zhs^1+Q@6qsraf5Axf0yeG#~WVv*6+l6 zD6Y6@D)wA=kAtZ1?vgF=IU(vW2MW`lQEMzJ@+XyG zu5k9M=Ibz4wif+4#5OhjIC|xr-mLlP5PzP0?l1q}V8_kZChwcPYxAmmocks(Pvlyg zzwJFH-p`!xeGi*9Q+O`|mj@)ExI6@PyIz3?M>wLh;sjffa-NGni%0X@hQqzh``Kyk^IG%1m-YX>@6BCn zP}5fyr?pCzs^iio3K}|Nk)eoD6oL}S?4q(=MLyWdLI@xzpg;lyNMc@9cEt*TQAV&5 zNIR^}?gD`rC4syekXHj_>MNMn9}-B!kOv9mK|->rJG(QT*?!;q?asO9p5M&*@S8dJ zp5K|q&543@OKafd)jvuN!)y)7EOA}dGP;{!KEn4}dUnbgHyp{M??ylw8M_c2yTN5K zGnYFM(cnFqm-a8+tv=~*)IA$NK^bbhnb}Az4D>xz5O@VDq&U=aHp>%3l6% zN!jMF4l+F)f$APa%IEd*NjXU2CRnfN)KnQD(1Ws&oGVEc4TiA5GeR~@J==S?w!QGt zG@0Elq%Bo>_3@gqY7SYd=;(Zm)Cq5?IZO1!fr@sZ)fPF19l*g1CInbI(h+b1dH z@6gTlHftR;{w4q6T)Z{0bI=m`niA+yAcO@zDqfRcgb#S4;UvRF6;}OT4p$4x;jC_9 zht~*ErfW~i&s-9|*-|EQpIauFFTnj{+wrTtn-Z(nkL)EDgHx!OP!R9K|S>T3V#dW0Rb@P6V=DJfO-bHmSBzTuN9WGrIcW!5d`W8UNCyU&3*+pQO=!QgH71;nEv0>{Nu`FsF^zer^v@YA`{WO^n-dtp*~wiMAe3( zvunc53PG?Ft*lFKxBSRT=jvEAZlQcEsh{|~V^)}@Spg|fS8*6&h%<4_gsPfhrQ_Z> zG$wvO#)Dqw7r`9QF8Onxs3CkL#V`}guTogTP}bvbbR27+9)e;6)N{JlGIW&zUNC&912> zGFiBZ6w|AkM^J-ZZQtWSaWk#t-Stj*q>))s4PXmox)pVrb)q9fD^Ws5%Z=tA_v7GA zULf6!UYO zQ&W!9K_q6hB$}PBOIV?B^F3&-LgG*(aa#vyrd`WGDlP-lcNBgR^WO;5e^#HP<@7y?h$vZ_C1=Q937**^Rp**qiokbeM{Z`+Ac3M0~*El&-_=+OuSP$#B5 z0sCEi;>u33nkFWRLh~4d@8yZG+bJH+nrl6E{z{eo+W?Q(%jbhpIukVPjJ#pitlYc7zG!* zO+`{zY{Vi2wv7oy(pk;qI7j$h5l>>Ke#FABJAO-vevLj59>OmR~r; zM)l|UogaLJJnOID+m~ZjoFs+o2p~piKM}<8gG5&}a?L;C;^)yY^u0JFk7!86uWO3G z6eFT@{8K(~#YsoRg&ss*QUY55)H5BzPs4oi#~n9Hi1XD~gXRfg&WIu1VY6aDHrZ~F zaYB#vX@}|Egl|hN)$ze#jq(~x($ZDyaMtuu;SR$w*l&aZTqqiQw6lePN5ULi2t z$6BzBi|umJAmd0^2!4ZR>&0!${bo(QBQKmHK2t52$J_W%IQ^`3)K6QEJ3Gryi}|N} zl1iZzAa&!AAb-_!E?BPlGj6)B97AD7Gm%)Uc{U&)yQH%OYCGmb`~{(k_qGTuw^3!I zYwqa+q_ZPVQdcA49nn6EZ<7&vv8v8u+5=QITpY5NB!>fV!;ZbY`4l+Cpi3Nb9)}+g z_?F4gxi>z;W=`6R3`B)Jl}8>y;Y$pKw=KnH>M6YU?>}1?s>UHo^2bgK{$tp!*5`sr zFRRGLnd~Y!r#Ok7Sxd>F$$u9!b|9zAFyzsT#wJGPhXj!D&6~Pw)eWC`%#xp!&I0M% zj#<$y=LU$J0ck7c_J~5eYnMlFqloOc4kEi|`p}5lFgO;QCpgQPlzx`Z$nrB^BLLw{ zf>UU{W@G}NUdtk_rstB*P2H8gObbXQAM}H9B$U`D{-zGKKQgG^MP%)eCT1_4^64-mNe%}7!M zW+hfM%Jmw+5twxlVDr-!`HeMs#}|+!DNyW|@7^K%-%$e4VjG}e>wg2pyGT;*!T%E=K+e~G!~~KT VHIZ_$J}9uhICbtM>ks)i{skeLdzAnH literal 0 HcmV?d00001 diff --git a/requirements/base.txt b/requirements/base.txt index 538912b36cc..9a4099a6162 100644 --- a/requirements/base.txt +++ b/requirements/base.txt @@ -4,7 +4,7 @@ natsort==8.1.* prettytable==3.9.* protobuf==3.20.* pyyaml -datumaro~=1.6.0rc0 +datumaro==1.6.0rc1 psutil==5.9.* scipy==1.10.* bayesian-optimization==1.4.* diff --git a/requirements/dev.txt b/requirements/dev.txt index 3966fdcf396..1645d008dcd 100644 --- a/requirements/dev.txt +++ b/requirements/dev.txt @@ -10,3 +10,4 @@ onnx==1.13.0 onnxruntime==1.14.1 pytest-csv==3.0.* tox==4.11.* +mlflow==2.9.* diff --git a/requirements/openvino.txt b/requirements/openvino.txt index 90ee6025685..cdc824ae0f2 100644 --- a/requirements/openvino.txt +++ b/requirements/openvino.txt @@ -1,8 +1,8 @@ # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # OpenVINO Requirements. 
diff --git a/requirements/publish.txt b/requirements/publish.txt
new file mode 100644
index 00000000000..b1b55833bd6
--- /dev/null
+++ b/requirements/publish.txt
@@ -0,0 +1,2 @@
+build==1.0.3
+twine==4.0.2
diff --git a/src/otx/algorithms/action/configs/classification/configuration.yaml b/src/otx/algorithms/action/configs/classification/configuration.yaml
index 1d146c4c92d..86dcd72a68a 100644
--- a/src/otx/algorithms/action/configs/classification/configuration.yaml
+++ b/src/otx/algorithms/action/configs/classification/configuration.yaml
@@ -74,7 +74,7 @@ learning_parameters:
       time will be longer.
     editable: true
     header: Number of training iterations
-    max_value: 100000
+    max_value: 1000
     min_value: 1
     type: INTEGER
     ui_rules:
@@ -354,7 +354,7 @@ pot_parameters:
     description: Number of data samples used for post-training optimization
     editable: True
     header: Number of data samples
-    max_value: 100000
+    max_value: 1000
     min_value: 1
     type: INTEGER
     ui_rules:
diff --git a/src/otx/algorithms/action/configs/detection/configuration.yaml b/src/otx/algorithms/action/configs/detection/configuration.yaml
index 1d146c4c92d..86dcd72a68a 100644
--- a/src/otx/algorithms/action/configs/detection/configuration.yaml
+++ b/src/otx/algorithms/action/configs/detection/configuration.yaml
@@ -74,7 +74,7 @@ learning_parameters:
       time will be longer.
     editable: true
     header: Number of training iterations
-    max_value: 100000
+    max_value: 1000
     min_value: 1
     type: INTEGER
     ui_rules:
@@ -354,7 +354,7 @@ pot_parameters:
     description: Number of data samples used for post-training optimization
     editable: True
     header: Number of data samples
-    max_value: 100000
+    max_value: 1000
     min_value: 1
     type: INTEGER
     ui_rules:
diff --git a/src/otx/algorithms/anomaly/configs/base/configuration.py b/src/otx/algorithms/anomaly/configs/base/configuration.py
index a9989cc6128..197d18260d1 100644
--- a/src/otx/algorithms/anomaly/configs/base/configuration.py
+++ b/src/otx/algorithms/anomaly/configs/base/configuration.py
@@ -14,8 +14,6 @@
 # See the License for the specific language governing permissions
 # and limitations under the License.
 
-from sys import maxsize
-
 from attr import attrs
 
 from otx.algorithms.anomaly.configs.base.configuration_enums import (
@@ -97,7 +95,7 @@ class POTParameters(ParameterGroup):
         description="Number of data samples used for post-training optimization",
         default_value=300,
         min_value=1,
-        max_value=maxsize,
+        max_value=1000,
     )
diff --git a/src/otx/algorithms/anomaly/configs/classification/draem/configuration.yaml b/src/otx/algorithms/anomaly/configs/classification/draem/configuration.yaml
index be617c1831c..ee410672c48 100644
--- a/src/otx/algorithms/anomaly/configs/classification/draem/configuration.yaml
+++ b/src/otx/algorithms/anomaly/configs/classification/draem/configuration.yaml
@@ -226,7 +226,7 @@ pot_parameters:
     description: Number of data samples used for post-training optimization
     editable: true
     header: Number of data samples
-    max_value: 100000
+    max_value: 1000
     min_value: 1
     type: INTEGER
     ui_rules:
diff --git a/src/otx/algorithms/anomaly/configs/classification/padim/configuration.yaml b/src/otx/algorithms/anomaly/configs/classification/padim/configuration.yaml
index 10dc2e99fa1..eac893d019c 100644
--- a/src/otx/algorithms/anomaly/configs/classification/padim/configuration.yaml
+++ b/src/otx/algorithms/anomaly/configs/classification/padim/configuration.yaml
@@ -166,7 +166,7 @@ pot_parameters:
     description: Number of data samples used for post-training optimization
     editable: true
     header: Number of data samples
-    max_value: 100000
+    max_value: 1000
     min_value: 1
     type: INTEGER
     ui_rules:
diff --git a/src/otx/algorithms/anomaly/configs/classification/stfpm/configuration.yaml b/src/otx/algorithms/anomaly/configs/classification/stfpm/configuration.yaml
index e270577ebbf..ff3e8ca1517 100644
--- a/src/otx/algorithms/anomaly/configs/classification/stfpm/configuration.yaml
+++ b/src/otx/algorithms/anomaly/configs/classification/stfpm/configuration.yaml
@@ -295,7 +295,7 @@ pot_parameters:
     description: Number of data samples used for post-training optimization
     editable: true
     header: Number of data samples
-    max_value: 100000
+    max_value: 1000
     min_value: 1
     type: INTEGER
     ui_rules:
diff --git a/src/otx/algorithms/anomaly/configs/detection/draem/configuration.yaml b/src/otx/algorithms/anomaly/configs/detection/draem/configuration.yaml
index be617c1831c..ee410672c48 100644
--- a/src/otx/algorithms/anomaly/configs/detection/draem/configuration.yaml
+++ b/src/otx/algorithms/anomaly/configs/detection/draem/configuration.yaml
@@ -226,7 +226,7 @@ pot_parameters:
     description: Number of data samples used for post-training optimization
     editable: true
     header: Number of data samples
-    max_value: 100000
+    max_value: 1000
     min_value: 1
     type: INTEGER
     ui_rules:
diff --git a/src/otx/algorithms/anomaly/configs/detection/padim/configuration.yaml b/src/otx/algorithms/anomaly/configs/detection/padim/configuration.yaml
index 10dc2e99fa1..eac893d019c 100644
--- a/src/otx/algorithms/anomaly/configs/detection/padim/configuration.yaml
+++ b/src/otx/algorithms/anomaly/configs/detection/padim/configuration.yaml
@@ -166,7 +166,7 @@ pot_parameters:
     description: Number of data samples used for post-training optimization
     editable: true
     header: Number of data samples
-    max_value: 100000
+    max_value: 1000
     min_value: 1
     type: INTEGER
     ui_rules:
diff --git a/src/otx/algorithms/anomaly/configs/detection/stfpm/configuration.yaml b/src/otx/algorithms/anomaly/configs/detection/stfpm/configuration.yaml
index e270577ebbf..ff3e8ca1517 100644
--- a/src/otx/algorithms/anomaly/configs/detection/stfpm/configuration.yaml
+++ b/src/otx/algorithms/anomaly/configs/detection/stfpm/configuration.yaml
@@ -295,7 +295,7 @@ pot_parameters:
     description: Number of data samples used for post-training optimization
     editable: true
     header: Number of data samples
-    max_value: 100000
+    max_value: 1000
     min_value: 1
     type: INTEGER
     ui_rules:
diff --git a/src/otx/algorithms/anomaly/configs/segmentation/draem/configuration.yaml b/src/otx/algorithms/anomaly/configs/segmentation/draem/configuration.yaml
index be617c1831c..ee410672c48 100644
--- a/src/otx/algorithms/anomaly/configs/segmentation/draem/configuration.yaml
+++ b/src/otx/algorithms/anomaly/configs/segmentation/draem/configuration.yaml
@@ -226,7 +226,7 @@ pot_parameters:
     description: Number of data samples used for post-training optimization
     editable: true
     header: Number of data samples
-    max_value: 100000
+    max_value: 1000
     min_value: 1
     type: INTEGER
     ui_rules:
diff --git a/src/otx/algorithms/anomaly/configs/segmentation/padim/configuration.yaml b/src/otx/algorithms/anomaly/configs/segmentation/padim/configuration.yaml
index 10dc2e99fa1..eac893d019c 100644
--- a/src/otx/algorithms/anomaly/configs/segmentation/padim/configuration.yaml
+++ b/src/otx/algorithms/anomaly/configs/segmentation/padim/configuration.yaml
@@ -166,7 +166,7 @@ pot_parameters:
     description: Number of data samples used for post-training optimization
     editable: true
     header: Number of data samples
-    max_value: 100000
+    max_value: 1000
     min_value: 1
     type: INTEGER
     ui_rules:
diff --git a/src/otx/algorithms/anomaly/configs/segmentation/stfpm/configuration.yaml b/src/otx/algorithms/anomaly/configs/segmentation/stfpm/configuration.yaml
index e270577ebbf..ff3e8ca1517 100644
--- a/src/otx/algorithms/anomaly/configs/segmentation/stfpm/configuration.yaml
+++ b/src/otx/algorithms/anomaly/configs/segmentation/stfpm/configuration.yaml
@@ -295,7 +295,7 @@ pot_parameters:
     description: Number of data samples used for post-training optimization
     editable: true
     header: Number of data samples
-    max_value: 100000
+    max_value: 1000
     min_value: 1
     type: INTEGER
     ui_rules:
diff --git a/src/otx/algorithms/anomaly/tasks/openvino.py b/src/otx/algorithms/anomaly/tasks/openvino.py
index 931f325f373..a5db9c21894 100644
--- a/src/otx/algorithms/anomaly/tasks/openvino.py
+++ b/src/otx/algorithms/anomaly/tasks/openvino.py
@@ -188,13 +188,17 @@ def infer(self, dataset: DatasetEntity, inference_parameters: InferenceParameter
             label = self.anomalous_label if image_result.pred_score >= 0.5 else self.normal_label
         elif self.task_type == TaskType.ANOMALY_SEGMENTATION:
             annotations = create_annotation_from_segmentation_map(
-                pred_mask, image_result.anomaly_map.squeeze(), {0: self.normal_label, 1: self.anomalous_label}
+                pred_mask,
+                image_result.anomaly_map.squeeze() / 255.0,
+                {0: self.normal_label, 1: self.anomalous_label},
             )
             dataset_item.append_annotations(annotations)
             label = self.normal_label if len(annotations) == 0 else self.anomalous_label
         elif self.task_type == TaskType.ANOMALY_DETECTION:
             annotations = create_detection_annotation_from_anomaly_heatmap(
-                pred_mask, image_result.anomaly_map.squeeze(), {0: self.normal_label, 1: self.anomalous_label}
+                pred_mask,
+                image_result.anomaly_map.squeeze() / 255.0,
+                {0: self.normal_label, 1: self.anomalous_label},
             )
             dataset_item.append_annotations(annotations)
             label = self.normal_label if len(annotations) == 0 else self.anomalous_label
@@ -202,13 +206,12 @@ def infer(self, dataset: DatasetEntity, inference_parameters: InferenceParameter
             raise ValueError(f"Unknown task type: {self.task_type}")
 
         dataset_item.append_labels([ScoredLabel(label=label, probability=float(probability))])
-        anomaly_map = (image_result.anomaly_map * 255).astype(np.uint8)
         heatmap_media = ResultMediaEntity(
             name="Anomaly Map",
             type="anomaly_map",
             label=label,
             annotation_scene=dataset_item.annotation_scene,
-            numpy=anomaly_map,
+            numpy=image_result.anomaly_map,
         )
         dataset_item.append_metadata_item(heatmap_media)
         update_progress_callback(int((idx + 1) / len(dataset) * 100))
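Note on the two openvino.py hunks above: together with the openvino-model-api==0.1.8 pin earlier in this patch, they suggest the model API now returns the anomaly map as a uint8 array in [0, 255]; the annotation helpers, which expect soft predictions in [0, 1], therefore divide by 255.0, while ResultMediaEntity consumes the uint8 map directly. A minimal sketch of that assumed contract, with a random stand-in for image_result.anomaly_map:

import numpy as np

# Stand-in for image_result.anomaly_map (assumption: uint8 scores in [0, 255]).
anomaly_map = np.random.randint(0, 256, size=(64, 64), dtype=np.uint8)

# Annotation helpers expect soft predictions in [0, 1], hence the / 255.0 above.
soft_prediction = anomaly_map / 255.0
assert soft_prediction.min() >= 0.0 and soft_prediction.max() <= 1.0

# The media entity takes the uint8 map as-is, so the old
# (anomaly_map * 255).astype(np.uint8) round-trip is no longer needed.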
diff --git a/src/otx/algorithms/classification/adapters/mmcls/datasets/otx_datasets.py b/src/otx/algorithms/classification/adapters/mmcls/datasets/otx_datasets.py
index ad950763948..6f0ba443ebf 100644
--- a/src/otx/algorithms/classification/adapters/mmcls/datasets/otx_datasets.py
+++ b/src/otx/algorithms/classification/adapters/mmcls/datasets/otx_datasets.py
@@ -264,7 +264,7 @@ def evaluate(
         confusion_matrices = []
 
         for cls_idx in cls_index:
-            group_labels_idx = set([cls_idx - 1])
+            group_labels_idx = set([cls_idx])
             y_true = [int(not group_labels_idx.issubset(true_labels)) for true_labels in true_label_idx]
             y_pred = [int(not group_labels_idx.issubset(pred_labels)) for pred_labels in pred_label_idx]
             matrix_data = sklearn_confusion_matrix(y_true, y_pred, labels=list(range(len([0, 1]))))
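Note: the one-line change above fixes an off-by-one in the per-class confusion matrices: with cls_idx - 1, each label group's binary matrix was built for the neighbouring class index. A toy reproduction with made-up label-index sets (hypothetical data, not from the source):

from sklearn.metrics import confusion_matrix as sklearn_confusion_matrix

true_label_idx = [{0}, {1}, {1}]  # ground-truth label indices per sample
pred_label_idx = [{0}, {1}, {0}]  # predicted label indices per sample

cls_idx = 1
group_labels_idx = {cls_idx}  # previously {cls_idx - 1}, i.e. class 0's matrix
y_true = [int(not group_labels_idx.issubset(t)) for t in true_label_idx]
y_pred = [int(not group_labels_idx.issubset(p)) for p in pred_label_idx]
print(sklearn_confusion_matrix(y_true, y_pred, labels=[0, 1]))  # matrix for class 1, as intended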
diff --git a/src/otx/algorithms/classification/configs/configuration.yaml b/src/otx/algorithms/classification/configs/configuration.yaml
index 03c327f88b2..18de282bf68 100644
--- a/src/otx/algorithms/classification/configs/configuration.yaml
+++ b/src/otx/algorithms/classification/configs/configuration.yaml
@@ -92,7 +92,7 @@ learning_parameters:
       time will be longer.
    editable: true
    header: Number of training iterations
-    max_value: 100000
+    max_value: 1000
    min_value: 1
    type: INTEGER
    ui_rules:
@@ -335,7 +335,7 @@ pot_parameters:
    description: Number of data samples used for post-training optimization
    editable: true
    header: Number of data samples
-    max_value: 100000
+    max_value: 1000
    min_value: 1
    type: INTEGER
    ui_rules:
diff --git a/src/otx/algorithms/classification/configs/efficientnet_b0_cls_incr/selfsl/hparam.yaml b/src/otx/algorithms/classification/configs/efficientnet_b0_cls_incr/selfsl/hparam.yaml
index 6ba275a893f..894d85d7186 100644
--- a/src/otx/algorithms/classification/configs/efficientnet_b0_cls_incr/selfsl/hparam.yaml
+++ b/src/otx/algorithms/classification/configs/efficientnet_b0_cls_incr/selfsl/hparam.yaml
@@ -13,7 +13,7 @@ hyper_parameters:
     learning_rate_warmup_iters:
       default_value: 50
     num_iters:
-      default_value: 5000
+      default_value: 1000
     enable_early_stopping:
       default_value: false
     use_adaptive_interval:
diff --git a/src/otx/algorithms/classification/configs/efficientnet_v2_s_cls_incr/selfsl/hparam.yaml b/src/otx/algorithms/classification/configs/efficientnet_v2_s_cls_incr/selfsl/hparam.yaml
index 6ba275a893f..894d85d7186 100644
--- a/src/otx/algorithms/classification/configs/efficientnet_v2_s_cls_incr/selfsl/hparam.yaml
+++ b/src/otx/algorithms/classification/configs/efficientnet_v2_s_cls_incr/selfsl/hparam.yaml
@@ -13,7 +13,7 @@ hyper_parameters:
     learning_rate_warmup_iters:
       default_value: 50
     num_iters:
-      default_value: 5000
+      default_value: 1000
     enable_early_stopping:
       default_value: false
     use_adaptive_interval:
diff --git a/src/otx/algorithms/classification/configs/mobilenet_v3_large_075_cls_incr/selfsl/hparam.yaml b/src/otx/algorithms/classification/configs/mobilenet_v3_large_075_cls_incr/selfsl/hparam.yaml
index 6ba275a893f..894d85d7186 100644
--- a/src/otx/algorithms/classification/configs/mobilenet_v3_large_075_cls_incr/selfsl/hparam.yaml
+++ b/src/otx/algorithms/classification/configs/mobilenet_v3_large_075_cls_incr/selfsl/hparam.yaml
@@ -13,7 +13,7 @@ hyper_parameters:
     learning_rate_warmup_iters:
       default_value: 50
     num_iters:
-      default_value: 5000
+      default_value: 1000
     enable_early_stopping:
       default_value: false
     use_adaptive_interval:
diff --git a/src/otx/algorithms/classification/configs/mobilenet_v3_large_1_cls_incr/selfsl/hparam.yaml b/src/otx/algorithms/classification/configs/mobilenet_v3_large_1_cls_incr/selfsl/hparam.yaml
index 6ba275a893f..894d85d7186 100644
--- a/src/otx/algorithms/classification/configs/mobilenet_v3_large_1_cls_incr/selfsl/hparam.yaml
+++ b/src/otx/algorithms/classification/configs/mobilenet_v3_large_1_cls_incr/selfsl/hparam.yaml
@@ -13,7 +13,7 @@ hyper_parameters:
     learning_rate_warmup_iters:
       default_value: 50
     num_iters:
-      default_value: 5000
+      default_value: 1000
     enable_early_stopping:
       default_value: false
     use_adaptive_interval:
diff --git a/src/otx/algorithms/classification/configs/mobilenet_v3_small_cls_incr/selfsl/hparam.yaml b/src/otx/algorithms/classification/configs/mobilenet_v3_small_cls_incr/selfsl/hparam.yaml
index 6ba275a893f..894d85d7186 100644
--- a/src/otx/algorithms/classification/configs/mobilenet_v3_small_cls_incr/selfsl/hparam.yaml
+++ b/src/otx/algorithms/classification/configs/mobilenet_v3_small_cls_incr/selfsl/hparam.yaml
@@ -13,7 +13,7 @@ hyper_parameters:
     learning_rate_warmup_iters:
       default_value: 50
     num_iters:
-      default_value: 5000
+      default_value: 1000
     enable_early_stopping:
       default_value: false
     use_adaptive_interval:
diff --git a/src/otx/algorithms/common/configs/training_base.py b/src/otx/algorithms/common/configs/training_base.py
index 8e924899ef7..d4d2b964dd4 100644
--- a/src/otx/algorithms/common/configs/training_base.py
+++ b/src/otx/algorithms/common/configs/training_base.py
@@ -95,7 +95,7 @@ class BaseLearningParameters(ParameterGroup):
     num_iters = configurable_integer(
         default_value=1,
         min_value=1,
-        max_value=100000,
+        max_value=1000,
         header="Number of training iterations",
         description="Increasing this value causes the results to be more robust but training time will be longer.",
         affects_outcome_of=ModelLifecycle.TRAINING,
@@ -291,7 +291,7 @@ class BasePOTParameter(ParameterGroup):
         description="Number of data samples used for post-training optimization",
         default_value=300,
         min_value=1,
-        max_value=100000,
+        max_value=1000,
     )
 
     stat_requests_number = configurable_integer(
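Note: configurable_integer declares a closed [min_value, max_value] range that the configuration layer can validate and UIs can render, so dropping max_value from 100000 (or sys.maxsize) to 1000 narrows that contract everywhere the parameter is exposed. A purely illustrative stand-in for the bounded-parameter idea (hypothetical class, not the OTX API):

from dataclasses import dataclass

@dataclass
class BoundedInt:
    default_value: int
    min_value: int
    max_value: int

    def validate(self, value: int) -> int:
        # Reject anything outside the declared closed interval.
        if not self.min_value <= value <= self.max_value:
            raise ValueError(f"{value} not in [{self.min_value}, {self.max_value}]")
        return value

num_iters = BoundedInt(default_value=1, min_value=1, max_value=1000)
num_iters.validate(500)     # accepted
# num_iters.validate(5000)  # would now raise; the old max_value=100000 allowed it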
enumerate(pseudo_masks) if non_empty[i]] - ul_img_metas = [im for i, im in enumerate(ul_img_metas) if non_empty[i]] - ul_img = ul_img[non_empty] - else: - non_empty = [True] - if self.visualize: - self._visual_online(ul_img, pseudo_bboxes, pseudo_labels) + non_empty = [bool(len(i)) for i in pseudo_labels] if self.filter_empty_annotations else [True] + get_unlabeled_loss = pseudo_ratio >= self.min_pseudo_label_ratio and any(non_empty) + + if dist.is_initialized(): + reduced_get_unlabeled_loss = torch.tensor(int(get_unlabeled_loss)).to(current_device) + dist.all_reduce(reduced_get_unlabeled_loss) + if dont_have_to_train := not get_unlabeled_loss and reduced_get_unlabeled_loss > 0: + get_unlabeled_loss = True + non_empty[0] = True + losses.update(ps_ratio=torch.tensor([pseudo_ratio], device=current_device)) # Unsupervised loss # Compute only if min_pseudo_label_ratio is reached - if pseudo_ratio >= self.min_pseudo_label_ratio and any(non_empty): + if get_unlabeled_loss: + if self.filter_empty_annotations: + pseudo_bboxes = [pb for i, pb in enumerate(pseudo_bboxes) if non_empty[i]] + pseudo_labels = [pl for i, pl in enumerate(pseudo_labels) if non_empty[i]] + pseudo_masks = [pm for i, pm in enumerate(pseudo_masks) if non_empty[i]] + ul_img_metas = [im for i, im in enumerate(ul_img_metas) if non_empty[i]] + ul_img = ul_img[non_empty] + if self.visualize: + self._visual_online(ul_img, pseudo_bboxes, pseudo_labels) if self.bg_loss_weight >= 0.0: self.model_s.bbox_head.bg_loss_weight = self.bg_loss_weight if self.model_t.with_mask: @@ -214,7 +222,10 @@ def forward_train( if ul_loss_name.startswith("loss_"): ul_loss = ul_losses[ul_loss_name] target_loss = ul_loss_name.split("_")[-1] - if self.unlabeled_loss_weights[target_loss] == 0: + if dist.is_initialized(): + if dont_have_to_train: + self.unlabeled_loss_weights[target_loss] = 0 + elif self.unlabeled_loss_weights[target_loss] == 0: continue self._update_unlabeled_loss(losses, ul_loss, ul_loss_name, self.unlabeled_loss_weights[target_loss]) return losses diff --git a/src/otx/algorithms/detection/adapters/mmdet/task.py b/src/otx/algorithms/detection/adapters/mmdet/task.py index 142a340ade8..ebf33f616b3 100644 --- a/src/otx/algorithms/detection/adapters/mmdet/task.py +++ b/src/otx/algorithms/detection/adapters/mmdet/task.py @@ -311,6 +311,8 @@ def _infer_model( dump_features = True dump_saliency_map = not inference_parameters.is_evaluation if inference_parameters else True + if isinstance(self, NNCFBaseTask): + dump_saliency_map = False self._init_task() diff --git a/src/otx/algorithms/detection/configs/detection/configuration.yaml b/src/otx/algorithms/detection/configs/detection/configuration.yaml index ace8da1a37a..a332cad8711 100644 --- a/src/otx/algorithms/detection/configs/detection/configuration.yaml +++ b/src/otx/algorithms/detection/configs/detection/configuration.yaml @@ -96,7 +96,7 @@ learning_parameters: time will be longer. 
editable: true header: Number of training iterations - max_value: 100000 + max_value: 1000 min_value: 1 type: INTEGER ui_rules: @@ -454,7 +454,7 @@ pot_parameters: description: Number of data samples used for post-training optimization editable: True header: Number of data samples - max_value: 100000 + max_value: 1000 min_value: 1 type: INTEGER ui_rules: diff --git a/src/otx/algorithms/detection/configs/instance_segmentation/configuration.yaml b/src/otx/algorithms/detection/configs/instance_segmentation/configuration.yaml index 7dae320a37c..59cef3af6c5 100644 --- a/src/otx/algorithms/detection/configs/instance_segmentation/configuration.yaml +++ b/src/otx/algorithms/detection/configs/instance_segmentation/configuration.yaml @@ -96,7 +96,7 @@ learning_parameters: time will be longer. editable: true header: Number of training iterations - max_value: 100000 + max_value: 1000 min_value: 1 type: INTEGER ui_rules: @@ -439,7 +439,7 @@ pot_parameters: description: Number of data samples used for post-training optimization editable: True header: Number of data samples - max_value: 100000 + max_value: 1000 min_value: 1 type: INTEGER ui_rules: diff --git a/src/otx/algorithms/detection/configs/instance_segmentation/convnext_maskrcnn/compression_config.json b/src/otx/algorithms/detection/configs/instance_segmentation/convnext_maskrcnn/compression_config.json index 7e0cba46aa9..c88a5c2744e 100644 --- a/src/otx/algorithms/detection/configs/instance_segmentation/convnext_maskrcnn/compression_config.json +++ b/src/otx/algorithms/detection/configs/instance_segmentation/convnext_maskrcnn/compression_config.json @@ -20,6 +20,11 @@ "algorithm": "quantization", "initializer": { "range": { + "type": "percentile", + "params": { + "min_percentile": 0, + "max_percentile": 100 + }, "num_init_samples": 1000 }, "batchnorm_adaptation": { diff --git a/src/otx/algorithms/detection/configs/instance_segmentation/efficientnetb2b_maskrcnn/compression_config.json b/src/otx/algorithms/detection/configs/instance_segmentation/efficientnetb2b_maskrcnn/compression_config.json index 7e0cba46aa9..c88a5c2744e 100644 --- a/src/otx/algorithms/detection/configs/instance_segmentation/efficientnetb2b_maskrcnn/compression_config.json +++ b/src/otx/algorithms/detection/configs/instance_segmentation/efficientnetb2b_maskrcnn/compression_config.json @@ -20,6 +20,11 @@ "algorithm": "quantization", "initializer": { "range": { + "type": "percentile", + "params": { + "min_percentile": 0, + "max_percentile": 100 + }, "num_init_samples": 1000 }, "batchnorm_adaptation": { diff --git a/src/otx/algorithms/detection/configs/instance_segmentation/maskrcnn_swin_t/compression_config.json b/src/otx/algorithms/detection/configs/instance_segmentation/maskrcnn_swin_t/compression_config.json index 29ac4407c10..c8b0d164aa9 100644 --- a/src/otx/algorithms/detection/configs/instance_segmentation/maskrcnn_swin_t/compression_config.json +++ b/src/otx/algorithms/detection/configs/instance_segmentation/maskrcnn_swin_t/compression_config.json @@ -20,6 +20,11 @@ "algorithm": "quantization", "initializer": { "range": { + "type": "percentile", + "params": { + "min_percentile": 0, + "max_percentile": 100 + }, "num_init_samples": 1000 }, "batchnorm_adaptation": { diff --git a/src/otx/algorithms/detection/configs/instance_segmentation/resnet50_maskrcnn/compression_config.json b/src/otx/algorithms/detection/configs/instance_segmentation/resnet50_maskrcnn/compression_config.json index ab687b9c6a2..af3c5ef8937 100644 --- 
a/src/otx/algorithms/detection/configs/instance_segmentation/resnet50_maskrcnn/compression_config.json +++ b/src/otx/algorithms/detection/configs/instance_segmentation/resnet50_maskrcnn/compression_config.json @@ -20,6 +20,11 @@ "algorithm": "quantization", "initializer": { "range": { + "type": "percentile", + "params": { + "min_percentile": 0, + "max_percentile": 100 + }, "num_init_samples": 1000 }, "batchnorm_adaptation": { diff --git a/src/otx/algorithms/detection/configs/rotated_detection/configuration.yaml b/src/otx/algorithms/detection/configs/rotated_detection/configuration.yaml index 20b4faf33f2..824093460ac 100644 --- a/src/otx/algorithms/detection/configs/rotated_detection/configuration.yaml +++ b/src/otx/algorithms/detection/configs/rotated_detection/configuration.yaml @@ -96,7 +96,7 @@ learning_parameters: time will be longer. editable: true header: Number of training iterations - max_value: 100000 + max_value: 1000 min_value: 1 type: INTEGER ui_rules: @@ -424,7 +424,7 @@ pot_parameters: description: Number of data samples used for post-training optimization editable: True header: Number of data samples - max_value: 100000 + max_value: 1000 min_value: 1 type: INTEGER ui_rules: diff --git a/src/otx/algorithms/segmentation/configs/base/configuration.py b/src/otx/algorithms/segmentation/configs/base/configuration.py index 02938583ecc..eb21e4b3761 100644 --- a/src/otx/algorithms/segmentation/configs/base/configuration.py +++ b/src/otx/algorithms/segmentation/configs/base/configuration.py @@ -15,8 +15,6 @@ # and limitations under the License. -from sys import maxsize - from attr import attrs from otx.algorithms.common.configs import ( @@ -102,7 +100,7 @@ class __POTParameter(BaseConfig.BasePOTParameter): description="Number of data samples used for post-training optimization", default_value=300, min_value=1, - max_value=maxsize, + max_value=1000, ) preset = selectable( diff --git a/src/otx/algorithms/segmentation/configs/configuration.yaml b/src/otx/algorithms/segmentation/configs/configuration.yaml index 4e4d859ad5e..fa835827add 100644 --- a/src/otx/algorithms/segmentation/configs/configuration.yaml +++ b/src/otx/algorithms/segmentation/configs/configuration.yaml @@ -79,7 +79,7 @@ learning_parameters: time will be longer. editable: true header: Number of training iterations - max_value: 100000 + max_value: 1000 min_value: 1 type: INTEGER ui_rules: @@ -391,7 +391,7 @@ pot_parameters: description: Number of data samples used for post-training optimization editable: True header: Number of data samples - max_value: 100000 + max_value: 1000 min_value: 1 type: INTEGER ui_rules: diff --git a/src/otx/algorithms/visual_prompting/adapters/openvino/model_wrappers/__init__.py b/src/otx/algorithms/visual_prompting/adapters/openvino/model_wrappers/__init__.py index 1c22c536057..d1ed6bc32e2 100644 --- a/src/otx/algorithms/visual_prompting/adapters/openvino/model_wrappers/__init__.py +++ b/src/otx/algorithms/visual_prompting/adapters/openvino/model_wrappers/__init__.py @@ -14,4 +14,4 @@ # See the License for the specific language governing permissions # and limitations under the License. 
-from .openvino_models import Decoder, ImageEncoder # noqa: F401 +from .openvino_models import Decoder, ImageEncoder, PromptGetter # noqa: F401 diff --git a/src/otx/algorithms/visual_prompting/adapters/openvino/model_wrappers/openvino_models.py b/src/otx/algorithms/visual_prompting/adapters/openvino/model_wrappers/openvino_models.py index ee18acd4bd6..1bdc1a473a3 100644 --- a/src/otx/algorithms/visual_prompting/adapters/openvino/model_wrappers/openvino_models.py +++ b/src/otx/algorithms/visual_prompting/adapters/openvino/model_wrappers/openvino_models.py @@ -59,6 +59,20 @@ def preprocess( return dict_inputs, meta +class PromptGetter(ImageModel): + """PromptGetter class for zero-shot visual prompting of openvino model wrapper.""" + + __model__ = "prompt_getter" + + @classmethod + def parameters(cls) -> Dict[str, Any]: # noqa: D102 + parameters = super().parameters() + parameters.update({"image_size": NumericalValue(value_type=int, default_value=1024, min=0, max=2048)}) + parameters.update({"sim_threshold": NumericalValue(value_type=float, default_value=0.5, min=0, max=1)}) + parameters.update({"num_bg_points": NumericalValue(value_type=int, default_value=1, min=0, max=1024)}) + return parameters + + class Decoder(SegmentationModel): """Decoder class for visual prompting of openvino model wrapper.""" @@ -76,12 +90,13 @@ def __init__( def parameters(cls): # noqa: D102 parameters = super().parameters() parameters.update({"image_size": NumericalValue(value_type=int, default_value=1024, min=0, max=2048)}) + parameters.update({"mask_threshold": NumericalValue(value_type=float, default_value=0.0, min=0, max=1)}) return parameters def _get_outputs(self): return "low_res_masks" - def preprocess(self, inputs: Dict[str, Any], meta: Dict[str, Any]): + def preprocess(self, inputs: Dict[str, Any], meta: Dict[str, Any]) -> List[Dict[str, Any]]: """Preprocess prompts.""" processed_prompts = [] # TODO (sungchul): process points @@ -174,7 +189,7 @@ def resize_and_crop(self, soft_prediction: np.ndarray, original_size: np.ndarray ) prepadded_size = self.get_padded_size(original_size, self.image_size).astype(np.int64) - resized_cropped_soft_prediction = resized_soft_prediction[..., : prepadded_size[0], : prepadded_size[1]] + resized_cropped_soft_prediction = resized_soft_prediction[: prepadded_size[0], : prepadded_size[1], ...] 
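+            # NOTE: the soft prediction is assumed to be channel-last (H, W[, C]) here, so the crop above slices the leading spatial dims rather than the trailing ones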
original_size = original_size.astype(np.int64) h, w = original_size diff --git a/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/callbacks/inference.py b/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/callbacks/inference.py index 9aec96bde56..1dc39b7cc3f 100644 --- a/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/callbacks/inference.py +++ b/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/callbacks/inference.py @@ -17,7 +17,6 @@ from typing import Any, List import numpy as np -import torch from bson import ObjectId from pytorch_lightning import LightningModule, Trainer from pytorch_lightning.callbacks import Callback @@ -118,17 +117,19 @@ def on_predict_epoch_end(self, _trainer: Trainer, _pl_module: LightningModule, o output = batch_output[0] annotations: List[Annotation] = [] for label, masks in output.items(): - hard_prediction = torch.where(torch.stack(masks, dim=0).sum(dim=0) > 0, 1, 0) - hard_prediction = hard_prediction.numpy() - - # TODO (sungchul): consider use_mask - # generate polygon annotations - annotation = create_annotation_from_segmentation_map( - hard_prediction=hard_prediction, - soft_prediction=hard_prediction, - label_map={1: self.label_schema.get(label)}, - ) - annotations.extend(annotation) + for soft_prediction in map(lambda x: x.numpy(), masks): + hard_prediction = create_hard_prediction_from_soft_prediction( + soft_prediction=soft_prediction, soft_threshold=0.5 + ) + + # TODO (sungchul): consider use_mask + # generate polygon annotations + annotation = create_annotation_from_segmentation_map( + hard_prediction=hard_prediction, + soft_prediction=soft_prediction, + label_map={1: self.label_schema.get(label)}, + ) + annotations.extend(annotation) # TODO (sungchul): consider use_mask dataset_item.append_annotations(annotations) diff --git a/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/datasets/dataset.py b/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/datasets/dataset.py index 51b78e56880..476a2c09d69 100644 --- a/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/datasets/dataset.py +++ b/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/datasets/dataset.py @@ -207,7 +207,7 @@ def get_prompts(dataset_item: DatasetItemEntity, dataset_labels: List[LabelEntit bboxes = np.array(bboxes) return dict( - original_size=(height, width), + original_size=np.array((height, width), dtype=np.int64), gt_masks=gt_masks, bboxes=bboxes, points=points, # TODO (sungchul): update point information @@ -247,6 +247,20 @@ def __getitem__(self, index: int) -> Dict[str, Union[int, List, Tensor]]: class OTXZeroShotVisualPromptingDataset(OTXVisualPromptingDataset): """Visual Prompting for Zero-shot learning Dataset Adaptor.""" + def __init__( + self, + dataset: DatasetEntity, + image_size: int, + mean: List[float], + std: List[float], + generate_point: bool = False, + generate_bbox: bool = False, + **kwargs, + ) -> None: + super().__init__(dataset, image_size, mean, std, offset_bbox=0) + self.generate_point = generate_point + self.generate_bbox = generate_bbox + def __getitem__(self, index: int) -> Dict[str, Union[int, List, Tensor]]: """Get dataset item. 
@@ -288,7 +302,7 @@ def __init__( self.config = config self.dataset = dataset self.train_type = train_type - # self.kwargs = {} + self.kwargs = {} if self.train_type == TrainType.Zeroshot: # check zero-shot configs if self.config.get("train_batch_size", 1) != 1: @@ -300,12 +314,12 @@ def __init__( ) self.config["train_batch_size"] = 1 - # self.kwargs.update( - # { - # "generate_point": self.config.get("generate_point", False), - # "generate_bbox": self.config.get("generate_bbox", False), - # } - # ) + self.kwargs.update( + { + "generate_point": self.config.get("generate_point", False), + "generate_bbox": self.config.get("generate_bbox", False), + } + ) self.train_otx_dataset: DatasetEntity self.val_otx_dataset: DatasetEntity @@ -331,7 +345,7 @@ def setup(self, stage: Optional[str] = None) -> None: mean=mean, std=std, offset_bbox=self.config.offset_bbox, - # **self.kwargs, + **self.kwargs, ) # self.val_dataset = None @@ -347,11 +361,7 @@ def setup(self, stage: Optional[str] = None) -> None: if stage == "predict": self.predict_dataset = self.DATASETS[self.train_type]( - dataset=self.dataset, - image_size=image_size, - mean=mean, - std=std, - # **self.kwargs + dataset=self.dataset, image_size=image_size, mean=mean, std=std, **self.kwargs ) def summary(self): diff --git a/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/datasets/pipelines/sam_transforms.py b/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/datasets/pipelines/sam_transforms.py index fd9b1a3057b..06d04ea817d 100644 --- a/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/datasets/pipelines/sam_transforms.py +++ b/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/datasets/pipelines/sam_transforms.py @@ -4,8 +4,7 @@ # All rights reserved. # -from copy import deepcopy -from typing import Any, Dict, List, Tuple, Union +from typing import Dict, List, Tuple, Union import numpy as np import torch @@ -38,7 +37,7 @@ def __call__(self, item: Dict[str, Union[List, Tensor]]) -> Dict[str, Union[List self.apply_image(item["images"], self.target_length).transpose((2, 0, 1)), dtype=torch.get_default_dtype() ) item["gt_masks"] = [torch.as_tensor(gt_mask) for gt_mask in item["gt_masks"]] - item["bboxes"] = self.apply_boxes(item["bboxes"], item["original_size"]) + item["bboxes"] = self.apply_boxes(item["bboxes"], item["original_size"], self.target_length) if item["points"]: item["points"] = self.apply_coords(item["points"], item["original_size"], self.target_length) return item @@ -59,41 +58,51 @@ def apply_image(cls, image: np.ndarray, target_length: int) -> np.ndarray: @classmethod def apply_coords( - cls, coords: Union[np.ndarray, Tensor], original_size: Union[List[Any], Tensor], target_length: int - ) -> np.ndarray: - """Expects a numpy array of length 2 in the final dimension. + cls, + coords: Union[np.ndarray, Tensor], + original_size: Union[List[int], Tuple[int, int], Tensor], + target_length: int, + ) -> Union[np.ndarray, Tensor]: + """Expects a numpy array / torch tensor of length 2 in the final dimension. Requires the original image size in (H, W) format. Args: - coords (Union[np.ndarray, Tensor]): Coordinates array. - original_size (Union[List[Any], Tensor]): Original size of image. + coords (Union[np.ndarray, Tensor]): Coordinates array/tensor. + original_size (Union[List[int], Tuple[int, int], Tensor]): Original size of image. target_length (int): The length of the longest side of the image. Returns: - np.ndarray: Resized coordinates. 
+ Union[np.ndarray, Tensor]: Resized coordinates. """ old_h, old_w = original_size new_h, new_w = cls.get_preprocess_shape(original_size[0], original_size[1], target_length) if isinstance(coords, np.ndarray): - coords = deepcopy(coords).astype(np.float32) + coords = coords.astype(float) else: - coords = deepcopy(coords).to(torch.float32) + coords = coords.to(torch.float) coords[..., 0] = coords[..., 0] * (new_w / old_w) coords[..., 1] = coords[..., 1] * (new_h / old_h) return coords - def apply_boxes(self, boxes: np.ndarray, original_size: Union[List[Any], Tensor]) -> np.ndarray: - """Expects a numpy array shape Bx4. Requires the original image size in (H, W) format. + @classmethod + def apply_boxes( + cls, + boxes: Union[np.ndarray, Tensor], + original_size: Union[List[int], Tuple[int, int], Tensor], + target_length: int, + ) -> Union[np.ndarray, Tensor]: + """Expects a numpy array / torch tensor shape Bx4. Requires the original image size in (H, W) format. Args: - boxes (np.ndarray): Boxes array. - original_size (Union[List[Any], Tensor]): Original size of image. + boxes (Union[np.ndarray, Tensor]): Boxes array/tensor. + original_size (Union[List[int], Tuple[int, int], Tensor]): Original size of image. + target_length (int): The length of the longest side of the image. Returns: - np.ndarray: Resized boxes. + Union[np.ndarray, Tensor]: Resized boxes. """ - boxes = self.apply_coords(boxes.reshape(-1, 2, 2), original_size, self.target_length) + boxes = cls.apply_coords(boxes.reshape(-1, 2, 2), original_size, target_length) return boxes.reshape(-1, 4) @staticmethod diff --git a/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/datasets/pipelines/transforms.py b/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/datasets/pipelines/transforms.py index 4b1c507a7f8..f53fb4b3457 100644 --- a/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/datasets/pipelines/transforms.py +++ b/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/datasets/pipelines/transforms.py @@ -23,25 +23,26 @@ def collate_fn(batch: List[Any]) -> Dict: Dict: Collated batch data. """ - def _convert_empty_to_none(x: str) -> List: + def _convert_empty_to_none(x: str, dtype: torch.dtype = torch.float32) -> List: """Convert empty list to None. Args: x (str): Key of batch data. + dtype (torch.dtype): Dtype to be applied to tensors. Returns: List: List of batch data. 
""" func = torch.stack if x == "gt_masks" else torch.tensor - items = [func(item[x]) for item in batch if item[x] is not None] + items = [func(item[x]).to(dtype) for item in batch if item[x] is not None] return None if len(items) == 0 else items index = [item["index"] for item in batch] images = torch.stack([item["images"] for item in batch]) bboxes = _convert_empty_to_none("bboxes") points = None # TBD - gt_masks = _convert_empty_to_none("gt_masks") - original_size = [item["original_size"] for item in batch] + gt_masks = _convert_empty_to_none("gt_masks", torch.int32) + original_size = _convert_empty_to_none("original_size") padding = [item["padding"] for item in batch] path = [item["path"] for item in batch] labels = [item["labels"] for item in batch] diff --git a/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/segment_anything.py b/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/segment_anything.py index 3b84daa72b8..9581d21ab41 100644 --- a/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/segment_anything.py +++ b/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/segment_anything.py @@ -10,7 +10,7 @@ import re from collections import OrderedDict -from typing import Dict, List, Optional, Tuple +from typing import Dict, List, Optional, Tuple, Union import torch from omegaconf import DictConfig @@ -334,24 +334,29 @@ def select_masks(self, masks: Tensor, iou_preds: Tensor, num_points: int) -> Tup return masks, iou_preds - def mask_postprocessing(self, masks: Tensor, orig_size: Tensor) -> Tensor: + @staticmethod + def mask_postprocessing(masks: Tensor, input_size: int, orig_size: Tensor) -> Tensor: """Postprocesses the predicted masks. Args: masks (Tensor): A batch of predicted masks with shape Bx1xHxW. + input_size (int): The size of the image input to the model, in (H, W) format. + Used to remove padding. orig_size (Tensor): The original image size with shape Bx2. Returns: masks (Tensor): The postprocessed masks with shape Bx1xHxW. """ - masks = F.interpolate( - masks, - size=(self.config.model.image_size, self.config.model.image_size), - mode="bilinear", - align_corners=False, - ) - prepadded_size = self.resize_longest_image_size(orig_size, self.config.model.image_size).to(torch.int64) + def resize_longest_image_size(input_image_size: Tensor, longest_side: int) -> Tensor: + scale = longest_side / torch.max(input_image_size) + transformed_size = scale * input_image_size + transformed_size = torch.floor(transformed_size + 0.5).to(torch.int64) + return transformed_size + + masks = F.interpolate(masks, size=(input_size, input_size), mode="bilinear", align_corners=False) + + prepadded_size = resize_longest_image_size(orig_size, input_size) masks = masks[..., : prepadded_size[0], : prepadded_size[1]] # type: ignore orig_size = orig_size.to(torch.int64) @@ -359,22 +364,6 @@ def mask_postprocessing(self, masks: Tensor, orig_size: Tensor) -> Tensor: masks = F.interpolate(masks, size=(h, w), mode="bilinear", align_corners=False) return masks - def resize_longest_image_size(self, input_image_size: Tensor, longest_side: int) -> Tensor: - """Resizes the longest side of the image to the given size. - - Args: - input_image_size (Tensor): The original image size with shape Bx2. - longest_side (int): The size of the longest side. - - Returns: - transformed_size (Tensor): The transformed image size with shape Bx2. 
- """ - input_image_size = input_image_size.to(torch.float32) - scale = longest_side / torch.max(input_image_size) - transformed_size = scale * input_image_size - transformed_size = torch.floor(transformed_size + 0.5).to(torch.int64) - return transformed_size - ###################################################### # forward for training/validation/prediction # ###################################################### @@ -556,8 +545,8 @@ def predict_step(self, batch, batch_idx) -> Dict[str, Tensor]: def postprocess_masks( masks: Tensor, input_size: Tuple[int, int], - padding: Tuple[int, ...], - original_size: Tuple[int, int], + padding: Union[Tuple[int, ...], Tensor], + original_size: Union[Tuple[int, int], Tensor], ) -> Tensor: """Remove padding and upscale masks to the original image size. @@ -565,17 +554,17 @@ def postprocess_masks( masks (Tensor): Predicted masks from the mask_decoder with (N, 1, H/downsized_ratio, W/downsized_ratio). input_size (tuple(int, int)): The size of the image input to the model, in (H, W) format. Used to remove padding. - padding (tuple(int, int, int, int), optional): The padding applied to the image before input to the model, + padding (tuple(int, int, int, int), Tensor): The padding applied to the image before input to the model, in (left, top, right, bottom) format. - original_size (tuple(int, int)): The original size of the image before resizing for input to the model, - in (H, W) format. + original_size (tuple(int, int), Tensor): The original size of the image before resizing + for input to the model, in (H, W) format. Returns: (Tensor): Postprocessed masks in NxHxW format, where (H, W) is given by original_size. """ masks = F.interpolate(masks, input_size, mode="bilinear", align_corners=False) masks = masks[..., : input_size[0] - padding[3], : input_size[1] - padding[2]] - masks = F.interpolate(masks, original_size, mode="bilinear", align_corners=False) + masks = F.interpolate(masks, [int(o) for o in original_size], mode="bilinear", align_corners=False) return masks.squeeze(1) def configure_optimizers(self) -> optim: diff --git a/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/zero_shot_segment_anything.py b/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/zero_shot_segment_anything.py index a915862523c..14c8e5dd6f2 100644 --- a/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/zero_shot_segment_anything.py +++ b/src/otx/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/zero_shot_segment_anything.py @@ -5,14 +5,17 @@ from collections import OrderedDict, defaultdict from copy import deepcopy -from typing import Any, DefaultDict, Dict, List, Optional, Tuple +from itertools import product +from typing import Any, DefaultDict, Dict, List, Optional, Tuple, Union import torch from omegaconf import DictConfig from torch import nn from torch.nn import functional as F -from otx.algorithms.visual_prompting.adapters.pytorch_lightning.datasets.pipelines import ResizeLongestSide +from otx.algorithms.visual_prompting.adapters.pytorch_lightning.datasets.pipelines import ( + ResizeLongestSide, +) from otx.api.entities.scored_label import ScoredLabel from otx.utils.logger import get_logger @@ -27,15 +30,26 @@ class PromptGetter(nn.Module): default_threshold_reference = 0.3 default_threshold_target = 0.65 - def __init__(self, image_size: int) -> None: + def __init__( + self, + image_size: int, + reference_feats: 
Optional[torch.Tensor] = None, + reference_prompts: Optional[torch.Tensor] = None, + downsizing: int = 64, + ) -> None: super().__init__() self.image_size = image_size - self.initialize() + self.downsizing = downsizing + self.initialize(reference_feats, reference_prompts) + + self.zero_tensor = torch.tensor(0) - def initialize(self) -> None: + def initialize( + self, reference_feats: Optional[torch.Tensor] = None, reference_prompts: Optional[torch.Tensor] = None + ) -> None: """Initialize reference features and prompts.""" - self.reference_feats: Dict[int, torch.Tensor] = {} - self.reference_prompts: Dict[int, torch.Tensor] = {} + self.reference_feats = reference_feats + self.reference_prompts = reference_prompts def set_default_thresholds(self, default_threshold_reference: float, default_threshold_target: float) -> None: """Set default thresholds.""" @@ -44,75 +58,134 @@ def set_default_thresholds(self, default_threshold_reference: float, default_thr def set_reference(self, label: ScoredLabel, reference_feats: torch.Tensor, reference_prompts: torch.Tensor) -> None: """Set reference features and prompts.""" - self.reference_feats[int(label.id_)] = reference_feats - self.reference_prompts[int(label.id_)] = reference_prompts + if self.reference_feats is None: + self.reference_feats = torch.zeros_like(reference_feats).unsqueeze(0) + if self.reference_prompts is None: + self.reference_prompts = torch.zeros_like(reference_prompts).unsqueeze(0) + + for idx in range(int(label.id_) + 1): + if idx == int(label.id_): + while self.reference_feats.shape[0] - 1 < idx: + self.reference_feats = torch.cat( + (self.reference_feats, torch.zeros_like(reference_feats).unsqueeze(0)), dim=0 + ) + self.reference_prompts = torch.cat( + (self.reference_prompts, torch.zeros_like(reference_prompts).unsqueeze(0)), dim=0 + ) + self.reference_feats[idx] = reference_feats + self.reference_prompts[idx] = reference_prompts def forward( self, image_embeddings: torch.Tensor, - padding: Tuple[int, ...], - original_size: Tuple[int, int], - ) -> Dict[int, Tuple[torch.Tensor, torch.Tensor]]: + original_size: torch.Tensor, + threshold: torch.Tensor = torch.tensor([[0.0]], dtype=torch.float32), + num_bg_points: torch.Tensor = torch.tensor([[1]], dtype=torch.int64), + ) -> Tuple[torch.Tensor, torch.Tensor]: """Get prompt candidates.""" + total_points_scores: torch.Tensor + total_bg_coords: torch.Tensor + + device = image_embeddings.device + threshold = threshold.to(device) + for label in torch.arange(self.reference_feats.shape[0]): + points_scores, bg_coords = self.get_prompt_candidates( + image_embeddings=image_embeddings, + label=label, + original_size=original_size, + threshold=threshold, + num_bg_points=num_bg_points, + device=device, + ) + if label == 0: + total_points_scores = points_scores.unsqueeze(0) + total_bg_coords = bg_coords.unsqueeze(0) + else: + pad_size = torch.tensor(points_scores.shape[0] - total_points_scores.shape[1]) + pad_tot = torch.max(self.zero_tensor, pad_size) + pad_cur = torch.max(self.zero_tensor, -pad_size) + + total_points_scores = F.pad(total_points_scores, (0, 0, 0, pad_tot, 0, 0), value=-1) + points_scores = F.pad(points_scores, (0, 0, 0, pad_cur), value=-1) + + total_points_scores = torch.cat((total_points_scores, points_scores.unsqueeze(0)), dim=0) + total_bg_coords = torch.cat((total_bg_coords, bg_coords.unsqueeze(0)), dim=0) + + return total_points_scores, total_bg_coords + + def get_prompt_candidates( + self, + image_embeddings: torch.Tensor, + label: torch.Tensor, + original_size: 
torch.Tensor, + threshold: torch.Tensor = torch.tensor([[0.0]], dtype=torch.float32), + num_bg_points: torch.Tensor = torch.tensor([[1]], dtype=torch.int64), + device: torch.device = torch.device("cpu"), + ) -> Tuple[torch.Tensor, torch.Tensor]: + """Get prompt candidates from given reference and target features.""" + assert original_size.dim() == 2 and threshold.dim() == 2 and num_bg_points.dim() == 2 + target_feat = image_embeddings.squeeze() c_feat, h_feat, w_feat = target_feat.shape - target_feat = self._preprocess_target_feat(target_feat, c_feat, h_feat, w_feat) - - prompts = {} - for label, reference_feat in self.reference_feats.items(): - sim = reference_feat.to(target_feat.device) @ target_feat - sim = sim.reshape(1, 1, h_feat, w_feat) - sim = ZeroShotSegmentAnything.postprocess_masks( - sim, (self.image_size, self.image_size), padding, original_size - ).squeeze() - - # threshold = 0.85 * sim.max() if num_classes > 1 else self.default_threshold_target - threshold = self.default_threshold_target - points_scores, bg_coords = self._point_selection(sim, original_size, threshold) - if points_scores is None: - # skip if there is no point with score > threshold - continue - prompts[label] = (points_scores, bg_coords) - return prompts - - def _preprocess_target_feat(self, target_feat: torch.Tensor, c_feat: int, h_feat: int, w_feat: int) -> torch.Tensor: target_feat = target_feat / target_feat.norm(dim=0, keepdim=True) target_feat = target_feat.reshape(c_feat, h_feat * w_feat) - return target_feat + + sim = self.reference_feats[label].to(device) @ target_feat + sim = sim.reshape(1, 1, h_feat, w_feat) + sim = ZeroShotSegmentAnything.mask_postprocessing(sim, self.image_size, original_size[0]) + + threshold = (threshold == 0) * self.default_threshold_target + threshold + points_scores, bg_coords = self._point_selection( + mask_sim=sim[0, 0], + original_size=original_size[0], + threshold=threshold, + num_bg_points=num_bg_points, + ) + + return points_scores, bg_coords def _point_selection( self, mask_sim: torch.Tensor, - original_size: Tuple[int, int], - threshold: float, - num_bg_points: int = 1, - downsizing: int = 16, + original_size: torch.Tensor, + threshold: torch.Tensor, + num_bg_points: torch.Tensor = torch.tensor([[1]], dtype=torch.int64), ) -> Tuple[torch.Tensor, torch.Tensor]: """Select point used as point prompts.""" _, w_sim = mask_sim.shape # Top-last point selection - bg_indices = mask_sim.flatten().topk(num_bg_points, largest=False)[1] + bg_indices = mask_sim.flatten().topk(num_bg_points[0, 0], largest=False)[1] bg_x = (bg_indices // w_sim).unsqueeze(0) bg_y = bg_indices - bg_x * w_sim bg_coords = torch.cat((bg_y, bg_x), dim=0).permute(1, 0) - bg_coords = bg_coords + bg_coords = bg_coords.to(torch.float32) point_coords = torch.where(mask_sim > threshold) - if len(point_coords[0]) == 0: - return None, None - fg_coords_scores = torch.stack(point_coords[::-1] + (mask_sim[point_coords],), dim=0).T - max_len = max(original_size) - ratio = self.image_size / max_len - _, width = map(lambda x: int(x * ratio), original_size) - n_w = width // downsizing + ratio = self.image_size / original_size.max() + width = (original_size[1] * ratio).to(torch.int64) + n_w = width // self.downsizing - res = (fg_coords_scores[:, 1] * ratio // downsizing * n_w + fg_coords_scores[:, 0] * ratio // downsizing).to( - torch.int32 + # get grid numbers + idx_grid = ( + fg_coords_scores[:, 1] * ratio // self.downsizing * n_w + fg_coords_scores[:, 0] * ratio // self.downsizing ) - points_scores = 
torch.stack([fg_coords_scores[res == r][0] for r in torch.unique(res)], dim=0) +        idx_grid_unique = torch.unique( +            idx_grid.to(torch.int64) +        )  # unique op only supports INT64, INT8, FLOAT, STRING in ORT + +        # get matched indices +        matched_matrix = idx_grid.unsqueeze(-1) == idx_grid_unique  # (totalN, uniqueN) + +        # mask fg_coords_scores with matched_matrix to group candidates by grid cell +        matched_grid = fg_coords_scores.unsqueeze(1) * matched_matrix.unsqueeze(-1) + +        # keep the highest-scoring candidate among those falling in the same grid cell +        points_scores = matched_grid[matched_grid[..., -1].argsort(dim=0, descending=True)[0]].diagonal().T + +        # sort points by score in descending order         points_scores = points_scores[torch.argsort(points_scores[:, -1], descending=True)]          return points_scores, bg_coords @@ -147,16 +220,18 @@ def __init__(self, config: Optional[DictConfig] = None, state_dict: Optional[Ord          super().__init__(config, state_dict)  -        self.prompt_getter = PromptGetter(image_size=config.model.image_size) -        self.prompt_getter.initialize() +        self.prompt_getter = PromptGetter( +            image_size=config.model.image_size, +            reference_feats=prompt_getter_reference_feats, +            reference_prompts=prompt_getter_reference_prompts, +        )         self.prompt_getter.set_default_thresholds( -            config.model.default_threshold_reference, config.model.default_threshold_target +            default_threshold_reference=config.model.default_threshold_reference, +            default_threshold_target=config.model.default_threshold_target,         )  -        if prompt_getter_reference_feats: -            self.prompt_getter.reference_feats = prompt_getter_reference_feats -        if prompt_getter_reference_prompts: -            self.prompt_getter.reference_prompts = prompt_getter_reference_prompts +        self.point_labels_box = torch.tensor([[2, 3]], dtype=torch.float32) +        self.has_mask_inputs = [torch.tensor([[0.0]]), torch.tensor([[1.0]])]      def set_default_config(self) -> DictConfig:         """Set default config when using independently.""" @@ -181,8 +256,8 @@ def learn(         self,         images: torch.Tensor,         processed_prompts: Dict[ScoredLabel, List[Dict[str, torch.Tensor]]], -        padding: Tuple[int, ...], -        original_size: Tuple[int, int], +        padding: Union[Tuple[int, ...], torch.Tensor], +        original_size: torch.Tensor,     ) -> None:         """Get reference features. @@ -194,8 +269,8 @@             images (torch.Tensor): Given images for reference features.             processed_prompts (Dict[ScoredLabel, List[Dict[str, torch.Tensor]]]): The whole class-wise prompts                 processed at _preprocess_prompts. -            padding (Tuple[int, ...]): Padding size. -            original_size (Tuple[int, int]): Original image size. +            padding (Union[Tuple[int, ...], torch.Tensor]): Padding size. +            original_size (torch.Tensor): Original image size.         """         assert images.shape[0] == 1, "Only single batch is supported." @@ -212,25 +287,43 @@              # generate reference mask             # TODO (sungchul): ensemble multiple reference features (currently: use merged masks) -            reference_prompt = torch.zeros(original_size, dtype=torch.uint8, device=images.device) +            reference_prompt = torch.zeros(*map(int, original_size), dtype=torch.uint8, device=self.device)             for input_prompt in input_prompts:                 if "annotation" in input_prompt:                     # directly use annotation information as a mask                     reference_prompt[input_prompt.get("annotation") == 1] += 1                 else:                     merged_input_prompts = self._merge_prompts(label, input_prompt, processed_prompts) -                    masks, scores, logits = self._predict_mask( +                    # TODO (sungchul): these conversions should be handled in `_merge_prompts` +                    # and extended to support other prompt types.
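+                    # each box prompt contributes its two corner points, tagged with SAM's box-corner point labels (2: top-left, 3: bottom-right)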
+ point_coords = [] + point_labels = [] + if "box" in merged_input_prompts: + for box in merged_input_prompts["box"]: + point_coords.append(box[:2]) + point_labels.append(2) + point_coords.append(box[2:]) + point_labels.append(3) + + if "points" in merged_input_prompts: + raise NotImplementedError() + + if "annotations" in merged_input_prompts: + raise NotImplementedError() + + point_coords = torch.stack(point_coords, dim=0).unsqueeze(0) + point_labels = torch.tensor([point_labels], device=self.device) + masks = self._predict_masks( image_embeddings=image_embeddings, - input_prompts=merged_input_prompts, - padding=padding, + point_coords=point_coords, + point_labels=point_labels, original_size=original_size, - multimask_output=True, + is_cascade=False, ) - best_idx = torch.argmax(scores) - reference_prompt[masks[0, best_idx]] += 1 + reference_prompt[masks] += 1 reference_prompt = torch.clip(reference_prompt, 0, 1) - ref_mask = torch.tensor(reference_prompt, dtype=torch.float32) + ref_mask = reference_prompt.to(torch.float32) reference_feat = None default_threshold_reference = deepcopy(self.prompt_getter.default_threshold_reference) while reference_feat is None: @@ -240,11 +333,11 @@ def learn( ) default_threshold_reference -= 0.05 - self.prompt_getter.set_reference(label, reference_feat.detach().cpu(), reference_prompt.detach().cpu()) + self.prompt_getter.set_reference(label, reference_feat, reference_prompt) @torch.no_grad() def infer( - self, images: torch.Tensor, padding: Tuple[int, ...], original_size: Tuple[int, int] + self, images: torch.Tensor, original_size: torch.Tensor ) -> List[List[DefaultDict[int, List[torch.Tensor]]]]: """Zero-shot inference with reference features. @@ -252,8 +345,7 @@ def infer( Args: images (torch.Tensor): Given images for target results. - padding (Tuple[int, ...]): Padding size. - original_size (Tuple[int, int]): Original image size. + original_size (torch.Tensor): Original image size. Returns: (List[List[DefaultDict[int, List[torch.Tensor]]]]): Target results. @@ -264,20 +356,21 @@ def infer( assert images.shape[0] == 1, "Only single batch is supported." 
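+        # each entry of total_results pairs label-indexed predicted masks with the point prompts that produced them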
total_results = [] - # num_classes = len(self.reference_feats.keys()) for image in images: if image.ndim == 3: image = image.unsqueeze(0) image_embeddings = self.image_encoder(images) - prompts = self.prompt_getter( - image_embeddings=image_embeddings, padding=padding, original_size=original_size + total_points_scores, total_bg_coords = self.prompt_getter( + image_embeddings=image_embeddings, original_size=original_size ) predicted_masks: defaultdict = defaultdict(list) used_points: defaultdict = defaultdict(list) - for label, (points_scores, bg_coords) in prompts.items(): + for label, (points_scores, bg_coords) in enumerate(zip(total_points_scores, total_bg_coords)): for points_score in points_scores: + if points_score[-1] == -1: + continue x, y = points_score[:2] is_done = False for pm in predicted_masks.get(label, []): @@ -288,51 +381,117 @@ def infer( if is_done: continue - mask, used_point_score = self( + point_coords = torch.cat((points_score[:2].unsqueeze(0), bg_coords), dim=0).unsqueeze(0) + point_coords = ResizeLongestSide.apply_coords( + point_coords, original_size[0], self.config.model.image_size + ) + point_labels = torch.tensor( + [1] + [0] * len(bg_coords), dtype=torch.float32, device=self.device + ).unsqueeze(0) + mask = self._predict_masks( image_embeddings=image_embeddings, - points_score=points_score, - bg_coords=bg_coords, - padding=padding, - original_size=original_size, + point_coords=point_coords, + point_labels=point_labels, + original_size=original_size[0], ) - predicted_masks[label].append(mask) - used_points[label].append(used_point_score) + predicted_masks[label].append((mask * points_score[2]).detach().cpu()) + used_points[label].append(points_score.detach().cpu()) + # check overlapping area between different label masks + self.__inspect_overlapping_areas(predicted_masks, used_points) total_results.append([predicted_masks, used_points]) return total_results - @torch.no_grad() - def forward( + def __inspect_overlapping_areas( self, - image_embeddings: torch.Tensor, - points_score: torch.Tensor, - bg_coords: torch.Tensor, - padding: Tuple[int, ...], - original_size: Tuple[int, int], - ) -> Tuple[torch.Tensor, torch.Tensor]: - """Predict point prompts and predicted masks. + predicted_masks: Dict[int, List[torch.Tensor]], + used_points: Dict[int, List[torch.Tensor]], + threshold_iou: float = 0.8, + ): + def __calculate_mask_iou(mask1: torch.Tensor, mask2: torch.Tensor): + assert mask1.ndim == 2 and mask2.ndim == 2 + intersection = torch.logical_and(mask1, mask2).sum().item() + union = torch.logical_or(mask1, mask2).sum().item() + + # Avoid division by zero + if union == 0: + return 0.0 + iou = intersection / union + return iou + + for (label, masks), (other_label, other_masks) in product(predicted_masks.items(), predicted_masks.items()): + if other_label <= label: + continue - Args: - image_embeddings (torch.Tensor): The image embedding with a batch index of length 1. - points_score (torch.Tensor): Foreground point prompts from point selection algorithm. - bg_coords (torch.Tensor): Background point prompts from point selection algorithm. - padding (Tuple[int, ...]): Padding size. - original_size (Tuple[int, int]): Original image size. 
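+            # collect indices of lower-scoring masks that overlap a higher-scoring mask of the other label beyond threshold_iou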
+ overlapped_label = [] + overlapped_other_label = [] + for (im, mask), (jm, other_mask) in product(enumerate(masks), enumerate(other_masks)): + if __calculate_mask_iou(mask, other_mask) > threshold_iou: + if used_points[label][im][2] > used_points[other_label][jm][2]: + overlapped_other_label.append(jm) + else: + overlapped_label.append(im) - Returns: - (Tuple[torch.Tensor, torch.Tensor]): Predicted masks and used points with corresponding score. - """ - point_coords = torch.cat((points_score[:2].unsqueeze(0), bg_coords), dim=0).unsqueeze(0) - point_coords = ResizeLongestSide.apply_coords(point_coords, original_size, self.config.model.image_size) - point_labels = torch.tensor([1] + [0] * len(bg_coords), dtype=torch.int32).unsqueeze(0) - mask = self._predict_target_mask( - image_embeddings=image_embeddings, - input_prompts={"points": (point_coords, point_labels)}, - padding=padding, - original_size=original_size, - ) + for im in overlapped_label[::-1]: + masks.pop(im) + used_points[label].pop(im) + + for jm in overlapped_other_label[::-1]: + other_masks.pop(jm) + used_points[other_label].pop(jm) + + def _predict_masks( + self, + image_embeddings: torch.Tensor, + point_coords: torch.Tensor, + point_labels: torch.Tensor, + original_size: torch.Tensor, + is_cascade: bool = True, + ) -> torch.Tensor: + """Predict target masks.""" + logits: torch.Tensor + scores: torch.Tensor + for i in range(3): + if i == 0: + # First-step prediction + mask_input = torch.zeros(1, 1, *map(lambda x: x * 4, image_embeddings.shape[2:]), device=self.device) + has_mask_input = self.has_mask_inputs[0].to(self.device) + + elif is_cascade and i == 1: + # Cascaded Post-refinement-1 + mask_input, masks = self._postprocess_masks(logits, scores, original_size, is_single=True) # noqa: F821 + if masks.sum() == 0: + return masks + + has_mask_input = self.has_mask_inputs[1].to(self.device) + + elif is_cascade and i == 2: + # Cascaded Post-refinement-2 + mask_input, masks = self._postprocess_masks(logits, scores, original_size) # noqa: F821 + if masks.sum() == 0: + return masks + + has_mask_input = self.has_mask_inputs[1].to(self.device) + coords = torch.nonzero(masks) + y, x = coords[:, 0], coords[:, 1] + box_coords = ResizeLongestSide.apply_coords( + torch.tensor([[[x.min(), y.min()], [x.max(), y.max()]]], dtype=torch.float32, device=self.device), + original_size, + self.config.model.image_size, + ) + point_coords = torch.cat((point_coords, box_coords), dim=1) + point_labels = torch.cat((point_labels, self.point_labels_box.to(self.device)), dim=1) + + scores, logits = self( + image_embeddings=image_embeddings, + point_coords=point_coords, + point_labels=point_labels, + mask_input=mask_input, + has_mask_input=has_mask_input, + ) - return mask.detach().cpu().to(torch.uint8), points_score.detach().cpu() + _, masks = self._postprocess_masks(logits, scores, original_size) + return masks def training_step(self, batch, batch_idx) -> None: """Training step for `learn`.""" @@ -355,9 +514,7 @@ def training_step(self, batch, batch_idx) -> None: def predict_step(self, batch, batch_idx): """Predict step for `infer`.""" - results = self.infer( - images=batch["images"], padding=batch.get("padding")[0], original_size=batch.get("original_size")[0] - ) + results = self.infer(images=batch["images"], original_size=batch.get("original_size")[0].unsqueeze(0)) return [result[0] for result in results] # tmp: only mask def _preprocess_prompts( @@ -399,7 +556,11 @@ def _preprocess_prompts( return processed_prompts def _generate_masked_features( - 
self, feats: torch.Tensor, masks: torch.Tensor, threshold_mask: float, padding: Optional[Tuple[int, ...]] = None + self, + feats: torch.Tensor, + masks: torch.Tensor, + threshold_mask: float, + padding: Optional[Union[Tuple[int, ...], torch.Tensor]] = None, ) -> Tuple[torch.Tensor, ...]: """Generate masked features. @@ -407,7 +568,7 @@ def _generate_masked_features( feats (torch.Tensor): Raw reference features. It will be filtered with masks. masks (torch.Tensor): Reference masks used to filter features. threshold_mask (float): Threshold to control masked region. - padding (Tuple[int, ...], optional): Padding size. + padding (Union[Tuple[int, ...], torch.Tensor], optional): Padding size. Returns: (torch.Tensor): Masked features. @@ -422,7 +583,7 @@ def _generate_masked_features( # Post-process masks masks = F.interpolate(masks.unsqueeze(0).unsqueeze(0), size=resized_size, mode="bilinear").squeeze() - masks = self._preprocess_mask(masks) + masks = self._preprocess_masks(masks) masks = F.interpolate(masks.unsqueeze(0).unsqueeze(0), size=feats.shape[0:2], mode="bilinear").squeeze() # Target feature extraction @@ -436,7 +597,7 @@ def _generate_masked_features( return masked_feat - def _preprocess_mask(self, x: torch.Tensor) -> torch.Tensor: + def _preprocess_masks(self, x: torch.Tensor) -> torch.Tensor: """Normalize pixel values and pad to a square input. Args: @@ -452,6 +613,36 @@ def _preprocess_mask(self, x: torch.Tensor) -> torch.Tensor: x = F.pad(x, (0, padw, 0, padh)) return x + def _postprocess_masks( + self, + logits: torch.Tensor, + scores: torch.Tensor, + original_size: torch.Tensor, + is_single: bool = False, + ): + """Post-process masks for cascaded post-refinements.""" + high_res_masks = self.mask_postprocessing(logits, self.config.model.image_size, original_size) + masks = high_res_masks > self.config.model.mask_threshold + + if is_single: + best_idx = 0 + else: + # skip the first index components + scores, masks, logits = map(lambda x: x[:, 1:], (scores, masks, logits)) + + # filter zero masks + while len(scores[0]) > 0 and masks[0, (best_idx := torch.argmax(scores[0]))].sum() == 0: + scores, masks, logits = map( + lambda x: torch.cat((x[:, :best_idx], x[:, best_idx + 1 :]), dim=1), (scores, masks, logits) + ) + + if len(scores[0]) == 0: + # all predicted masks were zero masks, ignore them. + return None, torch.zeros((self.config.model.image_size, self.config.model.image_size), device="cpu") + + best_idx = torch.argmax(scores[0]) + return logits[:, best_idx], masks[0, best_idx] + def _update_value(self, target: Dict[str, Any], key: str, value: torch.Tensor) -> None: """Update tensor to target dictionary. @@ -506,98 +697,6 @@ def _merge_prompts( ) return merged_input_prompts - def _predict_target_mask( - self, - image_embeddings: torch.Tensor, - input_prompts: Dict[str, Tuple[torch.Tensor, torch.Tensor]], - padding: Tuple[int, ...], - original_size: Tuple[int, int], - ) -> torch.Tensor: - """Predict target masks. - - Args: - image_embeddings (torch.Tensor): The image embedding with a batch index of length 1. - input_prompts (Dict[str, Tuple[torch.Tensor, torch.Tensor]]): Dictionary including point, box, - and mask prompts. index=1 of tuple is point labels which indicate whether foreground or background. - padding (Tuple[int, ...]): Padding size. - original_size (Tuple[int, int]): Original image size. - - Return: - (torch.Tensor): Predicted mask. 
- """ - # First-step prediction - _, _, logits = self._predict_mask( - image_embeddings, input_prompts, padding, original_size, multimask_output=False - ) - best_idx = 0 - - # Cascaded Post-refinement-1 - input_prompts.update({"masks": logits[:, best_idx : best_idx + 1, :, :]}) - masks, scores, logits = self._predict_mask( - image_embeddings, input_prompts, padding, original_size, multimask_output=True - ) - best_idx = torch.argmax(scores) - - # Cascaded Post-refinement-2 - coords = torch.nonzero(masks[0, best_idx]) - y, x = coords[:, 0], coords[:, 1] - x_min = x.min() - x_max = x.max() - y_min = y.min() - y_max = y.max() - input_prompts.update( - { - "masks": logits[:, best_idx : best_idx + 1, :, :], - "box": torch.tensor([x_min, y_min, x_max, y_max], device=logits.device), - } - ) - masks, scores, _ = self._predict_mask( - image_embeddings, input_prompts, padding, original_size, multimask_output=True - ) - best_idx = torch.argmax(scores) - - return masks[0, best_idx] - - def _predict_mask( - self, - image_embeddings: torch.Tensor, - input_prompts: Dict[str, torch.Tensor], - padding: Tuple[int, ...], - original_size: Tuple[int, int], - multimask_output: bool = True, - ) -> Tuple[torch.Tensor, ...]: - """Predict target masks. - - Args: - image_embeddings (torch.Tensor): The image embedding with a batch index of length 1. - input_prompts (Dict[str, torch.Tensor]): Dictionary including point, box, and mask prompts. - padding (Tuple[int, ...]): Padding size. - original_size (Tuple[int, int]): Original image size. - multimask_output (bool): Whether getting multi mask outputs or not. Defaults to True. - - Return: - (Tuple[torch.Tensor, ...]): Predicted mask, score, and logit. - """ - sparse_embeddings, dense_embeddings = self.prompt_encoder( - points=input_prompts.get("points", None), - boxes=input_prompts.get("box", None), # TODO (sungchul): change key box -> boxes to use **input_prompts - masks=input_prompts.get("masks", None), - ) - - low_res_masks, scores = self.mask_decoder( - image_embeddings=image_embeddings, - image_pe=self.prompt_encoder.get_dense_pe(), - sparse_prompt_embeddings=sparse_embeddings, - dense_prompt_embeddings=dense_embeddings, - multimask_output=multimask_output, - ) - high_res_masks = self.postprocess_masks( - low_res_masks, (self.config.model.image_size, self.config.model.image_size), padding, original_size - ) - masks = high_res_masks > self.config.model.mask_threshold - - return masks, scores, low_res_masks - def set_metrics(self) -> None: """Skip set_metrics unused in zero-shot learning.""" pass diff --git a/src/otx/algorithms/visual_prompting/configs/base/configuration.py b/src/otx/algorithms/visual_prompting/configs/base/configuration.py index 44998684aec..d7383c28c69 100644 --- a/src/otx/algorithms/visual_prompting/configs/base/configuration.py +++ b/src/otx/algorithms/visual_prompting/configs/base/configuration.py @@ -102,6 +102,36 @@ class __Postprocessing(ParameterGroup): affects_outcome_of=ModelLifecycle.INFERENCE, ) + mask_threshold = configurable_float( + default_value=0.0, + header="Mask threshold", + description=( + "The threshold to apply to the raw logit output of the model, for each pixel. " + "A higher value means a stricter segmentation prediction." 
+ ), + min_value=0.0, + max_value=1.0, + affects_outcome_of=ModelLifecycle.INFERENCE, + ) + + sim_threshold = configurable_float( + default_value=0.65, + header="Similarity threshold", + description="The threshold to filter point candidates based on similarity scores.", + min_value=0.0, + max_value=1.0, + affects_outcome_of=ModelLifecycle.INFERENCE, + ) + + num_bg_points = configurable_integer( + default_value=1, + header="The number of background points", + description="The number of background points to be used as negative prompts.", + min_value=1, + max_value=1024, + affects_outcome_of=ModelLifecycle.INFERENCE, + ) + @attrs class __POTParameter(BaseConfig.BasePOTParameter): header = string_attribute("POT Parameters") diff --git a/src/otx/algorithms/visual_prompting/configs/configuration.yaml b/src/otx/algorithms/visual_prompting/configs/configuration.yaml index 86ea7154d7d..40187dffd2d 100644 --- a/src/otx/algorithms/visual_prompting/configs/configuration.yaml +++ b/src/otx/algorithms/visual_prompting/configs/configuration.yaml @@ -117,7 +117,7 @@ pot_parameters: description: Number of data samples used for post-training optimization editable: true header: Number of data samples - max_value: 100000 + max_value: 1000 min_value: 1 type: INTEGER ui_rules: diff --git a/src/otx/algorithms/visual_prompting/configs/zero_shot_sam_tiny_vit/config.yaml b/src/otx/algorithms/visual_prompting/configs/zero_shot_sam_tiny_vit/config.yaml index bd923e0b6b7..097390fba0f 100644 --- a/src/otx/algorithms/visual_prompting/configs/zero_shot_sam_tiny_vit/config.yaml +++ b/src/otx/algorithms/visual_prompting/configs/zero_shot_sam_tiny_vit/config.yaml @@ -15,6 +15,8 @@ dataset: - 57.12 - 57.375 offset_bbox: 0 + generate_point: false + generate_bbox: false model: name: SAM diff --git a/src/otx/algorithms/visual_prompting/configs/zero_shot_sam_tiny_vit/configuration.yaml b/src/otx/algorithms/visual_prompting/configs/zero_shot_sam_tiny_vit/configuration.yaml index e88c783c396..917aba5f0c5 100644 --- a/src/otx/algorithms/visual_prompting/configs/zero_shot_sam_tiny_vit/configuration.yaml +++ b/src/otx/algorithms/visual_prompting/configs/zero_shot_sam_tiny_vit/configuration.yaml @@ -92,7 +92,7 @@ pot_parameters: description: Number of data samples used for post-training optimization editable: true header: Number of data samples - max_value: 100000 + max_value: 1000 min_value: 1 type: INTEGER ui_rules: diff --git a/src/otx/algorithms/visual_prompting/configs/zero_shot_sam_tiny_vit/template_experimental.yaml b/src/otx/algorithms/visual_prompting/configs/zero_shot_sam_tiny_vit/template_experimental.yaml index 63ff5d3d9d4..d29f564e46a 100644 --- a/src/otx/algorithms/visual_prompting/configs/zero_shot_sam_tiny_vit/template_experimental.yaml +++ b/src/otx/algorithms/visual_prompting/configs/zero_shot_sam_tiny_vit/template_experimental.yaml @@ -13,7 +13,7 @@ framework: OTXVisualPrompting v0.1.0 # Task implementations. 
entrypoints: base: otx.algorithms.visual_prompting.tasks.ZeroShotTask - openvino: otx.algorithms.visual_prompting.tasks.openvino.OpenVINOVisualPromptingTask + openvino: otx.algorithms.visual_prompting.tasks.openvino.OpenVINOZeroShotVisualPromptingTask # Hyper Parameters hyper_parameters: @@ -34,5 +34,5 @@ training_targets: - CPU # Computational Complexity -gigaflops: 38.95 -size: 47 +gigaflops: 38.18 +size: 25 diff --git a/src/otx/algorithms/visual_prompting/tasks/inference.py b/src/otx/algorithms/visual_prompting/tasks/inference.py index ea8a1fbf869..1123cd20c87 100644 --- a/src/otx/algorithms/visual_prompting/tasks/inference.py +++ b/src/otx/algorithms/visual_prompting/tasks/inference.py @@ -19,15 +19,16 @@ import json import os import shutil -import subprocess import tempfile import time import warnings from collections import OrderedDict -from typing import Dict, List, Optional, Union +from typing import Any, Dict, List, Optional, Union +import openvino as ov import torch from omegaconf import DictConfig, ListConfig +from openvino.tools import mo from pytorch_lightning import LightningModule, Trainer from pytorch_lightning.callbacks import TQDMProgressBar from pytorch_lightning.loggers import CSVLogger @@ -284,7 +285,7 @@ def _export_to_onnx(self, onnx_path: Dict[str, str]): height = width = self.config.model.image_size for module, path in onnx_path.items(): if module == "visual_prompting_image_encoder": - dummy_inputs = {"images": torch.randn(1, 3, height, width, dtype=torch.float)} + dummy_inputs = {"images": torch.randn(1, 3, height, width, dtype=torch.float32)} output_names = ["image_embeddings"] dynamic_axes = None model_to_export = self.model.image_encoder @@ -299,11 +300,11 @@ def _export_to_onnx(self, onnx_path: Dict[str, str]): "point_labels": {1: "num_points"}, } dummy_inputs = { - "image_embeddings": torch.zeros(1, embed_dim, *embed_size, dtype=torch.float), - "point_coords": torch.randint(low=0, high=1024, size=(1, 2, 2), dtype=torch.float), - "point_labels": torch.randint(low=0, high=4, size=(1, 2), dtype=torch.float), - "mask_input": torch.randn(1, 1, *mask_input_size, dtype=torch.float), - "has_mask_input": torch.tensor([[1]], dtype=torch.float), + "image_embeddings": torch.zeros(1, embed_dim, *embed_size, dtype=torch.float32), + "point_coords": torch.randint(low=0, high=1024, size=(1, 2, 2), dtype=torch.float32), + "point_labels": torch.randint(low=0, high=4, size=(1, 2), dtype=torch.float32), + "mask_input": torch.randn(1, 1, *mask_input_size, dtype=torch.float32), + "has_mask_input": torch.tensor([[1]], dtype=torch.float32), } output_names = ["iou_predictions", "low_res_masks"] model_to_export = self.model @@ -381,25 +382,19 @@ def export( # noqa: D102 output_model.set_data(f"{module}.onnx", file.read()) else: for module, path in onnx_path.items(): - optimize_command = [ - "mo", - "--input_model", - path, - "--output_dir", - self.output_path, - "--model_name", - module, - ] + mo_args: Dict[str, Any] = {"input_model": path} if module == "visual_prompting_image_encoder": - optimize_command += [ - "--mean_values", - str(self.config.dataset.normalize.mean).replace(", ", ","), - "--scale_values", - str(self.config.dataset.normalize.std).replace(", ", ","), - ] + mo_args.update( + { + "mean_values": list(self.config.dataset.normalize.mean), + "scale_values": list(self.config.dataset.normalize.std), + } + ) if precision == ModelPrecision.FP16: - optimize_command.append("--compress_to_fp16") - subprocess.run(optimize_command, check=True) + mo_args.update({"compress_to_fp16": 
True}) + + ov_model = mo.convert_model(**mo_args) + ov.save_model(ov_model, os.path.join(self.output_path, f"{module}.xml")) with open(path.replace(".onnx", ".bin"), "rb") as file: output_model.set_data(f"{module}.bin", file.read()) with open(path.replace(".onnx", ".xml"), "rb") as file: output_model.set_data(f"{module}.xml", file.read()) @@ -547,6 +542,159 @@ def infer(self, dataset: DatasetEntity, inference_parameters: InferenceParameter return inference_callback.otx_dataset + def export( # noqa: D102 + self, + export_type: ExportType, + output_model: ModelEntity, + precision: ModelPrecision = ModelPrecision.FP32, + dump_features: bool = False, + ) -> None: + """Export model to OpenVINO IR. + + When SAM gets an image for inference, image encoder runs just once to get image embedding. + After that, prompt encoder + mask decoder runs repeatedly to get mask prediction. + For this case, SAM should be divided into two parts, image encoder and prompt encoder + mask decoder. + + Args: + export_type (ExportType): Export type should be ExportType.OPENVINO + output_model (ModelEntity): The model entity in which to write the OpenVINO IR data + precision (ModelPrecision): Output model weights and inference precision + dump_features (bool): Flag to return "feature_vector" and "saliency_map". + + Raises: + RuntimeError: If export_type is not supported or FP16 precision is requested for ONNX export + """ + if dump_features: + logger.warning( + "Feature dumping is not implemented for the visual prompting task. " + "The saliency maps and representation vector outputs will not be dumped in the exported model." + ) + + self.model = self.load_model(otx_model=self.task_environment.model) + if export_type == ExportType.ONNX: + output_model.model_format = ModelFormat.ONNX + output_model.optimization_type = ModelOptimizationType.ONNX + if precision == ModelPrecision.FP16: + raise RuntimeError("Export to FP16 ONNX is not supported") + elif export_type == ExportType.OPENVINO: + output_model.model_format = ModelFormat.OPENVINO + output_model.optimization_type = ModelOptimizationType.MO + else: + raise RuntimeError(f"Unsupported export type: {export_type}") + + self.precision[0] = precision + output_model.has_xai = dump_features + + logger.info("Exporting to the OpenVINO model.") + onnx_path = { + "visual_prompting_image_encoder": os.path.join(self.output_path, "visual_prompting_image_encoder.onnx"), + "visual_prompting_prompt_getter": os.path.join(self.output_path, "visual_prompting_prompt_getter.onnx"), + "visual_prompting_decoder": os.path.join(self.output_path, "visual_prompting_decoder.onnx"), + } + self._export_to_onnx(onnx_path) + + if export_type == ExportType.ONNX: + for module, path in onnx_path.items(): + with open(path, "rb") as file: + output_model.set_data(f"{module}.onnx", file.read()) + else: + for module, path in onnx_path.items(): + mo_args: Dict[str, Any] = {"input_model": path} + if module == "visual_prompting_image_encoder": + mo_args.update( + { + "mean_values": list(self.config.dataset.normalize.mean), + "scale_values": list(self.config.dataset.normalize.std), + } + ) + if precision == ModelPrecision.FP16: + mo_args.update({"compress_to_fp16": True}) + + ov_model = mo.convert_model(**mo_args) + ov.save_model(ov_model, os.path.join(self.output_path, f"{module}.xml")) + with open(path.replace(".onnx", ".bin"), "rb") as file: + output_model.set_data(f"{module}.bin", file.read()) + with open(path.replace(".onnx", ".xml"), "rb") as file: + output_model.set_data(f"{module}.xml", file.read()) + + output_model.precision = self.precision + output_model.optimization_methods = self.optimization_methods
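# For reference, a minimal standalone sketch of the mo.convert_model flow used above,
# assuming an ONNX file "model.onnx" and SAM-style normalization constants (both are
# illustrative placeholders, not values taken verbatim from this patch):
#
#     import os
#     import openvino as ov
#     from openvino.tools import mo
#
#     ov_model = mo.convert_model(
#         input_model="model.onnx",
#         mean_values=[123.675, 116.28, 103.53],  # folded into the IR preprocessing
#         scale_values=[58.395, 57.12, 57.375],
#         compress_to_fp16=True,  # Python-API equivalent of the old --compress_to_fp16 CLI flag
#     )
#     ov.save_model(ov_model, os.path.join("outputs", "model.xml"))  # writes .xml and .bin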
+ + output_model.set_data("label_schema.json", label_schema_to_bytes(self.task_environment.label_schema)) + self._set_metadata(output_model) + + def _export_to_onnx(self, onnx_path: Dict[str, str]): + """Export model to ONNX. + + Args: + onnx_path (Dict[str, str]): Paths to save ONNX models. + """ + image_size = self.config.model.image_size + embed_dim = self.model.prompt_encoder.embed_dim + embed_size = self.model.prompt_encoder.image_embedding_size + for module, path in onnx_path.items(): + if module == "visual_prompting_image_encoder": + dummy_inputs = {"images": torch.randn(1, 3, image_size, image_size, dtype=torch.float32)} + output_names = ["image_embeddings"] + dynamic_axes = None + model_to_export = self.model.image_encoder + + elif module == "visual_prompting_prompt_getter": + dummy_inputs = { + "image_embeddings": torch.randn(1, embed_dim, *embed_size, dtype=torch.float32), + "original_size": torch.randint(low=0, high=image_size * 2, size=(1, 2), dtype=torch.int64), + "threshold": torch.tensor([[0.1]], dtype=torch.float32), + "num_bg_points": torch.randint(low=1, high=image_size, size=(1, 1), dtype=torch.int64), + } + output_names = ["total_points_scores", "total_bg_coords"] + dynamic_axes = { + "total_points_scores": {0: "num_labels", 1: "num_points"}, + "total_bg_coords": {0: "num_labels", 1: "num_points"}, + } + model_to_export = self.model.prompt_getter + + elif module == "visual_prompting_decoder": + # sam without backbone + mask_input_size = [4 * x for x in embed_size] + dynamic_axes = { + "point_coords": {1: "num_points"}, + "point_labels": {1: "num_points"}, + } + dummy_inputs = { + "image_embeddings": torch.zeros(1, embed_dim, *embed_size, dtype=torch.float32), + "point_coords": torch.randint(low=0, high=1024, size=(1, 2, 2), dtype=torch.float32), + "point_labels": torch.randint(low=0, high=4, size=(1, 2), dtype=torch.float32), + "mask_input": torch.randn(1, 1, *mask_input_size, dtype=torch.float32), + "has_mask_input": torch.tensor([[1]], dtype=torch.float32), + } + output_names = ["iou_predictions", "low_res_masks"] + model_to_export = self.model + + else: + raise ValueError( + ( + f"{module} is undefined, use visual_prompting_image_encoder, visual_prompting_prompt_getter, " + f"or visual_prompting_decoder." + ) + ) + + with warnings.catch_warnings(): + warnings.filterwarnings("ignore", category=torch.jit.TracerWarning) + warnings.filterwarnings("ignore", category=UserWarning) + with open(path, "wb") as f: + torch.onnx.export( + model_to_export, + tuple(dummy_inputs.values()), + f, + export_params=True, + verbose=False, + opset_version=13, + do_constant_folding=True, + input_names=list(dummy_inputs.keys()), + output_names=output_names, + dynamic_axes=dynamic_axes, + ) + def save_model(self, output_model: ModelEntity) -> None: """Save the model after training is completed. 
diff --git a/src/otx/algorithms/visual_prompting/tasks/openvino.py b/src/otx/algorithms/visual_prompting/tasks/openvino.py index fe499300970..60cc3b25331 100644 --- a/src/otx/algorithms/visual_prompting/tasks/openvino.py +++ b/src/otx/algorithms/visual_prompting/tasks/openvino.py @@ -20,8 +20,10 @@ import random import tempfile import time +from collections import defaultdict +from itertools import product from pathlib import Path -from typing import Any, Dict, List, Optional, Tuple, Union +from typing import Any, DefaultDict, Dict, List, Optional, Tuple, Type, Union from zipfile import ZipFile import attr @@ -119,7 +121,15 @@ def __init__( **attr.asdict( hparams.postprocessing, filter=lambda attr, value: attr.name - not in ["header", "description", "type", "visible_in_ui", "class_name"], + not in [ + "header", + "description", + "type", + "visible_in_ui", + "class_name", + "sim_threshold", + "num_bg_points", + ], ) }, } @@ -138,7 +148,7 @@ def __init__( self.labels = label_schema.get_labels(include_empty=False) self.transform = get_transform() # TODO (sungchul): insert args - def pre_process( # type: ignore + def pre_process( self, dataset_item: DatasetItemEntity, extra_processing: bool = False ) -> Tuple[Dict[str, Any], Dict[str, Any], List[Dict[str, Any]]]: """Pre-process function of OpenVINO Visual Prompting Inferencer for image encoder.""" @@ -159,7 +169,7 @@ def predict(self, dataset_item: DatasetItemEntity) -> List[Annotation]: # type: """Perform a prediction for a given input image.""" # forward image encoder images, meta, prompts = self.pre_process(dataset_item) - image_embeddings = self.forward(images) + image_embeddings = self.forward_image_encoder(images) annotations: List[Annotation] = [] hard_predictions: List[np.ndarray] = [] @@ -180,7 +190,7 @@ def predict(self, dataset_item: DatasetItemEntity) -> List[Annotation]: # type: soft_predictions.append(soft_prediction) return annotations - def forward(self, inputs: Dict[str, np.ndarray]) -> Dict[str, np.ndarray]: + def forward_image_encoder(self, inputs: Dict[str, np.ndarray]) -> Dict[str, np.ndarray]: """Forward function of OpenVINO Visual Prompting Inferencer.""" return self.model["image_encoder"].infer_sync(inputs) @@ -193,6 +203,279 @@ def await_all(self) -> None: self.model["image_encoder"].await_all() self.model["decoder"].await_all() + def pre_process_prompt_getter(self, *args, **kwargs) -> Any: + """Pre-process function of OpenVINO Zero-shot Visual Prompting Inferencer for prompt getter.""" + pass + + +class OpenVINOZeroShotVisualPromptingInferencer(OpenVINOVisualPromptingInferencer): + """Inferencer implementation for Zero-shot Visual Prompting using OpenVINO backend. + + This inferencer has three models: image encoder, prompt getter, and decoder. + + Args: + hparams (VisualPromptingBaseConfig): Hyper parameters that the model should use. + label_schema (LabelSchemaEntity): LabelSchemaEntity that was used during model training. + model_files (Dict[str, Union[str, Path, bytes]]): Path or bytes to model to load, + `.xml`, `.bin` or `.onnx` file. + weight_files (Dict[str, Union[str, Path, bytes, None]], optional): Path or bytes to weights to load, + `.xml`, `.bin` or `.onnx` file. Defaults to None. + device (str): Device to run inference on, such as CPU, GPU or MYRIAD. Defaults to "CPU". + num_requests (int): Maximum number of requests that the inferencer can make. + Good value is the number of available cores. Defaults to 1.
""" + + def __init__( + self, + hparams: VisualPromptingBaseConfig, + label_schema: LabelSchemaEntity, + model_files: Dict[str, Union[str, Path, bytes]], + weight_files: Optional[Dict[str, Union[str, Path, bytes, None]]] = None, + device: str = "CPU", + num_requests: int = 1, + ): + + assert all(module in model_files for module in ["image_encoder", "prompt_getter", "decoder"]) + + self.model = {} + model_parameters = { + "prompt_getter": {"input_layouts": "image_embeddings:NCHW"}, + "decoder": {"input_layouts": "image_embeddings:NCHW"}, + } + self.configuration = { + "image_encoder": { + **attr.asdict(hparams.postprocessing, filter=lambda attr, value: attr.name in ["image_size"]) + }, + "prompt_getter": { + **attr.asdict( + hparams.postprocessing, + filter=lambda attr, value: attr.name + in ["image_size", "sim_threshold", "num_bg_points", "embedded_processing"], + ) + }, + "decoder": { + **attr.asdict( + hparams.postprocessing, + filter=lambda attr, value: attr.name + not in [ + "header", + "description", + "type", + "visible_in_ui", + "class_name", + "sim_threshold", + "num_bg_points", + ], + ) + }, + } + + core = create_core() + for name in ["image_encoder", "prompt_getter", "decoder"]: + model_adapter = OpenvinoAdapter( + core=core, + model=model_files.get(name), + weights_path=(weight_files or {}).get(name), + model_parameters=model_parameters.get(name, {}), + device=device, + max_num_requests=num_requests, + plugin_config={"PERFORMANCE_HINT": "THROUGHPUT"}, + ) + self.model[name] = Model.create_model(model_adapter, name, self.configuration.get(name, {}), preload=True) + self.converter = VisualPromptingToAnnotationConverter() + self.labels = label_schema.get_labels(include_empty=False) + self.transform = get_transform() # TODO (sungchul): insert args + + self.point_labels_box = np.array([[2, 3]], dtype=np.float32) + self.has_mask_inputs = [np.array([[0.0]]), np.array([[1.0]])] + + def pre_process( # type: ignore + self, dataset_item: DatasetItemEntity, extra_processing: bool = False + ) -> Tuple[Dict[str, np.ndarray], Dict[str, Any]]: + """Pre-process function of OpenVINO Zero-shot Visual Prompting Inferencer for image encoder.""" + return self.model["image_encoder"].preprocess(dataset_item.numpy, extra_processing) + + def pre_process_prompt_getter( + self, image_embeddings: Dict[str, np.ndarray], original_size: np.ndarray + ) -> Dict[str, np.ndarray]: + """Pre-process function of OpenVINO Zero-shot Visual Prompting Inferencer for prompt getter.""" + inputs_prompt_getter = { + "original_size": original_size[None], + "threshold": np.array([[self.model["prompt_getter"].sim_threshold]], dtype=np.float32), + "num_bg_points": np.array([[self.model["prompt_getter"].num_bg_points]], dtype=np.int64), + } + inputs_prompt_getter.update(image_embeddings) + return inputs_prompt_getter + + def predict(self, dataset_item: DatasetItemEntity) -> List[Annotation]: # type: ignore + """Perform a prediction for a given input image.""" + # forward image encoder + images, meta = self.pre_process(dataset_item) + original_size = np.array(meta["original_shape"][:2], dtype=np.int64) + image_embeddings = self.forward_image_encoder(images) + + # get point candidates + inputs_prompt_getter = self.pre_process_prompt_getter(image_embeddings, original_size) + total_prompts = self.forward_prompt_getter(inputs_prompt_getter) + + annotations: DefaultDict = defaultdict(list) + predicted_masks: DefaultDict = defaultdict(list) + used_points: DefaultDict = defaultdict(list) + for label, (points_scores, bg_coords) in
enumerate( + zip(total_prompts["total_points_scores"], total_prompts["total_bg_coords"]) + ): + for points_score in points_scores: + if points_score[-1] == -1: + continue + x, y = points_score[:2] + is_done = False + for pm in predicted_masks.get(label, []): + # check if that point is already assigned + if pm[int(y), int(x)] > 0: + is_done = True + break + if is_done: + continue + + point_coords = np.concatenate((np.array([[x, y]]), bg_coords), axis=0, dtype=np.float32) + point_coords = self.model["decoder"]._apply_coords(point_coords, original_size) + point_labels = np.array([1] + [0] * len(bg_coords), dtype=np.float32) + inputs_decoder = {"point_coords": point_coords[None], "point_labels": point_labels[None]} + inputs_decoder.update(image_embeddings) + + prediction = self.forward_decoder(inputs_decoder, original_size) + metadata = { + "label": [_label for _label in self.labels if int(_label.id_) == label][0], + "original_size": original_size[None], + } + + # set annotation for eval + annotation, hard_prediction, _ = self.post_process(prediction, metadata) + annotations[label].extend(annotation) + predicted_masks[label].append(hard_prediction) + used_points[label].append(points_score) + self.__inspect_overlapping_areas(predicted_masks, used_points, annotations) + return sum(annotations.values(), []) + + def forward_prompt_getter(self, inputs: Dict[str, np.ndarray]) -> Dict[str, np.ndarray]: + """Forward function of OpenVINO Zero-shot Visual Prompting Inferencer for prompt getter.""" + return self.model["prompt_getter"].infer_sync(inputs) + + def forward_decoder( # type: ignore + self, inputs: Dict[str, np.ndarray], original_size: np.ndarray + ) -> Dict[str, np.ndarray]: + """Forward function of OpenVINO Zero-shot Visual Prompting Inferencer for decoder with cascaded post-refinement.""" + logits: np.ndarray + scores: np.ndarray + mask_slice = slice(0, 1) + for i in range(3): + if i == 0: + # First-step prediction + mask_input = np.zeros( + (1, 1, *map(lambda x: x * 4, inputs["image_embeddings"].shape[2:])), dtype=np.float32 + ) + has_mask_input = self.has_mask_inputs[0] + + elif i == 1: + # Cascaded Post-refinement-1 + mask_input, masks, iou_predictions = self._postprocess_masks( + logits, scores, original_size, is_single=True # noqa: F821 + ) + if masks.sum() == 0: + return {"iou_predictions": iou_predictions, "low_res_masks": mask_input} + + has_mask_input = self.has_mask_inputs[1] + + elif i == 2: + # Cascaded Post-refinement-2 + mask_input, masks, iou_predictions = self._postprocess_masks( + logits, scores, original_size # noqa: F821 + ) + if masks.sum() == 0: + return {"iou_predictions": iou_predictions, "low_res_masks": mask_input} + + has_mask_input = self.has_mask_inputs[1] + y, x = np.nonzero(masks) + box_coords = self.model["decoder"]._apply_coords( + np.array([[[x.min(), y.min()], [x.max(), y.max()]]], dtype=np.float32), original_size + ) + inputs["point_coords"] = np.concatenate((inputs["point_coords"], box_coords), axis=1) + inputs["point_labels"] = np.concatenate((inputs["point_labels"], self.point_labels_box), axis=1) + + inputs.update({"mask_input": mask_input, "has_mask_input": has_mask_input}) + prediction = self.model["decoder"].infer_sync(inputs) + scores, logits = prediction["iou_predictions"], prediction["low_res_masks"] + + return {"iou_predictions": scores[:, mask_slice], "low_res_masks": logits[:, mask_slice, :, :]} + + def _postprocess_masks( + self, logits: np.ndarray, scores: np.ndarray, original_size: np.ndarray, is_single: bool = False + ) -> Tuple[np.ndarray, ...]: + """Post-process logits for resized masks according to best index based
on scores.""" + high_res_masks = self.model["decoder"].resize_and_crop(logits[0].transpose(1, 2, 0), original_size) + masks = high_res_masks > self.model["decoder"].mask_threshold + masks = masks.transpose(2, 0, 1)[None] + + if is_single: + best_idx = 0 + else: + # skip the first index components + scores, masks, logits = map(lambda x: x[:, 1:], (scores, masks, logits)) + + # filter zero masks + while len(scores[0]) > 0 and masks[0, (best_idx := np.argmax(scores[0]))].sum() == 0: + scores, masks, logits = map( + lambda x: np.concatenate((x[:, :best_idx], x[:, best_idx + 1 :]), axis=1), (scores, masks, logits) + ) + + if len(scores[0]) == 0: + # all predicted masks were zero masks, ignore them. + return None, np.zeros((self.model["decoder"].image_size, self.model["decoder"].image_size)), 0.0 + + best_idx = np.argmax(scores[0]) + return logits[:, [best_idx]], masks[0, best_idx], scores[0, best_idx] + + def __inspect_overlapping_areas( + self, + predicted_masks: Dict[int, List[np.ndarray]], + used_points: Dict[int, List[np.ndarray]], + annotations: Dict[int, List[np.ndarray]], + threshold_iou: float = 0.8, + ): + def __calculate_mask_iou(mask1: np.ndarray, mask2: np.ndarray): + assert mask1.ndim == 2 and mask2.ndim == 2 + intersection = np.logical_and(mask1, mask2).sum().item() + union = np.logical_or(mask1, mask2).sum().item() + + # Avoid division by zero + if union == 0: + return 0.0 + iou = intersection / union + return iou + + for (label, masks), (other_label, other_masks) in product(predicted_masks.items(), predicted_masks.items()): + if other_label <= label: + continue + + overlapped_label = [] + overlapped_other_label = [] + for (im, mask), (jm, other_mask) in product(enumerate(masks), enumerate(other_masks)): + if __calculate_mask_iou(mask, other_mask) > threshold_iou: + if used_points[label][im][2] > used_points[other_label][jm][2]: + overlapped_other_label.append(jm) + else: + overlapped_label.append(im) + + for im in overlapped_label[::-1]: + masks.pop(im) + used_points[label].pop(im) + annotations[label].pop(im) + + for jm in overlapped_other_label[::-1]: + other_masks.pop(jm) + used_points[other_label].pop(jm) + annotations[other_label].pop(jm) + class OTXOpenVinoDataLoader: """DataLoader implementation for VisualPromptingOpenVINOTask.""" @@ -201,28 +484,31 @@ def __init__( self, dataset: Any, inferencer: OpenVINOVisualPromptingInferencer, + module_name: str, shuffle: bool = True, - is_encoder: bool = True, output_model: Optional[ModelEntity] = None, ): self.dataset = dataset self.inferencer = inferencer + self.module_name = module_name self.shuffler = None if shuffle: self.shuffler = list(range(len(dataset))) random.shuffle(self.shuffler) - self.is_encoder = is_encoder self.target_length = self.inferencer.model["image_encoder"].orig_width - if not self.is_encoder: - core = ov.Core() - compressed_model = core.read_model( - output_model.get_data("visual_prompting_image_encoder.xml"), - output_model.get_data("visual_prompting_image_encoder.bin"), - ) - self.compressed_model = core.compile_model( - model=compressed_model, device_name=inferencer.model["image_encoder"].inference_adapter.device - ) + if self.module_name not in ["image_encoder"]: + self.image_encoder = self._load_module("image_encoder", output_model) + + def _load_module(self, module_name: str, output_model: ModelEntity, core=ov.Core()): + """Load specific module.""" + compressed_model = core.read_model( + output_model.get_data(f"visual_prompting_{module_name}.xml"), + 
output_model.get_data(f"visual_prompting_{module_name}.bin"), + ) + return core.compile_model( + model=compressed_model, device_name=self.inferencer.model[module_name].inference_adapter.device + ) def __getitem__(self, index: int): """Get item from dataset.""" @@ -234,10 +520,10 @@ def __getitem__(self, index: int): _, _, h, w = images["images"].shape pad_width = ((0, 0), (0, 0), (0, self.target_length - h), (0, self.target_length - w)) images["images"] = np.pad(images["images"], pad_width, mode="constant", constant_values=0) - if self.is_encoder: + if self.module_name == "image_encoder": return images else: - image_embeddings = self.compressed_model(images["images"]) + image_embeddings = self.image_encoder(images["images"]) prompt = prompts[0] # only use the first prompt prompt.pop("label") prompt.pop("orig_size") @@ -250,6 +536,77 @@ def __len__(self): return len(self.dataset) +class OTXZeroShotOpenVinoDataLoader(OTXOpenVinoDataLoader): + """DataLoader implementation for ZeroShotVisualPromptingOpenVINOTask.""" + + def __init__( + self, + dataset: Any, + inferencer: OpenVINOZeroShotVisualPromptingInferencer, + module_name: str, + shuffle: bool = True, + output_model: Optional[ModelEntity] = None, + ): + super().__init__( + dataset=dataset, inferencer=inferencer, module_name=module_name, shuffle=shuffle, output_model=output_model + ) + if self.module_name == "decoder": + self.prompt_getter = self._load_module("prompt_getter", output_model) + + def __getitem__(self, index: int) -> Dict[str, Any]: + """Get item from dataset.""" + images: Dict[str, np.ndarray] + meta: Dict[str, Any] + if self.shuffler is not None: + index = self.shuffler[index] + + items = self.dataset[index] + images, meta = self.inferencer.pre_process(items, extra_processing=True) # type: ignore + original_size = np.array(meta["original_shape"][:2]) + _, _, h, w = images["images"].shape + pad_width = ((0, 0), (0, 0), (0, self.target_length - h), (0, self.target_length - w)) + images["images"] = np.pad(images["images"], pad_width, mode="constant", constant_values=0) + if self.module_name == "image_encoder": + return images + else: + image_embeddings = self.image_encoder(images["images"]) + inputs_prompt_getter = self.inferencer.pre_process_prompt_getter(image_embeddings, original_size) + if self.module_name == "prompt_getter": + return inputs_prompt_getter + + total_prompts = self.prompt_getter(inputs_prompt_getter) + # only use the first prompt + point_score = total_prompts["total_points_scores"][0][0] + bg_coords = total_prompts["total_bg_coords"][0] + + x, y = point_score[:2] + point_coords = np.concatenate((np.array([[x, y]]), bg_coords), axis=0, dtype=np.float32) + point_coords = self.inferencer.model["decoder"]._apply_coords(point_coords, original_size) + point_labels = np.array([1] + [0] * len(bg_coords), dtype=np.float32) + inputs_decoder = {"point_coords": point_coords[None], "point_labels": point_labels[None]} + inputs_decoder.update(image_embeddings) + inputs_decoder.update( + { + "mask_input": np.zeros((1, 1, 256, 256), dtype=np.float32), + "has_mask_input": np.zeros((1, 1), dtype=np.float32), + } + ) + if index % 2 == 0: + prediction = self.inferencer.model["decoder"].infer_sync(inputs_decoder) + scores, low_res_masks = prediction["iou_predictions"], prediction["low_res_masks"] + best_idx = scores.argmax() + inputs_decoder.update( + {"mask_input": low_res_masks[:, [best_idx]], "has_mask_input": np.ones((1, 1), dtype=np.float32)} + ) + return inputs_decoder + + def __len__(self): + """Get length of dataset.""" + 
if self.module_name == "decoder": + return len(self.dataset) * 2 + return len(self.dataset) + + class OpenVINOVisualPromptingTask(IInferenceTask, IEvaluationTask, IOptimizationTask, IDeploymentTask): """Task implementation for Visual Prompting using OpenVINO backend.""" @@ -356,10 +713,10 @@ def deploy(self, output_model: ModelEntity) -> None: raise RuntimeError("deploy failed, model is None") work_dir = os.path.dirname(demo.__file__) - parameters = {} + parameters: Dict[str, Any] = {} parameters["converter_type"] = f"{self.task_type}" - parameters["model_parameters"] = self.inferencer.configuration # type: ignore - parameters["model_parameters"]["labels"] = LabelSchemaMapper.forward(self.task_environment.label_schema) # type: ignore # noqa: E501 + parameters["model_parameters"] = self.inferencer.configuration + parameters["model_parameters"]["labels"] = LabelSchemaMapper.forward(self.task_environment.label_schema) zip_buffer = io.BytesIO() with ZipFile(zip_buffer, "w") as arch: @@ -412,6 +769,8 @@ def optimize( dataset: DatasetEntity, output_model: ModelEntity, optimization_parameters: Optional[OptimizationParameters] = None, + module_names: List[str] = ["image_encoder", "decoder"], + ov_dataloader: Type[OTXOpenVinoDataLoader] = OTXOpenVinoDataLoader, ): """Optimize function of OpenVINOVisualPromptingTask.""" logger.info("Start PTQ optimization") @@ -423,27 +782,22 @@ def optimize( dataset = dataset.get_subset(Subset.TRAINING) - for i, (name, is_encoder) in enumerate(zip(["image_encoder", "decoder"], [True, False]), 1): - data_loader = OTXOpenVinoDataLoader( - dataset, self.inferencer, is_encoder=is_encoder, output_model=output_model - ) + for i, module_name in enumerate(module_names, 1): + data_loader = ov_dataloader(dataset, self.inferencer, module_name=module_name, output_model=output_model) quantization_dataset = nncf.Dataset(data_loader, lambda data: data) with tempfile.TemporaryDirectory() as tempdir: - xml_path = os.path.join(tempdir, f"visual_prompting_{name}.xml") - bin_path = os.path.join(tempdir, f"visual_prompting_{name}.bin") + xml_path = os.path.join(tempdir, f"visual_prompting_{module_name}.xml") + bin_path = os.path.join(tempdir, f"visual_prompting_{module_name}.bin") with open(xml_path, "wb") as f: - f.write(self.model.get_data(f"visual_prompting_{name}.xml")) + f.write(self.model.get_data(f"visual_prompting_{module_name}.xml")) with open(bin_path, "wb") as f: - f.write(self.model.get_data(f"visual_prompting_{name}.bin")) + f.write(self.model.get_data(f"visual_prompting_{module_name}.bin")) ov_model = ov.Core().read_model(xml_path, bin_path) if check_if_quantized(ov_model): raise RuntimeError("Model is already optimized by PTQ") - if optimization_parameters is not None: - optimization_parameters.update_progress(10 * i + 35 * (i - 1), None) - optimization_config_path = os.path.join(self._base_dir, "ptq_optimization_config.py") ptq_config = ADDict() if os.path.exists(optimization_config_path): @@ -456,16 +810,16 @@ def optimize( compressed_model = nncf.quantize(ov_model, quantization_dataset, **ptq_config) if optimization_parameters is not None: - optimization_parameters.update_progress(45 * i, None) + optimization_parameters.update_progress(90 // len(module_names) * i, None) with tempfile.TemporaryDirectory() as tempdir: - xml_path = os.path.join(tempdir, f"visual_prompting_{name}.xml") - bin_path = os.path.join(tempdir, f"visual_prompting_{name}.bin") + xml_path = os.path.join(tempdir, f"visual_prompting_{module_name}.xml") + bin_path = os.path.join(tempdir, 
f"visual_prompting_{module_name}.bin") ov.serialize(compressed_model, xml_path) with open(xml_path, "rb") as f: - output_model.set_data(f"visual_prompting_{name}.xml", f.read()) + output_model.set_data(f"visual_prompting_{module_name}.xml", f.read()) with open(bin_path, "rb") as f: - output_model.set_data(f"visual_prompting_{name}.bin", f.read()) + output_model.set_data(f"visual_prompting_{module_name}.bin", f.read()) output_model.set_data( "label_schema.json", @@ -484,3 +838,46 @@ def optimize( if optimization_parameters is not None: optimization_parameters.update_progress(100, None) logger.info("PTQ optimization completed") + + +class OpenVINOZeroShotVisualPromptingTask(OpenVINOVisualPromptingTask): + """Task implementation for Zero-shot Visual Prompting using OpenVINO backend.""" + + def load_inferencer(self) -> OpenVINOZeroShotVisualPromptingInferencer: + """Load OpenVINO Zero-shot Visual Prompting Inferencer.""" + if self.model is None: + raise RuntimeError("load_inferencer failed, model is None") + return OpenVINOZeroShotVisualPromptingInferencer( + self.hparams, + self.task_environment.label_schema, + model_files={ + "image_encoder": self.model.get_data("visual_prompting_image_encoder.xml"), + "prompt_getter": self.model.get_data("visual_prompting_prompt_getter.xml"), + "decoder": self.model.get_data("visual_prompting_decoder.xml"), + }, + weight_files={ + "image_encoder": self.model.get_data("visual_prompting_image_encoder.bin"), + "prompt_getter": self.model.get_data("visual_prompting_prompt_getter.bin"), + "decoder": self.model.get_data("visual_prompting_decoder.bin"), + }, + num_requests=get_default_async_reqs_num(), + ) + + def optimize( + self, + optimization_type: OptimizationType, + dataset: DatasetEntity, + output_model: ModelEntity, + optimization_parameters: Optional[OptimizationParameters] = None, + module_names: List[str] = ["image_encoder", "prompt_getter", "decoder"], + ov_dataloader: Type[OTXOpenVinoDataLoader] = OTXZeroShotOpenVinoDataLoader, + ): + """Optimize function of OpenVINOZeroShotVisualPromptingTask.""" + return super().optimize( + optimization_type=optimization_type, + dataset=dataset, + output_model=output_model, + optimization_parameters=optimization_parameters, + module_names=module_names, + ov_dataloader=ov_dataloader, + ) diff --git a/src/otx/api/usecases/exportable_code/demo/requirements.txt b/src/otx/api/usecases/exportable_code/demo/requirements.txt index 95593e05160..2389bc66a8b 100644 --- a/src/otx/api/usecases/exportable_code/demo/requirements.txt +++ b/src/otx/api/usecases/exportable_code/demo/requirements.txt @@ -1,4 +1,4 @@ -openvino==2023.0 -openvino-model-api==0.1.6 -otx @ git+https://github.com/openvinotoolkit/training_extensions/@0e1200f4cc5343dcac597674fc97a898fc9829c2#egg=otx +openvino==2023.2.0 +openvino-model-api==0.1.8 +otx @ git+https://github.com/openvinotoolkit/training_extensions/@2988fdc51ef7e4a136a9d4e09602b3844d7bafec#egg=otx numpy>=1.21.0,<=1.23.5 # np.bool was removed in 1.24.0 which was used in openvino runtime diff --git a/src/otx/api/usecases/exportable_code/prediction_to_annotation_converter.py b/src/otx/api/usecases/exportable_code/prediction_to_annotation_converter.py index 40d1f4beec2..f84c2b0facf 100644 --- a/src/otx/api/usecases/exportable_code/prediction_to_annotation_converter.py +++ b/src/otx/api/usecases/exportable_code/prediction_to_annotation_converter.py @@ -380,7 +380,7 @@ def convert_to_annotation(self, predictions: AnomalyResult, metadata: Dict[str, assert predictions.pred_mask is not None assert 
predictions.anomaly_map is not None annotations = create_annotation_from_segmentation_map( - predictions.pred_mask, predictions.anomaly_map, self.label_map + predictions.pred_mask, predictions.anomaly_map / 255.0, self.label_map ) if len(annotations) == 0: # TODO: add confidence to this label @@ -466,7 +466,7 @@ def convert_to_annotation(self, hard_prediction: np.ndarray, metadata: Dict[str, annotations = create_annotation_from_segmentation_map( hard_prediction=hard_prediction, soft_prediction=soft_prediction, - label_map={1: metadata["label"].label}, + label_map={1: metadata["label"].label if isinstance(metadata["label"], ScoredLabel) else metadata["label"]}, ) return annotations diff --git a/src/otx/cli/tools/train.py b/src/otx/cli/tools/train.py index dfb3fe1c4a1..5cf62ec2118 100644 --- a/src/otx/cli/tools/train.py +++ b/src/otx/cli/tools/train.py @@ -286,15 +286,19 @@ def train(exit_stack: Optional[ExitStack] = None): # pylint: disable=too-many-b resource_tracker = None if args.track_resource_usage and not is_multigpu_child_process(): - resource_tracker = ResourceTracker(args.track_resource_usage, args.gpus) + resource_tracker = ResourceTracker( + config_manager.output_path / "resource_usage.yaml", args.track_resource_usage, args.gpus + ) resource_tracker.start() + if exit_stack is not None: + exit_stack.callback(resource_tracker.stop) task.train( dataset, output_model, train_parameters=TrainParameters(), seed=args.seed, deterministic=args.deterministic ) - if resource_tracker is not None: - resource_tracker.stop(config_manager.output_path / "resource_usage.yaml") + if resource_tracker is not None and exit_stack is None: + resource_tracker.stop() model_path = config_manager.output_path / "models" save_model_data(output_model, str(model_path)) diff --git a/src/otx/cli/utils/experiment.py b/src/otx/cli/utils/experiment.py index 591d69fdee5..a642aea6ca3 100644 --- a/src/otx/cli/utils/experiment.py +++ b/src/otx/cli/utils/experiment.py @@ -30,12 +30,16 @@ class ResourceTracker: """Class to track resources usage. Args: + output_path (Union[str, Path]): Output file path to save CPU & GPU utilization and max memory usage values. resource_type (str, optional): Which resource to track. Available values are cpu, gpu or all now. Defaults to "all". gpu_ids (Optional[str]): GPU indices to record. """ - def __init__(self, resource_type: str = "all", gpu_ids: Optional[str] = None): + def __init__(self, output_path: Union[str, Path], resource_type: str = "all", gpu_ids: Optional[str] = None): + if isinstance(output_path, str): + output_path = Path(output_path) + self.output_path = output_path if resource_type == "all": self._resource_type = AVAILABLE_RESOURCE_TYPE else: @@ -62,19 +66,12 @@ def start(self): ) self._mem_check_proc.start() - def stop(self, output_path: Union[str, Path]): - """Terminate a process to record resources usage. - - Args: - output_path (Union[str, Path]): Output file path to save CPU & GPU utilization and max meory usage values.
- """ + def stop(self): + """Terminate a process to record resources usage.""" if self._mem_check_proc is None or not self._mem_check_proc.is_alive(): return - if isinstance(output_path, str): - output_path = Path(output_path) - - self._queue.put(output_path) + self._queue.put(self.output_path) self._mem_check_proc.join(10) if self._mem_check_proc.exitcode is None: self._mem_check_proc.terminate() diff --git a/src/otx/cli/utils/hpo.py b/src/otx/cli/utils/hpo.py index 3fbfc103d64..3511ebf3e15 100644 --- a/src/otx/cli/utils/hpo.py +++ b/src/otx/cli/utils/hpo.py @@ -8,12 +8,14 @@ import os import re import shutil +import time from copy import deepcopy from enum import Enum from functools import partial from inspect import isclass from math import floor from pathlib import Path +from threading import Thread from typing import Any, Callable, Dict, List, Optional, Union import torch @@ -31,6 +33,7 @@ from otx.cli.utils.io import read_model, save_model_data from otx.core.data.adapter import get_dataset_adapter from otx.hpo import HyperBand, TrialStatus, run_hpo_loop +from otx.hpo.hpo_base import HpoBase from otx.utils.logger import get_logger logger = get_logger() @@ -383,6 +386,7 @@ class HpoRunner: val_dataset_size (int): validation dataset size hpo_workdir (Union[str, Path]): work directory for HPO hpo_time_ratio (int, optional): time ratio to use for HPO compared to training time. Defaults to 4. + progress_updater_callback (Optional[Callable[[Union[int, float]], None]]): callback to update progress """ # pylint: disable=too-many-instance-attributes @@ -394,6 +398,7 @@ def __init__( val_dataset_size: int, hpo_workdir: Union[str, Path], hpo_time_ratio: int = 4, + progress_updater_callback: Optional[Callable[[Union[int, float]], None]] = None, ): if train_dataset_size <= 0: raise ValueError(f"train_dataset_size should be bigger than 0. Your value is {train_dataset_size}") @@ -410,6 +415,7 @@ def __init__( self._val_dataset_size = val_dataset_size self._fixed_hp: Dict[str, Any] = {} self._initial_weight_name = "initial_weight.pth" + self._progress_updater_callback = progress_updater_callback self._align_batch_size_search_space_to_dataset_size() @@ -427,12 +433,16 @@ def _align_batch_size_search_space_to_dataset_size(self): if "range" in self._hpo_config["hp_space"][batch_size_name]: max_val = self._hpo_config["hp_space"][batch_size_name]["range"][1] min_val = self._hpo_config["hp_space"][batch_size_name]["range"][0] + step = 1 + if self._hpo_config["hp_space"][batch_size_name]["param_type"] in ["quniform", "qloguniform"]: + step = self._hpo_config["hp_space"][batch_size_name]["range"][2] if max_val > self._train_dataset_size: max_val = self._train_dataset_size self._hpo_config["hp_space"][batch_size_name]["range"][1] = max_val else: max_val = self._hpo_config["hp_space"][batch_size_name]["max"] min_val = self._hpo_config["hp_space"][batch_size_name]["min"] + step = self._hpo_config["hp_space"][batch_size_name].get("step", 1) if max_val > self._train_dataset_size: max_val = self._train_dataset_size @@ -440,10 +450,13 @@ def _align_batch_size_search_space_to_dataset_size(self): # If trainset size is lower than min batch size range, # fix batch size to trainset size + reason_to_fix_bs = "" if min_val >= max_val: - logger.info( - "Train set size is equal or lower than batch size range. Batch size is fixed to train set size." - ) + reason_to_fix_bs = "Train set size is equal or lower than batch size range." 
+ elif max_val - min_val < step: + reason_to_fix_bs = "Difference between min and max batch size is less than step." + if reason_to_fix_bs: + logger.info(f"{reason_to_fix_bs} Batch size is fixed to train set size.") del self._hpo_config["hp_space"][batch_size_name] self._fixed_hp[batch_size_name] = self._train_dataset_size self._environment.set_hyper_parameter_using_str_key(self._fixed_hp) @@ -460,12 +473,18 @@ def run_hpo(self, train_func: Callable, data_roots: Dict[str, Dict]) -> Union[Di """ self._environment.save_initial_weight(self._get_initial_model_weight_path()) hpo_algo = self._get_hpo_algo() + + if self._progress_updater_callback is not None: + progress_updater_thread = Thread(target=self._update_hpo_progress, args=[hpo_algo], daemon=True) + progress_updater_thread.start() + if torch.cuda.is_available(): resource_type = "gpu" elif is_xpu_available(): resource_type = "xpu" else: resource_type = "cpu" + run_hpo_loop( hpo_algo, partial( @@ -547,9 +566,27 @@ def _get_default_hyper_parameters(self): def _get_initial_model_weight_path(self): return self._hpo_workdir / self._initial_weight_name + def _update_hpo_progress(self, hpo_algo: HpoBase): + """Function for a thread to report HPO progress regularly. + + Args: + hpo_algo (HpoBase): HPO algorithm class + """ + + while True: + if hpo_algo.is_done(): + break + self._progress_updater_callback(hpo_algo.get_progress() * 100) + time.sleep(1) + def run_hpo( - hpo_time_ratio: int, output: Path, environment: TaskEnvironment, dataset: DatasetEntity, data_roots: Dict[str, Dict] + hpo_time_ratio: int, + output: Path, + environment: TaskEnvironment, + dataset: DatasetEntity, + data_roots: Dict[str, Dict], + progress_updater_callback: Optional[Callable[[Union[int, float]], None]] = None, ) -> Optional[TaskEnvironment]: """Run HPO and load optimized hyper parameter and best HPO model weight. @@ -559,6 +596,7 @@ def run_hpo( environment (TaskEnvironment): otx task environment dataset (DatasetEntity): dataset to use for training data_roots (Dict[str, Dict]): dataset path of each dataset type + progress_updater_callback (Optional[Callable[[Union[int, float]], None]]): callback to update progress """ task_type = environment.model_template.task_type if not _check_hpo_enabled_task(task_type): @@ -579,6 +617,7 @@ def run_hpo( len(dataset.get_subset(Subset.VALIDATION)), hpo_save_path, hpo_time_ratio, + progress_updater_callback, ) logger.info("started hyper-parameter optimization") diff --git a/src/otx/cli/utils/io.py b/src/otx/cli/utils/io.py index 3770fb279bf..e747a93b42b 100644 --- a/src/otx/cli/utils/io.py +++ b/src/otx/cli/utils/io.py @@ -49,6 +49,8 @@ "tile_classifier.bin", "visual_prompting_image_encoder.xml", "visual_prompting_image_encoder.bin", + "visual_prompting_prompt_getter.xml", + "visual_prompting_prompt_getter.bin", "visual_prompting_decoder.xml", "visual_prompting_decoder.bin", "image_threshold", # NOTE: used for compatibility with with OTX 1.2.x. Remove when all Geti projects are upgraded.
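The progress reporting wired into HpoRunner above is a small, reusable pattern: a daemon thread polls a long-running job and forwards its progress, scaled to a percentage, to a caller-supplied callback. A self-contained sketch of that pattern follows; the Job class and the poll intervals are illustrative stand-ins for HpoBase and its is_done()/get_progress() interface, not code from this patch.

    import time
    from threading import Thread
    from typing import Callable


    class Job:
        """Toy stand-in for HpoBase, exposing is_done() and get_progress()."""

        def __init__(self, total_steps: int = 5) -> None:
            self._step = 0
            self._total = total_steps

        def advance(self) -> None:
            self._step = min(self._step + 1, self._total)

        def is_done(self) -> bool:
            return self._step >= self._total

        def get_progress(self) -> float:
            return self._step / self._total  # in [0.0, 1.0], like HpoBase.get_progress()


    def report_progress(job: Job, callback: Callable[[float], None]) -> None:
        # Same shape as HpoRunner._update_hpo_progress: poll until the job is done,
        # converting the fractional progress to a percentage for the callback.
        while not job.is_done():
            callback(job.get_progress() * 100)
            time.sleep(0.1)


    if __name__ == "__main__":
        job = Job()
        reporter = Thread(target=report_progress, args=(job, print), daemon=True)
        reporter.start()
        for _ in range(5):
            job.advance()
            time.sleep(0.2)
        reporter.join(timeout=1)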
diff --git a/src/otx/core/data/adapter/base_dataset_adapter.py b/src/otx/core/data/adapter/base_dataset_adapter.py index 52195de4d0f..af87695bd66 100644 --- a/src/otx/core/data/adapter/base_dataset_adapter.py +++ b/src/otx/core/data/adapter/base_dataset_adapter.py @@ -273,11 +273,16 @@ def _prepare_label_information( return {"category_items": category_items, "label_groups": label_groups, "label_entities": label_entities} - def _is_normal_polygon(self, annotation: DatumAnnotationType.polygon) -> bool: + def _is_normal_polygon(self, annotation: DatumAnnotationType.polygon, width: int, height: int) -> bool: """To filter out the abnormal polygon.""" - x_points = [annotation.points[i] for i in range(0, len(annotation.points), 2)] - y_points = [annotation.points[i + 1] for i in range(0, len(annotation.points), 2)] - return min(x_points) < max(x_points) and min(y_points) < max(y_points) + x_points = annotation.points[::2] # Extract x-coordinates + y_points = annotation.points[1::2] # Extract y-coordinates + + return ( + min(x_points) < max(x_points) < width + and min(y_points) < max(y_points) < height + and annotation.get_area() > 0 + ) def _is_normal_bbox(self, x1: float, y1: float, x2: float, y2: float) -> bool: """To filter out the abrnormal bbox.""" diff --git a/src/otx/core/data/adapter/detection_dataset_adapter.py b/src/otx/core/data/adapter/detection_dataset_adapter.py index 612ab303f23..963b7dafd73 100644 --- a/src/otx/core/data/adapter/detection_dataset_adapter.py +++ b/src/otx/core/data/adapter/detection_dataset_adapter.py @@ -37,8 +37,11 @@ def get_otx_dataset(self) -> DatasetEntity: assert isinstance(image, Image) shapes = [] for ann in datumaro_item.annotations: - if self.task_type in (TaskType.INSTANCE_SEGMENTATION, TaskType.ROTATED_DETECTION): - if ann.type == DatumAnnotationType.polygon and self._is_normal_polygon(ann): + if ( + self.task_type in (TaskType.INSTANCE_SEGMENTATION, TaskType.ROTATED_DETECTION) + and ann.type == DatumAnnotationType.polygon + ): + if self._is_normal_polygon(ann, image.width, image.height): shapes.append(self._get_polygon_entity(ann, image.width, image.height)) elif ann.type == DatumAnnotationType.ellipse: shapes.append(self._get_ellipse_entity(ann, image.width, image.height)) diff --git a/src/otx/core/data/adapter/visual_prompting_dataset_adapter.py b/src/otx/core/data/adapter/visual_prompting_dataset_adapter.py index d428dc6afad..7a5c235f792 100644 --- a/src/otx/core/data/adapter/visual_prompting_dataset_adapter.py +++ b/src/otx/core/data/adapter/visual_prompting_dataset_adapter.py @@ -53,7 +53,7 @@ def get_otx_dataset(self) -> DatasetEntity: for ann in datumaro_item.annotations: if ann.type == DatumAnnotationType.polygon: # save polygons as-is, they will be converted to masks. 
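# The strengthened check below passes the image extent along with the polygon: a
# polygon is kept only if it has a non-degenerate span on both axes, its maximum
# coordinates lie inside the image (max x < width, max y < height), and its area is
# strictly positive, so degenerate and out-of-bounds annotations are dropped before
# mask conversion.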
- if self._is_normal_polygon(ann): + if self._is_normal_polygon(ann, image.width, image.height): shapes.append(self._get_polygon_entity(ann, image.width, image.height)) if ann.type == DatumAnnotationType.mask: diff --git a/tests/conftest.py b/tests/conftest.py index 4ae77116996..675d66778a2 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -13,6 +13,7 @@ otx_pytest_addoption_insertion, ) from .unit.api.fixtures.general import label_schema_example # noqa: F401 +import mlflow pytest_plugins = get_pytest_plugins_from_otx() # noqa: F405 @@ -93,3 +94,18 @@ def manage_tm_config_for_testing(): if created_cfg_dir: os.rmdir(cfg_dir) + + +@pytest.fixture(autouse=True, scope="session") +def init_mlflow_tracking(): + uri = os.environ.get("MLFLOW_TRACKING_SERVER_URI") + if uri is not None: + mlflow.set_tracking_uri(uri=uri) + + yield + + +@pytest.fixture(scope="session") +def fxt_mlflow_client(): + uri = os.environ.get("MLFLOW_TRACKING_SERVER_URI") + return mlflow.MlflowClient(uri) if uri is not None else None diff --git a/tests/e2e/cli/classification/reference/Custom_Image_Classification_EfficientNet-V2-S/compressed_model.yml b/tests/e2e/cli/classification/reference/Custom_Image_Classification_EfficientNet-V2-S/compressed_model.yml index ee44bed9abb..d234c15bd70 100644 --- a/tests/e2e/cli/classification/reference/Custom_Image_Classification_EfficientNet-V2-S/compressed_model.yml +++ b/tests/e2e/cli/classification/reference/Custom_Image_Classification_EfficientNet-V2-S/compressed_model.yml @@ -2,14 +2,14 @@ TestToolsHierarchicalClassification: nncf: number_of_fakequantizers: 267 ptq: - number_of_fakequantizers: 177 + number_of_fakequantizers: 207 TestToolsMultiClassClassification: nncf: number_of_fakequantizers: 267 ptq: - number_of_fakequantizers: 177 + number_of_fakequantizers: 207 TestToolsMultilabelClassification: nncf: number_of_fakequantizers: 269 ptq: - number_of_fakequantizers: 179 + number_of_fakequantizers: 209 diff --git a/tests/e2e/cli/classification/reference/Custom_Image_Classification_EfficinetNet-B0/compressed_model.yml b/tests/e2e/cli/classification/reference/Custom_Image_Classification_EfficinetNet-B0/compressed_model.yml index 27616d54b5a..ebea5aac8b2 100644 --- a/tests/e2e/cli/classification/reference/Custom_Image_Classification_EfficinetNet-B0/compressed_model.yml +++ b/tests/e2e/cli/classification/reference/Custom_Image_Classification_EfficinetNet-B0/compressed_model.yml @@ -2,14 +2,14 @@ TestToolsHierarchicalClassification: nncf: number_of_fakequantizers: 124 ptq: - number_of_fakequantizers: 76 + number_of_fakequantizers: 92 TestToolsMultiClassClassification: nncf: number_of_fakequantizers: 124 ptq: - number_of_fakequantizers: 76 + number_of_fakequantizers: 92 TestToolsMultilabelClassification: nncf: number_of_fakequantizers: 126 ptq: - number_of_fakequantizers: 78 + number_of_fakequantizers: 94 diff --git a/tests/e2e/cli/instance_segmentation/reference/Custom_Counting_Instance_Segmentation_MaskRCNN_EfficientNetB2B/compressed_model.yml b/tests/e2e/cli/instance_segmentation/reference/Custom_Counting_Instance_Segmentation_MaskRCNN_EfficientNetB2B/compressed_model.yml index c0f1a662f43..ed9ad16aa47 100644 --- a/tests/e2e/cli/instance_segmentation/reference/Custom_Counting_Instance_Segmentation_MaskRCNN_EfficientNetB2B/compressed_model.yml +++ b/tests/e2e/cli/instance_segmentation/reference/Custom_Counting_Instance_Segmentation_MaskRCNN_EfficientNetB2B/compressed_model.yml @@ -1,14 +1,14 @@ TestToolsOTXInstanceSegmentation: nncf: number_of_fakequantizers: 204 - ptq: - 
number_of_fakequantizers: 137 pot: number_of_fakequantizers: 137 + ptq: + number_of_fakequantizers: 160 TestToolsTilingInstanceSegmentation: nncf: number_of_fakequantizers: 204 - ptq: - number_of_fakequantizers: 137 pot: number_of_fakequantizers: 137 + ptq: + number_of_fakequantizers: 160 diff --git a/tests/e2e/cli/instance_segmentation/reference/Custom_Counting_Instance_Segmentation_MaskRCNN_ResNet50/compressed_model.yml b/tests/e2e/cli/instance_segmentation/reference/Custom_Counting_Instance_Segmentation_MaskRCNN_ResNet50/compressed_model.yml index b33253d9b42..5e78888bffc 100644 --- a/tests/e2e/cli/instance_segmentation/reference/Custom_Counting_Instance_Segmentation_MaskRCNN_ResNet50/compressed_model.yml +++ b/tests/e2e/cli/instance_segmentation/reference/Custom_Counting_Instance_Segmentation_MaskRCNN_ResNet50/compressed_model.yml @@ -1,14 +1,14 @@ TestToolsOTXInstanceSegmentation: nncf: number_of_fakequantizers: 97 - ptq: - number_of_fakequantizers: 99 pot: number_of_fakequantizers: 99 + ptq: + number_of_fakequantizers: 99 TestToolsTilingInstanceSegmentation: nncf: number_of_fakequantizers: 97 - ptq: - number_of_fakequantizers: 99 pot: number_of_fakequantizers: 99 + ptq: + number_of_fakequantizers: 99 diff --git a/tests/e2e/cli/semantic_segmentation/reference/Custom_Semantic_Segmentation_Lite-HRNet-x-mod3_OCR/compressed_model.yml b/tests/e2e/cli/semantic_segmentation/reference/Custom_Semantic_Segmentation_Lite-HRNet-x-mod3_OCR/compressed_model.yml index c7b1157342a..ccc409483e6 100644 --- a/tests/e2e/cli/semantic_segmentation/reference/Custom_Semantic_Segmentation_Lite-HRNet-x-mod3_OCR/compressed_model.yml +++ b/tests/e2e/cli/semantic_segmentation/reference/Custom_Semantic_Segmentation_Lite-HRNet-x-mod3_OCR/compressed_model.yml @@ -2,4 +2,4 @@ TestToolsOTXSegmentation: nncf: number_of_fakequantizers: 1138 ptq: - number_of_fakequantizers: 942 + number_of_fakequantizers: 1026 diff --git a/tests/e2e/cli/visual_prompting/reference/Zero_Shot_SAM_Tiny_ViT/compressed_decoder.yml b/tests/e2e/cli/visual_prompting/reference/Zero_Shot_SAM_Tiny_ViT/compressed_decoder.yml new file mode 100644 index 00000000000..9009e81d953 --- /dev/null +++ b/tests/e2e/cli/visual_prompting/reference/Zero_Shot_SAM_Tiny_ViT/compressed_decoder.yml @@ -0,0 +1,3 @@ +TestToolsZeroShotVisualPrompting: + ptq: + number_of_fakequantizers: 69 diff --git a/tests/e2e/cli/visual_prompting/reference/Zero_Shot_SAM_Tiny_ViT/compressed_image_encoder.yml b/tests/e2e/cli/visual_prompting/reference/Zero_Shot_SAM_Tiny_ViT/compressed_image_encoder.yml new file mode 100644 index 00000000000..2f72e18fd85 --- /dev/null +++ b/tests/e2e/cli/visual_prompting/reference/Zero_Shot_SAM_Tiny_ViT/compressed_image_encoder.yml @@ -0,0 +1,3 @@ +TestToolsZeroShotVisualPrompting: + ptq: + number_of_fakequantizers: 89 diff --git a/tests/e2e/cli/visual_prompting/reference/Zero_Shot_SAM_Tiny_ViT/compressed_prompt_getter.yml b/tests/e2e/cli/visual_prompting/reference/Zero_Shot_SAM_Tiny_ViT/compressed_prompt_getter.yml new file mode 100644 index 00000000000..43f5b4c35ce --- /dev/null +++ b/tests/e2e/cli/visual_prompting/reference/Zero_Shot_SAM_Tiny_ViT/compressed_prompt_getter.yml @@ -0,0 +1,3 @@ +TestToolsZeroShotVisualPrompting: + ptq: + number_of_fakequantizers: 1 diff --git a/tests/e2e/cli/visual_prompting/test_visual_prompting.py b/tests/e2e/cli/visual_prompting/test_visual_prompting.py index 2749a09f347..b6c1190e1d0 100644 --- a/tests/e2e/cli/visual_prompting/test_visual_prompting.py +++ b/tests/e2e/cli/visual_prompting/test_visual_prompting.py @@ 
-122,7 +122,15 @@ def test_otx_export_fp16(self, template, tmp_dir_path): @pytest.mark.parametrize("half_precision", [True, False]) def test_otx_eval_openvino(self, template, tmp_dir_path, half_precision): tmp_dir_path = tmp_dir_path / "visual_prompting" - otx_eval_openvino_testing(template, tmp_dir_path, otx_dir, args, threshold=0.2, half_precision=half_precision) + otx_eval_openvino_testing( + template, + tmp_dir_path, + otx_dir, + args, + threshold=0.2, + half_precision=half_precision, + is_visual_prompting=True, + ) @e2e_pytest_component @pytest.mark.skipif(TT_STABILITY_TESTS, reason="This is TT_STABILITY_TESTS") @@ -143,4 +151,4 @@ def test_ptq_validate_fq(self, template, tmp_dir_path): @pytest.mark.parametrize("template", templates, ids=templates_ids) def test_ptq_eval(self, template, tmp_dir_path): tmp_dir_path = tmp_dir_path / "visual_prompting" - ptq_eval_testing(template, tmp_dir_path, otx_dir, args) + ptq_eval_testing(template, tmp_dir_path, otx_dir, args, is_visual_prompting=True) diff --git a/tests/e2e/cli/visual_prompting/test_zero_shot.py b/tests/e2e/cli/visual_prompting/test_zero_shot.py new file mode 100644 index 00000000000..e5471cc7bb7 --- /dev/null +++ b/tests/e2e/cli/visual_prompting/test_zero_shot.py @@ -0,0 +1,127 @@ +"""Tests for Zero-Shot Visual Prompting with OTX CLI""" + +# Copyright (C) 2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 +# + +import copy +import os + +import pytest + +from otx.api.entities.model_template import parse_model_template +from otx.cli.registry import Registry +from tests.test_suite.e2e_test_system import e2e_pytest_component +from tests.test_suite.run_test_command import ( + otx_eval_openvino_testing, + otx_eval_testing, + otx_export_testing, + otx_train_testing, + ptq_optimize_testing, + ptq_validate_fq_testing, + ptq_eval_testing, +) + +args = { + "--train-data-roots": "tests/assets/car_tree_bug", + "--val-data-roots": "tests/assets/car_tree_bug", + "--test-data-roots": "tests/assets/car_tree_bug", + "--input": "tests/assets/car_tree_bug/images/train", + "train_params": [ + "params", + "--learning_parameters.trainer.max_epochs", + "1", + "--learning_parameters.dataset.train_batch_size", + "1", + "--learning_parameters.dataset.use_mask", + "False", + ], +} + +otx_dir = os.getcwd() + +TT_STABILITY_TESTS = os.environ.get("TT_STABILITY_TESTS", False) +if TT_STABILITY_TESTS: + default_template = parse_model_template( + os.path.join( + "src/otx/algorithms/visual_prompting/configs", "zero_shot_sam_tiny_vit", "template_experimental.yaml" + ) + ) + templates = [default_template] * 100 + templates_ids = [template.model_template_id + f"-{i+1}" for i, template in enumerate(templates)] + +else: + templates = [ + template + for template in Registry("src/otx/algorithms/visual_prompting", experimental=True) + .filter(task_type="VISUAL_PROMPTING") + .templates + if "Zero_Shot" in template.name + ] + templates_ids = [template.model_template_id for template in templates] + + +class TestToolsZeroShotVisualPrompting: + @e2e_pytest_component + @pytest.mark.parametrize("template", templates, ids=templates_ids) + def test_otx_train(self, template, tmp_dir_path): + tmp_dir_path = tmp_dir_path / "zero_shot_visual_prompting" + otx_train_testing(template, tmp_dir_path, otx_dir, args, deterministic=True) + + @e2e_pytest_component + @pytest.mark.skipif(TT_STABILITY_TESTS, reason="This is TT_STABILITY_TESTS") + @pytest.mark.parametrize("template", templates, ids=templates_ids) + def test_otx_eval(self, template, tmp_dir_path): + tmp_dir_path = tmp_dir_path /
"zero_shot_visual_prompting" + otx_eval_testing(template, tmp_dir_path, otx_dir, args) + + @e2e_pytest_component + @pytest.mark.skipif(TT_STABILITY_TESTS, reason="This is TT_STABILITY_TESTS") + @pytest.mark.parametrize("template", templates, ids=templates_ids) + def test_otx_export(self, template, tmp_dir_path): + tmp_dir_path = tmp_dir_path / "zero_shot_visual_prompting" + otx_export_testing(template, tmp_dir_path, False) + + @e2e_pytest_component + @pytest.mark.skipif(TT_STABILITY_TESTS, reason="This is TT_STABILITY_TESTS") + @pytest.mark.parametrize("template", templates, ids=templates_ids) + def test_otx_export_fp16(self, template, tmp_dir_path): + tmp_dir_path = tmp_dir_path / "zero_shot_visual_prompting" + otx_export_testing(template, tmp_dir_path, half_precision=True) + + @e2e_pytest_component + @pytest.mark.skipif(TT_STABILITY_TESTS, reason="This is TT_STABILITY_TESTS") + @pytest.mark.parametrize("template", templates, ids=templates_ids) + @pytest.mark.parametrize("half_precision", [True, False]) + def test_otx_eval_openvino(self, template, tmp_dir_path, half_precision): + tmp_dir_path = tmp_dir_path / "zero_shot_visual_prompting" + otx_eval_openvino_testing( + template, + tmp_dir_path, + otx_dir, + args, + threshold=0.2, + half_precision=half_precision, + is_visual_prompting=True, + ) + + @e2e_pytest_component + @pytest.mark.skipif(TT_STABILITY_TESTS, reason="This is TT_STABILITY_TESTS") + @pytest.mark.parametrize("template", templates, ids=templates_ids) + def test_ptq_optimize(self, template, tmp_dir_path): + tmp_dir_path = tmp_dir_path / "zero_shot_visual_prompting" + ptq_optimize_testing(template, tmp_dir_path, otx_dir, args, is_visual_prompting=True) + + @e2e_pytest_component + @pytest.mark.skipif(TT_STABILITY_TESTS, reason="This is TT_STABILITY_TESTS") + @pytest.mark.parametrize("template", templates, ids=templates_ids) + def test_ptq_validate_fq(self, template, tmp_dir_path): + tmp_dir_path = tmp_dir_path / "zero_shot_visual_prompting" + ptq_validate_fq_testing(template, tmp_dir_path, otx_dir, "visual_prompting", type(self).__name__) + + @e2e_pytest_component + @pytest.mark.skipif(TT_STABILITY_TESTS, reason="This is TT_STABILITY_TESTS") + @pytest.mark.parametrize("template", templates, ids=templates_ids) + def test_ptq_eval(self, template, tmp_dir_path): + tmp_dir_path = tmp_dir_path / "zero_shot_visual_prompting" + ptq_eval_testing(template, tmp_dir_path, otx_dir, args, is_visual_prompting=True) diff --git a/tests/integration/cli/visual_prompting/test_visual_prompting.py b/tests/integration/cli/visual_prompting/test_visual_prompting.py index 18d220376a1..92ff4bf356e 100644 --- a/tests/integration/cli/visual_prompting/test_visual_prompting.py +++ b/tests/integration/cli/visual_prompting/test_visual_prompting.py @@ -109,7 +109,15 @@ def test_otx_export_onnx(self, template, tmp_dir_path): @pytest.mark.parametrize("half_precision", [True, False]) def test_otx_eval_openvino(self, template, tmp_dir_path, half_precision): tmp_dir_path = tmp_dir_path / "visual_prompting" - otx_eval_openvino_testing(template, tmp_dir_path, otx_dir, args, threshold=1.0, half_precision=half_precision) + otx_eval_openvino_testing( + template, + tmp_dir_path, + otx_dir, + args, + threshold=1.0, + half_precision=half_precision, + is_visual_prompting=True, + ) @e2e_pytest_component @pytest.mark.skip("demo.py is not supported.") diff --git a/tests/integration/cli/visual_prompting/test_zero_shot.py b/tests/integration/cli/visual_prompting/test_zero_shot.py index 8d403f27999..ccedf5c2fa2 100644 --- 
a/tests/integration/cli/visual_prompting/test_zero_shot.py +++ b/tests/integration/cli/visual_prompting/test_zero_shot.py @@ -12,6 +12,8 @@ from tests.test_suite.run_test_command import ( otx_eval_testing, otx_train_testing, + otx_export_testing, + otx_eval_openvino_testing, ) args = { @@ -39,7 +41,7 @@ templates_ids = [template.model_template_id for template in templates] -class TestVisualPromptingCLI: +class TestZeroShotVisualPromptingCLI: @e2e_pytest_component @pytest.mark.parametrize("template", templates, ids=templates_ids) def test_otx_train(self, template, tmp_dir_path): @@ -51,3 +53,36 @@ def test_otx_train(self, template, tmp_dir_path): def test_otx_eval(self, template, tmp_dir_path): tmp_dir_path = tmp_dir_path / "zero_shot_visual_prompting" otx_eval_testing(template, tmp_dir_path, otx_dir, args) + + @e2e_pytest_component + @pytest.mark.parametrize("template", templates, ids=templates_ids) + def test_otx_export(self, template, tmp_dir_path): + tmp_dir_path = tmp_dir_path / "zero_shot_visual_prompting" + otx_export_testing(template, tmp_dir_path, False, check_ir_meta=False) + + @e2e_pytest_component + @pytest.mark.parametrize("template", templates, ids=templates_ids) + def test_otx_export_fp16(self, template, tmp_dir_path): + tmp_dir_path = tmp_dir_path / "zero_shot_visual_prompting" + otx_export_testing(template, tmp_dir_path, half_precision=True) + + @e2e_pytest_component + @pytest.mark.parametrize("template", templates, ids=templates_ids) + def test_otx_export_onnx(self, template, tmp_dir_path): + tmp_dir_path = tmp_dir_path / "zero_shot_visual_prompting" + otx_export_testing(template, tmp_dir_path, half_precision=False, is_onnx=True) + + @e2e_pytest_component + @pytest.mark.parametrize("template", templates, ids=templates_ids) + @pytest.mark.parametrize("half_precision", [True, False]) + def test_otx_eval_openvino(self, template, tmp_dir_path, half_precision): + tmp_dir_path = tmp_dir_path / "zero_shot_visual_prompting" + otx_eval_openvino_testing( + template, + tmp_dir_path, + otx_dir, + args, + threshold=1.0, + half_precision=half_precision, + is_visual_prompting=True, + ) diff --git a/tests/perf/__init__.py b/tests/perf/__init__.py new file mode 100644 index 00000000000..9984d0cb25b --- /dev/null +++ b/tests/perf/__init__.py @@ -0,0 +1,4 @@ +"""OTX Performance tests.""" + +# Copyright (C) 2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 diff --git a/tests/perf/benchmark-reference.csv b/tests/perf/benchmark-reference.csv new file mode 100644 index 00000000000..e491374d70f --- /dev/null +++ b/tests/perf/benchmark-reference.csv @@ -0,0 +1,198 @@ +benchmark,task,data_size,model,Precision(export),Precision(optimize),Precision(train),Recall(export),Recall(optimize),Recall(train),avg_data_time,avg_iter_time,avg_time_per_image(export),avg_time_per_image(optimize),epoch,f-measure(export),f-measure(optimize),f-measure(train),train_e2e_time,val_score,Dice Average(export),Dice Average(optimize),Dice Average(train),avg_cpu_util(%),avg_gpu_util(%),max_cpu_mem(GiB),max_gpu_mem(GiB),Accuracy(export),Accuracy(optimize),Accuracy(train),data +accuracy,anomaly_classification,large,ote_anomaly_classification_padim,1.0,1.0,1.0,0.9982,1.0,1.0,,,,,,0.9991,1.0,1.0,17.0,0.0,,,,,,,,,,,anomaly/mvtec/hazelnut_large +accuracy,anomaly_classification,large,ote_anomaly_classification_stfpm,1.0,1.0,1.0,1.0,1.0,1.0,,,,,,1.0,1.0,1.0,86.0,0.0,,,,,,,,,,,anomaly/mvtec/hazelnut_large
+accuracy,anomaly_classification,medium,ote_anomaly_classification_padim,0.9991,0.9853,1.0,0.9991,0.9853,1.0,,,,,,0.9991,0.9853,1.0,13.0,0.0,,,,,,,,,,,anomaly/mvtec/wood_medium +accuracy,anomaly_classification,medium,ote_anomaly_classification_stfpm,0.9974,0.9983,1.0,0.9974,0.9983,1.0,,,,,,0.9974,0.9983,1.0,71.0,0.0,,,,,,,,,,,anomaly/mvtec/wood_medium +accuracy,anomaly_classification,small,ote_anomaly_classification_padim,0.9960999999999999,1.0,1.0,0.9954333333333333,1.0,1.0,,,,,,0.9957666666666666,1.0,1.0,9.333333333333334,0.0,,,,,,,,,,,anomaly/mvtec/bottle_small/ +accuracy,anomaly_classification,small,ote_anomaly_classification_stfpm,0.9973666666666666,0.9980333333333333,1.0,0.9973666666666666,0.9980333333333333,1.0,,,,,,0.9973666666666666,0.9980333333333333,1.0,42.666666666666664,0.0,,,,,,,,,,,anomaly/mvtec/bottle_small/ +accuracy,anomaly_detection,large,ote_anomaly_detection_padim,,,,,,,,,,,,0.9947,0.993,0.9991,17.0,0.0,,,,,,,,,,,anomaly/mvtec/hazelnut_large +accuracy,anomaly_detection,large,ote_anomaly_detection_stfpm,,,,,,,,,,,,0.9982,1.0,0.9991,85.0,0.0,,,,,,,,,,,anomaly/mvtec/hazelnut_large +accuracy,anomaly_detection,medium,ote_anomaly_detection_padim,,,,,,,,,,,,0.9948,0.9867,0.9987,13.0,0.0,,,,,,,,,,,anomaly/mvtec/wood_medium +accuracy,anomaly_detection,medium,ote_anomaly_detection_stfpm,,,,,,,,,,,,0.9889,0.9674,0.9983,71.0,0.0,,,,,,,,,,,anomaly/mvtec/wood_medium +accuracy,anomaly_detection,small,ote_anomaly_detection_padim,,,,,,,,,,,,0.9310333333333333,0.9343666666666667,0.9971,9.333333333333334,0.0,,,,,,,,,,,anomaly/mvtec/bottle_small/ +accuracy,anomaly_detection,small,ote_anomaly_detection_stfpm,,,,,,,,,,,,0.9967333333333332,0.9980333333333333,0.9964333333333334,41.0,0.0,,,,,,,,,,,anomaly/mvtec/bottle_small/ +accuracy,anomaly_segmentation,large,ote_anomaly_segmentation_padim,,,,,,,,,,,,0.9982,0.993,0.9991,19.0,0.0,,,,,,,,,,,anomaly/mvtec/hazelnut_large +accuracy,anomaly_segmentation,large,ote_anomaly_segmentation_stfpm,,,,,,,,,,,,1.0,1.0,0.9991,104.0,0.0,,,,,,,,,,,anomaly/mvtec/hazelnut_large +accuracy,anomaly_segmentation,medium,ote_anomaly_segmentation_padim,,,,,,,,,,,,1.0,0.9922,0.9987,15.0,0.0,,,,,,,,,,,anomaly/mvtec/wood_medium +accuracy,anomaly_segmentation,medium,ote_anomaly_segmentation_stfpm,,,,,,,,,,,,0.9983,0.9991,0.9987,95.0,0.0,,,,,,,,,,,anomaly/mvtec/wood_medium +accuracy,anomaly_segmentation,small,ote_anomaly_segmentation_padim,,,,,,,,,,,,0.9957666666666666,1.0,0.9971,10.333333333333334,0.0,,,,,,,,,,,anomaly/mvtec/bottle_small/ +accuracy,anomaly_segmentation,small,ote_anomaly_segmentation_stfpm,,,,,,,,,,,,0.9973666666666666,0.9986666666666667,0.9957666666666666,52.666666666666664,0.0,,,,,,,,,,,anomaly/mvtec/bottle_small/ +accuracy,detection,large,Custom_Object_Detection_Gen3_ATSS,0.866,0.8576,0.8295,0.7345,0.7441,0.7612,0.0137,0.2048,0.034,0.0195,28.0,0.7948,0.7968,0.7939,338.0,0.775,,,,,,,,,,,detection/vitens_large +accuracy,detection,large,Custom_Object_Detection_Gen3_SSD,0.7056,0.746,0.7338,0.6792,0.6234,0.6595,0.016,0.1527,0.0246,0.0123,48.0,0.6921,0.6792,0.6946,436.0,0.6815,,,,,,,,,,,detection/vitens_large +accuracy,detection,large,Custom_Object_Detection_YOLOX,0.8594,0.8794,0.857,0.7046,0.6551,0.7122,0.0104,0.1423,0.0102,0.0083,34.0,0.7744,0.7509,0.7779,303.0,0.7438,,,,,,,,,,,detection/vitens_large +accuracy,detection,large,Object_Detection_ResNeXt101_ATSS,0.8745,0.8818,0.8748,0.7924,0.7864,0.7926,0.0056,0.3352,0.7009,0.3536,32.0,0.8314,0.8314,0.8317,1365.0,0.8059,,,,,,,,,,,detection/vitens_large 
+accuracy,detection,large,Object_Detection_YOLOX_L,0.855,0.8668,0.7781,0.7607,0.7532,0.789,0.0131,0.2566,0.1644,0.0948,48.0,0.8051,0.806,0.7835,691.0,0.8191,,,,,,,,,,,detection/vitens_large +accuracy,detection,large,Object_Detection_YOLOX_S,0.8273,0.8632,0.8273,0.7184,0.6699,0.7272,0.0151,0.2112,0.0398,0.025,18.0,0.769,0.7543,0.774,226.0,0.7594,,,,,,,,,,,detection/vitens_large +accuracy,detection,large,Object_Detection_YOLOX_X,0.8166,0.8091,0.7647,0.7547,0.7643,0.7776,0.0044,0.2392,0.2897,0.1634,40.0,0.7845,0.7861,0.7711,1208.0,0.8049,,,,,,,,,,,detection/vitens_large +accuracy,detection,medium,Custom_Object_Detection_Gen3_ATSS,0.7784,0.7812,0.7836,0.6841,0.6994,0.6754,0.014,0.2142,0.0318,0.019,38.6667,0.7264,0.7374,0.7247,322.0,0.7473,,,,,,,,,,,detection/pothole_medium +accuracy,detection,medium,Custom_Object_Detection_Gen3_SSD,0.7465,0.7072,0.7465,0.5316,0.549,0.5316,0.0146,0.1463,0.0225,0.0121,58.0,0.6204,0.6175,0.6204,337.0,0.6701,,,,,,,,,,,detection/pothole_medium +accuracy,detection,medium,Custom_Object_Detection_YOLOX,0.794,0.8134,0.8089,0.6514,0.5664,0.6536,0.0125,0.1433,0.0096,0.0095,40.6667,0.7155,0.667,0.7229,243.0,0.6809,,,,,,,,,,,detection/pothole_medium +accuracy,detection,medium,Object_Detection_ResNeXt101_ATSS,0.7942,0.7974,0.8222,0.6928,0.6776,0.6667,0.0058,0.3342,0.6171,0.3145,24.6667,0.7382,0.732,0.7362,719.0,0.7639,,,,,,,,,,,detection/pothole_medium +accuracy,detection,medium,Object_Detection_YOLOX_L,0.7183,0.7479,0.8111,0.634,0.6079,0.5926,0.0155,0.2543,0.1486,0.0839,39.3333,0.671,0.6692,0.6844,404.0,0.6878,,,,,,,,,,,detection/pothole_medium +accuracy,detection,medium,Object_Detection_YOLOX_S,0.7866,0.8289,0.8309,0.6231,0.5904,0.5904,0.0161,0.191,0.036,0.0221,46.0,0.6948,0.6889,0.689,339.0,0.6813,,,,,,,,,,,detection/pothole_medium +accuracy,detection,medium,Object_Detection_YOLOX_X,0.8159,0.822,0.7949,0.6732,0.6732,0.6667,0.0051,0.238,0.2618,0.1468,41.3333,0.7373,0.7402,0.7249,866.0,0.6903,,,,,,,,,,,detection/pothole_medium +accuracy,detection,small,Custom_Object_Detection_Gen3_ATSS,0.6698,0.6764666666666667,0.6608333333333333,0.4437333333333333,0.4335333333333333,0.4524333333333333,0.05903333333333333,0.25053333333333333,0.03206666666666667,0.02036666666666667,39.444433333333336,0.5290666666666667,0.5231333333333333,0.5305333333333334,46.0,0.6503333333333333,,,,,,,,,,,detection/pothole_small/ +accuracy,detection,small,Custom_Object_Detection_Gen3_SSD,0.5493666666666667,0.6093666666666667,0.5592666666666667,0.3602,0.33916666666666667,0.3544,0.06430000000000001,0.18976666666666667,0.021866666666666663,0.0117,46.1111,0.42663333333333336,0.431,0.42743333333333333,43.0,0.5454666666666667,,,,,,,,,,,detection/pothole_small/ +accuracy,detection,small,Custom_Object_Detection_YOLOX,0.5974666666666666,0.5909666666666666,0.5782333333333334,0.4364666666666667,0.4001333333333334,0.4379,0.08443333333333335,0.20806666666666665,0.010133333333333333,0.0084,38.33336666666667,0.49783333333333335,0.45646666666666674,0.4891666666666667,37.666666666666664,0.5445666666666666,,,,,,,,,,,detection/pothole_small/ +accuracy,detection,small,Object_Detection_ResNeXt101_ATSS,0.6268333333333334,0.6405333333333333,0.6311666666666667,0.5345333333333334,0.5177999999999999,0.5330666666666667,0.018933333333333333,0.3417333333333333,0.6179666666666667,0.3147333333333333,29.333333333333332,0.5697666666666666,0.5681666666666666,0.5708333333333333,113.33333333333333,0.7014999999999999,,,,,,,,,,,detection/pothole_small/ 
+accuracy,detection,small,Object_Detection_YOLOX_L,0.42523333333333335,0.45203333333333334,0.4489666666666667,0.39799999999999996,0.35509999999999997,0.3958,0.12126666666666668,0.34149999999999997,0.15066666666666667,0.08446666666666668,30.0,0.4079,0.3945,0.41353333333333336,62.666666666666664,0.4736666666666667,,,,,,,,,,,detection/pothole_small/ +accuracy,detection,small,Object_Detection_YOLOX_S,0.4303333333333333,0.4604333333333333,0.4392666666666667,0.2803333333333333,0.25853333333333334,0.2890333333333333,0.11886666666666668,0.27813333333333334,0.037866666666666667,0.0228,67.77776666666666,0.3258666666666667,0.3051,0.3326,88.66666666666667,0.3561666666666667,,,,,,,,,,,detection/pothole_small/ +accuracy,detection,small,Object_Detection_YOLOX_X,0.5061333333333333,0.5411,0.5042333333333333,0.38273333333333337,0.33843333333333336,0.3935666666666666,0.034899999999999994,0.25376666666666664,0.2625,0.14776666666666669,36.8889,0.43050000000000005,0.41306666666666664,0.43133333333333335,138.0,0.46963333333333335,,,,,,,,,,,detection/pothole_small/ +accuracy,hierarchical_label_classification,medium,Custom_Image_Classification_DeiT-Tiny,,,,,,,0.0035,0.0839,0.0069,0.0073,26.0,,,,79.0,73.4618,,,,,,,,0.3128,0.3114,0.3128,classification/h_label/h_label_CUB_medium +accuracy,hierarchical_label_classification,medium,Custom_Image_Classification_EfficientNet-V2-S,,,,,,,0.0037,0.2467,0.0105,0.0108,26.0,,,,211.0,74.2912,,,,,,,,0.3408,0.3311,0.3402,classification/h_label/h_label_CUB_medium +accuracy,hierarchical_label_classification,medium,Custom_Image_Classification_EfficinetNet-B0,,,,,,,0.0035,0.228,0.0054,0.0059,36.6667,,,,255.0,73.0667,,,,,,,,0.3064,0.2871,0.3071,classification/h_label/h_label_CUB_medium +accuracy,hierarchical_label_classification,medium,Custom_Image_Classification_MobileNet-V3-large-1x,,,,,,,0.0034,0.109,0.005,0.006,28.6667,,,,104.0,72.4591,,,,,,,,0.2441,0.2205,0.2447,classification/h_label/h_label_CUB_medium +accuracy,hierarchical_label_classification,small,Custom_Image_Classification_DeiT-Tiny,,,,,,,0.04776666666666666,0.08979999999999999,0.006566666666666668,0.0076,29.444433333333336,,,,13.333333333333334,71.83643333333333,,,,,,,,0.7608333333333334,0.7608333333333333,0.7617666666666666,classification/h_label/h_label_CUB_small/ +accuracy,hierarchical_label_classification,small,Custom_Image_Classification_EfficientNet-V2-S,,,,,,,0.049499999999999995,0.17593333333333336,0.010433333333333334,0.010700000000000001,30.0,,,,23.666666666666668,74.1512,,,,,,,,0.8531333333333334,0.8474666666666666,0.8540666666666666,classification/h_label/h_label_CUB_small/ +accuracy,hierarchical_label_classification,small,Custom_Image_Classification_EfficinetNet-B0,,,,,,,0.047466666666666664,0.11686666666666667,0.004633333333333333,0.0055000000000000005,40.555566666666664,,,,15.666666666666666,70.60183333333333,,,,,,,,0.8475,0.8088333333333333,0.8493666666666666,classification/h_label/h_label_CUB_small/ +accuracy,hierarchical_label_classification,small,Custom_Image_Classification_MobileNet-V3-large-1x,,,,,,,0.04716666666666667,0.08793333333333335,0.004566666666666667,0.006566666666666668,34.44446666666666,,,,12.333333333333334,72.68516666666666,,,,,,,,0.7363,0.6873999999999999,0.742,classification/h_label/h_label_CUB_small/ 
+accuracy,instance_segmentation,medium,Custom_Counting_Instance_Segmentation_MaskRCNN_EfficientNetB2B,0.6167666666666667,0.6246,0.5984333333333334,0.39546666666666663,0.3279,0.37539999999999996,0.0061,0.2332,0.3330666666666667,0.2241,18.555533333333333,0.48006666666666664,0.42733333333333334,0.45953333333333335,485.0,0.5273,,,,,,,,,,,instance_seg/coco_car_person_medium +accuracy,instance_segmentation,medium,Custom_Counting_Instance_Segmentation_MaskRCNN_ResNet50,0.7623000000000001,0.7542,0.7540333333333334,0.5637,0.5621999999999999,0.5555666666666667,0.012533333333333334,0.35673333333333335,0.49666666666666665,0.2874333333333334,12.666666666666666,0.6478666666666667,0.6438333333333334,0.6395,438.3333333333333,0.7068666666666666,,,,,,,,,,,instance_seg/coco_car_person_medium +accuracy,multi_label_classification,large,Custom_Image_Classification_DeiT-Tiny,,,,,,,0.0074,0.0677,0.0054,0.0061,17.0,,,,294.0,0.9773,,,,,,,,0.9787,0.9785,0.9787,classification/multi_label/multilabel_food101_large +accuracy,multi_label_classification,large,Custom_Image_Classification_EfficientNet-V2-S,,,,,,,0.0063,0.2989,0.0101,0.0096,17.0,,,,958.0,0.9895,,,,,,,,0.9894,0.9891,0.9895,classification/multi_label/multilabel_food101_large +accuracy,multi_label_classification,large,Custom_Image_Classification_EfficinetNet-B0,,,,,,,0.0067,0.1598,0.0039,0.0045,25.0,,,,774.0,0.9828,,,,,,,,0.9824,0.9805,0.9823,classification/multi_label/multilabel_food101_large +accuracy,multi_label_classification,large,Custom_Image_Classification_MobileNet-V3-large-1x,,,,,,,0.0074,0.1159,0.0036,0.0047,16.0,,,,392.0,0.9803,,,,,,,,0.9803,0.9786,0.9802,classification/multi_label/multilabel_food101_large +accuracy,multi_label_classification,medium,Custom_Image_Classification_DeiT-Tiny,,,,,,,0.0133,0.0732,0.0059,0.0064,28.0,,,,89.0,0.9893,,,,,,,,0.9886,0.9885,0.9886,classification/multi_label/multilabel_CUB_medium +accuracy,multi_label_classification,medium,Custom_Image_Classification_EfficientNet-V2-S,,,,,,,0.0133,0.309,0.0097,0.0101,24.6667,,,,279.0,0.9962,,,,,,,,0.9957,0.9957,0.9957,classification/multi_label/multilabel_CUB_medium +accuracy,multi_label_classification,medium,Custom_Image_Classification_EfficinetNet-B0,,,,,,,0.0121,0.1633,0.0043,0.0049,27.3333,,,,162.0,0.9935,,,,,,,,0.9931,0.9924,0.9931,classification/multi_label/multilabel_CUB_medium +accuracy,multi_label_classification,medium,Custom_Image_Classification_MobileNet-V3-large-1x,,,,,,,0.0126,0.1203,0.0041,0.0052,26.6667,,,,122.0,0.994,,,,,,,,0.9934,0.9924,0.9933,classification/multi_label/multilabel_CUB_medium +accuracy,multi_label_classification,small,Custom_Image_Classification_DeiT-Tiny,,,,,,,0.04123333333333334,0.08753333333333334,0.0068000000000000005,0.0074333333333333335,16.0,,,,16.666666666666668,0.9814666666666666,,,,,,,,0.9887,0.9899666666666667,0.9899666666666667,classification/multi_label/multilabel_CUB_small/ +accuracy,multi_label_classification,small,Custom_Image_Classification_EfficientNet-V2-S,,,,,,,0.054433333333333334,0.2988,0.009566666666666666,0.011933333333333332,16.0,,,,39.666666666666664,0.9907333333333334,,,,,,,,0.9899666666666667,0.9918333333333335,0.9887333333333332,classification/multi_label/multilabel_CUB_small/ +accuracy,multi_label_classification,small,Custom_Image_Classification_EfficinetNet-B0,,,,,,,0.03896666666666667,0.15156666666666666,0.0047666666666666664,0.005833333333333334,16.444433333333333,,,,21.333333333333332,1.0,,,,,,,,0.9887333333333332,0.9868333333333333,0.9887333333333332,classification/multi_label/multilabel_CUB_small/ 
+accuracy,multi_label_classification,small,Custom_Image_Classification_MobileNet-V3-large-1x,,,,,,,0.039433333333333334,0.1253,0.0045,0.006066666666666667,16.0,,,,18.333333333333332,0.9907333333333334,,,,,,,,0.9918666666666667,0.9900000000000001,0.9925,classification/multi_label/multilabel_CUB_small/ +accuracy,semantic_segmentation,large,Custom_Semantic_Segmentation_Lite-HRNet-18-mod2_OCR,,,,,,,0.00605,0.27405,0.08954999999999999,0.09284999999999999,19.0,,,,683.0,0.91855,0.8611,0.8591500000000001,0.86175,,,,,,,,semantic_seg/kvasir_large +accuracy,semantic_segmentation,large,Custom_Semantic_Segmentation_Lite-HRNet-18_OCR,,,,,,,0.00605,0.2717,0.08675,0.41425,17.0,,,,607.0,0.9185,0.87695,0.3024,0.8774,,,,,,,,semantic_seg/kvasir_large +accuracy,semantic_segmentation,large,Custom_Semantic_Segmentation_Lite-HRNet-s-mod2_OCR,,,,,,,0.006,0.2184,0.06875,0.07955000000000001,18.5,,,,530.0,0.91315,0.8465,0.8330500000000001,0.84695,,,,,,,,semantic_seg/kvasir_large +accuracy,semantic_segmentation,large,Custom_Semantic_Segmentation_Lite-HRNet-x-mod3_OCR,,,,,,,0.0061,0.5128999999999999,0.2495,0.1885,9.0,,,,604.5,0.9140999999999999,0.86605,0.8060499999999999,0.8669,,,,,,,,semantic_seg/kvasir_large +accuracy,semantic_segmentation,large,Custom_Semantic_Segmentation_SegNext_B,,,,,,,0.007050000000000001,0.35,0.1375,0.1076,34.0,,,,1436.5,0.9418,0.90045,0.8891,0.90115,,,,,,,,semantic_seg/kvasir_large +accuracy,semantic_segmentation,large,Custom_Semantic_Segmentation_SegNext_s,,,,,,,0.0069,0.1986,0.09720000000000001,0.086,28.5,,,,692.0,0.92885,0.88795,0.8791500000000001,0.8895,,,,,,,,semantic_seg/kvasir_large +accuracy,semantic_segmentation,large,Custom_Semantic_Segmentation_SegNext_t,,,,,,,0.0072,0.16635,0.07915,0.06035,55.0,,,,1119.5,0.9414,0.90395,0.5005499999999999,0.9039,,,,,,,,semantic_seg/kvasir_large +accuracy,semantic_segmentation,medium,Custom_Semantic_Segmentation_Lite-HRNet-18-mod2_OCR,,,,,,,0.00845,0.28165,0.0905,0.09605,17.33335,,,,281.5,0.8896,0.8221,0.8216,0.8222,,,,,,,,semantic_seg/kvasir_medium +accuracy,semantic_segmentation,medium,Custom_Semantic_Segmentation_Lite-HRNet-18_OCR,,,,,,,0.00855,0.28335,0.08940000000000001,0.2745,17.83335,,,,291.5,0.88855,0.8176000000000001,0.28995,0.8177000000000001,,,,,,,,semantic_seg/kvasir_medium +accuracy,semantic_segmentation,medium,Custom_Semantic_Segmentation_Lite-HRNet-s-mod2_OCR,,,,,,,0.00845,0.2248,0.0695,0.07100000000000001,15.33335,,,,197.5,0.88825,0.81725,0.7763500000000001,0.8176,,,,,,,,semantic_seg/kvasir_medium +accuracy,semantic_segmentation,medium,Custom_Semantic_Segmentation_Lite-HRNet-x-mod3_OCR,,,,,,,0.00885,0.5444,0.24974999999999997,0.2262,16.16665,,,,485.0,0.8937999999999999,0.8139000000000001,0.7655,0.8141499999999999,,,,,,,,semantic_seg/kvasir_medium +accuracy,semantic_segmentation,medium,Custom_Semantic_Segmentation_SegNext_B,,,,,,,0.00995,0.35155000000000003,0.14029999999999998,0.1028,36.6667,,,,573.0,0.9128000000000001,0.87405,0.70145,0.87485,,,,,,,,semantic_seg/kvasir_medium +accuracy,semantic_segmentation,medium,Custom_Semantic_Segmentation_SegNext_s,,,,,,,0.00955,0.20079999999999998,0.09625,0.08385000000000001,49.5,,,,442.5,0.9167000000000001,0.85365,0.84995,0.8545499999999999,,,,,,,,semantic_seg/kvasir_medium +accuracy,semantic_segmentation,medium,Custom_Semantic_Segmentation_SegNext_t,,,,,,,0.01015,0.16945,0.08145,0.0648,42.666650000000004,,,,342.0,0.91675,0.82795,0.61695,0.82925,,,,,,,,semantic_seg/kvasir_medium 
+accuracy,semantic_segmentation,small,Custom_Semantic_Segmentation_Lite-HRNet-18-mod2_OCR,,,,,,,0.06001666666666666,0.33801666666666663,0.09393333333333333,0.09263333333333333,43.61111666666667,,,,93.0,0.8038333333333334,0.6704666666666667,0.6694,0.6708500000000001,,,,,,,,semantic_seg/kvasir_small/ +accuracy,semantic_segmentation,small,Custom_Semantic_Segmentation_Lite-HRNet-18_OCR,,,,,,,0.06026666666666667,0.3372833333333333,0.08983333333333333,0.46135,42.94446666666666,,,,93.0,0.8043999999999999,0.6844333333333333,0.2878833333333333,0.6845333333333333,,,,,,,,semantic_seg/kvasir_small/ +accuracy,semantic_segmentation,small,Custom_Semantic_Segmentation_Lite-HRNet-s-mod2_OCR,,,,,,,0.05955,0.27458333333333335,0.07375,0.0701,42.444449999999996,,,,72.16666666666667,0.7821666666666666,0.6797166666666666,0.6080333333333333,0.6796333333333333,,,,,,,,semantic_seg/kvasir_small/ +accuracy,semantic_segmentation,small,Custom_Semantic_Segmentation_Lite-HRNet-x-mod3_OCR,,,,,,,0.06826666666666666,0.6018333333333333,0.258,0.2538333333333333,44.055550000000004,,,,163.0,0.8036333333333333,0.6940666666666666,0.5269166666666667,0.6944,,,,,,,,semantic_seg/kvasir_small/ +accuracy,semantic_segmentation,small,Custom_Semantic_Segmentation_SegNext_B,,,,,,,0.06328333333333334,0.40045,0.13941666666666666,0.09408333333333334,47.944449999999996,,,,128.66666666666666,0.8298333333333333,0.7581666666666665,0.43953333333333333,0.75955,,,,,,,,semantic_seg/kvasir_small/ +accuracy,semantic_segmentation,small,Custom_Semantic_Segmentation_SegNext_s,,,,,,,0.05968333333333333,0.24644999999999997,0.09971666666666666,0.07931666666666666,40.166666666666664,,,,62.666666666666664,0.8137833333333333,0.7175166666666666,0.6184666666666666,0.7174999999999999,,,,,,,,semantic_seg/kvasir_small/ +accuracy,semantic_segmentation,small,Custom_Semantic_Segmentation_SegNext_t,,,,,,,0.061,0.21693333333333334,0.07861666666666667,0.06005,40.444449999999996,,,,52.833333333333336,0.7762833333333333,0.6835666666666667,0.4820333333333333,0.6836666666666668,,,,,,,,semantic_seg/kvasir_small/ +accuracy,single_label_classification,large,Custom_Image_Classification_DeiT-Tiny,,,,,,,0.0016,0.0571,0.006,0.007,18.0,,,,279.0,0.821,,,,,,,,0.822,0.8202,0.8217,classification/single_label/multiclass_food101_large +accuracy,single_label_classification,large,Custom_Image_Classification_EfficientNet-V2-S,,,,,,,0.0008,0.1465,0.0103,0.0116,19.0,,,,639.0,0.8872,,,,,,,,0.8833,0.8755,0.8835,classification/single_label/multiclass_food101_large +accuracy,single_label_classification,large,Custom_Image_Classification_EfficinetNet-B0,,,,,,,0.0008,0.0837,0.0042,0.0053,23.0,,,,434.0,0.8475,,,,,,,,0.837,0.8278,0.8375,classification/single_label/multiclass_food101_large +accuracy,single_label_classification,large,Custom_Image_Classification_MobileNet-V3-large-1x,,,,,,,0.0011,0.0568,0.004,0.0057,30.0,,,,437.0,0.822,,,,,,,,0.8188,0.767,0.818,classification/single_label/multiclass_food101_large +accuracy,single_label_classification,medium,Custom_Image_Classification_DeiT-Tiny,,,,,,,0.0031,0.0576,0.0064,0.0074,42.0,,,,102.0,0.8107,,,,,,,,0.7748,0.7719,0.774,classification/single_label/multiclass_CUB_medium +accuracy,single_label_classification,medium,Custom_Image_Classification_EfficientNet-V2-S,,,,,,,0.0032,0.1437,0.0106,0.0118,24.0,,,,139.0,0.8715,,,,,,,,0.8382,0.8205,0.8387,classification/single_label/multiclass_CUB_medium 
+accuracy,single_label_classification,medium,Custom_Image_Classification_EfficinetNet-B0,,,,,,,0.0031,0.0866,0.0047,0.0056,26.0,,,,88.0,0.7883,,,,,,,,0.7735,0.7225,0.7733,classification/single_label/multiclass_CUB_medium +accuracy,single_label_classification,medium,Custom_Image_Classification_MobileNet-V3-large-1x,,,,,,,0.0031,0.0592,0.0045,0.0061,20.0,,,,53.0,0.7912,,,,,,,,0.7594,0.6956,0.76,classification/single_label/multiclass_CUB_medium +accuracy,single_label_classification,small,Custom_Image_Classification_DeiT-Tiny,,,,,,,0.0363,0.08103333333333333,0.0074333333333333335,0.0089,20.0,,,,13.666666666666666,1.0,,,,,,,,0.9981333333333332,1.0,0.9981333333333332,classification/single_label/multiclass_CUB_small/ +accuracy,single_label_classification,small,Custom_Image_Classification_EfficientNet-V2-S,,,,,,,0.0385,0.1643,0.011166666666666667,0.013333333333333334,20.0,,,,17.333333333333332,1.0,,,,,,,,1.0,1.0,1.0,classification/single_label/multiclass_CUB_small/ +accuracy,single_label_classification,small,Custom_Image_Classification_EfficinetNet-B0,,,,,,,0.036399999999999995,0.10313333333333334,0.005066666666666666,0.006566666666666666,20.0,,,,9.333333333333334,1.0,,,,,,,,0.9981333333333332,0.9962333333333334,0.9981333333333332,classification/single_label/multiclass_CUB_small/ +accuracy,single_label_classification,small,Custom_Image_Classification_MobileNet-V3-large-1x,,,,,,,0.03626666666666667,0.08616666666666667,0.005,0.0083,25.0,,,,9.666666666666666,1.0,,,,,,,,0.9812,0.9529333333333333,0.9831,classification/single_label/multiclass_CUB_small/ +accuracy,tiling_instance_segmentation,medium,Custom_Counting_Instance_Segmentation_MaskRCNN_EfficientNetB2B,0.9706,0.8699,0.969,0.8997,0.2112,0.8977,0.0017,0.1806,0.6746,0.1244,23.3333,0.9338,0.3389,0.932,807.0,0.8792,,,,,,,,,,,tiling_instance_seg/vitens_aeromonas_medium +accuracy,tiling_instance_segmentation,medium,Custom_Counting_Instance_Segmentation_MaskRCNN_ResNet50,0.9674,0.7812,0.967,0.894,0.2521,0.8912,0.0018,0.1748,1.5331,0.1389,23.0,0.9293,0.38,0.9275,758.0,0.8703,,,,,,,,,,,tiling_instance_seg/vitens_aeromonas_medium +accuracy,tiling_instance_segmentation,medium,Custom_Counting_Instance_Segmentation_MaskRCNN_SwinT_FP16,0.6741,0.5214,0.9596,0.8599,0.1626,0.8971,0.0019,0.1862,2.2792,0.2513,21.6667,0.6751,0.2475,0.9273,796.0,0.8687,,,,,,,,,,,tiling_instance_seg/vitens_aeromonas_medium +accuracy,tiling_instance_segmentation,small,Custom_Counting_Instance_Segmentation_MaskRCNN_EfficientNetB2B,0.9665333333333334,0.8414,0.9650333333333334,0.8255333333333333,0.16206666666666666,0.8228333333333332,0.0032,0.1823,0.6004666666666667,0.1188,30.666666666666668,0.8891333333333332,0.2647,0.8866666666666667,180.66666666666666,0.8278,,,,,,,,,,,tiling_instance_seg/vitens_aeromonas_small/ +accuracy,tiling_instance_segmentation,small,Custom_Counting_Instance_Segmentation_MaskRCNN_ResNet50,0.9610666666666666,0.8028,0.9570666666666666,0.7963333333333332,0.16493333333333335,0.8095333333333333,0.0032666666666666664,0.1753,1.3643333333333334,0.12883333333333333,33.0,0.8683333333333333,0.2709,0.8758666666666667,195.66666666666666,0.8178000000000001,,,,,,,,,,,tiling_instance_seg/vitens_aeromonas_small/ +speed,anomaly_classification,large,ote_anomaly_classification_padim,1.0,0.9982,1.0,1.0,0.9982,1.0,,,,,,1.0,0.9982,1.0,17.0,0.0,,,,35.68,10.72,10.77,2.41,,,,anomaly/mvtec/hazelnut_large +speed,anomaly_classification,large,ote_anomaly_classification_stfpm,1.0,1.0,1.0,1.0,1.0,1.0,,,,,,1.0,1.0,1.0,20.0,0.0,,,,36.99,21.01,9.27,3.07,,,,anomaly/mvtec/hazelnut_large 
+speed,anomaly_classification,medium,ote_anomaly_classification_padim,1.0,0.9922,1.0,1.0,0.9922,1.0,,,,,,1.0,0.9922,1.0,13.0,0.0,,,,33.9,8.23,10.47,2.42,,,,anomaly/mvtec/wood_medium +speed,anomaly_classification,medium,ote_anomaly_classification_stfpm,0.9974,1.0,1.0,0.9974,1.0,1.0,,,,,,0.9974,1.0,1.0,16.0,0.0,,,,32.0,15.05,10.04,3.09,,,,anomaly/mvtec/wood_medium +speed,anomaly_classification,small,ote_anomaly_classification_padim,0.9960999999999999,1.0,1.0,0.9954333333333333,1.0,1.0,,,,,,0.9957666666666666,1.0,1.0,9.333333333333334,0.0,,,,32.156666666666666,5.4433333333333325,9.773333333333333,2.41,,,,anomaly/mvtec/bottle_small/ +speed,anomaly_classification,small,ote_anomaly_classification_stfpm,0.9980333333333333,0.9758333333333334,1.0,0.9980333333333333,0.9758333333333334,1.0,,,,,,0.9980333333333333,0.9758333333333334,1.0,10.333333333333334,0.0,,,,26.03333333333333,12.043333333333335,9.633333333333333,3.0533333333333332,,,,anomaly/mvtec/bottle_small/ +speed,anomaly_detection,large,ote_anomaly_detection_padim,,,,,,,,,,,,1.0,1.0,0.9991,17.0,0.0,,,,35.65,10.57,9.74,2.41,,,,anomaly/mvtec/hazelnut_large +speed,anomaly_detection,large,ote_anomaly_detection_stfpm,,,,,,,,,,,,1.0,0.9982,0.9982,21.0,0.0,,,,34.75,20.73,8.28,3.07,,,,anomaly/mvtec/hazelnut_large +speed,anomaly_detection,medium,ote_anomaly_detection_padim,,,,,,,,,,,,0.9944,0.9845,0.9987,13.0,0.0,,,,32.71,7.44,9.18,2.42,,,,anomaly/mvtec/wood_medium +speed,anomaly_detection,medium,ote_anomaly_detection_stfpm,,,,,,,,,,,,0.9473,0.9494,0.997,16.0,0.0,,,,31.47,14.76,8.76,3.09,,,,anomaly/mvtec/wood_medium +speed,anomaly_detection,small,ote_anomaly_detection_padim,,,,,,,,,,,,0.9306333333333333,0.9352333333333332,0.9971,9.333333333333334,0.0,,,,31.776666666666667,5.436666666666667,8.85,2.41,,,,anomaly/mvtec/bottle_small/ +speed,anomaly_detection,small,ote_anomaly_detection_stfpm,,,,,,,,,,,,0.9974,0.9764666666666667,0.9802666666666666,10.333333333333334,0.0,,,,26.226666666666663,12.090000000000002,9.48,3.08,,,,anomaly/mvtec/bottle_small/ +speed,anomaly_segmentation,large,ote_anomaly_segmentation_padim,,,,,,,,,,,,0.9982,0.9965,0.9991,19.0,0.0,,,,37.08,9.85,9.06,2.41,,,,anomaly/mvtec/hazelnut_large +speed,anomaly_segmentation,large,ote_anomaly_segmentation_stfpm,,,,,,,,,,,,1.0,1.0,0.9974,24.0,0.0,,,,38.71,16.42,7.93,3.08,,,,anomaly/mvtec/hazelnut_large +speed,anomaly_segmentation,medium,ote_anomaly_segmentation_padim,,,,,,,,,,,,0.9991,0.9896,0.9987,15.0,0.0,,,,32.37,7.39,9.44,2.42,,,,anomaly/mvtec/wood_medium +speed,anomaly_segmentation,medium,ote_anomaly_segmentation_stfpm,,,,,,,,,,,,0.9974,1.0,0.9987,21.0,0.0,,,,30.6,11.56,9.1,3.1,,,,anomaly/mvtec/wood_medium +speed,anomaly_segmentation,small,ote_anomaly_segmentation_padim,,,,,,,,,,,,0.9967333333333332,1.0,0.9971,10.333333333333334,0.0,,,,30.086666666666662,5.523333333333333,8.886666666666665,2.41,,,,anomaly/mvtec/bottle_small/ +speed,anomaly_segmentation,small,ote_anomaly_segmentation_stfpm,,,,,,,,,,,,0.9954333333333333,0.9594666666666667,0.9675333333333334,12.333333333333334,0.0,,,,23.38,10.1,8.573333333333332,3.09,,,,anomaly/mvtec/bottle_small/ +speed,detection,large,Custom_Object_Detection_Gen3_ATSS,0.7055,0.7374,0.7005,0.6193,0.5868,0.6198,0.0135,0.2005,0.0336,0.0199,2.0,0.6596,0.6536,0.6577,57.0,0.6138,,,,9.87,36.12,8.33,9.63,,,,detection/vitens_large +speed,detection,large,Custom_Object_Detection_Gen3_SSD,0.633,0.6917,0.6332,0.402,0.3797,0.4028,0.0165,0.157,0.0244,0.0124,2.0,0.4917,0.4903,0.4924,51.0,0.4174,,,,13.01,35.88,8.28,6.77,,,,detection/vitens_large 
+speed,detection,large,Custom_Object_Detection_YOLOX,0.7373,0.7656,0.7376,0.6379,0.6045,0.6377,0.0113,0.1427,0.0101,0.0086,2.0,0.684,0.6756,0.684,63.0,0.657,,,,9.4,18.03,7.98,3.77,,,,detection/vitens_large +speed,detection,large,Object_Detection_ResNeXt101_ATSS,0.7508,0.6991,0.6891,0.6076,0.6501,0.6577,0.0053,0.321,0.6785,0.3538,2.0,0.6716,0.6737,0.673,131.0,0.6179,,,,7.11,72.35,9.68,12.49,,,,detection/vitens_large +speed,detection,large,Object_Detection_YOLOX_L,0.8073,0.8138,0.8074,0.6348,0.633,0.6343,0.0135,0.2609,0.1644,0.0943,2.0,0.7107,0.7121,0.7105,70.0,0.7017,,,,10.92,42.19,8.98,10.44,,,,detection/vitens_large +speed,detection,large,Object_Detection_YOLOX_S,0.7468,0.8426,0.7462,0.6008,0.4903,0.6037,0.0134,0.1914,0.0399,0.025,2.0,0.6659,0.6199,0.6674,54.0,0.6467,,,,13.48,23.63,8.07,5.05,,,,detection/vitens_large +speed,detection,large,Object_Detection_YOLOX_X,0.7336,0.7464,0.7335,0.5917,0.5837,0.5936,0.0043,0.2309,0.2895,0.1634,2.0,0.6551,0.6551,0.6561,105.0,0.6396,,,,8.93,50.39,10.21,9.48,,,,detection/vitens_large +speed,detection,medium,Custom_Object_Detection_Gen3_ATSS,0.6913,0.6501,0.6915,0.549,0.5686,0.5512,0.0147,0.2107,0.031,0.0191,2.0,0.6105,0.6064,0.612,33.0,0.65,,,,8.71,36.32,11.44,8.79,,,,detection/pothole_medium +speed,detection,medium,Custom_Object_Detection_Gen3_SSD,0.5189,0.5143,0.5159,0.3987,0.4031,0.3987,0.0143,0.1444,0.0222,0.0118,2.0,0.4508,0.4518,0.4496,28.0,0.4679,,,,10.81,35.75,11.15,6.77,,,,detection/pothole_medium +speed,detection,medium,Custom_Object_Detection_YOLOX,0.622,0.6323,0.6182,0.512,0.4466,0.4989,0.0129,0.1448,0.0104,0.0097,2.0,0.5604,0.5224,0.5501,42.0,0.5294,,,,8.88,17.82,9.51,3.96,,,,detection/pothole_medium +speed,detection,medium,Object_Detection_ResNeXt101_ATSS,0.6672,0.6981,0.6807,0.549,0.5338,0.536,0.0058,0.3288,0.6173,0.3144,2.0,0.5963,0.5972,0.5956,87.0,0.6185,,,,6.91,65.14,22.22,12.55,,,,detection/pothole_medium +speed,detection,medium,Object_Detection_YOLOX_L,0.6108,0.6131,0.6183,0.4946,0.4662,0.4837,0.0152,0.2377,0.1493,0.083,2.0,0.5452,0.529,0.5414,44.0,0.5487,,,,9.94,36.56,17.11,10.97,,,,detection/pothole_medium +speed,detection,medium,Object_Detection_YOLOX_S,0.3106,0.3649,0.3296,0.2963,0.268,0.2745,0.0156,0.19,0.0365,0.0234,2.0,0.2962,0.2951,0.2913,31.0,0.3072,,,,11.91,22.25,11.21,5.29,,,,detection/pothole_medium +speed,detection,medium,Object_Detection_YOLOX_X,0.5225,0.5082,0.5279,0.4074,0.3965,0.4205,0.0047,0.2395,0.2638,0.1446,2.0,0.4571,0.4454,0.4669,71.0,0.4865,,,,8.3,43.91,21.4,9.92,,,,detection/pothole_medium +speed,detection,small,Custom_Object_Detection_Gen3_ATSS,0.0,0.0,0.0,0.0,0.0,0.0,0.05853333333333333,0.2513,0.0313,0.020033333333333334,2.0,0.0,0.0,0.0,10.666666666666666,0.0,,,,7.133333333333333,12.313333333333333,14.886666666666668,10.74,,,,detection/pothole_small/ +speed,detection,small,Custom_Object_Detection_Gen3_SSD,0.01863333333333333,0.022466666666666666,0.0181,0.07116666666666667,0.08423333333333334,0.07843333333333334,0.06366666666666666,0.1903,0.022000000000000002,0.012533333333333334,2.0,0.026600000000000002,0.030866666666666667,0.026366666666666667,10.333333333333334,0.03166666666666667,,,,8.5,10.343333333333334,14.173333333333334,7.849999999999999,,,,detection/pothole_small/ 
+speed,detection,small,Custom_Object_Detection_YOLOX,0.06016666666666667,0.0515,0.05343333333333333,0.025433333333333336,0.015233333333333335,0.026166666666666668,0.08536666666666666,0.21083333333333334,0.010199999999999999,0.008233333333333334,2.0,0.022500000000000003,0.013033333333333333,0.024866666666666665,11.666666666666666,0.07216666666666667,,,,7.653333333333333,6.63,11.236666666666666,3.233333333333333,,,,detection/pothole_small/ +speed,detection,small,Object_Detection_ResNeXt101_ATSS,0.5694333333333333,0.5670000000000001,0.5432,0.039933333333333335,0.0385,0.07043333333333333,0.019433333333333334,0.33403333333333335,0.6159666666666667,0.3149666666666667,2.0,0.0687,0.06606666666666668,0.11520000000000001,32.0,0.13426666666666667,,,,7.1866666666666665,20.84,36.346666666666664,11.766666666666666,,,,detection/pothole_small/ +speed,detection,small,Object_Detection_YOLOX_L,0.13346666666666668,0.14959999999999998,0.08436666666666666,0.022500000000000003,0.018166666666666668,0.016666666666666666,0.12173333333333332,0.3413333333333333,0.14933333333333335,0.0835,2.0,0.036,0.030366666666666663,0.026366666666666667,19.0,0.06186666666666666,,,,8.07,9.943333333333333,24.61,9.05,,,,detection/pothole_small/ +speed,detection,small,Object_Detection_YOLOX_S,0.0,0.0,0.0,0.0,0.0,0.0,0.11499999999999999,0.2748,0.03616666666666667,0.0226,2.0,0.0,0.0,0.0,16.666666666666668,0.0,,,,7.663333333333334,7.003333333333334,14.469999999999999,4.2,,,,detection/pothole_small/ +speed,detection,small,Object_Detection_YOLOX_X,0.3045333333333333,0.3199,0.2754666666666667,0.24326666666666666,0.23383333333333334,0.24839999999999998,0.035333333333333335,0.24703333333333333,0.2628333333333333,0.14780000000000001,2.0,0.26689999999999997,0.26636666666666664,0.25806666666666667,27.0,0.23963333333333336,,,,8.066666666666666,12.003333333333332,32.873333333333335,7.87,,,,detection/pothole_small/ +speed,hierarchical_label_classification,medium,Custom_Image_Classification_DeiT-Tiny,,,,,,,0.0038,0.0831,0.0068,0.0073,2.0,,,,15.0,66.9406,,,,10.55,45.81,8.38,4.22,0.0857,0.0869,0.0862,classification/h_label/h_label_CUB_medium +speed,hierarchical_label_classification,medium,Custom_Image_Classification_EfficientNet-V2-S,,,,,,,0.0043,0.2407,0.0106,0.0108,2.0,,,,27.0,70.5778,,,,7.97,65.09,10.3,11.94,0.0936,0.0887,0.0927,classification/h_label/h_label_CUB_medium +speed,hierarchical_label_classification,medium,Custom_Image_Classification_EfficinetNet-B0,,,,,,,0.0037,0.2224,0.0053,0.0058,2.0,,,,22.0,67.6897,,,,8.33,76.59,8.49,9.18,0.0773,0.0677,0.0773,classification/h_label/h_label_CUB_medium +speed,hierarchical_label_classification,medium,Custom_Image_Classification_MobileNet-V3-large-1x,,,,,,,0.0036,0.1071,0.005,0.0058,2.0,,,,15.0,67.017,,,,9.88,58.49,8.25,5.85,0.0773,0.0617,0.0782,classification/h_label/h_label_CUB_medium +speed,hierarchical_label_classification,small,Custom_Image_Classification_DeiT-Tiny,,,,,,,0.04973333333333333,0.09196666666666668,0.0065,0.0075,2.0,,,,6.666666666666667,55.6327,,,,6.510000000000001,4.739999999999999,9.453333333333333,3.3333333333333335,0.4463333333333333,0.4444333333333333,0.4463,classification/h_label/h_label_CUB_small/ +speed,hierarchical_label_classification,small,Custom_Image_Classification_EfficientNet-V2-S,,,,,,,0.05053333333333334,0.18316666666666667,0.0105,0.0109,2.0,,,,8.666666666666666,48.7654,,,,7.043333333333333,8.676666666666668,14.206666666666665,6.883333333333333,0.33899999999999997,0.34559999999999996,0.34179999999999994,classification/h_label/h_label_CUB_small/ 
+speed,hierarchical_label_classification,small,Custom_Image_Classification_EfficinetNet-B0,,,,,,,0.04933333333333333,0.12069999999999999,0.004633333333333333,0.0055000000000000005,2.0,,,,6.333333333333333,44.13583333333333,,,,5.7700000000000005,10.52,9.786666666666667,6.19,0.307,0.33613333333333334,0.30696666666666667,classification/h_label/h_label_CUB_small/ +speed,hierarchical_label_classification,small,Custom_Image_Classification_MobileNet-V3-large-1x,,,,,,,0.0496,0.09246666666666666,0.004566666666666667,0.0065,2.0,,,,6.333333333333333,46.60490000000001,,,,5.023333333333333,6.536666666666666,9.333333333333334,3.9033333333333338,0.33240000000000003,0.34933333333333333,0.3343,classification/h_label/h_label_CUB_small/ +speed,instance_segmentation,medium,Custom_Counting_Instance_Segmentation_MaskRCNN_EfficientNetB2B,0.2605,0.3373,0.2373,0.1972,0.0894,0.1828,0.006,0.2325,0.3387,0.2247,2.0,0.2224,0.1403,0.2027,113.0,0.2356,,,,,,,,,,,instance_seg/coco_car_person_medium +speed,instance_segmentation,medium,Custom_Counting_Instance_Segmentation_MaskRCNN_ResNet50,0.7227,0.7308,0.7066,0.5633,0.5605,0.5565,0.0126,0.3511,0.5008,0.2897,2.0,0.6325,0.6328,0.6219,115.0,0.6921,,,,,,,,,,,instance_seg/coco_car_person_medium +speed,multi_label_classification,large,Custom_Image_Classification_DeiT-Tiny,,,,,,,0.0076,0.0679,0.0053,0.0061,2.0,,,,53.0,0.9508,,,,12.43,35.89,6.86,3.38,0.9535,0.9527,0.9535,classification/multi_label/multilabel_food101_large +speed,multi_label_classification,large,Custom_Image_Classification_EfficientNet-V2-S,,,,,,,0.0064,0.2833,0.0098,0.0096,2.0,,,,151.0,0.9791,,,,7.39,51.61,7.29,6.77,0.9791,0.9802,0.9791,classification/multi_label/multilabel_food101_large +speed,multi_label_classification,large,Custom_Image_Classification_EfficinetNet-B0,,,,,,,0.0063,0.1545,0.0038,0.0045,2.0,,,,77.0,0.9601,,,,10.23,62.26,7.08,5.62,0.9611,0.9564,0.9612,classification/multi_label/multilabel_food101_large +speed,multi_label_classification,large,Custom_Image_Classification_MobileNet-V3-large-1x,,,,,,,0.0074,0.1153,0.0036,0.0047,2.0,,,,64.0,0.968,,,,11.32,48.44,6.96,3.55,0.9686,0.9657,0.9686,classification/multi_label/multilabel_food101_large +speed,multi_label_classification,medium,Custom_Image_Classification_DeiT-Tiny,,,,,,,0.0129,0.0726,0.0058,0.0064,2.0,,,,16.0,0.9854,,,,10.33,29.06,8.24,3.73,0.9856,0.9856,0.9855,classification/multi_label/multilabel_CUB_medium +speed,multi_label_classification,medium,Custom_Image_Classification_EfficientNet-V2-S,,,,,,,0.0127,0.303,0.0096,0.0101,2.0,,,,35.0,0.9901,,,,7.52,49.52,10.06,6.97,0.9898,0.9894,0.9899,classification/multi_label/multilabel_CUB_medium +speed,multi_label_classification,medium,Custom_Image_Classification_EfficinetNet-B0,,,,,,,0.0119,0.1609,0.0043,0.0049,2.0,,,,20.0,0.9861,,,,9.26,55.08,8.42,6.19,0.9863,0.9857,0.9863,classification/multi_label/multilabel_CUB_medium +speed,multi_label_classification,medium,Custom_Image_Classification_MobileNet-V3-large-1x,,,,,,,0.0124,0.1207,0.004,0.0051,2.0,,,,17.0,0.9871,,,,9.82,42.59,8.2,3.8,0.9871,0.9867,0.9872,classification/multi_label/multilabel_CUB_medium +speed,multi_label_classification,small,Custom_Image_Classification_DeiT-Tiny,,,,,,,0.04086666666666667,0.08786666666666666,0.006833333333333334,0.007700000000000001,2.0,,,,10.0,0.7500333333333332,,,,6.596666666666667,8.299999999999999,9.373333333333333,2.8633333333333333,0.8807666666666667,0.8807666666666667,0.8807666666666667,classification/multi_label/multilabel_CUB_small/ 
+speed,multi_label_classification,small,Custom_Image_Classification_EfficientNet-V2-S,,,,,,,0.0579,0.3075,0.009633333333333334,0.011866666666666666,2.0,,,,11.333333333333334,0.6667,,,,6.763333333333333,18.406666666666666,14.15,4.163333333333333,0.8343000000000002,0.8343000000000002,0.8343000000000002,classification/multi_label/multilabel_CUB_small/ +speed,multi_label_classification,small,Custom_Image_Classification_EfficinetNet-B0,,,,,,,0.037366666666666666,0.15133333333333335,0.004733333333333333,0.005866666666666667,2.0,,,,7.333333333333333,0.7314999999999999,,,,6.09,17.483333333333334,9.763333333333334,3.736666666666667,0.8675333333333333,0.8581666666666666,0.8675333333333333,classification/multi_label/multilabel_CUB_small/ +speed,multi_label_classification,small,Custom_Image_Classification_MobileNet-V3-large-1x,,,,,,,0.037866666666666667,0.1293,0.004433333333333333,0.006033333333333332,2.0,,,,6.666666666666667,0.9861,,,,5.546666666666667,12.69,9.286666666666667,2.6966666666666668,0.9912333333333333,0.9799333333333333,0.9912333333333333,classification/multi_label/multilabel_CUB_small/ +speed,semantic_segmentation,large,Custom_Semantic_Segmentation_Lite-HRNet-18-mod2_OCR,,,,,,,0.005849999999999999,0.26570000000000005,0.086,0.08755,2.0,,,,90.0,0.876,0.8205,0.8161,0.82075,8.16,29.31,8.14,3.54,,,,semantic_seg/kvasir_large +speed,semantic_segmentation,large,Custom_Semantic_Segmentation_Lite-HRNet-18_OCR,,,,,,,0.00595,0.2741,0.0857,0.15855,2.0,,,,92.5,0.8755999999999999,0.83005,0.3015,0.82995,8.0,28.71,8.13,3.54,,,,semantic_seg/kvasir_large +speed,semantic_segmentation,large,Custom_Semantic_Segmentation_Lite-HRNet-s-mod2_OCR,,,,,,,0.00605,0.21355000000000002,0.0723,0.07365,2.0,,,,75.0,0.84825,0.78265,0.7864,0.7826,8.95,21.33,8.06,2.39,,,,semantic_seg/kvasir_large +speed,semantic_segmentation,large,Custom_Semantic_Segmentation_Lite-HRNet-x-mod3_OCR,,,,,,,0.006200000000000001,0.5105500000000001,0.24245,0.19569999999999999,2.0,,,,155.5,0.85285,0.7899499999999999,0.7539,0.7901,6.98,51.15,8.4,8.88,,,,semantic_seg/kvasir_large +speed,semantic_segmentation,large,Custom_Semantic_Segmentation_SegNext_B,,,,,,,0.00665,0.34235000000000004,0.13779999999999998,0.0996,2.0,,,,104.5,0.8833,0.8433999999999999,0.6749499999999999,0.8445499999999999,7.76,74.71,8.82,12.75,,,,semantic_seg/kvasir_large +speed,semantic_segmentation,large,Custom_Semantic_Segmentation_SegNext_s,,,,,,,0.0063999999999999994,0.1941,0.09495,0.08410000000000001,2.0,,,,66.5,0.87835,0.81135,0.7854000000000001,0.8121499999999999,9.33,73.28,8.38,8.06,,,,semantic_seg/kvasir_large +speed,semantic_segmentation,large,Custom_Semantic_Segmentation_SegNext_t,,,,,,,0.00655,0.16275,0.07675,0.04375,2.0,,,,58.0,0.8775999999999999,0.7832,0.25205,0.784,10.22,65.39,8.25,5.95,,,,semantic_seg/kvasir_large +speed,semantic_segmentation,medium,Custom_Semantic_Segmentation_Lite-HRNet-18-mod2_OCR,,,,,,,0.00825,0.28354999999999997,0.10135,0.10364999999999999,2.0,,,,49.0,0.8392999999999999,0.7434499999999999,0.7432,0.7432000000000001,7.54,19.1,11.63,3.54,,,,semantic_seg/kvasir_medium +speed,semantic_segmentation,medium,Custom_Semantic_Segmentation_Lite-HRNet-18_OCR,,,,,,,0.008400000000000001,0.2851,0.10020000000000001,0.26565,2.0,,,,49.5,0.835,0.74085,0.3022,0.7405999999999999,7.47,18.96,12.17,3.54,,,,semantic_seg/kvasir_medium 
+speed,semantic_segmentation,medium,Custom_Semantic_Segmentation_Lite-HRNet-s-mod2_OCR,,,,,,,0.008199999999999999,0.22465000000000002,0.08045,0.08499999999999999,2.0,,,,40.0,0.8444499999999999,0.7416499999999999,0.70835,0.7414000000000001,8.05,14.97,11.12,2.47,,,,semantic_seg/kvasir_medium +speed,semantic_segmentation,medium,Custom_Semantic_Segmentation_Lite-HRNet-x-mod3_OCR,,,,,,,0.0093,0.53175,0.34815,0.37795,2.0,,,,81.5,0.82685,0.70435,0.63815,0.7042999999999999,6.73,33.57,18.29,8.88,,,,semantic_seg/kvasir_medium +speed,semantic_segmentation,medium,Custom_Semantic_Segmentation_SegNext_B,,,,,,,0.0097,0.34440000000000004,0.1251,0.0763,2.0,,,,48.5,0.6231500000000001,0.3473,0.00039999999999999996,0.34735,7.67,53.27,15.64,12.76,,,,semantic_seg/kvasir_medium +speed,semantic_segmentation,medium,Custom_Semantic_Segmentation_SegNext_s,,,,,,,0.009149999999999998,0.19495,0.07465,0.052349999999999994,2.0,,,,31.5,0.60535,0.2632,0.1447,0.26515,8.99,51.21,12.77,8.06,,,,semantic_seg/kvasir_medium +speed,semantic_segmentation,medium,Custom_Semantic_Segmentation_SegNext_t,,,,,,,0.01,0.16745,0.0436,0.0292,2.0,,,,28.5,0.5012000000000001,0.0965,0.00125,0.0964,9.4,42.37,11.35,5.95,,,,semantic_seg/kvasir_medium +speed,semantic_segmentation,small,Custom_Semantic_Segmentation_Lite-HRNet-18-mod2_OCR,,,,,,,0.05988333333333334,0.3396166666666667,0.2023,0.14441666666666667,2.0,,,,13.833333333333334,0.5011,0.17454999999999998,0.0727,0.17346666666666666,6.453333333333333,10.933333333333332,16.203333333333333,3.5400000000000005,,,,semantic_seg/kvasir_small/ +speed,semantic_segmentation,small,Custom_Semantic_Segmentation_Lite-HRNet-18_OCR,,,,,,,0.060950000000000004,0.34378333333333333,0.19685,0.46130000000000004,2.0,,,,15.666666666666666,0.5005666666666667,0.17346666666666666,0.19521666666666668,0.17300000000000001,6.083333333333333,10.496666666666668,16.743333333333336,3.5400000000000005,,,,semantic_seg/kvasir_small/ +speed,semantic_segmentation,small,Custom_Semantic_Segmentation_Lite-HRNet-s-mod2_OCR,,,,,,,0.06033333333333333,0.2786,0.18791666666666665,0.1863,2.0,,,,12.666666666666666,0.36434999999999995,0.14285,0.12890000000000001,0.14288333333333333,5.986666666666667,7.553333333333334,14.886666666666665,2.4966666666666666,,,,semantic_seg/kvasir_small/ +speed,semantic_segmentation,small,Custom_Semantic_Segmentation_Lite-HRNet-x-mod3_OCR,,,,,,,0.07413333333333333,0.6359666666666667,0.6796166666666666,0.17290000000000003,2.0,,,,21.0,0.34401666666666664,0.18161666666666668,0.1686,0.1798833333333333,6.45,20.426666666666666,28.44666666666667,8.876666666666667,,,,semantic_seg/kvasir_small/ +speed,semantic_segmentation,small,Custom_Semantic_Segmentation_SegNext_B,,,,,,,0.06481666666666668,0.4011166666666666,0.20945,0.21018333333333336,2.0,,,,21.333333333333332,0.4666333333333334,0.11538333333333334,0.11848333333333333,0.11521666666666668,5.903333333333333,23.820000000000004,23.236666666666668,12.743333333333334,,,,semantic_seg/kvasir_small/ +speed,semantic_segmentation,small,Custom_Semantic_Segmentation_SegNext_s,,,,,,,0.06333333333333334,0.2501333333333333,0.23045000000000002,0.23578333333333334,2.0,,,,13.0,0.42733333333333334,0.20501666666666665,0.19596666666666665,0.20533333333333334,6.153333333333333,21.01,17.736666666666668,8.06,,,,semantic_seg/kvasir_small/ 
+speed,semantic_segmentation,small,Custom_Semantic_Segmentation_SegNext_t,,,,,,,0.0652,0.22215,0.1798,0.15453333333333333,2.0,,,,11.666666666666666,0.45389999999999997,0.18183333333333332,0.12665,0.18055,6.416666666666667,18.026666666666667,15.31,5.953333333333333,,,,semantic_seg/kvasir_small/ +speed,single_label_classification,large,Custom_Image_Classification_DeiT-Tiny,,,,,,,0.0015,0.057,0.006,0.007,2.0,,,,49.0,0.737,,,,12.96,38.5,8.01,3.38,0.7368,0.7345,0.7368,classification/single_label/multiclass_food101_large +speed,single_label_classification,large,Custom_Image_Classification_EfficientNet-V2-S,,,,,,,0.0008,0.145,0.0102,0.0113,2.0,,,,147.0,0.8365,,,,6.9,24.18,8.54,6.88,0.831,0.816,0.831,classification/single_label/multiclass_food101_large +speed,single_label_classification,large,Custom_Image_Classification_EfficinetNet-B0,,,,,,,0.0008,0.084,0.0042,0.0052,2.0,,,,54.0,0.7258,,,,12.86,51.12,8.15,5.57,0.7085,0.689,0.7092,classification/single_label/multiclass_food101_large +speed,single_label_classification,large,Custom_Image_Classification_MobileNet-V3-large-1x,,,,,,,0.0012,0.0566,0.0041,0.0056,2.0,,,,49.0,0.7378,,,,12.96,34.14,8.01,3.37,0.749,0.6627,0.7492,classification/single_label/multiclass_food101_large +speed,single_label_classification,medium,Custom_Image_Classification_DeiT-Tiny,,,,,,,0.0031,0.0575,0.0064,0.0073,2.0,,,,15.0,0.4783,,,,10.86,31.84,9.45,3.73,0.4464,0.4423,0.4457,classification/single_label/multiclass_CUB_medium +speed,single_label_classification,medium,Custom_Image_Classification_EfficientNet-V2-S,,,,,,,0.0032,0.1421,0.0106,0.0119,2.0,,,,25.0,0.8181,,,,8.83,36.15,11.4,7.05,0.7886,0.7782,0.7886,classification/single_label/multiclass_CUB_medium +speed,single_label_classification,medium,Custom_Image_Classification_EfficinetNet-B0,,,,,,,0.0032,0.087,0.0047,0.0056,2.0,,,,16.0,0.6253,,,,10.73,42.48,9.62,6.14,0.6232,0.5752,0.6231,classification/single_label/multiclass_CUB_medium +speed,single_label_classification,medium,Custom_Image_Classification_MobileNet-V3-large-1x,,,,,,,0.0031,0.0608,0.0044,0.006,2.0,,,,15.0,0.6622,,,,10.6,29.84,9.33,3.62,0.6521,0.5244,0.6534,classification/single_label/multiclass_CUB_medium +speed,single_label_classification,small,Custom_Image_Classification_DeiT-Tiny,,,,,,,0.03833333333333333,0.0847,0.0074333333333333335,0.009066666666666667,2.0,,,,6.333333333333333,1.0,,,,6.63,3.17,10.74,2.8200000000000003,1.0,1.0,1.0,classification/single_label/multiclass_CUB_small/ +speed,single_label_classification,small,Custom_Image_Classification_EfficientNet-V2-S,,,,,,,0.0393,0.17116666666666666,0.011733333333333333,0.013433333333333334,2.0,,,,8.666666666666666,0.75,,,,7.0566666666666675,4.866666666666667,15.450000000000001,3.956666666666667,0.8456,0.8606666666666666,0.8437,classification/single_label/multiclass_CUB_small/ +speed,single_label_classification,small,Custom_Image_Classification_EfficinetNet-B0,,,,,,,0.03746666666666667,0.10513333333333334,0.0051333333333333335,0.0067,2.0,,,,6.333333333333333,0.8889,,,,5.876666666666668,3.926666666666667,11.023333333333333,3.7566666666666664,0.9021,0.8625666666666666,0.9021,classification/single_label/multiclass_CUB_small/ +speed,single_label_classification,small,Custom_Image_Classification_MobileNet-V3-large-1x,,,,,,,0.03763333333333333,0.08983333333333333,0.004900000000000001,0.008100000000000001,2.0,,,,5.666666666666667,0.7222333333333334,,,,5.06,2.7866666666666666,10.573333333333332,2.6966666666666668,0.7476666666666666,0.7966333333333333,0.7476666666666668,classification/single_label/multiclass_CUB_small/ 
+speed,tiling_instance_segmentation,medium,Custom_Counting_Instance_Segmentation_MaskRCNN_EfficientNetB2B,0.9447,0.8112,0.9378,0.8255,0.089,0.8255,0.0017,0.1738,0.7004,0.1075,2.0,0.8811,0.1602,0.8781,167.0,0.784,,,,,,,,,,,tiling_instance_seg/vitens_aeromonas_medium +speed,tiling_instance_segmentation,medium,Custom_Counting_Instance_Segmentation_MaskRCNN_ResNet50,0.9231,0.7116,0.9237,0.8122,0.1469,0.8,0.0016,0.1689,1.6392,0.1318,2.0,0.8637,0.2435,0.8572,154.0,0.775,,,,,,,,,,,tiling_instance_seg/vitens_aeromonas_medium +speed,tiling_instance_segmentation,medium,Custom_Counting_Instance_Segmentation_MaskRCNN_SwinT_FP16,0.6517,0.1647,0.4863,0.7772,0.0352,0.7374,0.0018,0.1779,2.4882,0.2203,2.0,0.7045,0.0578,0.5848,211.0,0.5315,,,,,,,,,,,tiling_instance_seg/vitens_aeromonas_medium +speed,tiling_instance_segmentation,small,Custom_Counting_Instance_Segmentation_MaskRCNN_EfficientNetB2B,0.8207,0.47869999999999996,0.7049333333333333,0.7246666666666667,0.072,0.7191000000000001,0.0033666666666666667,0.18263333333333334,0.6970000000000001,0.11599999999999999,2.0,0.7677999999999999,0.12143333333333332,0.7058333333333332,57.666666666666664,0.6683,,,,,,,,,,,tiling_instance_seg/vitens_aeromonas_small/ diff --git a/tests/perf/benchmark.py b/tests/perf/benchmark.py new file mode 100644 index 00000000000..dc0e37c65d7 --- /dev/null +++ b/tests/perf/benchmark.py @@ -0,0 +1,210 @@ +"""OTX Benchmark based on tools/experiment.py.""" + +# Copyright (C) 2023-2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + + +import os +import glob +import pandas as pd +import yaml +from pathlib import Path +from typing import List, Optional + +from tests.test_suite.run_test_command import check_run + + +class OTXBenchmark: + """Benchmark runner based on tools/experiment.py in OTX1.x. + + Example: + >>> bm = OTXBenchmark(['random_sample1', 'random_sample2'], data_root='./data/coco') + >>> atss_result = bm.run('MobileNetV2-ATSS') + >>> yolox_result = bm.run('YOLOX-TINY') + + Args: + datasets (List[str]): Paths to datasets relative to the data_root. + Intended for, but not restricted to, different samplings of the same dataset. + data_root (str): Path to the root of dataset directories. Defaults to './data'. + num_epoch (int): Overrides the per-model default number of epochs. + Defaults to 0, which means no overriding. + num_repeat (int): Number of trials with different random seeds, which would be set + as range(0, num_repeat). Defaults to 1. + train_params (dict, optional): Additional training parameters. + e.g.) {'learning_parameters.num_iters': 2}. Defaults to {}. + track_resources (bool): Whether to track CPU & GPU usage metrics. Defaults to False. + eval_upto (str): The last serial operation to evaluate. Choose one of ('train', 'export', 'optimize'). + Operations include the preceding ones. + e.g.) Eval up to 'optimize': train -> eval -> export -> eval -> optimize -> eval + Defaults to 'train'. + output_root (str): Output root directory for logs and results. Defaults to './otx-benchmark'. + dry_run (bool): Whether to just print the OTX command without execution. Defaults to False. + tags (dict, optional): Key-value pair metadata for the experiment. + subset_dir_names (dict, optional): Specify dataset subset directory names, if any. + e.g.) {"train": "train_10percent", "val": "val_all", "test": "test"} + """
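+ + # Illustrative usage sketch; the dataset paths, repeat count, and tags below + # are placeholder assumptions for this comment, not defaults of the class: + # bm = OTXBenchmark( + # datasets=["detection/pothole_small/1", "detection/pothole_small/2"], + # data_root="./data", + # num_repeat=3, + # eval_upto="optimize", # train -> eval -> export -> eval -> optimize -> eval + # tags={"task": "detection"}, + # ) + # result = bm.run("Custom_Object_Detection_Gen3_ATSS") # pd.DataFrame or None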
+ e.x) {"train": "train_10percent", "val": "val_all", "test": "test"} + """ + + def __init__( + self, + datasets: List[str], + data_root: str = "data", + num_epoch: int = 0, + num_repeat: int = 1, + train_params: dict | None = None, + track_resources: bool = False, + eval_upto: str = "train", + output_root: str = "otx-benchmark", + dry_run: bool = False, + tags: dict | None = None, + subset_dir_names: dict | None = None, + ): + self.datasets = datasets + self.data_root = data_root + self.num_epoch = num_epoch + self.num_repeat = num_repeat + self.train_params = train_params or {} + self.track_resources = track_resources + self.eval_upto = eval_upto + self.output_root = output_root + self.dry_run = dry_run + self.tags = tags or {} + self.subset_dir_names = subset_dir_names or {"train": "", "val": "", "test": ""} + + def run( + self, + model_id: str, + train_params: dict = {}, + tags: dict = {}, + ) -> pd.DataFrame | None: + """Run configured benchmark with given model and return the result. + + Args: + model_id (str): Target model identifier + train_params (dict): Overrides global benchmark train params + tags (dict): Overrides global benchmark tags + + Retruns: + pd.DataFrame | None: Table with benchmark metrics + """ + + # Build config file + cfg = self._build_config(model_id, train_params, tags) + cfg_dir = Path(cfg["output_path"]) + cfg_dir.mkdir(parents=True, exist_ok=True) + cfg_path = cfg_dir / "cfg.yaml" + with open(cfg_path, "w") as cfg_file: + yaml.dump(cfg, cfg_file, indent=2) + cmd = [ + "python", + "tools/experiment.py", + "-f", + cfg_path, + ] + if self.dry_run: + cmd.append("-d") + # Run benchmark + check_run(cmd) + # Load result + result = self.load_result(cfg_dir) + return result + + @staticmethod + def load_result(result_path: str) -> pd.DataFrame | None: + """Load benchmark results recursively and merge as pd.DataFrame. + + Args: + result_path (str): Result directory or speicific file. 
+
+ Returns:
+ pd.DataFrame: Table with benchmark metrics & options
+ """
+ # Search csv files
+ if os.path.isdir(result_path):
+ csv_file_paths = glob.glob(f"{result_path}/**/exp_summary.csv", recursive=True)
+ else:
+ csv_file_paths = [result_path]
+ results = []
+ # Load csv data
+ for csv_file_path in csv_file_paths:
+ result = pd.read_csv(csv_file_path)
+ # Append metadata if any
+ cfg_file_path = Path(csv_file_path).parent / "cfg.yaml"
+ if cfg_file_path.exists():
+ with cfg_file_path.open("r") as cfg_file:
+ tags = yaml.safe_load(cfg_file).get("tags", {})
+ for k, v in tags.items():
+ result[k] = v
+ results.append(result)
+ if len(results) > 0:
+ # Merge experiments
+ data = pd.concat(results, ignore_index=True)
+ data["train_e2e_time"] = pd.to_timedelta(data["train_e2e_time"]).dt.total_seconds() # H:M:S str -> seconds
+ # Average by unique group
+ grouped = data.groupby(["benchmark", "task", "data_size", "model"])
+ aggregated = grouped.mean(numeric_only=True)
+ # ["data/1", "data/2", "data/3"] -> "data/"
+ aggregated["data"] = grouped["data"].agg(lambda x: os.path.commonprefix(x.tolist()))
+ return aggregated
+ else:
+ return None
+
+ def _build_config(
+ self,
+ model_id: str,
+ train_params: dict | None = None,
+ tags: dict | None = None,
+ ) -> dict:
+ """Build config for tools/experiment.py."""
+ all_train_params = self.train_params.copy()
+ all_train_params.update(train_params or {})
+ all_tags = self.tags.copy()
+ all_tags.update(tags or {})
+
+ cfg = {}
+ cfg["tags"] = all_tags # metadata
+ cfg["output_path"] = os.path.abspath(Path(self.output_root) / "-".join(list(all_tags.values()) + [model_id]))
+ cfg["constants"] = {
+ "dataroot": os.path.abspath(self.data_root),
+ }
+ cfg["variables"] = {
+ "model": [model_id],
+ "data": self.datasets,
+ }
+ cfg["repeat"] = self.num_repeat
+ cfg["command"] = []
+ resource_param = ""
+ if self.track_resources:
+ resource_param = "--track-resource-usage all"
+ if self.num_epoch > 0:
+ self._set_num_epoch(model_id, all_train_params, self.num_epoch)
+ params_str = " ".join([f"--{k} {v}" for k, v in all_train_params.items()])
+ cfg["command"].append(
+ "otx train ${model}"
+ " --train-data-roots ${dataroot}/${data}"
+ f"/{self.subset_dir_names['train']}"
+ " --val-data-roots ${dataroot}/${data}"
+ f"/{self.subset_dir_names['val']}"
+ " --deterministic"
+ f" {resource_param}"
+ f" params {params_str}"
+ )
+ cfg["command"].append("otx eval --test-data-roots ${dataroot}/${data}" + f"/{self.subset_dir_names['test']}")
+ if self.eval_upto == "train":
+ return cfg
+
+ cfg["command"].append("otx export")
+ cfg["command"].append("otx eval --test-data-roots ${dataroot}/${data}" + f"/{self.subset_dir_names['test']}")
+ if self.eval_upto == "export":
+ return cfg
+
+ cfg["command"].append("otx optimize")
+ cfg["command"].append("otx eval --test-data-roots ${dataroot}/${data}" + f"/{self.subset_dir_names['test']}")
+ return cfg
+
+ @staticmethod
+ def _set_num_epoch(model_id: str, train_params: dict, num_epoch: int):
+ """Set model-specific num_epoch parameter."""
+ if "padim" in model_id:
+ return # No configurable parameter for num_epoch
+ elif "stfpm" in model_id:
+ train_params["learning_parameters.max_epochs"] = num_epoch
+ else:
+ train_params["learning_parameters.num_iters"] = num_epoch diff --git a/tests/perf/conftest.py b/tests/perf/conftest.py new file mode 100644 index 00000000000..21c1ab84d54 --- /dev/null +++ b/tests/perf/conftest.py @@ -0,0 +1,309 @@ +# Copyright (C) 2023-2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+
+import os
+import re
+import shutil
+from datetime import datetime
+from pathlib import Path
+from typing import Dict, List, Tuple, Callable
+
+import mlflow
+import numpy as np
+import pandas as pd
+import pytest
+import yaml
+
+from otx import __version__ as VERSION
+from otx.api.entities.model_template import ModelCategory, ModelTemplate
+
+from .benchmark import OTXBenchmark
+
+
+def pytest_addoption(parser):
+ """Add custom options for perf tests."""
+ parser.addoption(
+ "--model-type",
+ action="store",
+ default="all",
+ choices=("default", "all"),
+ help="Choose default|all. Defaults to all.",
+ )
+ parser.addoption(
+ "--data-size",
+ action="store",
+ default="all",
+ choices=("small", "medium", "large", "all"),
+ help="Choose small|medium|large|all. Defaults to all.",
+ )
+ parser.addoption(
+ "--num-repeat",
+ action="store",
+ default=0,
+ help="Overrides default per-data-size number of repeat setting. "
+ "Random seeds are set to 0 ~ num_repeat-1 for the trials. "
+ "Defaults to 0 (small=3, medium=3, large=1).",
+ )
+ parser.addoption(
+ "--num-epoch",
+ action="store",
+ default=0,
+ help="Overrides default per-model number of epoch setting. "
+ "Defaults to 0 (per-model epoch & early-stopping).",
+ )
+ parser.addoption(
+ "--eval-upto",
+ action="store",
+ default="train",
+ choices=("train", "export", "optimize"),
+ help="Choose train|export|optimize. Defaults to train.",
+ )
+ parser.addoption(
+ "--data-root",
+ action="store",
+ default="data",
+ help="Dataset root directory.",
+ )
+ parser.addoption(
+ "--output-root",
+ action="store",
+ help="Output root directory. Defaults to temp directory.",
+ )
+ parser.addoption(
+ "--summary-csv",
+ action="store",
+ help="Path to output summary csv file. Defaults to {output-root}/benchmark-summary.csv",
+ )
+ parser.addoption(
+ "--dry-run",
+ action="store_true",
+ default=False,
+ help="Print OTX commands without execution.",
+ )
+
+
+@pytest.fixture(scope="session")
+def fxt_output_root(request: pytest.FixtureRequest, tmp_path_factory: pytest.TempPathFactory) -> Path:
+ """Output root + date + short commit hash."""
+ output_root = request.config.getoption("--output-root")
+ if output_root is None:
+ output_root = tmp_path_factory.mktemp("otx-benchmark")
+ date_str = datetime.now().strftime("%Y%m%d-%H%M%S")
+ commit_str = os.environ.get("GH_CTX_SHA", "unknown")
+ print(f"Git SHA configured with {commit_str}")
+ return Path(output_root) / (date_str + "-" + commit_str[:7])
+
+
+@pytest.fixture(scope="session")
+def fxt_working_branch() -> str:
+ """Git branch name for the current HEAD."""
+ branch = os.environ.get("GH_CTX_REF_NAME", "unknown")
+ print(f"working branch name fixture configured with {branch}")
+ return branch
+
+
+@pytest.fixture
+def fxt_model_id(request: pytest.FixtureRequest) -> str:
+ """Skip by model category."""
+ model_type: str = request.config.getoption("--model-type")
+ model_template: ModelTemplate = request.param
+ if model_type == "default":
+ if model_template.model_category == ModelCategory.OTHER:
+ pytest.skip(f"{model_template.model_category} category model")
+ return model_template.model_template_id
+
+
+@pytest.fixture
+def fxt_benchmark(request: pytest.FixtureRequest, fxt_output_root: Path) -> OTXBenchmark:
+ """Configure benchmark."""
+ # Skip by dataset size
+ data_size_option: str = request.config.getoption("--data-size")
+ data_size: str = request.param[0]
+ if data_size_option != "all":
+ if data_size_option != data_size:
+ pytest.skip(f"{data_size} datasets")
+
+ # Options
+ cfg: dict = request.param[1].copy()
+
+
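# Fold the parametrized size tier into the config tags, e.g. {"task": "detection"} -> {"task": "detection", "data_size": "small"}
+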
tags = cfg.get("tags", {}) + tags["data_size"] = data_size + cfg["tags"] = tags + + num_epoch_override: int = int(request.config.getoption("--num-epoch")) + if num_epoch_override > 0: # 0: use default + cfg["num_epoch"] = num_epoch_override + if "test_speed" in request.node.name: + if cfg.get("num_epoch", 0) == 0: # No user options + cfg["num_epoch"] = 2 + + num_repeat_override: int = int(request.config.getoption("--num-repeat")) + if num_repeat_override > 0: # 0: use default + cfg["num_repeat"] = num_repeat_override + + cfg["eval_upto"] = request.config.getoption("--eval-upto") + cfg["data_root"] = request.config.getoption("--data-root") + cfg["output_root"] = str(fxt_output_root) + cfg["dry_run"] = request.config.getoption("--dry-run") + + # Create benchmark + benchmark = OTXBenchmark( + **cfg, + ) + + return benchmark + + +def logging_perf_results_to_mlflow( + version: str, branch: str, git_hash: str, results: pd.DataFrame, client: "MlflowClient" +): + class DummyDatasetSource(mlflow.data.DatasetSource): + @staticmethod + def _get_source_type(): + return "dummy" + + class DummyDataset(mlflow.data.Dataset): + def _to_dict(self, base_dict): + return { + "name": base_dict["name"], + "digest": base_dict["digest"], + "source": base_dict["source"], + "source_type": base_dict["source_type"], + } + + exp_name = f"[{branch}] OTX Performance Benchmark" + exp = client.get_experiment_by_name(exp_name) + if exp is None: + exp_id = client.create_experiment(exp_name, tags={"Project": "OpenVINO Training Extensions", "Branch": branch}) + else: + exp_id = exp.experiment_id + + mlflow.set_experiment(experiment_id=exp_id) + + rows = results.to_dict(orient="records") + for row in rows: + task = row.pop("task") + model = row.pop("model") + data = row.pop("data") + data = os.path.dirname(data) + data_sz = row.pop("data_size") + benchmark = row.pop("benchmark") + runs = client.search_runs( + exp_id, + filter_string=f"tags.task LIKE '%{task}%' AND " + f"tags.model LIKE '%{model}%' AND " + f"tags.data LIKE '%{data}%' AND " + f"tags.benchmark LIKE '%{benchmark}%'", + ) + run = None + is_new_run = True + run_name = f"[{benchmark}] {task} | {model}" + if len(runs) == 0: + run = client.create_run(exp_id, run_name=run_name) + else: + is_new_run = False + run = runs[0] + + with mlflow.start_run(run_id=run.info.run_id): + if is_new_run: + mlflow.set_tag("task", task) + mlflow.set_tag("model", model) + mlflow.set_tag("data", data) + mlflow.set_tag("benchmark", benchmark) + dat_src = DummyDatasetSource() + dataset = DummyDataset(dat_src, data, data_sz) + mlflow.log_input(dataset) + mlflow.set_tag("version", version) + mlflow.set_tag("git-hash", git_hash) + for k, v in row.items(): + if isinstance(v, int) or isinstance(v, float): + k = k.replace("(", "_") + k = k.replace(")", "") + k = k.replace("%", "percentage") + history = client.get_metric_history(run.info.run_id, k) + step = 0 + if len(history) > 0: + step = history[-1].step + 1 + # set 'synchronous' to True to show the metric graph correctly + mlflow.log_metric(k, v, step=step, synchronous=True) + + +@pytest.fixture(scope="session", autouse=True) +def fxt_benchmark_summary(request: pytest.FixtureRequest, fxt_output_root: Path, fxt_working_branch, fxt_mlflow_client): + """Summarize all results at the end of test session.""" + yield + all_results = OTXBenchmark.load_result(fxt_output_root) + if all_results is not None: + print("=" * 20, "[Benchmark summary]") + print(all_results) + output_path = request.config.getoption("--summary-csv") + if not output_path: + 
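# Fall back to a summary csv under the session output root when --summary-csv is not given
+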
output_path = fxt_output_root / "benchmark-summary.csv"
+ all_results.to_csv(output_path)
+ print(f" -> Saved to {output_path}.")
+
+ if fxt_mlflow_client is None:
+ print(
+ "Tracking server is not configured. To log results, "
+ "set the 'MLFLOW_TRACKING_SERVER_URI' environment variable to the server URI."
+ )
+ return
+
+ # logging to the mlflow for 'develop' or 'releases/x.x.x' branch
+ if fxt_working_branch == "develop" or bool(re.match(r"^releases/[0-9]+\.[0-9]+\.[0-9]+$", fxt_working_branch)):
+ version = VERSION
+ git_hash = str(fxt_output_root).split("-")[-1]
+ logging_perf_results_to_mlflow(version, fxt_working_branch, git_hash, all_results, fxt_mlflow_client)
+
+ if os.environ.get("BENCHMARK_RESULTS_CLEAR", False):
+ shutil.rmtree(fxt_output_root)
+
+
+@pytest.fixture(scope="session")
+def fxt_benchmark_reference() -> pd.DataFrame | None:
+ """Load reference benchmark results with index."""
+ ref = pd.read_csv(Path(__file__).parent.resolve() / "benchmark-reference.csv")
+ if ref is not None:
+ ref.set_index(["benchmark", "task", "data_size", "model"], inplace=True)
+ return ref
+
+
+@pytest.fixture(scope="session")
+def fxt_check_benchmark_result(fxt_benchmark_reference: pd.DataFrame | None) -> Callable:
+ """Return result checking function with reference data."""
+
+ def check_benchmark_result(result: pd.DataFrame, key: Tuple, checks: List[Dict]):
+ if fxt_benchmark_reference is None:
+ print("No benchmark references loaded. Skipping result checking.")
+ return
+
+ def get_entry(data: pd.DataFrame, key: Tuple) -> pd.Series | None:
+ if key in data.index:
+ return data.loc[key]
+ return None
+
+ target_entry = get_entry(fxt_benchmark_reference, key)
+ if target_entry is None:
+ print(f"No benchmark reference for {key} loaded. Skipping result checking.")
+ return
+
+ result_entry = get_entry(result, key)
+ assert result_entry is not None
+
+ def compare(name: str, op: str, margin: float):
+ if name not in result_entry or result_entry[name] is None or np.isnan(result_entry[name]):
+ return
+ if name not in target_entry or target_entry[name] is None or np.isnan(target_entry[name]):
+ return
+ if op == "==":
+ assert abs(result_entry[name] - target_entry[name]) < target_entry[name] * margin
+ elif op == "<":
+ assert result_entry[name] < target_entry[name] * (1.0 + margin)
+ elif op == ">":
+ assert result_entry[name] > target_entry[name] * (1.0 - margin)
+
+ for check in checks:
+ compare(**check)
+
+ return check_benchmark_result diff --git a/tests/perf/test_anomaly.py b/tests/perf/test_anomaly.py new file mode 100644 index 00000000000..ac7e62e37c6 --- /dev/null +++ b/tests/perf/test_anomaly.py @@ -0,0 +1,281 @@ +"""OTX Anomaly performance tests."""
+
+# Copyright (C) 2023-2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+
+import pytest
+
+from otx.cli.registry import Registry
+from typing import Callable
+from .benchmark import OTXBenchmark
+
+
+class TestPerfAnomalyClassification:
+ """Benchmark anomaly classification."""
+
+ MODEL_TEMPLATES = Registry("src/otx/algorithms").filter(task_type="ANOMALY_CLASSIFICATION").templates
+ MODEL_IDS = [template.model_template_id for template in MODEL_TEMPLATES]
+
+ BENCHMARK_CONFIGS = {
+ "small": {
+ "tags": {
+ "task": "anomaly_classification",
+ },
+ "datasets": [
+ "anomaly/mvtec/bottle_small/1",
+ "anomaly/mvtec/bottle_small/2",
+ "anomaly/mvtec/bottle_small/3",
+ ],
+ "num_repeat": 3,
+ },
+ "medium": {
+ "tags": {
+ "task": "anomaly_classification",
+ },
+ "datasets": [
+ "anomaly/mvtec/wood_medium",
+ ],
+ "num_repeat": 3,
+
}, + "large": { + "tags": { + "task": "anomaly_classification", + }, + "datasets": [ + "anomaly/mvtec/hazelnut_large", + ], + "num_repeat": 1, + }, + } + + @pytest.mark.parametrize("fxt_model_id", MODEL_TEMPLATES, ids=MODEL_IDS, indirect=True) + @pytest.mark.parametrize("fxt_benchmark", BENCHMARK_CONFIGS.items(), ids=BENCHMARK_CONFIGS.keys(), indirect=True) + def test_accuracy(self, fxt_model_id: str, fxt_benchmark: OTXBenchmark, fxt_check_benchmark_result: Callable): + """Benchmark accruacy metrics.""" + result = fxt_benchmark.run( + model_id=fxt_model_id, + tags={"benchmark": "accuracy"}, + ) + fxt_check_benchmark_result( + result, + key=("accuracy", fxt_benchmark.tags["task"], fxt_benchmark.tags["data_size"], fxt_model_id), + checks=[ + { + "name": "f-measure(train)", + "op": ">", + "margin": 0.1, + }, + { + "name": "f-measure(export)", + "op": ">", + "margin": 0.1, + }, + { + "name": "f-measure(optimize)", + "op": ">", + "margin": 0.1, + }, + ], + ) + + @pytest.mark.parametrize("fxt_model_id", MODEL_TEMPLATES, ids=MODEL_IDS, indirect=True) + @pytest.mark.parametrize("fxt_benchmark", BENCHMARK_CONFIGS.items(), ids=BENCHMARK_CONFIGS.keys(), indirect=True) + def test_speed(self, fxt_model_id: str, fxt_benchmark: OTXBenchmark, fxt_check_benchmark_result: Callable): + """Benchmark train time per iter / infer time per image.""" + fxt_benchmark.track_resources = True + result = fxt_benchmark.run( + model_id=fxt_model_id, + tags={"benchmark": "speed"}, + ) + fxt_check_benchmark_result( + result, + key=("speed", fxt_benchmark.tags["task"], fxt_benchmark.tags["data_size"], fxt_model_id), + checks=[ + { + "name": "train_e2e_time", + "op": "<", + "margin": 0.1, + }, + ], + ) + + +class TestPerfAnomalyDetection: + """Benchmark anomaly detection.""" + + MODEL_TEMPLATES = Registry(f"src/otx/algorithms").filter(task_type="ANOMALY_DETECTION").templates + MODEL_IDS = [template.model_template_id for template in MODEL_TEMPLATES] + + BENCHMARK_CONFIGS = { + "small": { + "tags": { + "task": "anomaly_detection", + }, + "datasets": [ + "anomaly/mvtec/bottle_small/1", + "anomaly/mvtec/bottle_small/2", + "anomaly/mvtec/bottle_small/3", + ], + "num_repeat": 3, + }, + "medium": { + "tags": { + "task": "anomaly_detection", + }, + "datasets": [ + "anomaly/mvtec/wood_medium", + ], + "num_repeat": 3, + }, + "large": { + "tags": { + "task": "anomaly_detection", + }, + "datasets": [ + "anomaly/mvtec/hazelnut_large", + ], + "num_repeat": 1, + }, + } + + @pytest.mark.parametrize("fxt_model_id", MODEL_TEMPLATES, ids=MODEL_IDS, indirect=True) + @pytest.mark.parametrize("fxt_benchmark", BENCHMARK_CONFIGS.items(), ids=BENCHMARK_CONFIGS.keys(), indirect=True) + def test_accuracy(self, fxt_model_id: str, fxt_benchmark: OTXBenchmark, fxt_check_benchmark_result: Callable): + """Benchmark accruacy metrics.""" + result = fxt_benchmark.run( + model_id=fxt_model_id, + tags={"benchmark": "accuracy"}, + ) + fxt_check_benchmark_result( + result, + key=("accuracy", fxt_benchmark.tags["task"], fxt_benchmark.tags["data_size"], fxt_model_id), + checks=[ + { + "name": "f-measure(train)", + "op": ">", + "margin": 0.1, + }, + { + "name": "f-measure(export)", + "op": ">", + "margin": 0.1, + }, + { + "name": "f-measure(optimize)", + "op": ">", + "margin": 0.1, + }, + ], + ) + + @pytest.mark.parametrize("fxt_model_id", MODEL_TEMPLATES, ids=MODEL_IDS, indirect=True) + @pytest.mark.parametrize("fxt_benchmark", BENCHMARK_CONFIGS.items(), ids=BENCHMARK_CONFIGS.keys(), indirect=True) + def test_speed(self, fxt_model_id: str, fxt_benchmark: 
OTXBenchmark, fxt_check_benchmark_result: Callable): + """Benchmark train time per iter / infer time per image.""" + fxt_benchmark.track_resources = True + result = fxt_benchmark.run( + model_id=fxt_model_id, + tags={"benchmark": "speed"}, + ) + fxt_check_benchmark_result( + result, + key=("speed", fxt_benchmark.tags["task"], fxt_benchmark.tags["data_size"], fxt_model_id), + checks=[ + { + "name": "train_e2e_time", + "op": "<", + "margin": 0.1, + }, + ], + ) + + +class TestPerfAnomalySegmentation: + """Benchmark anomaly segmentation.""" + + MODEL_TEMPLATES = Registry(f"src/otx/algorithms").filter(task_type="ANOMALY_SEGMENTATION").templates + MODEL_IDS = [template.model_template_id for template in MODEL_TEMPLATES] + + BENCHMARK_CONFIGS = { + "small": { + "tags": { + "task": "anomaly_segmentation", + }, + "datasets": [ + "anomaly/mvtec/bottle_small/1", + "anomaly/mvtec/bottle_small/2", + "anomaly/mvtec/bottle_small/3", + ], + "num_repeat": 3, + }, + "medium": { + "tags": { + "task": "anomaly_segmentation", + }, + "datasets": [ + "anomaly/mvtec/wood_medium", + ], + "num_repeat": 3, + }, + "large": { + "tags": { + "task": "anomaly_segmentation", + }, + "datasets": [ + "anomaly/mvtec/hazelnut_large", + ], + "num_repeat": 1, + }, + } + + @pytest.mark.parametrize("fxt_model_id", MODEL_TEMPLATES, ids=MODEL_IDS, indirect=True) + @pytest.mark.parametrize("fxt_benchmark", BENCHMARK_CONFIGS.items(), ids=BENCHMARK_CONFIGS.keys(), indirect=True) + def test_accuracy(self, fxt_model_id: str, fxt_benchmark: OTXBenchmark, fxt_check_benchmark_result: Callable): + """Benchmark accruacy metrics.""" + result = fxt_benchmark.run( + model_id=fxt_model_id, + tags={"benchmark": "accuracy"}, + ) + fxt_check_benchmark_result( + result, + key=("accuracy", fxt_benchmark.tags["task"], fxt_benchmark.tags["data_size"], fxt_model_id), + checks=[ + { + "name": "f-measure(train)", + "op": ">", + "margin": 0.1, + }, + { + "name": "f-measure(export)", + "op": ">", + "margin": 0.1, + }, + { + "name": "f-measure(optimize)", + "op": ">", + "margin": 0.1, + }, + ], + ) + + @pytest.mark.parametrize("fxt_model_id", MODEL_TEMPLATES, ids=MODEL_IDS, indirect=True) + @pytest.mark.parametrize("fxt_benchmark", BENCHMARK_CONFIGS.items(), ids=BENCHMARK_CONFIGS.keys(), indirect=True) + def test_speed(self, fxt_model_id: str, fxt_benchmark: OTXBenchmark, fxt_check_benchmark_result: Callable): + """Benchmark train time per iter / infer time per image.""" + fxt_benchmark.track_resources = True + result = fxt_benchmark.run( + model_id=fxt_model_id, + tags={"benchmark": "speed"}, + ) + fxt_check_benchmark_result( + result, + key=("speed", fxt_benchmark.tags["task"], fxt_benchmark.tags["data_size"], fxt_model_id), + checks=[ + { + "name": "train_e2e_time", + "op": "<", + "margin": 0.1, + }, + ], + ) diff --git a/tests/perf/test_classification.py b/tests/perf/test_classification.py new file mode 100644 index 00000000000..820d644ae40 --- /dev/null +++ b/tests/perf/test_classification.py @@ -0,0 +1,336 @@ +"""OTX Classification perfomance tests.""" + +# Copyright (C) 2023-2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + + +import pytest + +from otx.cli.registry import Registry +from typing import Callable +from .benchmark import OTXBenchmark + + +MODEL_TEMPLATES = Registry(f"src/otx/algorithms").filter(task_type="CLASSIFICATION").templates +MODEL_IDS = [template.model_template_id for template in MODEL_TEMPLATES] + + +class TestPerfSingleLabelClassification: + """Benchmark single-label classification.""" + + BENCHMARK_CONFIGS = { + 
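# 'small' averages three sampled subsets with 3 repeats; 'medium' uses one dataset with 3 repeats; 'large' runs once
+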
"small": { + "tags": { + "task": "single_label_classification", + }, + "datasets": [ + "classification/single_label/multiclass_CUB_small/1", + "classification/single_label/multiclass_CUB_small/2", + "classification/single_label/multiclass_CUB_small/3", + ], + "num_repeat": 3, + }, + "medium": { + "tags": { + "task": "single_label_classification", + }, + "datasets": [ + "classification/single_label/multiclass_CUB_medium", + ], + "num_repeat": 3, + }, + "large": { + "tags": { + "task": "single_label_classification", + }, + "datasets": [ + "classification/single_label/multiclass_food101_large", + ], + "num_repeat": 1, + }, + } + + @pytest.mark.parametrize("fxt_model_id", MODEL_TEMPLATES, ids=MODEL_IDS, indirect=True) + @pytest.mark.parametrize("fxt_benchmark", BENCHMARK_CONFIGS.items(), ids=BENCHMARK_CONFIGS.keys(), indirect=True) + def test_accuracy(self, fxt_model_id: str, fxt_benchmark: OTXBenchmark): + """Benchmark accruacy metrics.""" + result = fxt_benchmark.run( + model_id=fxt_model_id, + tags={"benchmark": "accuracy"}, + ) + fxt_check_benchmark_result( + result, + key=("accuracy", fxt_benchmark.tags["task"], fxt_benchmark.tags["data_size"], fxt_model_id), + checks=[ + { + "name": "Accuracy(train)", + "op": ">", + "margin": 0.1, + }, + { + "name": "Accuracy(export)", + "op": ">", + "margin": 0.1, + }, + { + "name": "Accuracy(optimize)", + "op": ">", + "margin": 0.1, + }, + { + "name": "epoch", + "op": "<", + "margin": 0.1, + }, + ], + ) + + @pytest.mark.parametrize("fxt_model_id", MODEL_TEMPLATES, ids=MODEL_IDS, indirect=True) + @pytest.mark.parametrize("fxt_benchmark", BENCHMARK_CONFIGS.items(), ids=BENCHMARK_CONFIGS.keys(), indirect=True) + def test_speed(self, fxt_model_id: str, fxt_benchmark: OTXBenchmark, fxt_check_benchmark_result: Callable): + """Benchmark train time per iter / infer time per image.""" + fxt_benchmark.track_resources = True + result = fxt_benchmark.run( + model_id=fxt_model_id, + tags={"benchmark": "speed"}, + ) + fxt_check_benchmark_result( + result, + key=("speed", fxt_benchmark.tags["task"], fxt_benchmark.tags["data_size"], fxt_model_id), + checks=[ + { + "name": "avg_data_time", + "op": "<", + "margin": 0.1, + }, + { + "name": "avg_iter_time", + "op": "<", + "margin": 0.1, + }, + { + "name": "avg_time_per_image(export)", + "op": "<", + "margin": 0.1, + }, + { + "name": "avg_time_per_image(optimize)", + "op": "<", + "margin": 0.1, + }, + ], + ) + + +class TestPerfMultiLabelClassification: + """Benchmark multi-label classification.""" + + BENCHMARK_CONFIGS = { + "small": { + "tags": { + "task": "multi_label_classification", + }, + "datasets": [ + "classification/multi_label/multilabel_CUB_small/1", + "classification/multi_label/multilabel_CUB_small/2", + "classification/multi_label/multilabel_CUB_small/3", + ], + "num_repeat": 3, + }, + "medium": { + "tags": { + "task": "multi_label_classification", + }, + "datasets": [ + "classification/multi_label/multilabel_CUB_medium", + ], + "num_repeat": 3, + }, + "large": { + "tags": { + "task": "multi_label_classification", + }, + "datasets": [ + "classification/multi_label/multilabel_food101_large", + ], + "num_repeat": 1, + }, + } + + @pytest.mark.parametrize("fxt_model_id", MODEL_TEMPLATES, ids=MODEL_IDS, indirect=True) + @pytest.mark.parametrize("fxt_benchmark", BENCHMARK_CONFIGS.items(), ids=BENCHMARK_CONFIGS.keys(), indirect=True) + def test_accuracy(self, fxt_model_id: str, fxt_benchmark: OTXBenchmark, fxt_check_benchmark_result: Callable): + """Benchmark accruacy metrics.""" + result = fxt_benchmark.run( + 
model_id=fxt_model_id,
+ tags={"benchmark": "accuracy"},
+ )
+ fxt_check_benchmark_result(
+ result,
+ key=("accuracy", fxt_benchmark.tags["task"], fxt_benchmark.tags["data_size"], fxt_model_id),
+ checks=[
+ {
+ "name": "Accuracy(train)",
+ "op": ">",
+ "margin": 0.1,
+ },
+ {
+ "name": "Accuracy(export)",
+ "op": ">",
+ "margin": 0.1,
+ },
+ {
+ "name": "Accuracy(optimize)",
+ "op": ">",
+ "margin": 0.1,
+ },
+ {
+ "name": "epoch",
+ "op": "<",
+ "margin": 0.1,
+ },
+ ],
+ )
+
+ @pytest.mark.parametrize("fxt_model_id", MODEL_TEMPLATES, ids=MODEL_IDS, indirect=True)
+ @pytest.mark.parametrize("fxt_benchmark", BENCHMARK_CONFIGS.items(), ids=BENCHMARK_CONFIGS.keys(), indirect=True)
+ def test_speed(self, fxt_model_id: str, fxt_benchmark: OTXBenchmark, fxt_check_benchmark_result: Callable):
+ """Benchmark train time per iter / infer time per image."""
+ fxt_benchmark.track_resources = True
+ result = fxt_benchmark.run(
+ model_id=fxt_model_id,
+ tags={"benchmark": "speed"},
+ )
+ fxt_check_benchmark_result(
+ result,
+ key=("speed", fxt_benchmark.tags["task"], fxt_benchmark.tags["data_size"], fxt_model_id),
+ checks=[
+ {
+ "name": "avg_data_time",
+ "op": "<",
+ "margin": 0.1,
+ },
+ {
+ "name": "avg_iter_time",
+ "op": "<",
+ "margin": 0.1,
+ },
+ {
+ "name": "avg_time_per_image(export)",
+ "op": "<",
+ "margin": 0.1,
+ },
+ {
+ "name": "avg_time_per_image(optimize)",
+ "op": "<",
+ "margin": 0.1,
+ },
+ ],
+ )
+
+
+class TestPerfHierarchicalLabelClassification:
+ """Benchmark hierarchical-label classification."""
+
+ BENCHMARK_CONFIGS = {
+ "small": {
+ "tags": {
+ "task": "hierarchical_label_classification",
+ },
+ "datasets": [
+ "classification/h_label/h_label_CUB_small/1",
+ "classification/h_label/h_label_CUB_small/2",
+ "classification/h_label/h_label_CUB_small/3",
+ ],
+ "num_repeat": 3,
+ },
+ "medium": {
+ "tags": {
+ "task": "hierarchical_label_classification",
+ },
+ "datasets": [
+ "classification/h_label/h_label_CUB_medium",
+ ],
+ "num_repeat": 3,
+ },
+ # TODO: Add large dataset
+ # "large": {
+ # "tags": {
+ # "task": "hierarchical_label_classification",
+ # },
+ # "datasets": [
+ # ],
+ # "num_repeat": 1,
+ # },
+ }
+
+ @pytest.mark.parametrize("fxt_model_id", MODEL_TEMPLATES, ids=MODEL_IDS, indirect=True)
+ @pytest.mark.parametrize("fxt_benchmark", BENCHMARK_CONFIGS.items(), ids=BENCHMARK_CONFIGS.keys(), indirect=True)
+ def test_accuracy(self, fxt_model_id: str, fxt_benchmark: OTXBenchmark, fxt_check_benchmark_result: Callable):
+ """Benchmark accuracy metrics."""
+ result = fxt_benchmark.run(
+ model_id=fxt_model_id,
+ tags={"benchmark": "accuracy"},
+ )
+ fxt_check_benchmark_result(
+ result,
+ key=("accuracy", fxt_benchmark.tags["task"], fxt_benchmark.tags["data_size"], fxt_model_id),
+ checks=[
+ {
+ "name": "Accuracy(train)",
+ "op": ">",
+ "margin": 0.1,
+ },
+ {
+ "name": "Accuracy(export)",
+ "op": ">",
+ "margin": 0.1,
+ },
+ {
+ "name": "Accuracy(optimize)",
+ "op": ">",
+ "margin": 0.1,
+ },
+ {
+ "name": "epoch",
+ "op": "<",
+ "margin": 0.1,
+ },
+ ],
+ )
+
+ @pytest.mark.parametrize("fxt_model_id", MODEL_TEMPLATES, ids=MODEL_IDS, indirect=True)
+ @pytest.mark.parametrize("fxt_benchmark", BENCHMARK_CONFIGS.items(), ids=BENCHMARK_CONFIGS.keys(), indirect=True)
+ def test_speed(self, fxt_model_id: str, fxt_benchmark: OTXBenchmark, fxt_check_benchmark_result: Callable):
+ """Benchmark train time per iter / infer time per image."""
+ fxt_benchmark.track_resources = True
+ result = fxt_benchmark.run(
+ model_id=fxt_model_id,
+ tags={"benchmark": "speed"},
+ )
+
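# The aggregated speed metrics are then validated against benchmark-reference.csv via fxt_check_benchmark_result
+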
fxt_check_benchmark_result( + result, + key=("speed", fxt_benchmark.tags["task"], fxt_benchmark.tags["data_size"], fxt_model_id), + checks=[ + { + "name": "avg_data_time", + "op": "<", + "margin": 0.1, + }, + { + "name": "avg_iter_time", + "op": "<", + "margin": 0.1, + }, + { + "name": "avg_time_per_image(export)", + "op": "<", + "margin": 0.1, + }, + { + "name": "avg_time_per_image(optimize)", + "op": "<", + "margin": 0.1, + }, + ], + ) diff --git a/tests/perf/test_detection.py b/tests/perf/test_detection.py new file mode 100644 index 00000000000..c754549655a --- /dev/null +++ b/tests/perf/test_detection.py @@ -0,0 +1,122 @@ +"""OTX Detection perfomance tests.""" + +# Copyright (C) 2023-2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + + +import pytest + +from otx.cli.registry import Registry +from typing import Callable +from .benchmark import OTXBenchmark + + +MODEL_TEMPLATES = Registry(f"src/otx/algorithms").filter(task_type="DETECTION").templates +MODEL_IDS = [template.model_template_id for template in MODEL_TEMPLATES] + + +class TestPerfDetection: + """Benchmark basic object detection.""" + + BENCHMARK_CONFIGS = { + "small": { + "tags": { + "task": "detection", + }, + "datasets": [ + "detection/pothole_small/1", + "detection/pothole_small/2", + "detection/pothole_small/3", + ], + "num_repeat": 3, + }, + "medium": { + "tags": { + "task": "detection", + }, + "datasets": [ + "detection/pothole_medium", + ], + "num_repeat": 3, + }, + "large": { + "tags": { + "task": "detection", + }, + "datasets": [ + "detection/vitens_large", + ], + "num_repeat": 1, + }, + } + + @pytest.mark.parametrize("fxt_model_id", MODEL_TEMPLATES, ids=MODEL_IDS, indirect=True) + @pytest.mark.parametrize("fxt_benchmark", BENCHMARK_CONFIGS.items(), ids=BENCHMARK_CONFIGS.keys(), indirect=True) + def test_accuracy(self, fxt_model_id: str, fxt_benchmark: OTXBenchmark, fxt_check_benchmark_result: Callable): + """Benchmark accruacy metrics.""" + result = fxt_benchmark.run( + model_id=fxt_model_id, + tags={"benchmark": "accuracy"}, + ) + fxt_check_benchmark_result( + result, + key=("accuracy", fxt_benchmark.tags["task"], fxt_benchmark.tags["data_size"], fxt_model_id), + checks=[ + { + "name": "f-measure(train)", + "op": ">", + "margin": 0.1, + }, + { + "name": "epoch", + "op": "<", + "margin": 0.1, + }, + { + "name": "f-measure(export)", + "op": ">", + "margin": 0.1, + }, + { + "name": "f-measure(optimize)", + "op": ">", + "margin": 0.1, + }, + ], + ) + + @pytest.mark.parametrize("fxt_model_id", MODEL_TEMPLATES, ids=MODEL_IDS, indirect=True) + @pytest.mark.parametrize("fxt_benchmark", BENCHMARK_CONFIGS.items(), ids=BENCHMARK_CONFIGS.keys(), indirect=True) + def test_speed(self, fxt_model_id: str, fxt_benchmark: OTXBenchmark, fxt_check_benchmark_result: Callable): + """Benchmark train time per iter / infer time per image.""" + fxt_benchmark.track_resources = True + result = fxt_benchmark.run( + model_id=fxt_model_id, + tags={"benchmark": "speed"}, + ) + fxt_check_benchmark_result( + result, + key=("speed", fxt_benchmark.tags["task"], fxt_benchmark.tags["data_size"], fxt_model_id), + checks=[ + { + "name": "avg_data_time", + "op": "<", + "margin": 0.1, + }, + { + "name": "avg_iter_time", + "op": "<", + "margin": 0.1, + }, + { + "name": "avg_time_per_image(export)", + "op": "<", + "margin": 0.1, + }, + { + "name": "avg_time_per_image(optimize)", + "op": "<", + "margin": 0.1, + }, + ], + ) diff --git a/tests/perf/test_instance_segmentation.py b/tests/perf/test_instance_segmentation.py new file mode 100644 index 
00000000000..fc869a29a1b --- /dev/null +++ b/tests/perf/test_instance_segmentation.py @@ -0,0 +1,237 @@ +"""OTX Instance Segmentation perfomance tests.""" + +# Copyright (C) 2023-2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + + +import pytest + +from otx.cli.registry import Registry +from typing import Callable +from .benchmark import OTXBenchmark + + +MODEL_TEMPLATES = Registry(f"src/otx/algorithms").filter(task_type="INSTANCE_SEGMENTATION").templates +MODEL_IDS = [template.model_template_id for template in MODEL_TEMPLATES] + + +class TestPerfInstanceSegmentation: + """Benchmark basic instance segmentation.""" + + BENCHMARK_CONFIGS = { + "small": { + "tags": { + "task": "instance_segmentation", + }, + "datasets": [ + "instance_seg/wgisd_small/1", + "instance_seg/wgisd_small/2", + "instance_seg/wgisd_small/3", + ], + "num_repeat": 3, + }, + "medium": { + "tags": { + "task": "instance_segmentation", + }, + "datasets": [ + "instance_seg/coco_car_person_medium", + ], + "num_repeat": 3, + }, + # TODO: Refine large dataset + # "large": { + # "tags": { + # "task": "instance_segmentation", + # }, + # "datasets": [ + # "instance_seg/bdd_large", + # ], + # "num_repeat": 1, + # }, + } + + @pytest.mark.parametrize("fxt_model_id", MODEL_TEMPLATES, ids=MODEL_IDS, indirect=True) + @pytest.mark.parametrize("fxt_benchmark", BENCHMARK_CONFIGS.items(), ids=BENCHMARK_CONFIGS.keys(), indirect=True) + def test_accuracy(self, fxt_model_id: str, fxt_benchmark: OTXBenchmark, fxt_check_benchmark_result: Callable): + """Benchmark accruacy metrics.""" + result = fxt_benchmark.run( + model_id=fxt_model_id, + tags={"benchmark": "accuracy"}, + ) + fxt_check_benchmark_result( + result, + key=("accuracy", fxt_benchmark.tags["task"], fxt_benchmark.tags["data_size"], fxt_model_id), + checks=[ + { + "name": "f-measure(train)", + "op": ">", + "margin": 0.1, + }, + { + "name": "epoch", + "op": "<", + "margin": 0.1, + }, + { + "name": "f-measure(export)", + "op": ">", + "margin": 0.1, + }, + { + "name": "f-measure(optimize)", + "op": ">", + "margin": 0.1, + }, + ], + ) + + @pytest.mark.parametrize("fxt_model_id", MODEL_TEMPLATES, ids=MODEL_IDS, indirect=True) + @pytest.mark.parametrize("fxt_benchmark", BENCHMARK_CONFIGS.items(), ids=BENCHMARK_CONFIGS.keys(), indirect=True) + def test_speed(self, fxt_model_id: str, fxt_benchmark: OTXBenchmark, fxt_check_benchmark_result: Callable): + """Benchmark train time per iter / infer time per image.""" + fxt_benchmark.track_resources = True + result = fxt_benchmark.run( + model_id=fxt_model_id, + tags={"benchmark": "speed"}, + ) + fxt_check_benchmark_result( + result, + key=("speed", fxt_benchmark.tags["task"], fxt_benchmark.tags["data_size"], fxt_model_id), + checks=[ + { + "name": "avg_data_time", + "op": "<", + "margin": 0.1, + }, + { + "name": "avg_iter_time", + "op": "<", + "margin": 0.1, + }, + { + "name": "avg_time_per_image(export)", + "op": "<", + "margin": 0.1, + }, + { + "name": "avg_time_per_image(optimize)", + "op": "<", + "margin": 0.1, + }, + ], + ) + + +class TestPerfTilingInstanceSegmentation: + """Benchmark tiling instance segmentation.""" + + TILING_PARAMS = { + "tiling_parameters.enable_tiling": 1, + } + BENCHMARK_CONFIGS = { + "small": { + "tags": { + "task": "tiling_instance_segmentation", + }, + "datasets": [ + "tiling_instance_seg/vitens_aeromonas_small/1", + "tiling_instance_seg/vitens_aeromonas_small/2", + "tiling_instance_seg/vitens_aeromonas_small/3", + ], + "num_repeat": 3, + "train_params": TILING_PARAMS, + }, + "medium": { + "tags": { + 
"task": "tiling_instance_segmentation", + }, + "datasets": [ + "tiling_instance_seg/vitens_aeromonas_medium", + ], + "num_repeat": 3, + "train_params": TILING_PARAMS, + }, + # TODO: Refine large dataset + # "large": { + # "tags": { + # "task": "tiling_instance_segmentation", + # }, + # "datasets": [ + # "tiling_instance_seg/dota_large", + # ], + # "num_repeat": 1, + # "train_params": TILING_PARAMS, + # }, + } + + @pytest.mark.parametrize("fxt_model_id", MODEL_TEMPLATES, ids=MODEL_IDS, indirect=True) + @pytest.mark.parametrize("fxt_benchmark", BENCHMARK_CONFIGS.items(), ids=BENCHMARK_CONFIGS.keys(), indirect=True) + def test_accuracy(self, fxt_model_id: str, fxt_benchmark: OTXBenchmark, fxt_check_benchmark_result: Callable): + """Benchmark accruacy metrics.""" + result = fxt_benchmark.run( + model_id=fxt_model_id, + tags={"benchmark": "accuracy"}, + ) + fxt_check_benchmark_result( + result, + key=("accuracy", fxt_benchmark.tags["task"], fxt_benchmark.tags["data_size"], fxt_model_id), + checks=[ + { + "name": "f-measure(train)", + "op": ">", + "margin": 0.1, + }, + { + "name": "epoch", + "op": "<", + "margin": 0.1, + }, + { + "name": "f-measure(export)", + "op": ">", + "margin": 0.1, + }, + { + "name": "f-measure(optimize)", + "op": ">", + "margin": 0.1, + }, + ], + ) + + @pytest.mark.parametrize("fxt_model_id", MODEL_TEMPLATES, ids=MODEL_IDS, indirect=True) + @pytest.mark.parametrize("fxt_benchmark", BENCHMARK_CONFIGS.items(), ids=BENCHMARK_CONFIGS.keys(), indirect=True) + def test_speed(self, fxt_model_id: str, fxt_benchmark: OTXBenchmark, fxt_check_benchmark_result: Callable): + """Benchmark train time per iter / infer time per image.""" + fxt_benchmark.track_resources = True + result = fxt_benchmark.run( + model_id=fxt_model_id, + tags={"benchmark": "speed"}, + ) + fxt_check_benchmark_result( + result, + key=("speed", fxt_benchmark.tags["task"], fxt_benchmark.tags["data_size"], fxt_model_id), + checks=[ + { + "name": "avg_data_time", + "op": "<", + "margin": 0.1, + }, + { + "name": "avg_iter_time", + "op": "<", + "margin": 0.1, + }, + { + "name": "avg_time_per_image(export)", + "op": "<", + "margin": 0.1, + }, + { + "name": "avg_time_per_image(optimize)", + "op": "<", + "margin": 0.1, + }, + ], + ) diff --git a/tests/perf/test_semantic_segmentation.py b/tests/perf/test_semantic_segmentation.py new file mode 100644 index 00000000000..62eaa01f6c0 --- /dev/null +++ b/tests/perf/test_semantic_segmentation.py @@ -0,0 +1,125 @@ +"""OTX Semantic Segmentation perfomance tests.""" + +# Copyright (C) 2023-2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + + +import pytest + +from otx.cli.registry import Registry +from typing import Callable +from .benchmark import OTXBenchmark + + +MODEL_TEMPLATES = Registry(f"src/otx/algorithms").filter(task_type="SEGMENTATION").templates +MODEL_IDS = [template.model_template_id for template in MODEL_TEMPLATES] + + +class TestPerfSemanticSegmentation: + """Benchmark basic semantic segmentation.""" + + BENCHMARK_CONFIGS = { + "small": { + "tags": { + "task": "semantic_segmentation", + }, + "datasets": [ + "semantic_seg/kvasir_small/1", + "semantic_seg/kvasir_small/2", + "semantic_seg/kvasir_small/3", + ], + "subset_dir_names": {"train": "train", "val": "val", "test": "test"}, + "num_repeat": 3, + }, + "medium": { + "tags": { + "task": "semantic_segmentation", + }, + "datasets": [ + "semantic_seg/kvasir_medium", + ], + "subset_dir_names": {"train": "train", "val": "val", "test": "test"}, + "num_repeat": 3, + }, + "large": { + "tags": { + "task": 
"semantic_segmentation", + }, + "datasets": [ + "semantic_seg/kvasir_large", + ], + "subset_dir_names": {"train": "train", "val": "val", "test": "test"}, + "num_repeat": 1, + }, + } + + @pytest.mark.parametrize("fxt_model_id", MODEL_TEMPLATES, ids=MODEL_IDS, indirect=True) + @pytest.mark.parametrize("fxt_benchmark", BENCHMARK_CONFIGS.items(), ids=BENCHMARK_CONFIGS.keys(), indirect=True) + def test_accuracy(self, fxt_model_id: str, fxt_benchmark: OTXBenchmark, fxt_check_benchmark_result: Callable): + """Benchmark accruacy metrics.""" + result = fxt_benchmark.run( + model_id=fxt_model_id, + tags={"benchmark": "accuracy"}, + ) + fxt_check_benchmark_result( + result, + key=("accuracy", fxt_benchmark.tags["task"], fxt_benchmark.tags["data_size"], fxt_model_id), + checks=[ + { + "name": "Dice Average(train)", + "op": ">", + "margin": 0.1, + }, + { + "name": "epoch", + "op": "<", + "margin": 0.1, + }, + { + "name": "Dice Average(export)", + "op": ">", + "margin": 0.1, + }, + { + "name": "Dice Average(optimize)", + "op": ">", + "margin": 0.1, + }, + ], + ) + + @pytest.mark.parametrize("fxt_model_id", MODEL_TEMPLATES, ids=MODEL_IDS, indirect=True) + @pytest.mark.parametrize("fxt_benchmark", BENCHMARK_CONFIGS.items(), ids=BENCHMARK_CONFIGS.keys(), indirect=True) + def test_speed(self, fxt_model_id: str, fxt_benchmark: OTXBenchmark, fxt_check_benchmark_result: Callable): + """Benchmark train time per iter / infer time per image.""" + fxt_benchmark.track_resources = True + result = fxt_benchmark.run( + model_id=fxt_model_id, + tags={"benchmark": "speed"}, + ) + fxt_check_benchmark_result( + result, + key=("speed", fxt_benchmark.tags["task"], fxt_benchmark.tags["data_size"], fxt_model_id), + checks=[ + { + "name": "avg_data_time", + "op": "<", + "margin": 0.1, + }, + { + "name": "avg_iter_time", + "op": "<", + "margin": 0.1, + }, + { + "name": "avg_time_per_image(export)", + "op": "<", + "margin": 0.1, + }, + { + "name": "avg_time_per_image(optimize)", + "op": "<", + "margin": 0.1, + }, + ], + ) diff --git a/tests/test_suite/run_test_command.py b/tests/test_suite/run_test_command.py index d56259c8dda..4ec74b00b98 100644 --- a/tests/test_suite/run_test_command.py +++ b/tests/test_suite/run_test_command.py @@ -246,12 +246,15 @@ def otx_export_testing(template, root, dump_features=False, half_precision=False path_to_xml = os.path.join(save_path, "openvino.xml") assert os.path.exists(os.path.join(save_path, "label_schema.json")) if not is_onnx: - if "Visual_Prompting" in template.model_template_id: + if any(map(lambda x: x in template.model_template_id, ("Visual_Prompting", "Zero_Shot"))): path_to_xml = os.path.join(save_path, "visual_prompting_decoder.xml") assert os.path.exists(os.path.join(save_path, "visual_prompting_image_encoder.xml")) assert os.path.exists(os.path.join(save_path, "visual_prompting_image_encoder.bin")) assert os.path.exists(os.path.join(save_path, "visual_prompting_decoder.xml")) assert os.path.exists(os.path.join(save_path, "visual_prompting_decoder.bin")) + if "Zero_Shot" in template.model_template_id: + assert os.path.exists(os.path.join(save_path, "visual_prompting_prompt_getter.xml")) + assert os.path.exists(os.path.join(save_path, "visual_prompting_prompt_getter.bin")) else: assert os.path.exists(path_to_xml) assert os.path.exists(os.path.join(save_path, "openvino.bin")) @@ -262,9 +265,11 @@ def otx_export_testing(template, root, dump_features=False, half_precision=False xml_model = xml_stream.read() assert f"{input_size[1]},{input_size[0]}" in xml_model else: - if 
"Visual_Prompting" in template.model_template_id: + if any(map(lambda x: x in template.model_template_id, ("Visual_Prompting", "Zero_Shot"))): assert os.path.exists(os.path.join(save_path, "visual_prompting_image_encoder.onnx")) assert os.path.exists(os.path.join(save_path, "visual_prompting_decoder.onnx")) + if "Zero_Shot" in template.model_template_id: + assert os.path.exists(os.path.join(save_path, "visual_prompting_prompt_getter.onnx")) else: path_to_onnx = os.path.join(save_path, "model.onnx") assert os.path.exists(path_to_onnx) @@ -333,14 +338,16 @@ def otx_eval_openvino_testing( args, threshold=0.0, half_precision=False, + is_visual_prompting=False, ): template_work_dir = get_template_dir(template, root) - weights_path = f"{template_work_dir}/exported_{template.model_template_id}/openvino.xml" + weights_file = "visual_prompting_decoder" if is_visual_prompting else "openvino" + weights_path = f"{template_work_dir}/exported_{template.model_template_id}/{weights_file}.xml" output_path = f"{template_work_dir}/exported_{template.model_template_id}" perf_path = f"{template_work_dir}/exported_{template.model_template_id}/performance.json" if half_precision: - weights_path = f"{template_work_dir}/exported_{template.model_template_id}_fp16/openvino.xml" + weights_path = f"{template_work_dir}/exported_{template.model_template_id}_fp16/{weights_file}.xml" output_path = f"{template_work_dir}/exported_{template.model_template_id}_fp16" perf_path = f"{template_work_dir}/exported_{template.model_template_id}_fp16/performance.json" @@ -612,17 +619,21 @@ def _validate_fq_in_xml(xml_path, path_to_ref_data, compression_type, test_name, def ptq_validate_fq_testing(template, root, otx_dir, task_type, test_name): template_work_dir = get_template_dir(template, root) - xml_paths = [f"{template_work_dir}/ptq_{template.model_template_id}/openvino.xml"] - if task_type == "visual_prompting": + if "visual_prompting" == task_type: xml_paths = [ f"{template_work_dir}/ptq_{template.model_template_id}/visual_prompting_image_encoder.xml", f"{template_work_dir}/ptq_{template.model_template_id}/visual_prompting_decoder.xml", ] + if "zero_shot" in str(root): + xml_paths.append(f"{template_work_dir}/ptq_{template.model_template_id}/visual_prompting_prompt_getter.xml") + else: + xml_paths = [f"{template_work_dir}/ptq_{template.model_template_id}/openvino.xml"] + for xml_path in xml_paths: if not os.path.exists(xml_path): pytest.skip(reason=f"required file is not exist - {xml_path}") - if task_type == "visual_prompting": + if "visual_prompting" == task_type: paths_to_ref_data = [ os.path.join( otx_dir, @@ -643,6 +654,18 @@ def ptq_validate_fq_testing(template, root, otx_dir, task_type, test_name): "compressed_decoder.yml", ), ] + if "zero_shot" in str(root): + paths_to_ref_data.append( + os.path.join( + otx_dir, + "tests", + "e2e/cli", + task_type, + "reference", + template.model_template_id, + "compressed_prompt_getter.yml", + ) + ) else: paths_to_ref_data = [ os.path.join( @@ -656,10 +679,10 @@ def ptq_validate_fq_testing(template, root, otx_dir, task_type, test_name): def ptq_eval_testing(template, root, otx_dir, args, is_visual_prompting=False): template_work_dir = get_template_dir(template, root) - - weights_path = f"{template_work_dir}/ptq_{template.model_template_id}/openvino.xml" if is_visual_prompting: weights_path = f"{template_work_dir}/ptq_{template.model_template_id}/visual_prompting_decoder.xml" + else: + weights_path = f"{template_work_dir}/ptq_{template.model_template_id}/openvino.xml" if not 
os.path.exists(weights_path): pytest.skip(reason=f"required file is not exist - {weights_path}") diff --git a/tests/unit/algorithms/detection/adapters/mmdet/nncf/test_task.py b/tests/unit/algorithms/detection/adapters/mmdet/nncf/test_task.py index 5a543d59d0b..b000a0f7eca 100644 --- a/tests/unit/algorithms/detection/adapters/mmdet/nncf/test_task.py +++ b/tests/unit/algorithms/detection/adapters/mmdet/nncf/test_task.py @@ -2,7 +2,9 @@ # SPDX-License-Identifier: Apache-2.0 # +from contextlib import nullcontext import os +from unittest.mock import MagicMock import numpy as np import pytest @@ -11,11 +13,13 @@ from otx.algorithms.common.adapters.mmcv.hooks import OTXLoggerHook from otx.algorithms.detection.adapters.mmdet.nncf.task import DetectionNNCFTask from otx.api.configuration.helper import create +from otx.api.entities.inference_parameters import InferenceParameters from otx.api.entities.metrics import NullPerformance, Performance, ScoreMetric from otx.api.entities.model_template import TaskType, parse_model_template from otx.api.usecases.evaluation.metrics_helper import MetricsHelper from otx.api.usecases.tasks.interfaces.optimization_interface import OptimizationType from tests.test_suite.e2e_test_system import e2e_pytest_unit +from tests.unit.algorithms.detection.adapters.mmdet.test_task import MockDataLoader, MockDataset from tests.unit.algorithms.detection.test_helpers import ( DEFAULT_DET_TEMPLATE_DIR, generate_det_dataset, @@ -31,6 +35,7 @@ def setup(self, otx_model, tmp_dir_path) -> None: task_env = init_environment(hyper_parameters, model_template) self.model = otx_model self.det_nncf_task = DetectionNNCFTask(task_env, output_path=str(tmp_dir_path)) + self.dataset, _ = generate_det_dataset(task_type=TaskType.DETECTION) @e2e_pytest_unit def test_save_model(self, mocker): @@ -58,7 +63,6 @@ def test_save_model(self, mocker): @e2e_pytest_unit def test_optimize(self, mocker): """Test optimize method in OTXDetTaskNNCF.""" - self.dataset, _ = generate_det_dataset(task_type=TaskType.DETECTION) mock_lcurve_val = OTXLoggerHook.Curve() mock_lcurve_val.x = [0, 1] mock_lcurve_val.y = [0.1, 0.2] @@ -88,6 +92,38 @@ def test_optimize(self, mocker): assert self.model.performance != NullPerformance() assert self.model.performance.score.value == 0.1 + @e2e_pytest_unit + def test_infer(self, mocker) -> None: + """Test infer function.""" + + mocker.patch( + "otx.algorithms.detection.adapters.mmdet.nncf.task.build_nncf_detector", + return_value=(None, MagicMock()), + ) + mocker.patch( + "otx.algorithms.common.adapters.mmcv.utils.builder.build_dataset", + return_value=MockDataset(self.dataset, "det"), + ) + mocker.patch( + "otx.algorithms.common.adapters.mmcv.utils.builder.build_dataloader", + return_value=MockDataLoader(self.dataset), + ) + mocker.patch( + "otx.algorithms.detection.adapters.mmdet.task.single_gpu_test", + return_value=[ + np.array([np.array([[0, 0, 1, 1, 0.1]]), np.array([[0, 0, 1, 1, 0.2]]), np.array([[0, 0, 1, 1, 0.7]])]) + ], + ) + mocker.patch( + "otx.algorithms.detection.adapters.mmdet.task.FeatureVectorHook", + return_value=nullcontext(), + ) + + inference_parameters = InferenceParameters(is_evaluation=True) + outputs = self.det_nncf_task.infer(self.dataset, inference_parameters) + for output in outputs: + assert output.get_annotations()[-1].get_labels()[0].probability == 0.7 + @e2e_pytest_unit def test_initialize(self, mocker): """Test initialize method in OTXDetTaskNNCF.""" diff --git a/tests/unit/algorithms/segmentation/adapters/mmseg/datasets/pipelines/test_transforms.py 
b/tests/unit/algorithms/segmentation/adapters/mmseg/datasets/pipelines/test_transforms.py index facded59996..5187a0f507d 100644 --- a/tests/unit/algorithms/segmentation/adapters/mmseg/datasets/pipelines/test_transforms.py +++ b/tests/unit/algorithms/segmentation/adapters/mmseg/datasets/pipelines/test_transforms.py @@ -1,4 +1,4 @@ -from typing import Any, Dict +from typing import Any, Dict, List import numpy as np import pytest @@ -110,11 +110,11 @@ class TestNormalize: @pytest.mark.parametrize( "mean,std,to_rgb,expected", [ - (1.0, 1.0, True, np.array([[[1.0, 0.0, 0.0]]], dtype=np.float32)), - (1.0, 1.0, False, np.array([[[-1.0, 0.0, 0.0]]], dtype=np.float32)), + ([[[1.0, 1.0, 1.0]]], [[[1.0, 1.0, 1.0]]], True, np.array([[[1.0, 0.0, -1.0]]], dtype=np.float32)), + ([[[1.0, 1.0, 1.0]]], [[[1.0, 1.0, 1.0]]], False, np.array([[[-1.0, 0.0, 1.0]]], dtype=np.float32)), ], ) - def test_call(self, mean: float, std: float, to_rgb: bool, expected: np.array) -> None: + def test_call(self, mean: List[float], std: List[float], to_rgb: bool, expected: np.array) -> None: """Test __call__.""" normalize = Normalize(mean=mean, std=std, to_rgb=to_rgb) inputs = dict(img=np.arange(3).reshape(1, 1, 3)) diff --git a/tests/unit/algorithms/visual_prompting/adapters/openvino/model_wrappers/test_openvino_models.py b/tests/unit/algorithms/visual_prompting/adapters/openvino/model_wrappers/test_openvino_models.py index 716df9c70b4..7740de14ab9 100644 --- a/tests/unit/algorithms/visual_prompting/adapters/openvino/model_wrappers/test_openvino_models.py +++ b/tests/unit/algorithms/visual_prompting/adapters/openvino/model_wrappers/test_openvino_models.py @@ -15,6 +15,7 @@ from otx.algorithms.visual_prompting.adapters.openvino.model_wrappers import ( Decoder, ImageEncoder, + PromptGetter, ) from otx.api.entities.label import LabelEntity from tests.test_suite.e2e_test_system import e2e_pytest_unit @@ -47,6 +48,16 @@ def test_preproces(self, mocker): assert meta["resize_type"] == "fit_to_window" +class TestPromptGetter: + @e2e_pytest_unit + def test_parameters(self): + """Test parameters.""" + params = PromptGetter.parameters() + + assert params.get("sim_threshold").default_value == 0.5 + assert params.get("num_bg_points").default_value == 1 + + class TestDecoder: @pytest.fixture(autouse=True) def setup(self, mocker): diff --git a/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/callbacks/test_inference_callback.py b/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/callbacks/test_inference_callback.py index 39aec2025d4..3cb1710d76e 100644 --- a/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/callbacks/test_inference_callback.py +++ b/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/callbacks/test_inference_callback.py @@ -125,7 +125,18 @@ def setup(self, mocker, monkeypatch): @e2e_pytest_unit @pytest.mark.parametrize( "expected", - [[Point(0.5, 0.0), Point(0.0, 0.5), Point(0.5, 1.0), Point(1.0, 0.5)]], + [ + [ + Point(0.0, 0.0), + Point(0.0, 0.5), + Point(0.0, 1.0), + Point(0.5, 1.0), + Point(1.0, 1.0), + Point(1.0, 0.5), + Point(1.0, 0.0), + Point(0.5, 0.0), + ] + ], ) def test_on_predict_epoch_end(self, expected: Any): """Test on_predict_epoch_end.""" @@ -159,4 +170,4 @@ def test_on_predict_epoch_end(self, expected: Any): assert isinstance(annotation.shape, Polygon) assert annotation.shape.points == expected assert annotation.get_labels()[0].name == "rectangle" - assert annotation.get_labels()[0].probability == 1.0 + assert 
annotation.get_labels()[0].probability == 0.5 diff --git a/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/datasets/pipelines/test_sam_transforms.py b/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/datasets/pipelines/test_sam_transforms.py index 68e06b14482..2636627d2b9 100644 --- a/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/datasets/pipelines/test_sam_transforms.py +++ b/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/datasets/pipelines/test_sam_transforms.py @@ -4,6 +4,7 @@ # SPDX-License-Identifier: Apache-2.0 # +import torch import numpy as np from typing import Tuple import pytest @@ -45,11 +46,21 @@ def test_apply_image(self, image: np.ndarray, expected: Tuple[int, int, int]): (np.array([[4, 4], [8, 8]]), (16, 16), np.array([[2, 2], [4, 4]])), ], ) - def test_apply_coords(self, coords: np.ndarray, original_size: Tuple[int, int], expected: np.ndarray): + @pytest.mark.parametrize("type", ["numpy", "torch"]) + def test_apply_coords(self, coords: np.ndarray, original_size: Tuple[int, int], expected: np.ndarray, type: str): """Test apply_coords.""" + if type == "torch": + coords = torch.tensor(coords) + original_size = torch.tensor(original_size) + expected = torch.tensor(expected) result = self.resize_longest_side.apply_coords(coords, original_size, self.resize_longest_side.target_length) - assert np.array_equal(result, expected) + if type == "torch": + assert isinstance(result, torch.Tensor) + assert torch.equal(result, expected) + else: + assert isinstance(result, np.ndarray) + assert np.array_equal(result, expected) @e2e_pytest_unit @pytest.mark.parametrize( @@ -59,11 +70,21 @@ def test_apply_coords(self, coords: np.ndarray, original_size: Tuple[int, int], (np.array([[4, 4, 8, 8], [8, 8, 12, 12]]), (16, 16), np.array([[2, 2, 4, 4], [4, 4, 6, 6]])), ], ) - def test_apply_boxes(self, boxes: np.ndarray, original_size: Tuple[int, int], expected: np.ndarray): + @pytest.mark.parametrize("type", ["numpy", "torch"]) + def test_apply_boxes(self, boxes: np.ndarray, original_size: Tuple[int, int], expected: np.ndarray, type: str): """Test apply_boxes.""" - result = self.resize_longest_side.apply_boxes(boxes, original_size) + if type == "torch": + boxes = torch.tensor(boxes) + original_size = torch.tensor(original_size) + expected = torch.tensor(expected) + result = self.resize_longest_side.apply_boxes(boxes, original_size, self.resize_longest_side.target_length) - assert np.array_equal(result, expected) + if type == "torch": + assert isinstance(result, torch.Tensor) + assert torch.equal(result, expected) + else: + assert isinstance(result, np.ndarray) + assert np.array_equal(result, expected) @e2e_pytest_unit @pytest.mark.parametrize( diff --git a/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/datasets/pipelines/test_transforms.py b/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/datasets/pipelines/test_transforms.py index 96c17dd2e35..36225af9d4a 100644 --- a/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/datasets/pipelines/test_transforms.py +++ b/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/datasets/pipelines/test_transforms.py @@ -30,7 +30,7 @@ def test_collate_fn(): "bboxes": np.array([[1, 2, 3, 4], [5, 6, 7, 8]]), "points": [], "gt_masks": [Tensor([1, 2, 3])], - "original_size": [], + "original_size": np.array([1, 3]), "padding": [], "path": [], "labels": [], @@ -41,7 +41,7 @@ def test_collate_fn(): "bboxes": np.array([[9, 10, 11, 12]]), 
"points": [], "gt_masks": [Tensor([4, 5, 6])], - "original_size": [], + "original_size": np.array([1, 3]), "padding": [], "path": [], "labels": [], @@ -53,7 +53,7 @@ def test_collate_fn(): "bboxes": [Tensor([[1, 2, 3, 4], [5, 6, 7, 8]]), Tensor([[9, 10, 11, 12]])], "points": None, "gt_masks": [Tensor([[1, 2, 3]]), Tensor([[4, 5, 6]])], - "original_size": [[], []], + "original_size": [Tensor([1, 3]), Tensor([1, 3])], "path": [[], []], "labels": [[], []], "padding": [[], []], @@ -69,7 +69,8 @@ def test_collate_fn(): assert len(results["gt_masks"]) == len(expected["gt_masks"]) for r, e in zip(results["gt_masks"], expected["gt_masks"]): assert torch.all(r == e) - assert results["original_size"] == expected["original_size"] + for r, e in zip(results["original_size"], expected["original_size"]): + assert torch.all(r == e) assert results["path"] == expected["path"] assert results["labels"] == expected["labels"] assert results["padding"] == expected["padding"] diff --git a/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/datasets/test_dataset.py b/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/datasets/test_dataset.py index c3701eb3f58..99a76c3b17b 100644 --- a/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/datasets/test_dataset.py +++ b/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/datasets/test_dataset.py @@ -19,7 +19,7 @@ generate_bbox, generate_bbox_from_mask, get_transform, - # generate_point_from_mask, + generate_point_from_mask, ) from otx.algorithms.visual_prompting.adapters.pytorch_lightning.datasets.pipelines import ( MultipleInputsCompose, @@ -184,7 +184,7 @@ def test_getitem( # Check specific values in the item assert item["index"] == 0 assert (item["images"] == dataset[0].media.numpy).all() - assert item["original_size"] == dataset[0].media.numpy.shape[:2] + assert np.all(item["original_size"] == dataset[0].media.numpy.shape[:2]) assert item["path"] == dataset[0].media.path assert isinstance(item["gt_masks"], list) assert isinstance(item["gt_masks"][0], np.ndarray) @@ -220,7 +220,7 @@ def test_getitem( # Check specific values in the item assert item["index"] == 0 assert (item["images"] == dataset[0].media.numpy).all() - assert item["original_size"] == dataset[0].media.numpy.shape[:2] + assert np.all(item["original_size"] == dataset[0].media.numpy.shape[:2]) assert item["path"] == dataset[0].media.path assert isinstance(item["gt_masks"], list) assert isinstance(item["gt_masks"][0], np.ndarray) @@ -248,8 +248,8 @@ def test_init_zeroshot(self, set_datamodule): datamodule = set_datamodule(train_type=TrainType.Zeroshot) assert datamodule.config.get("train_batch_size") == 1 - # assert "generate_point" in datamodule.kwargs - # assert "generate_bbox" in datamodule.kwargs + assert "generate_point" in datamodule.kwargs + assert "generate_bbox" in datamodule.kwargs @e2e_pytest_unit def test_setup(self, mocker, set_datamodule) -> None: diff --git a/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/test_segment_anything.py b/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/test_segment_anything.py index 799d06f846b..fed22e060c8 100644 --- a/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/test_segment_anything.py +++ b/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/test_segment_anything.py @@ -349,29 +349,13 @@ def test_select_masks(self) -> None: 
@e2e_pytest_unit def test_mask_postprocessing(self, mocker) -> None: """Test mask_postprocessing.""" - sam = SegmentAnything(config=self.base_config) - mocker.patch.object(sam, "resize_longest_image_size", return_value=Tensor((6, 6))) - sam.config.image_size = 6 - masks = torch.empty(1, 1, 2, 2) orig_size = Tensor((8, 8)) - results = sam.mask_postprocessing(masks, orig_size) + results = SegmentAnything.mask_postprocessing(masks, 6, orig_size) assert results[0, 0].shape == tuple(orig_size) - @e2e_pytest_unit - def test_resize_longest_image_size(self) -> None: - """Test resize_longest_image_size.""" - sam = SegmentAnything(config=self.base_config) - - input_image_size = Tensor((2, 4)) - longest_side = 6 - - results = sam.resize_longest_image_size(input_image_size, longest_side) - - assert torch.all(results == Tensor((3, 6))) - @e2e_pytest_unit def test_forward_train(self) -> None: """Test forward.""" diff --git a/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/test_zero_shot_segment_anything.py b/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/test_zero_shot_segment_anything.py index b4ac5343147..4437fdc1f42 100644 --- a/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/test_zero_shot_segment_anything.py +++ b/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/models/visual_prompters/test_zero_shot_segment_anything.py @@ -11,17 +11,25 @@ import torch from omegaconf import DictConfig +from otx.algorithms.visual_prompting.adapters.pytorch_lightning.models.visual_prompters.segment_anything import ( + SegmentAnything, +) from otx.algorithms.visual_prompting.adapters.pytorch_lightning.models.visual_prompters.zero_shot_segment_anything import ( PromptGetter, ZeroShotSegmentAnything, ) -from tests.unit.algorithms.visual_prompting.test_helpers import MockScoredLabel, MockImageEncoder, MockPromptGetter +from tests.unit.algorithms.visual_prompting.test_helpers import ( + MockScoredLabel, + MockImageEncoder, + MockPromptGetter, + MockMaskDecoder, +) class TestPromptGetter: @pytest.fixture(autouse=True) def setup(self) -> None: - self.prompt_getter = PromptGetter(image_size=3) + self.prompt_getter = PromptGetter(image_size=3, downsizing=1) @e2e_pytest_unit def test_initialize(self) -> None: @@ -46,47 +54,64 @@ def test_set_reference(self) -> None: self.prompt_getter.set_reference( label=MockScoredLabel(label=1), reference_feats=torch.ones((self.prompt_getter.image_size, self.prompt_getter.image_size)), - reference_prompts=torch.zeros((self.prompt_getter.image_size, self.prompt_getter.image_size)), + reference_prompts=torch.ones((self.prompt_getter.image_size, self.prompt_getter.image_size)), ) + assert self.prompt_getter.reference_feats[0].sum() == 0 + assert self.prompt_getter.reference_prompts[0].sum() == 0 assert self.prompt_getter.reference_feats[1].sum() == 9 - assert self.prompt_getter.reference_prompts[1].sum() == 0 + assert self.prompt_getter.reference_prompts[1].sum() == 9 + + self.prompt_getter.set_reference( + label=MockScoredLabel(label=3), + reference_feats=torch.ones((self.prompt_getter.image_size, self.prompt_getter.image_size)), + reference_prompts=torch.ones((self.prompt_getter.image_size, self.prompt_getter.image_size)), + ) + + assert self.prompt_getter.reference_feats[2].sum() == 0 + assert self.prompt_getter.reference_prompts[2].sum() == 0 + assert self.prompt_getter.reference_feats[3].sum() == 9 + assert 
self.prompt_getter.reference_prompts[3].sum() == 9 @e2e_pytest_unit def test_forward(self, mocker) -> None: """Test forward.""" - mocker.patch( - "otx.algorithms.visual_prompting.adapters.pytorch_lightning.models.visual_prompters.zero_shot_segment_anything.ZeroShotSegmentAnything" + mocker.patch.object( + self.prompt_getter, + "get_prompt_candidates", + return_value=(torch.tensor([[[0, 0, 0.5], [1, 1, 0.7]]]), torch.tensor([[[2, 2]]])), ) - mocker.patch.object(self.prompt_getter, "_point_selection", return_value=("points_scores", "bg_coords")) + image_embeddings = torch.ones(1, 4, 4, 4) + self.prompt_getter.reference_feats = torch.rand(1, 1, 4) + original_size = torch.tensor((self.prompt_getter.image_size, self.prompt_getter.image_size), dtype=torch.int64) - image_embeddings = torch.rand(1, 2, self.prompt_getter.image_size, self.prompt_getter.image_size) - self.prompt_getter.reference_feats = {1: torch.rand(1, 2)} - - prompts = self.prompt_getter( - image_embeddings=image_embeddings, - padding=(0, 0, 0, 0), - original_size=(self.prompt_getter.image_size, self.prompt_getter.image_size), + total_points_scores, total_bg_coords = self.prompt_getter( + image_embeddings=image_embeddings, original_size=original_size ) - assert 1 in prompts - assert prompts[1] == ("points_scores", "bg_coords") + assert total_points_scores.shape[0] == 1 + assert total_bg_coords.shape[0] == 1 @e2e_pytest_unit - def test_preprocess_target_feat(self) -> None: - """Test _preprocess_target_feat.""" - old_target_feat = torch.arange(1, self.prompt_getter.image_size**2 + 1, dtype=torch.float).reshape( - 1, 1, self.prompt_getter.image_size, self.prompt_getter.image_size + def test_get_prompt_candidates(self, mocker) -> None: + """Test get_prompt_candidates.""" + mocker.patch( + "otx.algorithms.visual_prompting.adapters.pytorch_lightning.models.visual_prompters.zero_shot_segment_anything.ZeroShotSegmentAnything" + ) + mocker.patch.object(self.prompt_getter, "_point_selection", return_value=("points_scores", "bg_coords")) + image_embeddings = torch.ones(1, 4, 4, 4) + self.prompt_getter.reference_feats = torch.rand(1, 1, 4) + label = torch.tensor([[0]], dtype=torch.int64) + original_size = torch.tensor( + [[self.prompt_getter.image_size, self.prompt_getter.image_size]], dtype=torch.int64 ) - new_target_feat = self.prompt_getter._preprocess_target_feat( - target_feat=old_target_feat, - c_feat=1, - h_feat=self.prompt_getter.image_size, - w_feat=self.prompt_getter.image_size, + + points_scores, bg_coords = self.prompt_getter.get_prompt_candidates( + image_embeddings=image_embeddings, label=label, original_size=original_size ) - assert new_target_feat.sum() == 9 - assert new_target_feat.shape == (1, self.prompt_getter.image_size**2) + assert points_scores == "points_scores" + assert bg_coords == "bg_coords" @e2e_pytest_unit def test_point_selection(self) -> None: @@ -95,9 +120,8 @@ def test_point_selection(self) -> None: points_scores, bg_coords = self.prompt_getter._point_selection( mask_sim=mask_sim, - original_size=(self.prompt_getter.image_size, self.prompt_getter.image_size), - threshold=0.5, - downsizing=1, + original_size=torch.tensor([self.prompt_getter.image_size, self.prompt_getter.image_size]), + threshold=torch.tensor([[0.5]]), ) assert torch.equal(points_scores, torch.tensor([[2, 2, 0.9], [1, 2, 0.8], [0, 2, 0.7], [2, 1, 0.6]])) @@ -112,6 +136,10 @@ def zero_shot_segment_anything(state_dict: Optional[OrderedDict] = None): 
"otx.algorithms.visual_prompting.adapters.pytorch_lightning.models.visual_prompters.segment_anything.SAMImageEncoder", MockImageEncoder, ) + monkeypatch.setattr( + "otx.algorithms.visual_prompting.adapters.pytorch_lightning.models.visual_prompters.segment_anything.SAMMaskDecoder", + MockMaskDecoder, + ) return ZeroShotSegmentAnything(state_dict=state_dict) return zero_shot_segment_anything @@ -164,12 +192,8 @@ def test_learn(self, mocker, set_zero_shot_segment_anything) -> None: zero_shot_segment_anything = set_zero_shot_segment_anything() mocker.patch.object( zero_shot_segment_anything, - "_predict_mask", - return_value=( - torch.tensor([[[[0, 0, 0], [0, 1, 0], [0, 0, 0]]]]), - torch.tensor([1, 0, 0]), - torch.tensor([[[[0, 0, 0], [0, 1, 0], [0, 0, 0]]]]), - ), + "_predict_masks", + return_value=torch.tensor([[[[0, 0, 0], [0, 1, 0], [0, 0, 0]]]]), ) processed_prompts = {MockScoredLabel(label=1, name="label"): [{"box": torch.tensor([[0, 0, 1, 1]])}]} @@ -180,13 +204,11 @@ def test_learn(self, mocker, set_zero_shot_segment_anything) -> None: original_size=(8, 8), ) - assert zero_shot_segment_anything.prompt_getter.reference_feats.get(1).shape == (1, 2) - assert zero_shot_segment_anything.prompt_getter.reference_prompts.get(1).shape == (8, 8) + assert zero_shot_segment_anything.prompt_getter.reference_feats.shape == (2, 1, 2) + assert zero_shot_segment_anything.prompt_getter.reference_prompts.shape == (2, 8, 8) @e2e_pytest_unit - @pytest.mark.parametrize( - "expected", [[torch.tensor([[0, 0, 0], [0, 1, 0], [0, 0, 0]]), torch.tensor([0.0, 0.0, 0.5])]] - ) + @pytest.mark.parametrize("expected", [[torch.ones((8, 8)) / 2, torch.tensor([0.0, 0.0, 0.5])]]) def test_infer(self, monkeypatch, mocker, set_zero_shot_segment_anything, expected: torch.Tensor) -> None: """Test infer.""" monkeypatch.setattr( @@ -195,26 +217,39 @@ def test_infer(self, monkeypatch, mocker, set_zero_shot_segment_anything, expect ) zero_shot_segment_anything = set_zero_shot_segment_anything() - zero_shot_segment_anything.prompt_getter.reference_feats = {1: torch.rand((1, 2))} - zero_shot_segment_anything.prompt_getter.reference_prompts = {1: torch.zeros((8, 8))} + zero_shot_segment_anything.prompt_getter.reference_feats = torch.rand(1, 1, 4) + zero_shot_segment_anything.prompt_getter.reference_prompts = torch.zeros((8, 8)) mocker.patch.object( - zero_shot_segment_anything, - "_predict_mask", - return_value=( - torch.tensor([[[[0, 0, 0], [0, 1, 0], [0, 0, 0]]]]), - torch.tensor([1, 0, 0]), - torch.tensor([[[[0, 0, 0], [0, 1, 0], [0, 0, 0]]]]), - ), + SegmentAnything, "forward", return_value=(torch.tensor([[0.1, 0.2, 0.5, 0.7]]), torch.ones(1, 4, 4, 4)) ) total_results = zero_shot_segment_anything.infer( - images=torch.ones((1, 3, 8, 8)), padding=(0, 0, 0, 0), original_size=(8, 8) + images=torch.ones((1, 3, 8, 8)), original_size=torch.tensor([[8, 8]], dtype=torch.int64) ) for i, results in enumerate(total_results[0]): for _, result in results.items(): assert torch.equal(result[0], expected[i]) + @e2e_pytest_unit + @pytest.mark.parametrize("is_postprocess", [True, False]) + def test_predict_masks(self, mocker, set_zero_shot_segment_anything, is_postprocess: bool) -> None: + """Test _predict_masks.""" + mocker.patch.object( + SegmentAnything, "forward", return_value=(torch.tensor([[0.1, 0.2, 0.5, 0.7]]), torch.ones(1, 4, 4, 4)) + ) + + zero_shot_segment_anything = set_zero_shot_segment_anything() + zero_shot_segment_anything.config.model.image_size = 6 + + mask = zero_shot_segment_anything._predict_masks( + 
image_embeddings=torch.rand(1), + point_coords=torch.rand(1, 2, 2), + point_labels=torch.randint(low=0, high=2, size=(1, 2)), + original_size=torch.tensor((8, 8), dtype=torch.int64), + ) + assert mask.shape == (8, 8) + @e2e_pytest_unit def test_preprocess_prompts(self, set_zero_shot_segment_anything) -> None: """Test _preprocess_prompts. @@ -248,18 +283,39 @@ def test_generate_masked_features(self, set_zero_shot_segment_anything) -> None: assert masked_feat.shape == (1, 1) @e2e_pytest_unit - def test_preprocess_mask(self, set_zero_shot_segment_anything) -> None: - """Test _preprocess_mask.""" + def test_preprocess_masks(self, set_zero_shot_segment_anything) -> None: + """Test _preprocess_masks.""" zero_shot_segment_anything = set_zero_shot_segment_anything() zero_shot_segment_anything.config.model.image_size = 16 - result = zero_shot_segment_anything._preprocess_mask(x=torch.ones(1, 1, 8, 8)) + result = zero_shot_segment_anything._preprocess_masks(x=torch.ones(1, 1, 8, 8)) assert result[:8, :8].sum() == 8**2 assert result[:8, 8:].sum() == 0 assert result[8:, :8].sum() == 0 assert result[8:, 8:].sum() == 0 + @e2e_pytest_unit + @pytest.mark.parametrize( + "logits,expected", + [ + (torch.ones(1, 4, 4, 4), torch.ones(4, 4, dtype=torch.bool)), + (torch.zeros(1, 4, 4, 4), torch.zeros(4, 4, dtype=torch.bool)), + ], + ) + def test_postprocess_masks( + self, set_zero_shot_segment_anything, logits: torch.Tensor, expected: torch.Tensor + ) -> None: + """Test _postprocess_masks.""" + zero_shot_segment_anything = set_zero_shot_segment_anything() + zero_shot_segment_anything.config.model.image_size = 4 + scores = torch.tensor([[0.0, 0.1, 0.2, 0.3]]) + original_size = torch.tensor([4, 4], dtype=torch.int64) + + _, result = zero_shot_segment_anything._postprocess_masks(logits, scores, original_size) + + assert torch.equal(result, expected) + @e2e_pytest_unit @pytest.mark.parametrize("use_only_background", [True, False]) def test_merge_prompts(self, set_zero_shot_segment_anything, use_only_background: bool) -> None: @@ -285,37 +341,3 @@ def test_merge_prompts(self, set_zero_shot_segment_anything, use_only_background else: assert torch.equal(merged_input_prompts.get("point_coords"), torch.tensor([1, 0, 2])) assert torch.equal(merged_input_prompts.get("point_labels"), torch.tensor([1, 0, 0])) - - @e2e_pytest_unit - def test_predict_target_mask(self, mocker, set_zero_shot_segment_anything) -> None: - """Test _predict_target_mask.""" - zero_shot_segment_anything = set_zero_shot_segment_anything() - mocker.patch.object( - zero_shot_segment_anything, - "_predict_mask", - return_value=( - torch.tensor([[[[0, 0, 0], [0, 1, 0], [0, 0, 0]]]]), - torch.tensor([1, 0, 0]), - torch.tensor([[[[0, 0, 0], [0, 1, 0], [0, 0, 0]]]]), - ), - ) - - mask = zero_shot_segment_anything._predict_target_mask( - image_embeddings=torch.rand(1), input_prompts={}, padding=(0, 0, 0, 0), original_size=(1, 1) - ) - - assert mask.shape == (3, 3) - - @e2e_pytest_unit - def test_predict_mask(self, mocker, set_zero_shot_segment_anything) -> None: - """Test _predict_mask.""" - zero_shot_segment_anything = set_zero_shot_segment_anything() - mocker.patch.object(zero_shot_segment_anything, "postprocess_masks", return_value=torch.Tensor([[1]])) - - masks, scores, low_res_masks = zero_shot_segment_anything._predict_mask( - image_embeddings=torch.rand(1), input_prompts={}, padding=(0, 0, 0, 0), original_size=(1, 1) - ) - - assert masks.dtype == torch.bool - assert scores.shape[1] == 3 - assert low_res_masks.shape[1] == 3 diff --git 
a/tests/unit/algorithms/visual_prompting/tasks/test_inference.py b/tests/unit/algorithms/visual_prompting/tasks/test_inference.py index 996d1f97cd1..acd9d0c48ca 100644 --- a/tests/unit/algorithms/visual_prompting/tasks/test_inference.py +++ b/tests/unit/algorithms/visual_prompting/tasks/test_inference.py @@ -4,9 +4,13 @@ # SPDX-License-Identifier: Apache-2.0 # +import os +import torch +import numpy as np from typing import Optional, Dict, Any import pytest +from functools import wraps from omegaconf import DictConfig from otx.algorithms.visual_prompting.tasks.inference import InferenceTask, ZeroShotTask @@ -23,6 +27,7 @@ init_environment, MockImageEncoder, ) +import onnxruntime logger = get_logger() @@ -277,6 +282,91 @@ def test_infer(self, mocker): mocker_trainer.assert_called_once() + @e2e_pytest_unit + @pytest.mark.parametrize("export_type", [ExportType.ONNX, ExportType.OPENVINO]) + def test_export(self, mocker, export_type: ExportType): + """Test export.""" + model = self.zero_shot_task.load_model(otx_model=self.zero_shot_task.task_environment.model) + model.prompt_getter.reference_feats = torch.rand(3, 1, 256) + model.prompt_getter.reference_prompts = torch.rand(3, 720, 1280) + mocker.patch.object(self.zero_shot_task, "load_model", return_value=model) + + dataset = generate_visual_prompting_dataset() + output_model = ModelEntity(dataset, self.zero_shot_task.task_environment.get_model_configuration()) + + self.zero_shot_task.export(export_type, output_model, dump_features=False) + + if export_type == ExportType.ONNX: + assert output_model.model_format == ModelFormat.ONNX + assert "visual_prompting_image_encoder.onnx" in output_model.model_adapters + assert "visual_prompting_prompt_getter.onnx" in output_model.model_adapters + assert "visual_prompting_decoder.onnx" in output_model.model_adapters + + elif export_type == ExportType.OPENVINO: + assert output_model.model_format == ModelFormat.OPENVINO + assert "visual_prompting_image_encoder.bin" in output_model.model_adapters + assert "visual_prompting_image_encoder.xml" in output_model.model_adapters + assert "visual_prompting_prompt_getter.bin" in output_model.model_adapters + assert "visual_prompting_prompt_getter.xml" in output_model.model_adapters + assert "visual_prompting_decoder.bin" in output_model.model_adapters + assert "visual_prompting_decoder.xml" in output_model.model_adapters + + assert not output_model.has_xai + + @e2e_pytest_unit + def test_export_to_onnx(self): + """Test _export_to_onnx.""" + onnx_path = { + "visual_prompting_image_encoder": os.path.join( + self.zero_shot_task.output_path, "visual_prompting_image_encoder.onnx" + ), + "visual_prompting_prompt_getter": os.path.join( + self.zero_shot_task.output_path, "visual_prompting_prompt_getter.onnx" + ), + "visual_prompting_decoder": os.path.join(self.zero_shot_task.output_path, "visual_prompting_decoder.onnx"), + } + self.zero_shot_task.model = self.zero_shot_task.load_model(otx_model=self.zero_shot_task.task_environment.model) + self.zero_shot_task.model.prompt_getter.reference_feats = torch.randn(1, 1, 256) + self.zero_shot_task.model.prompt_getter.reference_feats /= ( + self.zero_shot_task.model.prompt_getter.reference_feats.norm(dim=-1, keepdim=True) + ) + + self.zero_shot_task._export_to_onnx(onnx_path) + + image_size = self.zero_shot_task.config.model.image_size + embed_dim = self.zero_shot_task.model.prompt_encoder.embed_dim + embed_size = self.zero_shot_task.model.prompt_encoder.image_embedding_size + mask_input_size = [4 * x for x in embed_size] + 
onnx_inputs = { + "visual_prompting_image_encoder": { + "images": np.random.random((1, 3, image_size, image_size)).astype(np.float32) + }, + "visual_prompting_prompt_getter": { + "image_embeddings": np.random.randn(1, embed_dim, *embed_size).astype(dtype=np.float32), + "original_size": np.random.randint(low=0, high=image_size * 2, size=(1, 2), dtype=np.int64), + "threshold": np.array([[0.1]], dtype=np.float32), + "num_bg_points": np.random.randint(low=1, high=image_size, size=(1, 1), dtype=np.int64), + }, + "visual_prompting_decoder": { + "image_embeddings": np.zeros((1, embed_dim, *embed_size), dtype=np.float32), + "point_coords": np.random.randint(low=0, high=1024, size=(1, 2, 2)).astype(np.float32), + "point_labels": np.random.randint(low=0, high=4, size=(1, 2)).astype(np.float32), + "mask_input": np.random.randn(1, 1, *mask_input_size).astype(np.float32), + "has_mask_input": np.array([[1]], dtype=np.float32), + }, + } + onnx_outputs = { + "visual_prompting_image_encoder": ["image_embeddings"], + "visual_prompting_prompt_getter": ["total_points_scores", "total_bg_coords"], + "visual_prompting_decoder": ["iou_predictions", "low_res_masks"], + } + + onnx_rt_models = { + k: onnxruntime.InferenceSession(v, providers=["CPUExecutionProvider"]) for k, v in onnx_path.items() + } + for name, onnx_model in onnx_rt_models.items(): + onnx_model.run(onnx_outputs.get(name), onnx_inputs.get(name)) + @e2e_pytest_unit def test_save_model(self, mocker): """Test save_model.""" diff --git a/tests/unit/algorithms/visual_prompting/tasks/test_openvino.py b/tests/unit/algorithms/visual_prompting/tasks/test_openvino.py index d228687f7ba..4181a8b2bab 100644 --- a/tests/unit/algorithms/visual_prompting/tasks/test_openvino.py +++ b/tests/unit/algorithms/visual_prompting/tasks/test_openvino.py @@ -5,7 +5,7 @@ # from copy import deepcopy -from typing import Optional +from typing import Optional, Dict, Tuple import os import numpy as np @@ -21,8 +21,11 @@ from otx.algorithms.visual_prompting.configs.base import VisualPromptingBaseConfig from otx.algorithms.visual_prompting.tasks.openvino import ( OpenVINOVisualPromptingInferencer, + OpenVINOZeroShotVisualPromptingInferencer, OpenVINOVisualPromptingTask, + OpenVINOZeroShotVisualPromptingTask, OTXOpenVinoDataLoader, + OTXZeroShotOpenVinoDataLoader, ) from otx.api.configuration.configurable_parameters import ConfigurableParameters from otx.api.entities.annotation import Annotation @@ -130,7 +133,9 @@ def test_predict(self, mocker): ), ) mocker_forward = mocker.patch.object( - OpenVINOVisualPromptingInferencer, "forward", return_value={"image_embeddings": np.empty((4, 2, 2))} + OpenVINOVisualPromptingInferencer, + "forward_image_encoder", + return_value={"image_embeddings": np.empty((4, 2, 2))}, ) mocker_forward_decoder = mocker.patch.object( OpenVINOVisualPromptingInferencer, "forward_decoder", return_value=None @@ -149,12 +154,12 @@ def test_predict(self, mocker): assert returned_value == self.fake_annotation @e2e_pytest_unit - def test_forward(self): - """Test forward.""" + def test_forward_image_encoder(self): + """Test forward_image_encoder.""" fake_input = {"images": np.ones((1, 3, 2, 2))} fake_output = {"image_embeddings": np.ones((1, 1, 2, 2))} self.visual_prompting_ov_inferencer.model["image_encoder"].infer_sync.return_value = fake_output - returned_value = self.visual_prompting_ov_inferencer.forward(fake_input) + returned_value = self.visual_prompting_ov_inferencer.forward_image_encoder(fake_input) assert returned_value == fake_output @@ -169,15 +174,161 @@ def 
test_forward_decoder(self): assert returned_value == fake_output +class TestOpenVINOZeroShotVisualPromptingInferencer: + @pytest.fixture(autouse=True) + def setup(self, mocker): + self.fake_annotation = [ + Annotation( + Polygon(points=[Point(0, 0)]), + id=0, + labels=[ScoredLabel(LabelEntity(name="fake", domain="VISUALPROMPTING"), probability=1.0)], + ) + ] + mocker.patch("otx.algorithms.visual_prompting.tasks.openvino.OpenvinoAdapter") + mocker.patch.object(Model, "create_model") + mocker.patch.object( + VisualPromptingToAnnotationConverter, "convert_to_annotation", return_value=self.fake_annotation + ) + self.task_environment = init_environment() + visual_prompting_hparams = self.task_environment.get_hyper_parameters(VisualPromptingBaseConfig) + label_schema = self.task_environment.label_schema + + self.visual_prompting_ov_inferencer = OpenVINOZeroShotVisualPromptingInferencer( + visual_prompting_hparams, + label_schema, + {"image_encoder": "", "prompt_getter": "", "decoder": ""}, + {"image_encoder": "", "prompt_getter": "", "decoder": ""}, + ) + self.visual_prompting_ov_inferencer.model["decoder"] = mocker.patch( + "otx.algorithms.visual_prompting.tasks.openvino.model_wrappers.Decoder", autospec=True + ) + self.visual_prompting_ov_inferencer.model["decoder"]._apply_coords.return_value = np.array([[1, 1]]) + + @e2e_pytest_unit + def test_predict(self, mocker): + """Test predict.""" + mocker_pre_process = mocker.patch.object( + OpenVINOZeroShotVisualPromptingInferencer, + "pre_process", + return_value=(torch.zeros((1, 3, 2, 2)), {"original_shape": (4, 4, 1)}), + ) + mocker_forward = mocker.patch.object( + OpenVINOZeroShotVisualPromptingInferencer, + "forward_image_encoder", + return_value={"image_embeddings": np.empty((4, 2, 2))}, + ) + mocker_forward_prompt_getter = mocker.patch.object( + OpenVINOZeroShotVisualPromptingInferencer, + "forward_prompt_getter", + return_value={"total_points_scores": np.array([[[1, 1, 1]]]), "total_bg_coords": np.array([[[2, 2]]])}, + ) + mocker_forward_decoder = mocker.patch.object( + OpenVINOZeroShotVisualPromptingInferencer, "forward_decoder", return_value=None + ) + mocker_post_process = mocker.patch.object( + OpenVINOZeroShotVisualPromptingInferencer, "post_process", return_value=(self.fake_annotation, None, None) + ) + fake_input = mocker.Mock(spec=DatasetItemEntity) + + returned_value = self.visual_prompting_ov_inferencer.predict(fake_input) + + mocker_pre_process.assert_called_once() + mocker_forward.assert_called_once() + mocker_forward_decoder.assert_called_once() + mocker_post_process.assert_called_once() + assert returned_value == self.fake_annotation + + @e2e_pytest_unit + @pytest.mark.parametrize( + "postprocess_output,infer_sync_output,expected", + [ + ( + (np.ones((1, 1)), np.ones((3, 3)), 0.9), + {"iou_predictions": np.array([[0.9]]), "low_res_masks": np.ones((1, 1, 2, 2))}, + {"iou_predictions": np.array([[0.9]]), "low_res_masks": np.ones((1, 1, 2, 2))}, + ), + ( + (np.zeros((2, 2)), np.zeros((3, 3)), 0.0), + {"iou_predictions": np.array([[0.9]]), "low_res_masks": np.ones((1, 1, 2, 2))}, + {"iou_predictions": 0.0, "low_res_masks": np.zeros((2, 2))}, + ), + ], + ) + def test_forward_decoder( + self, + mocker, + postprocess_output: Tuple[np.ndarray, np.ndarray, float], + infer_sync_output: Dict[str, np.ndarray], + expected: Dict[str, np.ndarray], + ): + """Test forward_decoder.""" + mocker.patch.object( + self.visual_prompting_ov_inferencer.model["decoder"], "infer_sync", return_value=infer_sync_output + ) + mocker.patch.object(
self.visual_prompting_ov_inferencer.model["decoder"], + "_apply_coords", + return_value=np.array([[[1, 1]]], dtype=np.float32), + ) + mocker.patch.object(self.visual_prompting_ov_inferencer, "_postprocess_masks", return_value=postprocess_output) + + result = self.visual_prompting_ov_inferencer.forward_decoder( + inputs={ + "image_embeddings": np.empty((1, 4, 2, 2)), + "point_coords": np.array([[[1, 1]]], dtype=np.float32), + "point_labels": np.array([[1]], dtype=np.float32), + }, + original_size=np.array([3, 3]), + ) + + assert np.all(result["iou_predictions"] == expected["iou_predictions"]) + assert np.all(result["low_res_masks"] == expected["low_res_masks"]) + + @e2e_pytest_unit + @pytest.mark.parametrize( + "high_res_masks,expected_masks,expected_scores", + [ + ( + np.repeat(np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])[..., None], 4, axis=-1), + np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]], dtype=np.bool_), + 0.9, + ), + ( + np.concatenate( + ( + np.repeat(np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])[..., None], 3, axis=-1), + np.zeros((3, 3, 1)), + ), + axis=-1, + ), + np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]], dtype=np.bool_), + 0.8, + ), + (np.zeros((3, 3, 4)), np.zeros((3, 3)), 0.0), + ], + ) + def test_postprocess_masks(self, high_res_masks: np.ndarray, expected_masks: np.ndarray, expected_scores: float): + """Test _postprocess_masks.""" + self.visual_prompting_ov_inferencer.model["decoder"].resize_and_crop.return_value = high_res_masks + self.visual_prompting_ov_inferencer.model["decoder"].mask_threshold = 0.0 + self.visual_prompting_ov_inferencer.model["decoder"].image_size = 3 + + _, result_masks, result_scores = self.visual_prompting_ov_inferencer._postprocess_masks( + logits=np.empty((1, 4, 2, 2)), scores=np.array([[0.5, 0.7, 0.8, 0.9]]), original_size=np.array([3, 3]) + ) + + assert result_masks.shape == (3, 3) + assert np.all(result_masks == expected_masks) + assert result_scores == expected_scores + + class TestOTXOpenVinoDataLoader: @pytest.fixture def load_dataloader(self, mocker): - def _load_dataloader(is_encoder: bool = True, output_model: Optional[ModelEntity] = None): + def _load_dataloader(module_name: str, output_model: Optional[ModelEntity] = None): dataset = generate_visual_prompting_dataset() dataset = dataset.get_subset(Subset.TRAINING) - return OTXOpenVinoDataLoader( - dataset, self.mocker_inferencer, is_encoder=is_encoder, output_model=output_model - ) + return OTXOpenVinoDataLoader(dataset, self.mocker_inferencer, module_name, output_model=output_model) return _load_dataloader @@ -188,16 +339,16 @@ def setup(self, mocker): self.mocker_inferencer = mocker.patch.object(OpenVINOVisualPromptingInferencer, "__init__") @e2e_pytest_unit - @pytest.mark.parametrize("is_encoder", [True, False]) - def test_getitem(self, mocker, load_dataloader, is_encoder: bool): + @pytest.mark.parametrize("module_name", ["image_encoder", "decoder"]) + def test_getitem(self, mocker, load_dataloader, module_name: str): """Test __getitem__.""" mocker_output_model = mocker.patch("otx.api.entities.model.ModelEntity") - if not is_encoder: + if module_name == "decoder": mocker.patch.object(mocker_output_model, "get_data") self.mocker_read_model.reset_mock() self.mocker_compile_model.reset_mock() - dataloader = load_dataloader(is_encoder, mocker_output_model) + dataloader = load_dataloader(module_name, mocker_output_model) setattr(dataloader, "target_length", 8) mocker.patch.object( @@ -208,7 +359,7 @@ def test_getitem(self, mocker, load_dataloader, is_encoder: bool): results = 
dataloader.__getitem__(0) - if is_encoder: + if module_name == "image_encoder": assert results["images"].shape == (1, 3, 8, 8) else: self.mocker_read_model.assert_called_once() @@ -218,6 +369,64 @@ def test_getitem(self, mocker, load_dataloader, is_encoder: bool): assert "image_embeddings" in results +class TestOTXZeroShotOpenVinoDataLoader: + @pytest.fixture + def load_dataloader(self, mocker): + def _load_dataloader(module_name: str, output_model: Optional[ModelEntity] = None): + dataset = generate_visual_prompting_dataset() + dataset = dataset.get_subset(Subset.TRAINING) + return OTXZeroShotOpenVinoDataLoader( + dataset, self.mocker_inferencer, module_name, output_model=output_model + ) + + return _load_dataloader + + @pytest.fixture(autouse=True) + def setup(self, mocker): + self.mocker_read_model = mocker.patch("otx.algorithms.visual_prompting.tasks.openvino.ov.Core.read_model") + self.mocker_compile_model = mocker.patch("otx.algorithms.visual_prompting.tasks.openvino.ov.Core.compile_model") + self.mocker_inferencer = mocker.patch.object(OpenVINOZeroShotVisualPromptingTask, "__init__") + + @e2e_pytest_unit + @pytest.mark.parametrize("module_name", ["image_encoder", "prompt_getter", "decoder"]) + def test_getitem(self, mocker, load_dataloader, module_name: str): + """Test __getitem__.""" + mocker_output_model = mocker.patch("otx.api.entities.model.ModelEntity") + if module_name in ["prompt_getter", "decoder"]: + mocker.patch.object(mocker_output_model, "get_data") + self.mocker_read_model.reset_mock() + self.mocker_compile_model.reset_mock() + + dataloader = load_dataloader(module_name, mocker_output_model) + + setattr(dataloader, "target_length", 8) + mocker.patch.object( + dataloader.inferencer, + "pre_process", + return_value=({"images": np.zeros((1, 3, 4, 4), dtype=np.uint8)}, {"original_shape": (4, 4)}), + ) + if module_name == "decoder": + mocker.patch.object( + dataloader, + "prompt_getter", + return_value={ + "total_points_scores": [np.array([[0, 0, 0.5]])], + "total_bg_coords": [np.array([[1, 1]])], + }, + ) + + results = dataloader.__getitem__(0) + + if module_name == "image_encoder": + assert results["images"].shape == (1, 3, 8, 8) + elif module_name == "prompt_getter": + self.mocker_read_model.assert_called_once() + self.mocker_compile_model.assert_called_once() + else: # decoder + assert self.mocker_read_model.call_count == 2 + assert self.mocker_compile_model.call_count == 2 + + class TestOpenVINOVisualPromptingTask: @pytest.fixture def otx_model(self): model_configuration = ModelConfiguration( configurable_parameters=ConfigurableParameters(header="header", description="description"), label_schema=LabelSchemaEntity(), ) return ModelEntity(train_dataset=DatasetEntity(), configuration=model_configuration) @@ -346,3 +555,89 @@ def patch_save_model(model, output_xml): self.visual_prompting_ov_task.model.get_data("visual_prompting_decoder.bin") == b"compressed_visual_prompting_decoder.bin" ) + + +class TestOpenVINOZeroShotVisualPromptingTask: + @pytest.fixture + def otx_model(self): + model_configuration = ModelConfiguration( + configurable_parameters=ConfigurableParameters(header="header", description="description"), + label_schema=LabelSchemaEntity(), + ) + return ModelEntity(train_dataset=DatasetEntity(), configuration=model_configuration) + + @pytest.fixture(autouse=True) + def setup(self, mocker, otx_model): + """Load the OpenVINOZeroShotVisualPromptingTask.""" + mocker.patch("otx.algorithms.visual_prompting.tasks.openvino.OpenvinoAdapter") + mocker.patch.object(Model, "create_model") + self.task_environment = init_environment() + visual_prompting_hparams = self.task_environment.get_hyper_parameters(VisualPromptingBaseConfig) + + visual_prompting_ov_inferencer = OpenVINOZeroShotVisualPromptingInferencer( + visual_prompting_hparams,
self.task_environment.label_schema, + {"image_encoder": "", "prompt_getter": "", "decoder": ""}, + {"image_encoder": "", "prompt_getter": "", "decoder": ""}, + ) + + # self.task_environment.model = mocker.patch("otx.api.entities.model.ModelEntity") + self.task_environment.model = otx_model + mocker.patch.object( + OpenVINOZeroShotVisualPromptingTask, "load_inferencer", return_value=visual_prompting_ov_inferencer + ) + self.visual_prompting_ov_task = OpenVINOZeroShotVisualPromptingTask(task_environment=self.task_environment) + + @e2e_pytest_unit + def test_optimize(self, mocker): + """Test optimize.""" + + def patch_save_model(model, output_xml): + output_bin = output_xml.replace(".xml", ".bin") + with open(output_xml, "wb") as f: + f.write(f"compressed_{os.path.basename(output_xml)}".encode("utf-8")) + with open(output_bin, "wb") as f: + f.write(f"compressed_{os.path.basename(output_bin)}".encode("utf-8")) + + dataset = generate_visual_prompting_dataset() + output_model = deepcopy(self.task_environment.model) + self.visual_prompting_ov_task.model.set_data("visual_prompting_image_encoder.xml", b"image_encoder_xml") + self.visual_prompting_ov_task.model.set_data("visual_prompting_image_encoder.bin", b"image_encoder_bin") + self.visual_prompting_ov_task.model.set_data("visual_prompting_prompt_getter.xml", b"prompt_getter_xml") + self.visual_prompting_ov_task.model.set_data("visual_prompting_prompt_getter.bin", b"prompt_getter_bin") + self.visual_prompting_ov_task.model.set_data("visual_prompting_decoder.xml", b"decoder_xml") + self.visual_prompting_ov_task.model.set_data("visual_prompting_decoder.bin", b"decoder_bin") + mocker.patch("otx.algorithms.visual_prompting.tasks.openvino.ov.Core.read_model", autospec=True) + mocker.patch("otx.algorithms.visual_prompting.tasks.openvino.ov.serialize", new=patch_save_model) + mocker.patch("otx.algorithms.visual_prompting.tasks.openvino.ov.Core.compile_model") + fake_quantize = mocker.patch("otx.algorithms.visual_prompting.tasks.openvino.nncf.quantize", autospec=True) + + self.visual_prompting_ov_task.optimize(OptimizationType.POT, dataset=dataset, output_model=output_model) + + fake_quantize.assert_called() + assert fake_quantize.call_count == 3 + + assert ( + self.visual_prompting_ov_task.model.get_data("visual_prompting_image_encoder.xml") + == b"compressed_visual_prompting_image_encoder.xml" + ) + assert ( + self.visual_prompting_ov_task.model.get_data("visual_prompting_image_encoder.bin") + == b"compressed_visual_prompting_image_encoder.bin" + ) + assert ( + self.visual_prompting_ov_task.model.get_data("visual_prompting_prompt_getter.xml") + == b"compressed_visual_prompting_prompt_getter.xml" + ) + assert ( + self.visual_prompting_ov_task.model.get_data("visual_prompting_prompt_getter.bin") + == b"compressed_visual_prompting_prompt_getter.bin" + ) + assert ( + self.visual_prompting_ov_task.model.get_data("visual_prompting_decoder.xml") + == b"compressed_visual_prompting_decoder.xml" + ) + assert ( + self.visual_prompting_ov_task.model.get_data("visual_prompting_decoder.bin") + == b"compressed_visual_prompting_decoder.bin" + ) diff --git a/tests/unit/algorithms/visual_prompting/test_helpers.py b/tests/unit/algorithms/visual_prompting/test_helpers.py index a9f22c7bf95..c1be0ae3c89 100644 --- a/tests/unit/algorithms/visual_prompting/test_helpers.py +++ b/tests/unit/algorithms/visual_prompting/test_helpers.py @@ -153,7 +153,6 @@ def __init__(self, *args, **kwargs): self.backbone = nn.Linear(1, 1) def forward(self, *args, **kwargs): - # return 
torch.Tensor([[1]]) return torch.ones((1, 2, 4, 4)) @@ -182,6 +181,9 @@ def __init__(self, *args, **kwargs): def forward(self, *args, **kwargs): return torch.Tensor([[1]]), torch.Tensor([[1]]) + def predict_mask(self, *args, **kwargs): + return self(*args, **kwargs) + class MockScoredLabel: def __init__(self, label: int, name: str = "background"): @@ -199,7 +201,8 @@ def initialize(self): def set_default_thresholds(self, *args, **kwargs): pass + def get_prompt_candidates(self, *args, **kwargs): + return {1: (torch.Tensor([[0, 0, 0.5]]), torch.Tensor([[1, 1]]))} + def forward(self, *args, **kwargs): - return { - MockScoredLabel(label=1, name="label"): (torch.tensor([[0, 0, 0.5], [1, 1, 0.7]]), torch.tensor([[2, 2]])) - } + return torch.tensor([[[0, 0, 0.5], [1, 1, 0.7]]]), torch.tensor([[[2, 2]]]) diff --git a/tests/unit/cli/utils/test_experiment.py b/tests/unit/cli/utils/test_experiment.py index e5a61a8e445..714931be130 100644 --- a/tests/unit/cli/utils/test_experiment.py +++ b/tests/unit/cli/utils/test_experiment.py @@ -22,8 +22,9 @@ def _set_up(self, mocker): @e2e_pytest_unit @pytest.mark.parametrize("resource_type", ("cpu", "gpu", "all", "cpu,gpu")) @pytest.mark.parametrize("gpu_ids", (None, "0", "0,3")) - def test_init(self, resource_type, gpu_ids): - ResourceTracker(resource_type, gpu_ids) + @pytest.mark.parametrize("output_path", ("fake", Path("fake"))) + def test_init(self, output_path, resource_type, gpu_ids): + ResourceTracker(output_path, resource_type, gpu_ids) @e2e_pytest_unit @pytest.mark.parametrize("resource_type", ("cpu", "gpu", "all", "cpu,gpu")) @@ -41,7 +42,7 @@ def test_start(self, resource_type, gpu_ids): expected_gpu_ids[0] = 0 # run - resource_tracker = ResourceTracker(resource_type, gpu_ids) + resource_tracker = ResourceTracker("fake_output", resource_type, gpu_ids) resource_tracker.start() self.mock_proc.start.assert_called_once() # check that a process to track resource usages starts @@ -51,7 +52,7 @@ def test_start(self, resource_type, gpu_ids): @e2e_pytest_unit def test_start_multiple_times(self): - resource_tracker = ResourceTracker() + resource_tracker = ResourceTracker("fake_output") # run multiple times resource_tracker.start() @@ -63,9 +64,9 @@ def test_start_multiple_times(self): def test_stop(self): output_path = Path("fake") - resource_tracker = ResourceTracker() + resource_tracker = ResourceTracker(output_path) resource_tracker.start() - resource_tracker.stop(output_path) + resource_tracker.stop() # check that code to terminate a process is executed properly self.mock_queue.put.assert_called_once_with(output_path) @@ -77,9 +78,9 @@ def test_stop_not_exit_normally(self): output_path = Path("fake") self.mock_proc.exitcode = None - resource_tracker = ResourceTracker() + resource_tracker = ResourceTracker(output_path) resource_tracker.start() - resource_tracker.stop(output_path) + resource_tracker.stop() # check that code to terminate a process is executed properly self.mock_queue.put.assert_called_once_with(output_path) @@ -90,8 +91,8 @@ def test_stop_not_exit_normally(self): @e2e_pytest_unit def test_stop_before_start(self): - resource_tracker = ResourceTracker() - resource_tracker.stop("fake") + resource_tracker = ResourceTracker("fake") + resource_tracker.stop() # check that code to make a process done isn't called self.mock_queue.put.assert_not_called() diff --git a/tests/unit/cli/utils/test_hpo.py b/tests/unit/cli/utils/test_hpo.py index d0b9467d1e2..f01a048a195 100644 --- a/tests/unit/cli/utils/test_hpo.py +++ b/tests/unit/cli/utils/test_hpo.py @@ 
-1,4 +1,5 @@ import json +import yaml from copy import deepcopy from pathlib import Path from tempfile import TemporaryDirectory @@ -450,6 +451,19 @@ def test_init_wrong_hpo_time_ratio(self, cls_task_env, hpo_time_ratio): with pytest.raises(ValueError): HpoRunner(cls_task_env, 100, 10, "fake_path", hpo_time_ratio) + @e2e_pytest_unit + @pytest.mark.parametrize("diff_from_min_bs", [0, 1]) + def test_init_fix_batch_size(self, cls_task_env, diff_from_min_bs): + task_env = TaskEnvironmentManager(cls_task_env) + with (Path(task_env.get_model_template_path()).parent / "hpo_config.yaml").open() as f: + hpo_config = yaml.safe_load(f) + batch_size_name = task_env.get_batch_size_name() + min_bs = hpo_config["hp_space"][batch_size_name]["range"][0] + train_dataset_size = min_bs + diff_from_min_bs + + hpo_runner = HpoRunner(cls_task_env, train_dataset_size, 10, "fake_path") + assert batch_size_name in hpo_runner._fixed_hp + @e2e_pytest_unit def test_run_hpo(self, mocker, cls_task_env): cls_task_env.model = None diff --git a/tools/README.md b/tools/README.md index dcc82a39007..b828cf78f2e 100644 --- a/tools/README.md +++ b/tools/README.md @@ -31,7 +31,10 @@ Arguments - -f / --file : Path to the YAML file describing the experiment setup. After all runs, results are aggregated and saved. - -d / --dryrun : Preview the experiment list before execution. Use with '-f / --file' argument. -Sample Experiment Recipe YAML File: +Both a single experiment and multiple experiments are supported. +Here are examples of each. + +Single Experiment Recipe YAML File: output_path: research_framework_demo/det_model_test constants: # value in constant can't have other constant or variable. @@ -57,6 +60,39 @@ Sample Experiment Recipe YAML File: - otx eval --test-data-roots ${dataset_path}/${dataset} +Multiple Experiment Recipe YAML File: + + output_path: research_framework_demo/cls_det_model_test + constants: + dataset_path: some_dataset_path + experiments: + - name: det + constants: + model_dir: otx/src/otx/algorithms/detection/configs/detection + variables: + model: + - cspdarknet_yolox + - mobilenetv2_atss + dataset: diopsis/12 + repeat: 2 + command: + - otx train ${model_dir}/${model}/template.yaml ... + - otx eval ... + - name: cls + constants: + model_dir: otx/src/otx/algorithms/classification/configs + dataset_path: other_dataset_path + variables: + model: + - efficientnet_b0_cls_incr + - deit_tiny + dataset: cifar10_300 + repeat: 2 + command: + - otx train ${model_dir}/${model}/template.yaml + --train-data-roots ${dataset_path}/${dataset} ... + - otx eval ... + Arguments for recipe - output_path (optional) : Output path where all experiment outputs are saved. Default is "./experiment\_{executed_time}" @@ -68,6 +104,14 @@ Arguments for recipe For example, if two models and two datasets are given as variables, then a total of 4 cases will be run as experiments. Also, the key of each variable will be a row header of the experiment result table. - repeat (optional) : Number of times to run experiments. Repeated experiments have different random seeds in "otx train" command. - command (required) : Specifies the commands to run. Supports both single commands and lists of commands. +- experiments (optional) : + To perform multiple experiments, the user is required to define a list of experiments in this section. + Each element in the list can contain all the keys mentioned above, excluding output_path. + The output path for each experiment is automatically set to output_path/name.
+ Values outside the experiment element, except for output_path and constants, will be disregarded. + If `constants` exists at both the uppermost level and in an experiment element, + they're merged, with the experiment element taking precedence. + - name (required) : Specifies the unique name for the experiment. This name will be utilized as the directory name where the output of the experiment is stored. Upon completion of each experiment, the results are organized within its own workspace. Following the conclusion of all experiments, all experiment results are aggregated in two distinct formats: diff --git a/tools/experiment.py b/tools/experiment.py index 6d9a271e547..a807aecf59a 100644 --- a/tools/experiment.py +++ b/tools/experiment.py @@ -19,13 +19,15 @@ from datetime import datetime, timedelta from itertools import product from pathlib import Path -from typing import Any, Dict, List, Optional, Union +from typing import Any, Dict, List, Optional, Tuple, Union import yaml from otx.cli.tools.cli import main as otx_cli from rich.console import Console from rich.table import Table +rich_console = Console() + def get_parser() -> argparse.ArgumentParser: """Parses command line arguments.""" @@ -66,6 +68,67 @@ def find_latest_file(root_dir: Union[Path, str], file_name: str) -> Union[None, return train_record_files[0] +def cvt_number_to_str(target: Dict): + """Convert int or float in dict to string. + + Args: + target (Dict): Dictionary object to change int or float to string in. + """ + result = copy(target) + + for key, val in result.items(): + if isinstance(val, (int, float)): + result[key] = str(val) + elif isinstance(val, list): + for i in range(len(val)): + if isinstance(val[i], (int, float)): + val[i] = str(val[i]) + + return result + + +class EvalResult: + """Class to save otx eval output. + + Current OTX eval output has different metrics depending on the task. + To deal with this, the class can store dynamically named metrics. + Each metric can be set or read in either a dict-like (ins["metric"]) or attribute-like (ins.metric) way. + "add" (only between instances having the same metrics) and "true divide" are supported. + """ + + def __getitem__(self, key): + """Support dict-like way to get attribute.""" + return getattr(self, key) + + def __setitem__(self, key, value): + """Support dict-like way to set attribute.""" + setattr(self, key, value) + + def __add__(self, obj: "EvalResult"): + """Add with a class having same metrics.""" + new_obj = deepcopy(self) + new_obj_metrics = vars(new_obj).keys() + + if new_obj_metrics != vars(obj).keys(): + raise KeyError( + "Two objects have different metrics. " + f"Left operand : {','.join(new_obj_metrics)} / Right operand : {','.join(vars(obj).keys())}" + ) + + for attr in new_obj_metrics: + new_obj[attr] += obj[attr] + return new_obj + + def __truediv__(self, divisor: Union[int, float]): + """Divide each metric in the class.""" + new_obj = deepcopy(self) + + for attr in vars(new_obj).keys(): + new_obj[attr] /= divisor + + return new_obj + + @dataclass class ExperimentResult: """Dataclass to manage experiment result.
@@ -76,44 +139,44 @@ class ExperimentResult: """ val_score: Union[float, None] = None - test_score: Union[float, None] = None + train_eval_result: Union[EvalResult, None] = None train_e2e_time: Union[timedelta, None] = None avg_iter_time: Union[float, None] = None std_iter_time: Union[float, None] = None avg_data_time: Union[float, None] = None std_data_time: Union[float, None] = None - export_model_score: Union[float, None] = None - avg_ov_infer_time: Union[float, None] = None + export_eval_result: Union[EvalResult, None] = None max_cpu_mem: Union[float, None] = None avg_cpu_util: Union[float, None] = None max_gpu_mem: Union[float, None] = None avg_gpu_util: Union[float, None] = None - optimize_model_score: Union[float, None] = None + optimize_eval_result: Union[EvalResult, None] = None epoch: Union[int, None] = None def get_formatted_result(self) -> Dict: """Return dictionary format result.""" result = dataclasses.asdict(self) + formatted_result = {} - for attr_name in ["max_cpu_mem", "max_gpu_mem"]: - max_mem = result.pop(attr_name) - result[f"{attr_name}(GiB)"] = max_mem - - for attr_name in ["avg_cpu_util", "avg_gpu_util"]: - res_util = result.pop(attr_name) - result[f"{attr_name}(%)"] = res_util - - if self.train_e2e_time is not None: - result["train_e2e_time"] = str(self.train_e2e_time).split(".")[0] + for key, val in result.items(): + if val is None: + continue + elif key in ["max_cpu_mem", "max_gpu_mem"]: + formatted_result[f"{key}(GiB)"] = round(val, 2) + elif key in ["avg_cpu_util", "avg_gpu_util"]: + formatted_result[f"{key}(%)"] = round(val, 2) + elif key == "train_e2e_time": + formatted_result[key] = str(self.train_e2e_time).split(".")[0] + elif isinstance(val, EvalResult): + task = key.split('_')[0] + for metric, score in vars(val).items(): + formatted_result[f"{metric}({task})"] = round(score, 4) + elif isinstance(val, float): + formatted_result[key] = round(val, 4) + else: + formatted_result[key] = val - # delete None value - for key in list(result.keys()): - if result[key] is None: - del result[key] - elif isinstance(result[key], float): - result[key] = round(result[key], 4) - - return result + return formatted_result def __add__(self, obj: "ExperimentResult"): """Add with same class. 
If None exists, it's skipped.""" @@ -152,15 +215,29 @@ def parse_formatted_dict(self, formatted_dict: Dict): """Parse a dictionary with same format.""" max_mem_pat = re.compile(r"max_.*_mem") cpu_util_pat = re.compile(r"avg.*_util") + eval_result_pat = re.compile(r"(.*)\((.*)\)") + for key, val in formatted_dict.items(): max_mem_name = max_mem_pat.search(key) cpu_util_name = cpu_util_pat.search(key) + eval_result_name = eval_result_pat.search(key) + if max_mem_name is not None: max_mem_name = max_mem_name.group(0) setattr(self, max_mem_name, val) elif cpu_util_name is not None: cpu_util_name = cpu_util_name.group(0) setattr(self, cpu_util_name, val) + elif eval_result_name is not None: + metric = eval_result_name.group(1) + task = eval_result_name.group(2) + eval_result = getattr(self, f"{task}_eval_result") + if eval_result is None: + eval_result = EvalResult() + eval_result[metric] = val + setattr(self, f"{task}_eval_result", eval_result) + else: + eval_result[metric] = val elif key == "train_e2e_time": setattr(self, key, parse_time_delta_fmt(val, "%H:%M:%S")) else: @@ -205,20 +282,21 @@ def _calculate_avg_std_per_iter(self): ) def _parse_eval_output(self, file_path: Path): - # NOTE: It is assumed that performance.json has key named either score or avg_time_per_image + for task in ["train", "export", "optimize"]: + if task in str(file_path.parent.name): + break + else: + print(f"Cannot parse eval output in {file_path.parent.name}") + return + with file_path.open("r") as f: eval_output: Dict = json.load(f) - if "train" in str(file_path.parent.name): - self._exp_result.test_score = list(eval_output.values())[0] - elif "export" in str(file_path.parent.name): - for key, val in eval_output.items(): - if key == "avg_time_per_image": - self._exp_result.avg_ov_infer_time = val - else: - self._exp_result.export_model_score = val - elif "optimize" in str(file_path.parent.name): - self._exp_result.optimize_model_score = list(eval_output.values())[0] + eval_result = EvalResult() + for metric, score in eval_output.items(): + eval_result[metric] = score + + setattr(self._exp_result, f"{task}_eval_result", eval_result) def _parse_resource_usage(self, file_path: Path): with file_path.open("r") as f: @@ -236,7 +314,7 @@ def _parse_cli_report(self, file_path: Path, save_val_score=True): with file_path.open("r") as f: lines = f.readlines() - val_score_pattern = re.compile(r"score: Performance\(score: ([-+]?\d+(\.\d*)?|\.\d+)") + val_score_pattern = re.compile(r"score:.*Performance\(score: ([-+]?\d+(\.\d*)?|\.\d+)") e2e_time_pattern = re.compile(r"time elapsed: '(\d+:\d+:\d+(\.\d*)?)'") for line in lines: if save_val_score: @@ -255,7 +333,7 @@ class MMCVExpParser(BaseExpParser): def parse_exp_log(self): """Parse experiment log.""" for task_dir in (self._workspace / "outputs").iterdir(): - if task_dir.is_symlink(): + if task_dir.is_symlink(): # prevent duplicated parsing continue if "train" in str(task_dir.name): @@ -289,13 +367,21 @@ def _parse_train_record(self, file_path: Path): lines = f.readlines() last_epoch = 0 + iter_time = [] + data_time = [] for line in lines: iter_history = json.loads(line) if iter_history.get("mode") == "train": - self._iter_time_arr.append(iter_history["time"]) - self._data_time_arr.append(iter_history["data_time"]) if iter_history["epoch"] > last_epoch: last_epoch = iter_history["epoch"] + if last_epoch <= 2: # if there are two or more epochs, the first epoch is excluded from the calculation + iter_time = [] + data_time = [] + iter_time.append(iter_history["time"]) +
data_time.append(iter_history["data_time"]) + + self._iter_time_arr.extend(iter_time) + self._data_time_arr.extend(data_time) self._exp_result.epoch = last_epoch @@ -331,16 +417,20 @@ def parse_exp_log(self): self._parse_eval_output(eval_files[0]) -def get_exp_parser(workspace: Path) -> BaseExpParser: +def get_exp_parser(workspace: Path) -> Union[BaseExpParser, None]: """Get experiment parser depending on framework. Args: workspace (Path): Workspace to parse. Returns: - BaseExpParser: Experiment parser. + Union[BaseExpParser, None]: Experiment parser. If template file doesn't exist in the workspace, return None. """ - with (workspace / "template.yaml").open("r") as f: + template_file = workspace / "template.yaml" + if not template_file.exists(): + return None + + with template_file.open("r") as f: template = yaml.safe_load(f) if "anomaly" in template["task_type"].lower(): @@ -360,6 +450,9 @@ def organize_exp_result(workspace: Union[str, Path], exp_meta: Optional[Dict[str workspace = Path(workspace) exp_parser = get_exp_parser(workspace) + if exp_parser is None: + print(f"Unable to determine the task of \"{workspace}\". Parsing experiment result is skipped.") + return exp_parser.parse_exp_log() exp_result = exp_parser.get_exp_result() @@ -396,24 +489,22 @@ def print_table(headers: List[str], rows: List[Dict[str, Any]], table_title: str rows (List[Dict[str, Any]]): Rows of table. table_title (str, optional): Table title. Defaults to "Table". """ - # print experiment summary to console table = Table(title=table_title) for header in headers: table.add_column(header, justify="center", no_wrap=True) - for each_exp_result_summary in rows: + for row in rows: table_row = [] for header in headers: - val = each_exp_result_summary.get(header) + val = row.get(header) table_row.append(str(val)) table.add_row(*table_row) - console = Console() - console.print(table) + rich_console.print(table, justify="center", crop=False) def aggregate_all_exp_result(exp_dir: Union[str, Path]): - """Aggregate all experiment results. + """Aggregate all experiment results and save them, along with a summary, as files. Args: exp_dir (Union[str, Path]): Experiment directory. @@ -488,8 +579,6 @@ def aggregate_all_exp_result(exp_dir: Union[str, Path]): rows.append(each_exp_result) write_csv(exp_dir / "exp_summary.csv", headers, rows) - print_table(headers, rows, "Experiment Summary") - @dataclass class Command: @@ -499,28 +588,44 @@ class Command: variable: Dict[str, str] = field(default_factory=dict) -class ExpRecipeParser: - """Class to parse an experiment recipe. +class ExpInfo: + """Class to store experiment information. + + It also does additional work to provide complete experiment information. + For example, it replaces constants or variables if necessary, + and then it builds all possible commands based on the variables. Args: - recipe_file (Union[str, Path]): Recipe file to parse. + command (Union[str, List[str]]): All commands to execute. + output_path (Path): Output path to save experiment result. + name (str, optional): Experiment name. Defaults to "". + constants (Dict[str, str], optional): + Constants. If there are constants in variables or commands, + they are replaced based on this value. Defaults to None. + variables (Dict[str, str], optional): + Variables. If there are variables in command, they're replaced based on this value. Defaults to None. + repeat (int, optional): How many times to repeat experiments. Defaults to 1.
""" - def __init__(self, recipe_file: Union[str, Path]): - if not os.path.exists(recipe_file): - raise RuntimeError(f"{recipe_file} doesn't exist.") - - with open(recipe_file, "r") as f: - self._exp_recipe: Dict = yaml.safe_load(f) - constants = self._exp_recipe.get("constants", {}) - self._cvt_number_to_str(constants) - self._constants: Dict[str, str] = constants - self._variables: Optional[Dict[str, str]] = None + def __init__( + self, + command: Union[str, List[str]], + output_path: Path, + name: str = "", + constants: Optional[Dict[str, str]] = None, + variables: Optional[Dict[str, str]] = None, + repeat: int = 1, + ): + self._raw_command = command self._commands: Optional[List[Command]] = None - self.output_path: Path = Path( - self._exp_recipe.get("output_path", f"experiment_{datetime.now().strftime('%Y%m%d_%H%M%S')}") - ) - self.repeat: int = self._exp_recipe.get("repeat", 1) + self.output_path = output_path + self.name = name + self._constants = constants if constants is not None else {} + if variables is None: + variables = {} + self._raw_variables = cvt_number_to_str(variables) + self._variables: Optional[Dict[str, str]] = None + self.repeat = repeat self._replace_pat = re.compile(r"\$\{(\w+)\}") @property @@ -532,9 +637,7 @@ def constants(self) -> Dict[str, str]: def variables(self) -> Dict[str, Union[str, List[str]]]: """Variables in recipe file. If it contains constants, they're replaced by real value.""" if self._variables is None: - variables = self._exp_recipe.get("variables", {}) - self._cvt_number_to_str(variables) - self._variables = self._replace_var_in_target(self.constants, variables) + self._variables = self._replace_var_in_target(self.constants, self._raw_variables) return self._variables @property @@ -547,18 +650,18 @@ def commands(self) -> List[Command]: List[Command]: List of Command instances. """ if self._commands is None: - command = self._exp_recipe.get("command", []) + command = self._raw_command if isinstance(command, str): command = [command] command = self._replace_var_in_target(self.constants, command) var_combinations = self._product_all_cases(self.variables, command) if not var_combinations: self._commands = [Command(command=command)] - - command_arr = [] - for var_combination in var_combinations: - command_arr.append(Command(self._replace_var_in_target(var_combination, command), var_combination)) - self._commands = command_arr + else: + command_arr = [] + for var_combination in var_combinations: + command_arr.append(Command(self._replace_var_in_target(var_combination, command), var_combination)) + self._commands = command_arr return self._commands def _product_all_cases( @@ -574,6 +677,7 @@ def _product_all_cases( if not found_keys: return [] + found_keys = sorted(found_keys) values_of_found_key = [] for key in found_keys: if isinstance(variable[key], list): @@ -608,16 +712,52 @@ def _replace_var_in_target( return target - @staticmethod - def _cvt_number_to_str(target: Dict): - """Convert int or float in dict to string.""" - for key, val in target.items(): - if isinstance(val, (int, float)): - target[key] = str(val) - elif isinstance(val, list): - for i in range(len(val)): - if isinstance(val[i], (int, float)): - val[i] = str(val[i]) + + +def parse_exp_recipe(recipe_file: Union[str, Path]) -> Tuple[List[ExpInfo], Path]: + """Parse an experiment recipe and return list of expeirment information and output path. + + Args: + recipe_file (Union[str, Path]): Recipe file to parse. + + Raises: + RuntimeError: If recipe file doesn't exist, error is raised. 
@dataclass @@ -635,24 +775,31 @@ def get_formatted_result(self) -> Dict: return result -def log_fail_cases(fail_cases: List[CommandFailInfo], output_path: Path): - """Print fail cases and save it as a file. +def log_exp_failed_cases( + failed_cases: Union[List[CommandFailInfo], Dict[str, List[CommandFailInfo]]], + output_path: Path, +): + """Print experiments' failed cases to the console and save them in each experiment directory as a file. Args: - fail_cases (List[CommandFailInfo]): False cases. - output_path (Path): Where fale cases are saved. + failed_cases (Union[List[CommandFailInfo], Dict[str, List[CommandFailInfo]]]): + A list of CommandFailInfo or a dictionary mapping an experiment name to its list of CommandFailInfo objects. + output_path (Path): Directory where the experiment directories exist. + """ - console = Console() - console.rule("[bold red]List of failed cases") - for each_fail_case in fail_cases: - console.print(f"Case : {each_fail_case.variable}", crop=False) - console.print(f"command : {each_fail_case.command}", crop=False) - console.print("Error log:", str(each_fail_case.exception), crop=False) - console.print() - console.rule() + if isinstance(failed_cases, list): + failed_cases = {"" : failed_cases} + + for exp_name, cases in failed_cases.items(): # avoid shadowing the failed_cases argument + rich_console.rule(f"[bold red]{exp_name} failed cases") + + for each_fail_case in cases: + rich_console.print(f"Case : {each_fail_case.variable}", crop=False) + rich_console.print(f"command : {each_fail_case.command}", crop=False) + rich_console.print("Error log:", str(each_fail_case.exception), crop=False) + rich_console.print() - with (output_path / "failed_cases.yaml").open("w") as f: - yaml.safe_dump([fail_case.get_formatted_result() for fail_case in fail_cases], f) + with (output_path / exp_name / "failed_cases.yaml").open("w") as f: + yaml.safe_dump([fail_case.get_formatted_result() for fail_case in cases], f)
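A usage sketch for the function above; the CommandFailInfo construction is hypothetical, since only its variable, command, and exception attributes are referenced in this diff:

from pathlib import Path

# Assumed field names, mirroring the attributes accessed above.
fail = CommandFailInfo(
    variable={"model": "MobileNetV2-ATSS"},
    command="otx train MobileNetV2-ATSS",
    exception=RuntimeError("illustrative failure"),
)

# Dictionary form: one console section and one failed_cases.yaml per experiment.
log_exp_failed_cases({"baseline": [fail]}, Path("outputs/experiment_set"))

# A bare list is also accepted and treated as a single unnamed experiment.
log_exp_failed_cases([fail], Path("outputs/experiment_set"))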
""" - console = Console() - console.rule("[bold red]List of failed cases") - for each_fail_case in fail_cases: - console.print(f"Case : {each_fail_case.variable}", crop=False) - console.print(f"command : {each_fail_case.command}", crop=False) - console.print("Error log:", str(each_fail_case.exception), crop=False) - console.print() - console.rule() + if isinstance(failed_cases, list): + failed_cases = {"" : failed_cases} + + for exp_name, failed_cases in failed_cases.items(): + rich_console.rule(f"[bold red]{exp_name} failed cases ") + + for each_fail_case in failed_cases: + rich_console.print(f"Case : {each_fail_case.variable}", crop=False) + rich_console.print(f"command : {each_fail_case.command}", crop=False) + rich_console.print("Error log:", str(each_fail_case.exception), crop=False) + rich_console.print() - with (output_path / "failed_cases.yaml").open("w") as f: - yaml.safe_dump([fail_case.get_formatted_result() for fail_case in fail_cases], f) + with (output_path / exp_name / "failed_cases.yaml").open("w") as f: + yaml.safe_dump([fail_case.get_formatted_result() for fail_case in failed_cases], f) class OtxCommandRunner: @@ -673,14 +820,14 @@ class OtxCommandRunner: "optimize": ["weights.pth", "openvino.bin"] } - def __init__(self, command_ins: Command, repeat_idx: int): + def __init__(self, command_ins: Command, workspace: Path, repeat_idx: int): self._command_ins = command_ins self._repeat_idx = repeat_idx self._command_var = copy(command_ins.variable) - self._workspace = Path("_".join(self._command_var.values()).replace("/", "_") + f"_repeat_{repeat_idx}") + self._workspace = workspace self._command_var["repeat"] = str(repeat_idx) self._fail_logs: List[CommandFailInfo] = [] - self._previous_cmd_entry: Optional[List[str]] = [] + self._previous_cmd_entry: List[str] = [] @property def fail_logs(self) -> List[CommandFailInfo]: @@ -781,6 +928,60 @@ def set_arguments_to_cmd(command: List[str], key: str, value: Optional[str] = No command.insert(index, key) +def run_experiment(exp_info: ExpInfo, dryrun: bool = False) -> List[CommandFailInfo]: + """Run single expeirment. + + Args: + exp_info (ExpInfo): ExpInfo having expreiment information to conduct. + dryrun (bool, optional): Whether to only print experiment commands. Defaults to False. + + Returns: + List[CommandFailInfo]: List of failed command information. + """ + failed_cases: List[CommandFailInfo] = [] + + for command_ins in exp_info.commands: + for repeat_idx in range(exp_info.repeat): + otx_cmd_runner = OtxCommandRunner( + command_ins, + exp_info.output_path + / "_".join(list(command_ins.variable.values()) + ["repeat", str(repeat_idx)]).replace("/", "_"), + repeat_idx + ) + otx_cmd_runner.run_command_list(dryrun) + failed_cases.extend(otx_cmd_runner.fail_logs) + + if not dryrun: + aggregate_all_exp_result(exp_info.output_path) + + return failed_cases + + +def print_experiments_summary(output_path: Path): + """Print experiment summary to console and save it as a file. + + Args: + output_path (Path): Output path where experiment summary file is saved. + """ + rich_console.rule("[bold green]Experiment summary") + + for summary_file in output_path.rglob("exp_summary.csv"): + exp_name = summary_file.parent.name + if not summary_file.exists(): + print(f"{exp_name} doesn't have exp_summary.csv file. 
Skipped.") + continue + + with summary_file.open() as f: + exp_summary_csv = csv.reader(f) + + headers = next(exp_summary_csv) + rows = [] + for row in exp_summary_csv: + rows.append(dict((header, val) for header, val in zip(headers, row))) + + print_table(headers, rows, f"{exp_name}") + + def run_experiment_recipe(recipe_file: Union[str, Path], dryrun: bool = False): """Run experiments based on the recipe. @@ -788,26 +989,21 @@ def run_experiment_recipe(recipe_file: Union[str, Path], dryrun: bool = False): recipe_file (Union[str, Path]): Recipe file to run. dryrun (bool, optional): Whether to only print experiment commands. Defaults to False. """ - exp_recipe = ExpRecipeParser(recipe_file) - output_path = exp_recipe.output_path - output_path.mkdir(exist_ok=True) - current_dir = os.getcwd() - os.chdir(output_path) - - fail_cases: List[CommandFailInfo] = [] - for command_ins in exp_recipe.commands: - for repeat_idx in range(exp_recipe.repeat): - otx_cmd_runner = OtxCommandRunner(command_ins, repeat_idx) - otx_cmd_runner.run_command_list(dryrun) - fail_cases.extend(otx_cmd_runner.fail_logs) + total_failed_cases: Dict[str, List[CommandFailInfo]] = {} + exp_info_list, output_path = parse_exp_recipe(recipe_file) + for exp_info in exp_info_list: + failed_cases = run_experiment(exp_info, dryrun) + total_failed_cases[exp_info.name] = failed_cases - os.chdir(current_dir) + if dryrun: + return - if fail_cases: - log_fail_cases(fail_cases, output_path) + for failed_cases in total_failed_cases.values(): + if failed_cases: + log_exp_failed_cases(total_failed_cases, output_path) + break - if not dryrun: - aggregate_all_exp_result(output_path) + print_experiments_summary(output_path) def main(): diff --git a/tox.ini b/tox.ini index 1d38a6e391b..6c7f7da2581 100644 --- a/tox.ini +++ b/tox.ini @@ -63,6 +63,12 @@ commands = deps = {[testenv]deps} -r{toxinidir}/requirements/dev.txt +passenv = + {[testenv]passenv} + MLFLOW_TRACKING_SERVER_URI + BENCHMARK_RESULTS_CLEAR + GH_CTX_REF_NAME + GH_CTX_SHA commands = python -m pytest -ra --showlocals --csv={toxworkdir}/{envname}.csv {posargs:tests/integration/{[testenv]test_dir}} From 691455145a6c579609a161199b4db0bc9b9442a5 Mon Sep 17 00:00:00 2001 From: Prokofiev Kirill Date: Tue, 20 Feb 2024 15:11:58 +0100 Subject: [PATCH 18/39] Merge develop 2 (#2936) * Update base.txt updated dependency version of datumaro * Update __init__.py update version string * Update requirements.txt * Temporarily skip visual prompting openvino integration test (#2323) * Fix import dm.DatasetSubset (#2324) Signed-off-by: Kim, Vinnam * Fix semantic segmentation soft prediction dtype (#2322) * Fix semantic segmentation soft prediction dtype * relax ref sal vals check --------- Co-authored-by: Songki Choi * Contrain yapf verison lesser than 0.40.0 (#2328) contrain_yapf_version * Fix detection e2e tests (#2327) Fix for detection * Mergeback: Label addtion/deletion 1.2.4 --> 1.4.0 (#2326) * Make black happy * Fix conflicts * Merge-back: add test datasets and edit the test code * Make black happy * Fix mis-merge * Make balck happy * Fix typo * Fix typoi --------- Co-authored-by: Songki Choi * Bump datumaro up to 1.4.0rc2 (#2332) bump datumaro up to 1.4.0rc2 * Tiling Doc for releases 1.4.0 (#2333) * Add tiling documentation * Bump otx version to 1.4.0rc2 (#2341) * OTX deploy for visual prompting task (#2311) * Enable `otx deploy` * (WIP) integration test * Docstring * Update args for create_model * Manually set image embedding layout * Enable to use model api for preprocessing - `fit_to_window` 
doesn't work expectedly, so newly implemented `VisualPromptingOpenvinoAdapter` to use new resize function * Remove skipped test * Updated * Update unit tests on model wrappers * Update * Update configuration * Fix not to patch pretrained path * pylint & update model api version in docstring --------- Co-authored-by: Wonju Lee * Bump albumentations version in anomaly requirements (#2350) increment albumentations version * Update action detection (#2346) * Remove skip mark for PTQ test of action detection * Update action detection documentation * Fix e2e (#2348) * Change classification dataset from dummy to toy * Revert test changes * Change label name for multilabel dataset * Revert e2e test changes * Change ov test cases' threshold * Add parent's label * Update ModelAPI in 1.4 release (#2347) * Upgrade model API * Update otx in exportable code * Fix unit tests * Fix black * Fix detection inference * Fix det tiling * Fix mypy * Fix demo * Fix visualizer in demo * Fix black * Add OTX optimize for visual prompting task (#2318) * Initial commit * Update block * (WIP) otx optimize * Fix * WIP * Update configs & exported outputs * Remove unused modules for torch * Add unit tests * pre-commit * Update CHANGELOG * Update detection docs (#2335) * Update detection docs * Revert template id changes * Fix wrong template id * Update docs/source/guide/explanation/algorithms/object_detection/object_detection.rst Co-authored-by: Eunwoo Shin * Update docs/source/guide/explanation/algorithms/object_detection/object_detection.rst Co-authored-by: Eunwoo Shin --------- Co-authored-by: Eunwoo Shin * Add visual prompting documentation (#2354) * (WIP) write docs * Add visual prompting documentation * Update CHANGELOG --------- Co-authored-by: sungchul.kim * Remove custom modelapi patch in visual prompting (#2359) * Remove custom modelapi patch * Update test * Fix graph metric order and label issues (#2356) * Fix graph metric going backward issue * Add license notice * Fix pre-commit issue * Add rename items & logic for metric --------- Signed-off-by: Songki Choi * Update multi-label document and conversion script (#2358) Update docs, label convert script * Update third party programs (#2365) * Make anomaly task compatible with older albumentations versions (#2363) * fix transforms export in metadata * wrap transform dict * add todo for updating to_dict call * Fixing detection saliency map for one class case (#2368) * fix softmax * fix validity tests * Add e2e test for visual prompting (#2360) * (WIP) otx optimize * pre-commit * (WIP) set e2e * Remove nncf config * Add visual prompting requirement * Add visual prompting in tox * Add visual prompting in setup.py * Fix typo * Delete unused configuration.yaml * Edit test_name * Add to limit activation range * Update from `vp` to `visprompt` * Fix about no returning the first label * pre-commit * (WIP) otx optimize * pre-commit * (WIP) set e2e * Remove nncf config * Add visual prompting requirement * Add visual prompting in tox * Add visual prompting in setup.py * Fix typo * pre-commit * Add actions * Update tests/e2e/cli/visual_prompting/test_visual_prompting.py Co-authored-by: Jaeguk Hyun * Skip PTQ e2e test * Change task name * Remove skipped tc --------- Co-authored-by: Jaeguk Hyun * Fix e2e (#2366) * Change e2e reference name * Update openvino eval threshold for multiclass classification * Change comment message * Fix tiling e2e tests --------- Co-authored-by: GalyaZalesskaya * Add Dino head unit tests (#2344) Recover DINO head unit tests * Update for release 
1.4.0rc2 (#2370) * update for release 1.4.0rc2 * Add skip mark for unstable unit tests --------- Co-authored-by: jaegukhyun * Fix NNCF training on CPU (#2373) * Align label order between Geti and OTX (#2369) * align label order * align with pre-commit * update CHANGELOG.md * deal with edge case * update type hint * Remove CenterCrop from Classification test pipeline and editing missing docs link (#2375) * Fix missing link for docs and removing centercrop for classification data pipeline * Revert the test threshold * Fix H-label classification (#2377) * Fix h-labelissue * Update unit tests * Make black happy * Fix unittests * Make black happy * Fix update heades information func * Update the logic: consider the loss per batch * Update for release 1.4 (#2380) * updated for 1.4.0rc3 * update changelog & release note * bump datumaro version up --------- Co-authored-by: Songki Choi * Switch to PTQ for sseg (#2374) * Switch to PTQ for sseg * Update log messages * Fix invalid import structures in otx.api (#2383) Update tiler.py * Update for 1.4.0rc4 (#2385) update for release 1.4.0rc4 * [release 1.4.0] XAI: Return saliency maps for Mask RCNN IR async infer (#2395) * Return saliency maps for openvino async infer * add workaround to fix yapf importing error --------- Co-authored-by: eunwoosh * Update for release 1.4.0 (#2399) update version string Co-authored-by: Sungman Cho * Fix broken links in documentation (#2405) * fix docs links to datumaro's docs * fix docs links to otx's docs * bump version to 1.4.1 * Update exportable code README (#2411) * Updated for release 1.4.1 (#2412) updated for release 1.4.1 * Add workaround for the incorrect meta info M-RCNN (used for XAI) (#2437) Add workaround for the incorrect mata info * Add model category attributes to model template (#2439) Add model category attributes to model template * Add model category & status fields in model template * Add is_default_for_task attr to model template * Update model templates with category attrs * Add integration tests for model templates consistency * Fix license & doc string * Fix typo * Refactor test cases * Refactor common tests by generator --------- Signed-off-by: Songki Choi * Update for 1.4.2rc1 (#2441) update for release 1.4.2rc1 * Fix label list order for h-label classification (#2440) * Fix label list for h-label cls * Fix unit tests * Modified fq numbers for lite HRNET (#2445) modified fq numbers for lite HRNET * Update PTQ ignored scope for hrnet 18 mod2 (#2449) Update ptq ignored scope for hrnet 18 mod2 * Fix OpenVINO inference for legacy models (#2450) * bug fix for legacy openvino models * Add tests * Specific exceptions --------- * Update for 1.4.2rc2 (#2455) update for release 1.4.2rc2 * Prevent zero-sized saliency map in tiling if tile size is too big (#2452) * Prevent zero-sized saliency map in tiling if tile size is too big * Prevent zero-sized saliency in tiling (PyTorch) * Add unit tests for Tiler merge features methods --------- Co-authored-by: Galina * Update pot fq reference number (#2456) update pot fq reference number to 15 * Bump datumaro version to 1.5.0rc0 (#2470) bump datumaro version to 1.5.0rc0 * Set tox version constraint (#2472) set tox version constraint - https://github.com/tox-dev/tox/issues/3110 * Bug fix for albumentations (#2467) * bug fix for legacy openvino models * Address albumentation issue --------- Co-authored-by: Ashwin Vaidya * update for release 1.4.2rc3 * Add a dummy hierarchical config required by MAPI (#2483) * bump version to 1.4.2rc4 * Bump datumaro version (#2502) 
* bump datumaro version * remove deprecated/reomved attribute usage of the datumaro * Upgrade nncf version for 1.4 release (#2459) * Upgrade nncf version * Fix nncf interface warning * Set the exact nncf version * Update FQ refs after NNCF upgrade * Use NNCF from pypi * Update version for release 1.4.2rc5 (#2507) update version for release 1.4.2rc5 * Update for 1.4.2 (#2514) update for release 1.4.2 * create branch release/1.5.0 * Delete mem cache handler after training is done (#2535) release mem cache handler after training is done * Fix bug that auto batch size doesn't consider distributed training (#2533) * consider distributed training while searching batch size * update unit test * reveret gpu memory upper bound * fix typo * change allocated to reserved * add unit test for distributed training * align with pre-commit * Apply fix progress hook to release 1.5.0 (#2539) * Fix hook's ordering issue. AdaptiveRepeatHook changes the runner.max_iters before the ProgressHook * Change the expression * Fix typo * Fix multi-label, h-label issue * Fix auto_bs issue * Apply suggestions from code review Co-authored-by: Eunwoo Shin * Reflecting reviews * Refactor the name of get_data_cfg * Revert adaptive hook sampler init * Refactor the function name: get_data_cfg -> get_subset_data_cfg * Fix unit test errors * Remove adding AdaptiveRepeatDataHook for autobs * Remove unused import * Fix detection and segmentation case in Geti scenario --------- Co-authored-by: Eunwoo Shin * Re introduce adaptive scheduling for training (#2541) * Re-introduce adaptive patience for training * Revert unit tests * Update for release 1.4.3rc1 (#2542) * Mirror Anomaly ModelAPI changes (#2531) * Migrate anomaly exportable code to modelAPI (#2432) * Fix license in PR template * Migrate to modelAPI * Remove color conversion in streamer * Remove reverse_input_channels * Add float * Remove test as metadata is no longer used * Remove metadata from load method * remove anomalib openvino inferencer * fix signature * Support logacy OpenVINO model * Transform image * add configs * Re-introduce adaptive training (#2543) * Re-introduce adaptive patience for training * Revert unit tests * Fix auto input size mismatch in eval & export (#2530) * Fix auto input size mismatch in eval & export * Re-enable E2E tests for Issue#2518 * Add input size check in export testing * Format float numbers in log * Fix NNCF export shape mismatch * Fix saliency map issue * Disable auto input size if tiling enabled --------- Signed-off-by: Songki Choi * Update ref. 
fq number for anomaly e2e2 (#2547) * Skip e2e det tests by issue2548 (#2550) * Add skip to chained TC for issue #2548 (#2552) * Update for release 1.4.3 (#2551) * Update MAPI for 1.5 release (#2555) Upgrade MAPI to v 0.1.6 (#2529) * Upgrade MAPI * Update exp code demo commit * Fix MAPI imports * Update ModelAPI configuration (#2564) * Update MAPI rt infor for detection * Upadte export info for cls, det and seg * Update unit tests * Disable QAT for SegNexts (#2565) * Disable NNCF QAT for SegNext * Del obsolete pot configs * Move NNCF skip marks to test commands to avoid duplication * Add Anomaly modelAPI changes to releases/1.4.0 (#2563) * bug fix for legacy openvino models * Apply otx anomaly 1.5 changes * Fix tests * Fix compression config * fix modelAPI imports * update integration tests * Edit config types * Update keys in deployed model --------- Co-authored-by: Ashwin Vaidya Co-authored-by: Kim, Sungchul * Fix the CustomNonLinearClsHead when the batch_size is set to 1 (#2571) Fix bn1d issue Co-authored-by: sungmanc * Update ModelAPI configuration (#2564 from 1.4) (#2568) Update ModelAPI configuration (#2564) * Update MAPI rt infor for detection * Upadte export info for cls, det and seg * Update unit tests * Update for 1.4.4rc1 (#2572) * Hotfix DatasetEntity.get_combined_subset function loop (#2577) Fix get_combined_subset function * Revert default input size to `Default` due to YOLOX perf regression (#2580) Signed-off-by: Songki Choi * Fix for the degradation issue of the classification task (#2585) * Revert to sync with 1.4.0 * Remove repeat data * Convert to the RGB value * Fix color conversion logic * Fix precommit * Bump datumaro version to 1.5.1rc3 (#2587) * Add label ids to anomaly OpenVINO model xml (#2590) * Add label ids to model xml --------- * Fix DeiT-Tiny model regression during class incremental training (#2594) * enable IBloss for DeiT-Tiny * update changelog * add docstring * Add label ids to model xml in release 1.5 (#2591) Add label ids to model xml * Fix DeiT-Tiny regression test for release/1.4.0 (#2595) * Fix DeiT regression test * update changelog * temp * Fix mmcls bug not wrapping model in DataParallel on CPUs (#2601) Wrap multi-label and h-label classification models by MMDataParallel in case of CPU training. 
--------- Signed-off-by: Songki Choi * Fix h-label loss normalization issue w/ exclusive label group of singe label (#2604) * Fix h-label loss normalization issue w/ exclusive label group with signle label * Fix non-linear version --------- Signed-off-by: Songki Choi * Boost up Image numpy accessing speed through PIL (#2586) * boost up numpy accessing speed through PIL * update CHANGELOG * resolve precommit error * resolve precommit error * add fallback logic with PIL open * use convert instead of draft * Add missing import pathlib for cls e2e testing (#2610) * Fix division by zero in class incremental learning for classification (#2606) * Add empty label to reproduce zero-division error Signed-off-by: Songki Choi * Fix minor typo Signed-off-by: Songki Choi * Fix empty label 4 -> 3 Signed-off-by: Songki Choi * Prevent division by zero Signed-off-by: Songki Choi * Update license Signed-off-by: Songki Choi * Update CHANGELOG.md Signed-off-by: Songki Choi * Fix inefficient sampling Signed-off-by: Songki Choi * Revert indexing Signed-off-by: Songki Choi * Fix minor typo Signed-off-by: Songki Choi --------- Signed-off-by: Songki Choi * Unify logger usage (#2612) * unify logger * align with pre-commit * unify anomaly logger to otx * change logger file path * align with pre-commit * change logger file path in missing file * configure logger after ConfigManager is initialized * configure logger when ConfigManager instance is initialized * update unit test code * move config_logger to each cli file * align with pre-commit * change part still using mmcv logger * Fix XAI algorithm for Detection (#2609) * Impove saliency maps algorithm for Detection * Remove extra changes * Update unit tests * Changes for 1 class * Fix pre-commit * Update CHANGELOG * Tighten dependency constraint only adapting latest patches (#2607) * tighten dependency constratint only adapting latest patches * adjust scikit-image version w.r.t python version * adjust tensorboard version w.r.t python version * remove version specifier for scikit-image * Add metadata to optimized model (#2618) * bug fix for legacy openvino models * Add metadata to optimized model * Revert formatting changes --------- Co-authored-by: Ashwin Vaidya * modify omegaconf version constraint * [release 1.5.0] Fix XAI algorithm for Detection (#2617) Update detection XAI algorithm * Update dependency constraint (#2622) * Update tpp (#2621) * Fix h-label bug of missing parent labels in output (#2626) * Fix h-label bug of missing parent labels in output * Fix h-label test data label schema * Update CHANGELOG.md --------- Signed-off-by: Songki Choi * Update publish workflow (#2625) update publish workflow to push whl to internal pypi * bump datumaro version to ~=1.5.0 * fixed mistake while mergeing back 1.4.4 * modifiy readme * remove openvino model wrapper class * remove openvino model wrapper tests * [release 1.5.0] DeiT: enable tests + add ViTFeatureVectorHook (#2630) Add ViT feature vector hook * Fix docs broken link to datatumaro_h-label Signed-off-by: Songki Choi * Fix wrong label settings for non-anomaly task ModelAPIs Signed-off-by: Songki Choi * Update publish workflow for tag checking (#2632) * Update e2e tests for XAI Detection (#2634) Fix e2e XAI ref value * Disable QAT for newly added models (#2636) * Update release note and readme (#2637) * update release note and readme * remove package upload step on internal publish wf * update release note and, changelog, and readme * update version string to 1.6.0dev * fix datumaro version to 1.6.0rc0 * 
Mergeback 1.5.0 to develop (#2642) * Update publish workflow for tag checking (#2632) * Update e2e tests for XAI Detection (#2634) * Disable QAT for newly added models (#2636) * Update release note and readme (#2637) * remove package upload step on internal publish wf * update release note and, changelog, and readme * update version string to 1.6.0dev --------- Co-authored-by: Galina Zalesskaya Co-authored-by: Jaeguk Hyun * Revert "Mergeback 1.5.0 to develop" (#2645) Revert "Mergeback 1.5.0 to develop (#2642)" This reverts commit 2f67686103df873d020681f6d504f9595ce4a963. * Add a tool to help conduct experiments (#2651) * implement run and experiment * implement experiment result aggregator * refactor experiment.py * refactor run.py * get export model speed * add var collumn * refactor experiment.py * refine a way to update argument in cmd * refine resource tracker * support anomaly on research framework * refine code aggregating exp result * bugfix * make other task available * eval task save avg_time_per_images as result * Add new argument to track CPU&GPU utilization and memory usage (#2500) * add argument to track resource usage * fix bug * fix a bug in a multi gpu case * use total cpu usage * add unit test * add mark to unit test * cover edge case * add pynvml in requirement * align with pre-commit * add license comment * update changelog * refine argument help * align with pre-commit * add version to requirement and raise an error if not supported values are given * apply new resource tracker format * refactor run.py * support optimize in research framework * cover edge case * Handle a case where fail cases exist * make argparse raise error rather than exit if problem exist * revert tensorboard aggregator * bugfix * save failed cases as yaml file * deal with integer in variables * add epoch to metric * use latest log.json file * align with otx logging method * move experiment.py from cli to tools * refactor experiment.py * merge otx run feature into experiment.py * move set_arguments_to_cmd definition into experiment.py * refactor experiment.py * bugfix * minor bugfix * use otx.cli instead of each otx entry * add feature to parse single workspace * add comments * fix bugs * align with pre-commit * revert parser argument * align with pre-commit * Make `max_num_detections` configurable (#2647) * Make max_num_detections configurable * Fix RCNN case with integration test * Apply max_num_detections to train_cfg, too --------- Signed-off-by: Songki Choi * Revert inference batch size to 1 for instance segmentation (#2648) Signed-off-by: Songki Choi * Fix CPU training issue on non-CUDA system (#2655) Fix bug that auto adaptive batch size raises an error if CUDA isn't available (#2410) --------- Co-authored-by: Sungman Cho Co-authored-by: Eunwoo Shin * Remove unnecessary log while building a model (#2658) * revert logger in otx/algorithms/detection/adapters/mmdet/utils/builder.py * revert logger in otx/algorithms/classification/adapters/mmcls/utils/builder.py * make change more readable * Fix a minor bug of experiment.py (#2662) fix bug * Not check avg_time_per_image during test (#2665) * ignore avg_time_per_image during test * do not call stdev when length of array is less than 2 * ignore avg_time_per_image during regerssion test * Update docs for enabling sphinx.ext.autosummary (#2654) * fix some errors/warnings on docs source * enable sphinx-autosummary for API reference documentation * Update Makefile * update sphinx configuration * Update PTQ docs (#2672) * Replace POT -> PTQ * Fixes from 
comments * Update regression tests for develop (#2652) * Update regression tests (#2556) * update reg tests * update test suit * update regression criteria --------- Co-authored-by: Eunwoo Shin * Exclude py37 target config for cibuildwheel (#2673) * Add `--dryrun` option to tools/experiment.py (#2674) * Fix variable override bug * Add --dryrun option to see experiment list --------- Signed-off-by: Songki Choi * Update OTX explain CLI arguments (#2671) * Change int8 to uint8 to XAI tests * Add probabilities for CLI demo * Rename arguments for explain * Fix pre-commit * Remove extra changes * Fix integration tests * Fix integration "explain_all_classes" test for OV * Fix e2e tests for explain (#2681) * Add README.md for experiment.py (#2688) * write draft readme * refine readme * align with pre-commit * Fix typo in reg test cmd (#2691) * Select more proper model weight file according to commands run just before (#2696) * consider more complex case when prepare eval and optimize * update readme * align with pre-commit * add comment * Add visual prompting zero-shot learning (`learn` & `infer`) (#2616) * Add algobackend & temp configs * Update config * WIP * Fix to enable `algo_backend` * (WIP) Update dataset * (WIP) Update configs * (WIP) Update tasks * (WIP) Update models * Enable `learn` task through otx.train * (WIP) enable `infer` (TODO : normalize points) * Fix when `state_dict` is None * Enable `ZeroShotInferenceCallback` * Enable otx infer * Enable to independently use processor * Revert max_steps * Change `postprocess_masks` to `staticmethod` * Add `PromptGetter` & Enable `learn` and `infer` * precommit * Fix args * Fix typo * Change `id` to `id_` * Fix import * Fix args * precommit * (WIP) Add unit tests * Fix * Add unit tests * Fix * Add integration tests * precommit * Update CHANGELOG.md * Update docstring and type annotations * Fix * precommit * Fix unused args * precommit * Fix * Fix unsupported dtype in ov graph constant converter (#2676) * Fix unsupported dtype in ov graph constant converter * Fix more ov-graph related unit tests * Skip failure TC with adding issue number ref. (#2717) * Fix visual prompting e2e test (#2719) Skip zero-shot e2e * Remove duplicated variable combination in experiment.py (#2713) * Enhance detection & instance segmentation experiment (#2710) * Compute precision and recall along with f-measure * Log performance * Accept ellipse annotation from datumaro format * Fix dataset adapter condition for det/iset * Insert garbage collection btw experiments * Upgrade NNCF & OpenVINO (#2656) * Upgrade OV MAPI and NNCF version * Update demo requirements * Update changelog * Update datumaro * Add rust installation * Update NNCF configs for IS models * Update more fqs * Exclude nncf from upgrade * Revert "Update NNCF configs for IS models" This reverts commit 7c8db8cc0b5e5183b621b82c2bd4225de694558b. * Revert "Update more fqs" This reverts commit 5b91c329666632645606d5c42cf41cf268e4063a. * Revert "Exclude nncf from upgrade" This reverts commit 8926c51990efa81b40b8d0226348c12600da3cdc. * Update FQs * Revert "Revert "Update NNCF configs for IS models"" This reverts commit f904c0cbe144a2792e51eae67e4d49ca4dec6eb4. 
* Disable explain for NNCF detection task * Update FQs for anomaly * Update cls FQs * Update datumaro * Update exportable code requirements * Add unit test to cover the changes * Fix multilabel classification class index (#2736) Fix multilabel cls * Refine parsing final score of training in experiment.py (#2738) refine val parser * Make mean teacher algorithm consider distributed training (#2729) * make mean_teacher consider distributed training * align with pre-commit * re-enable test case * move tensor not to cuda but current device * apply comment * Add visual prompting zero-shot learning (`export`, IR inference) (#2706) * Add algobackend & temp configs * Update config * WIP * Fix to enable `algo_backend` * (WIP) Update dataset * (WIP) Update configs * (WIP) Update tasks * (WIP) Update models * Enable `learn` task through otx.train * (WIP) enable `infer` (TODO : normalize points) * Fix when `state_dict` is None * Enable `ZeroShotInferenceCallback` * Enable otx infer * Enable to independently use processor * Revert max_steps * Change `postprocess_masks` to `staticmethod` * Add `PromptGetter` & Enable `learn` and `infer` * precommit * Fix args * Fix typo * Change `id` to `id_` * Fix import * Fix args * precommit * (WIP) Add unit tests * Fix * Add unit tests * Fix * Add integration tests * precommit * Update CHANGELOG.md * Update docstring and type annotations * Fix * precommit * Reuse SAM modules for `export` & Add dataset * Fix * Enable `export` * Convert fp32 * Update logic & tests * Fix & Add prompt getter in `model_adapter_keys` * Initial `Inferencer`, `Task`, and `Model` * Fix to use original mask decoder during inference * Remove internal loop in `PromptGetter` * Update IO * (WIP) Add unit tests for export * Update `PromptGetter` to use only tensor ops * Fix issue about `original_size` disappear in onnx graph * (WIP) Add export unit test * Update * Fix typo * Update * Fix unexpected IF & Update inputs to avoid issues which OV on CPU doesn't support dynamic operations * Enable `PromptGetter` to handle #labels itself * Add ov inferencer * Fix overflow during casting dtype & duplicated cast * Fix * Add unit&integration tests * pre-commit * Fix original vpms * Fix intg & e2e tests * Change mo CLI to API * precommit * Remove blocks * Update CHANGELOG.md * Avoid repeatedly assigning constant tensors/arrays * Fix typo * Automate performance benchmark (#2742) * Add parameterized perf test template * Split acccuracy / perf tests * Automate speed test setting * Add benchmark summary fixture * Add multi/h-label tests * Add detection tests * Add instance segmentationt tests * Add tiling tests * Add semantic segmenation tests * Add anomaly test * Update tools/expreiment.py (#2751) * have constant exp directory name * support to parse dynamic eval output * align with pre-commit * fix minor unit test bug * Add performance benchmark github action workflow (#2762) * Split accuracy & speed benchmark github workflows (#2763) * Fix a bug that error is raised when train set size is greater than minimumof batch size in HPO by exactly 1 (#2760) deal with HPO edge case * Fix a bug that a process tracking resource usage doesn't exit when main process raises an error (#2765) * termiate a process tracking resource usage if main process raises an error * call stop() only if ExitStack is None * Skip large datasets for iSeg perf benchmark (#2766) * Support multiple experiments in single recipe for tools/experiment.py (#2757) * implement draft version * update logging failed cases * align with pre-commit * add 
doc string * Update README file * fix bugs: single command, failed case output * exclude first epoch from calculating iter time * fix weird name used when there is no variables * align with pre-commit * initialize iter_time and data_time at first * Enable perf benchmark result logging to mlflow server (#2768) * Bump datumaro version to 1.6.0rc1 (#2784) * bump datumaro version to 1.6.0rc1 * remove rust toolchain installation step from workflows * Update perf logging (#2785) * Update perf logging workflow to get branch+sha from gh context (#2791) * update perf logging workflow to get branch+sha from gh context * skip logging when tracking server uri is not configured * Add visual prompting zero-shot learning (optimize, documentation, bug fixes) (#2753) * Fix to resize bbox * (WIP) Add post-checking between masks with different labels * Fix to use the first mask in the first loop * Add post-checking between masks with different labels * pre-commit * Add optimize task * pre-commit * Add e2e * Update documentation * Update CHANGELOG * Check performance benchmark result with reference (#2821) * Average 'Small' (/1 /2 /3) dataset benchmark results * Load perf result with indexing * Add speed ref check for all tasks * Add accuracy ref check for all tasks * Mergeback releases/1.5.0 to develop (#2830) * Update MAPI version (#2730) * Update dependency for exportable code (#2732) * Filter invalid polygon shapes (#2795) --------- Co-authored-by: Vladislav Sovrasov Co-authored-by: Eugene Liu * Create OSSF scorecard workflow (#2831) * Fix ossf/scorecard-action version (#2832) * Update scorecard.yml * Update perf benchmark reference (#2843) * Set default wf permission to read-all (#2882) * Remedy token permission issue (#2888) * remedy token-permission issues - part2 * removed dispatch event from scorecard wf * Add progress callback interface to HPO (#2889) * add progress callback as HPO argument * deal with edge case * Restrict configurable parameters to avoid unreasonable cost for SaaS trial (#2891) * Reduce max value of POT samples to 1k * Reduce max value of num_iters to 1k * Fix pre-commit * Fix more token-permission issues - part3 (#2893) * Resolve pinned-dependency issues on publish_internal workflow (#2907) * Forward unittest workloads to AWS (#2887) * Resolve pinned dependency issues on workflows (#2909) * Fix pinned-dependency issues - part2 (#2911) * Add pinning dependencies (#2916) * Update pip install cmd to use hashes (#2919) * Fix HPO progress callback bug (#2908) fix minor bug * Fix pinned-dependencies issues (#2929) * Remove unused test files (#2930) * Update weekly workflow to run perf tests (#2920) * update weekly workflow to run perf tests * Fix missing fixture in perf test * update input to perf tests for weekly --------- Co-authored-by: Songki Choi * Adjust permission of documentation workflows from pages to contents for writing (#2933) * remove unused import --------- Signed-off-by: Kim, Vinnam Signed-off-by: Songki Choi Co-authored-by: Yunchu Lee Co-authored-by: Kim, Sungchul Co-authored-by: Vinnam Kim Co-authored-by: Evgeny Tsykunov Co-authored-by: Songki Choi Co-authored-by: Eunwoo Shin Co-authored-by: Jaeguk Hyun Co-authored-by: Sungman Cho Co-authored-by: Eugene Liu Co-authored-by: Wonju Lee Co-authored-by: Dick Ameln Co-authored-by: Vladislav Sovrasov Co-authored-by: sungchul.kim Co-authored-by: GalyaZalesskaya Co-authored-by: Harim Kang Co-authored-by: Ashwin Vaidya Co-authored-by: Ashwin Vaidya Co-authored-by: sungmanc --- .github/workflows/code_scan.yml | 11 ++- 
.github/workflows/docs.yml | 6 +- .github/workflows/docs_stable.yml | 6 +- .github/workflows/perf-accuracy.yml | 30 ++++++- .github/workflows/perf-speed.yml | 30 ++++++- .github/workflows/pre_merge.yml | 6 +- .github/workflows/publish.yml | 3 +- .github/workflows/publish_internal.yml | 6 +- .github/workflows/run_tests_in_tox.yml | 3 +- .github/workflows/run_tests_in_tox_custom.yml | 3 +- .github/workflows/weekly.yml | 56 +++++-------- requirements/gh-actions.txt | 45 +++++++++++ .../heads/custom_vision_transformer_head.py | 11 --- tests/perf/test_classification.py | 4 +- tests/run_code_checks.sh | 22 ----- tests/run_model_templates_tests.py | 81 ------------------- tests/run_model_templates_tests.sh | 10 --- 17 files changed, 157 insertions(+), 176 deletions(-) create mode 100644 requirements/gh-actions.txt delete mode 100755 tests/run_code_checks.sh delete mode 100644 tests/run_model_templates_tests.py delete mode 100755 tests/run_model_templates_tests.sh diff --git a/.github/workflows/code_scan.yml b/.github/workflows/code_scan.yml index d644c7d5751..7fa3c3b0923 100644 --- a/.github/workflows/code_scan.yml +++ b/.github/workflows/code_scan.yml @@ -20,7 +20,10 @@ jobs: with: python-version: "3.10" - name: Install dependencies - run: python -m pip install tox==4.21.1 + run: | + pip install --require-hashes --no-deps -r requirements/gh-actions.txt + pip-compile --generate-hashes -o /tmp/otx-dev-requirements.txt requirements/dev.txt + pip install --require-hashes --no-deps -r /tmp/otx-dev-requirements.txt - name: Trivy Scanning env: TRIVY_DOWNLOAD_URL: ${{ vars.TRIVY_DOWNLOAD_URL }} @@ -43,7 +46,11 @@ jobs: with: python-version: "3.10" - name: Install dependencies - run: python -m pip install tox==4.21.1 + run: | + pip install --require-hashes --no-deps -r requirements/gh-actions.txt + pip-compile --generate-hashes -o /tmp/otx-dev-requirements.txt requirements/dev.txt + pip install --require-hashes --no-deps -r /tmp/otx-dev-requirements.txt + rm /tmp/otx-dev-requirements.txt - name: Bandit Scanning run: tox -e bandit-scan - name: Upload Bandit artifact diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml index d94d0b738ae..afd8064ae23 100644 --- a/.github/workflows/docs.yml +++ b/.github/workflows/docs.yml @@ -21,7 +21,11 @@ jobs: with: python-version: "3.10" - name: Install dependencies - run: python -m pip install -r requirements/dev.txt + run: | + pip install --require-hashes --no-deps -r requirements/gh-actions.txt + pip-compile --generate-hashes -o /tmp/otx-dev-requirements.txt requirements/dev.txt + pip install --require-hashes --no-deps -r /tmp/otx-dev-requirements.txt + rm /tmp/otx-dev-requirements.txt - name: Build-Docs run: tox -e build-doc - name: Create gh-pages branch diff --git a/.github/workflows/docs_stable.yml b/.github/workflows/docs_stable.yml index 1a6c5e58733..cfb98be200e 100644 --- a/.github/workflows/docs_stable.yml +++ b/.github/workflows/docs_stable.yml @@ -22,7 +22,11 @@ jobs: with: python-version: "3.10" - name: Install dependencies - run: python -m pip install -r requirements/dev.txt + run: | + pip install --require-hashes --no-deps -r requirements/gh-actions.txt + pip-compile --generate-hashes -o /tmp/otx-dev-requirements.txt requirements/dev.txt + pip install --require-hashes --no-deps -r /tmp/otx-dev-requirements.txt + rm /tmp/otx-dev-requirements.txt - name: Build-Docs run: tox -e build-doc - name: Create gh-pages branch diff --git a/.github/workflows/perf-accuracy.yml b/.github/workflows/perf-accuracy.yml index 1318403c3be..ef367a6f9d1 100644 
--- a/.github/workflows/perf-accuracy.yml +++ b/.github/workflows/perf-accuracy.yml @@ -33,6 +33,34 @@ on: - export - optimize default: optimize + artifact-prefix: + type: string + default: perf-accuracy-benchmark + workflow_call: + inputs: + model-type: + type: string + description: Model type to run benchmark [default, all] + default: default + data-size: + type: string + description: Dataset size to run benchmark [small, medium, large, all] + default: all + num-repeat: + type: number + description: Overrides default per-data-size number of repeat setting + default: 0 + num-epoch: + type: number + description: Overrides default per-model number of epoch setting + default: 0 + eval-upto: + type: string + description: The last operation to evaluate. 'optimize' means all. [train, export, optimize] + default: optimize + artifact-prefix: + type: string + default: perf-accuracy-benchmark # Declare default permissions as read only. permissions: read-all @@ -73,4 +101,4 @@ jobs: task: ${{ matrix.task }} timeout-minutes: 8640 upload-artifact: true - artifact-prefix: perf-accuracy-benchmark + artifact-prefix: ${{ inputs.artifact-prefix }} diff --git a/.github/workflows/perf-speed.yml b/.github/workflows/perf-speed.yml index 3e33a782c2b..26995b0077c 100644 --- a/.github/workflows/perf-speed.yml +++ b/.github/workflows/perf-speed.yml @@ -33,6 +33,34 @@ on: - export - optimize default: optimize + artifact-prefix: + type: string + default: perf-speed-benchmark + workflow_call: + inputs: + model-type: + type: string + description: Model type to run benchmark [default, all] + default: default + data-size: + type: string + description: Dataset size to run benchmark [small, medium, large, all] + default: medium + num-repeat: + type: number + description: Overrides default per-data-size number of repeat setting + default: 1 + num-epoch: + type: number + description: Overrides default per-model number of epoch setting + default: 3 + eval-upto: + type: string + description: The last operation to evaluate. 'optimize' means all [train, export, optimize] + default: optimize + artifact-prefix: + type: string + default: perf-speed-benchmark # Declare default permissions as read only. 
permissions: read-all @@ -59,4 +87,4 @@ jobs: task: all timeout-minutes: 8640 upload-artifact: true - artifact-prefix: perf-speed-benchmark + artifact-prefix: ${{ inputs.artifact-prefix }} diff --git a/.github/workflows/pre_merge.yml b/.github/workflows/pre_merge.yml index fd2cbddbe12..bc3d01c662a 100644 --- a/.github/workflows/pre_merge.yml +++ b/.github/workflows/pre_merge.yml @@ -31,9 +31,10 @@ jobs: python-version: "3.10" - name: Install dependencies run: | - pip install pip-tools==7.3.0 + pip install --require-hashes --no-deps -r requirements/gh-actions.txt pip-compile --generate-hashes -o /tmp/otx-dev-requirements.txt requirements/dev.txt pip install --require-hashes --no-deps -r /tmp/otx-dev-requirements.txt + rm /tmp/otx-dev-requirements.txt - name: Code quality checks run: tox -vv -e pre-commit-all-py310-pt1 Unit-Test: @@ -79,9 +80,10 @@ jobs: python-version: "3.8" - name: Install dependencies run: | - pip install pip-tools==7.3.0 + pip install --require-hashes --no-deps -r requirements/gh-actions.txt pip-compile --generate-hashes -o /tmp/otx-dev-requirements.txt requirements/dev.txt pip install --require-hashes --no-deps -r /tmp/otx-dev-requirements.txt + rm /tmp/otx-dev-requirements.txt - name: Run unit test run: tox -vv -e unittest-all-py38-pt1 - name: Upload coverage artifact diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml index 776b9507352..81f1719b431 100644 --- a/.github/workflows/publish.yml +++ b/.github/workflows/publish.yml @@ -33,9 +33,10 @@ jobs: python-version: "3.10" - name: Install pypa/build run: | - pip install pip-tools==7.3.0 + pip install --require-hashes --no-deps -r requirements/gh-actions.txt pip-compile --generate-hashes -o /tmp/otx-publish-requirements.txt requirements/publish.txt pip install --require-hashes --no-deps -r /tmp/otx-publish-requirements.txt + rm /tmp/otx-publish-requirements.txt - name: Build sdist run: python -m build --sdist - uses: actions/upload-artifact@a8a3f3ad30e3422c9c7b888a15615d19a852ae32 # v3.1.3 diff --git a/.github/workflows/publish_internal.yml b/.github/workflows/publish_internal.yml index 802ba7b10a5..d3574b73377 100644 --- a/.github/workflows/publish_internal.yml +++ b/.github/workflows/publish_internal.yml @@ -31,9 +31,10 @@ jobs: python-version: "3.10" - name: Install pypa/build run: | - pip install pip-tools==7.3.0 + pip install --require-hashes --no-deps -r requirements/gh-actions.txt pip-compile --generate-hashes -o /tmp/otx-publish-requirements.txt requirements/publish.txt pip install --require-hashes --no-deps -r /tmp/otx-publish-requirements.txt + rm /tmp/otx-publish-requirements.txt - name: Build sdist run: python -m build --sdist - uses: actions/upload-artifact@a8a3f3ad30e3422c9c7b888a15615d19a852ae32 # v3.1.3 @@ -56,9 +57,10 @@ jobs: python-version: "3.10" - name: Install dependencies run: | - pip install pip-tools==7.3.0 + pip install --require-hashes --no-deps -r requirements/gh-actions.txt pip-compile --generate-hashes -o /tmp/otx-publish-requirements.txt requirements/publish.txt pip install --require-hashes --no-deps -r /tmp/otx-publish-requirements.txt + rm /tmp/otx-publish-requirements.txt - name: Download artifacts uses: actions/download-artifact@9bc31d5ccc31df68ecc42ccf4149144866c47d8a # v3.0.2 with: diff --git a/.github/workflows/run_tests_in_tox.yml b/.github/workflows/run_tests_in_tox.yml index 1adc0c2c641..470040f854e 100644 --- a/.github/workflows/run_tests_in_tox.yml +++ b/.github/workflows/run_tests_in_tox.yml @@ -52,9 +52,10 @@ jobs: python-version: ${{ 
inputs.python-version }} - name: Install dependencies run: | - pip install pip-tools==7.3.0 + pip install --require-hashes --no-deps -r requirements/gh-actions.txt pip-compile --generate-hashes -o /tmp/otx-dev-requirements.txt requirements/dev.txt pip install --require-hashes --no-deps -r /tmp/otx-dev-requirements.txt + rm /tmp/otx-dev-requirements.txt - name: Run Tests env: MLFLOW_TRACKING_SERVER_URI: ${{ vars.MLFLOW_TRACKING_SERVER_URI }} diff --git a/.github/workflows/run_tests_in_tox_custom.yml b/.github/workflows/run_tests_in_tox_custom.yml index 8bb28ade61a..3fb84957b6e 100644 --- a/.github/workflows/run_tests_in_tox_custom.yml +++ b/.github/workflows/run_tests_in_tox_custom.yml @@ -58,9 +58,10 @@ jobs: python-version: ${{ inputs.python-version }} - name: Install dependencies run: | - pip install pip-tools==7.3.0 + pip install --require-hashes --no-deps -r requirements/gh-actions.txt pip-compile --generate-hashes -o /tmp/otx-dev-requirements.txt requirements/dev.txt pip install --require-hashes --no-deps -r /tmp/otx-dev-requirements.txt + rm /tmp/otx-dev-requirements.txt - name: Run Tests env: MLFLOW_TRACKING_SERVER_URI: ${{ vars.MLFLOW_TRACKING_SERVER_URI }} diff --git a/.github/workflows/weekly.yml b/.github/workflows/weekly.yml index 3badd5ab79a..ceb401b21f6 100644 --- a/.github/workflows/weekly.yml +++ b/.github/workflows/weekly.yml @@ -10,41 +10,23 @@ on: permissions: read-all jobs: - Regression-Tests: - strategy: - fail-fast: false - matrix: - include: - - toxenv_task: "iseg" - test_dir: "tests/regression/instance_segmentation/test_instance_segmentation.py" - task: "instance_segmentation" - - toxenv_task: "iseg_t" - test_dir: "tests/regression/instance_segmentation/test_tiling_instance_segmentation.py" - task: "instance_segmentation" - - toxenv_task: "seg" - test_dir: "tests/regression/semantic_segmentation" - task: "segmentation" - - toxenv_task: "det" - test_dir: "tests/regression/detection" - task: "detection" - - toxenv_task: "ano" - test_dir: "tests/regression/anomaly" - task: "anomaly" - - toxenv_task: "act" - test_dir: "tests/regression/action" - task: "action" - - toxenv_task: "cls" - test_dir: "tests/regression/classification" - task: "classification" - name: Regression-Test-py310-${{ matrix.toxenv_task }} - uses: ./.github/workflows/run_tests_in_tox.yml + Performance-Speed-Tests: + name: Performance-Speed-py310 + uses: ./.github/workflows/perf-speed.yml with: - python-version: "3.10" - toxenv-pyver: "py310" - toxenv-task: ${{ matrix.toxenv_task }} - tests-dir: ${{ matrix.test_dir }} - runs-on: "['self-hosted', 'Linux', 'X64', 'dmount']" - task: ${{ matrix.task }} - timeout-minutes: 8640 - upload-artifact: true - artifact-prefix: "weekly-test-results" + model-type: default + data-size: medium + num-repeat: 1 + num-epoch: 3 + eval-upto: optimize + artifact-prefix: weekly-perf-speed-benchmark + Performance-Accuracy-Tests: + name: Performance-Accuracy-py310 + uses: ./.github/workflows/perf-accuracy.yml + with: + model-type: default + data-size: all + num-repeat: 0 + num-epoch: 0 + eval-upto: optimize + artifact-prefix: weekly-perf-accuracy-benchmark diff --git a/requirements/gh-actions.txt b/requirements/gh-actions.txt new file mode 100644 index 00000000000..33029eb1409 --- /dev/null +++ b/requirements/gh-actions.txt @@ -0,0 +1,45 @@ +# +# This file is autogenerated by pip-compile with Python 3.10 +# by the following command: +# +# pip-compile --generate-hashes --output-file=requirements.txt requirements/gh-actions.txt +# +build==1.0.3 \ + 
--hash=sha256:538aab1b64f9828977f84bc63ae570b060a8ed1be419e7870b8b4fc5e6ea553b \ + --hash=sha256:589bf99a67df7c9cf07ec0ac0e5e2ea5d4b37ac63301c4986d1acb126aa83f8f + # via pip-tools +click==8.1.7 \ + --hash=sha256:ae74fb96c20a0277a1d615f1e4d73c8414f5a98db8b799a7931d1582f3390c28 \ + --hash=sha256:ca9853ad459e787e2192211578cc907e7594e294c7ccc834310722b41b9ca6de + # via pip-tools +packaging==23.2 \ + --hash=sha256:048fb0e9405036518eaaf48a55953c750c11e1a1b68e0dd1a9d62ed0c092cfc5 \ + --hash=sha256:8c491190033a9af7e1d931d0b5dacc2ef47509b34dd0de67ed209b5203fc88c7 + # via build +pip-tools==7.4.0 \ + --hash=sha256:a92a6ddfa86ff389fe6ace381d463bc436e2c705bd71d52117c25af5ce867bb7 \ + --hash=sha256:b67432fd0759ed834c5367f9e0ce8c95441acecfec9c8e24b41aca166757adf0 + # via -r requirements/gh-actions.txt +pyproject-hooks==1.0.0 \ + --hash=sha256:283c11acd6b928d2f6a7c73fa0d01cb2bdc5f07c57a2eeb6e83d5e56b97976f8 \ + --hash=sha256:f271b298b97f5955d53fb12b72c1fb1948c22c1a6b70b315c54cedaca0264ef5 + # via + # build + # pip-tools +tomli==2.0.1 \ + --hash=sha256:939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc \ + --hash=sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f + # via + # build + # pip-tools + # pyproject-hooks +wheel==0.42.0 \ + --hash=sha256:177f9c9b0d45c47873b619f5b650346d632cdc35fb5e4d25058e09c9e581433d \ + --hash=sha256:c45be39f7882c9d34243236f2d63cbd58039e360f85d0913425fbd7ceea617a8 + # via pip-tools + +# WARNING: The following packages were not pinned, but pip requires them to be +# pinned when the requirements file includes hashes and the requirement is not +# satisfied by a package already installed. Consider using the --allow-unsafe flag. +# pip +# setuptools \ No newline at end of file diff --git a/src/otx/algorithms/classification/adapters/mmcls/models/heads/custom_vision_transformer_head.py b/src/otx/algorithms/classification/adapters/mmcls/models/heads/custom_vision_transformer_head.py index b58d0803589..38a2d704c2c 100644 --- a/src/otx/algorithms/classification/adapters/mmcls/models/heads/custom_vision_transformer_head.py +++ b/src/otx/algorithms/classification/adapters/mmcls/models/heads/custom_vision_transformer_head.py @@ -6,8 +6,6 @@ from mmcls.models.builder import HEADS from mmcls.models.heads import VisionTransformerClsHead -from otx.algorithms.common.utils import cast_bf16_to_fp32 - @HEADS.register_module() class CustomVisionTransformerClsHead(VisionTransformerClsHead): @@ -34,15 +32,6 @@ def loss(self, cls_score, gt_label, feature=None): losses["loss"] = loss return losses - def post_process(self, pred): - """Post processing.""" - pred = cast_bf16_to_fp32(pred) - return super().post_process(pred) - - def forward(self, x): - """Forward fuction of CustomVisionTransformerClsHead class.""" - return self.simple_test(x) - def forward_train(self, x, gt_label, **kwargs): """Forward_train fuction of CustomVisionTransformerClsHead class.""" x = self.pre_logits(x) diff --git a/tests/perf/test_classification.py b/tests/perf/test_classification.py index 820d644ae40..9397dc5413e 100644 --- a/tests/perf/test_classification.py +++ b/tests/perf/test_classification.py @@ -52,7 +52,7 @@ class TestPerfSingleLabelClassification: @pytest.mark.parametrize("fxt_model_id", MODEL_TEMPLATES, ids=MODEL_IDS, indirect=True) @pytest.mark.parametrize("fxt_benchmark", BENCHMARK_CONFIGS.items(), ids=BENCHMARK_CONFIGS.keys(), indirect=True) - def test_accuracy(self, fxt_model_id: str, fxt_benchmark: OTXBenchmark): + def test_accuracy(self, fxt_model_id: str, fxt_benchmark: 
OTXBenchmark, fxt_check_benchmark_result: Callable): """Benchmark accuracy metrics.""" result = fxt_benchmark.run( model_id=fxt_model_id, @@ -301,7 +301,7 @@ def test_accuracy(self, fxt_model_id: str, fxt_benchmark: OTXBenchmark, fxt_chec @pytest.mark.parametrize("fxt_model_id", MODEL_TEMPLATES, ids=MODEL_IDS, indirect=True) @pytest.mark.parametrize("fxt_benchmark", BENCHMARK_CONFIGS.items(), ids=BENCHMARK_CONFIGS.keys(), indirect=True) - def test_speed(self, fxt_model_id: str, fxt_benchmark: OTXBenchmark, fxt_check_benchmark_results: Callable): + def test_speed(self, fxt_model_id: str, fxt_benchmark: OTXBenchmark, fxt_check_benchmark_result: Callable): """Benchmark train time per iter / infer time per image.""" fxt_benchmark.track_resources = True result = fxt_benchmark.run( diff --git a/tests/run_code_checks.sh b/tests/run_code_checks.sh deleted file mode 100755 index 395d3660bd9..00000000000 --- a/tests/run_code_checks.sh +++ /dev/null @@ -1,22 +0,0 @@ -#!/bin/bash - -WORK_DIR=$(mktemp -d) -python3 -m venv "$WORK_DIR" -# shellcheck source=/dev/null -source "$WORK_DIR"/bin/activate -pip install pip --upgrade -pip install wheel -pip install ote_sdk/ -pip install ote_cli/ -pip install pre-commit -echo "" -echo "" -echo "" -echo " ##############################################" -echo " ######## ########" -echo " ######## ./tests/run_code_checks.sh ########" -echo " ######## ########" -echo " ##############################################" -echo "" -pre-commit run --all-files - diff --git a/tests/run_model_templates_tests.py b/tests/run_model_templates_tests.py deleted file mode 100644 index 4dfd18ce7ce..00000000000 --- a/tests/run_model_templates_tests.py +++ /dev/null @@ -1,81 +0,0 @@ -""" Runs tests selectively depending on changed files. """ - -import os -import sys -from subprocess import run - -from tests.test_suite.run_test_command import collect_env_vars - -ALGO_ROOT_DIR = "external" -ALGO_DIRS = [ - os.path.join(ALGO_ROOT_DIR, d) for d in os.listdir(ALGO_ROOT_DIR) if os.path.isdir(os.path.join(ALGO_ROOT_DIR, d)) -] -IMPORTANT_DIRS = [ - "ote_cli/", - "ote_sdk/", - "tests/", -] - -wd = sys.argv[1] - - -def what_to_test(): - """ - Returns a dict containing information whether it is needed - to run tests for particular algorithm. - """ - - print(f"{sys.argv=}") - run_algo_tests = {d: True for d in ALGO_DIRS} - if len(sys.argv) > 2: - run_algo_tests = {d: False for d in ALGO_DIRS} - changed_files = sys.argv[2:] - print(f"{changed_files=}") - - for changed_file in changed_files: - if any(changed_file.startswith(d) for d in IMPORTANT_DIRS): - run_algo_tests = {d: True for d in ALGO_DIRS} - break - - for d in ALGO_DIRS: - if changed_file.startswith(d): - run_algo_tests[d] = True - - for k, v in run_algo_tests.items(): - print("run", k, v) - - return run_algo_tests - - -def test(run_algo_tests): - """ - Runs tests for algorithms and other stuff (misc). 
- """ - - passed = {} - success = True - command = ["pytest", os.path.join("tests", "ote_cli", "misc"), "-v"] - try: - res = run(command, env=collect_env_vars(wd), check=True).returncode == 0 - except: # noqa: E722 - res = False - passed["misc"] = res - success *= res - for algo_dir in ALGO_DIRS: - if run_algo_tests[algo_dir]: - command = ["pytest", os.path.join(algo_dir, "tests", "ote_cli"), "-v", "-rxXs", "--durations=10"] - try: - res = run(command, env=collect_env_vars(wd), check=True).returncode == 0 - except: # noqa: E722 - res = False - passed[algo_dir] = res - success *= res - - for k, v in passed.items(): - res = "PASSED" if v else "FAILED" - print(f"Tests for {k} {res}") - - sys.exit(1 - success) - - -test(what_to_test()) diff --git a/tests/run_model_templates_tests.sh b/tests/run_model_templates_tests.sh deleted file mode 100755 index 1569c6305c9..00000000000 --- a/tests/run_model_templates_tests.sh +++ /dev/null @@ -1,10 +0,0 @@ -#!/usr/bin/env bash - -python3 -m venv venv || exit 1 -# shellcheck source=/dev/null -. venv/bin/activate || exit 1 -pip install --upgrade pip || exit 1 -pip install -e ote_cli || exit 1 -pip install -e ote_sdk || exit 1 - -python tests/run_model_templates_tests.py "$(pwd)" "$@" || exit 1 From f549b80cd46e968ebabb0d2459527f2b7bfdad65 Mon Sep 17 00:00:00 2001 From: Prokofiev Kirill Date: Wed, 21 Feb 2024 10:53:39 +0100 Subject: [PATCH 19/39] Remove XPUATSSAssigner (#2940) * remove XPUASSIGNER * remove XPUAssigner from semisl --- .../mmdet/models/assigners/__init__.py | 3 +- .../models/assigners/xpu_atss_assigner.py | 212 ------------------ .../detection/mobilenetv2_atss/model.py | 2 +- .../mobilenetv2_atss/semisl/model.py | 2 +- .../detection/resnext101_atss/model.py | 2 +- .../detection/resnext101_atss/semisl/model.py | 2 +- 6 files changed, 5 insertions(+), 218 deletions(-) delete mode 100644 src/otx/algorithms/detection/adapters/mmdet/models/assigners/xpu_atss_assigner.py diff --git a/src/otx/algorithms/detection/adapters/mmdet/models/assigners/__init__.py b/src/otx/algorithms/detection/adapters/mmdet/models/assigners/__init__.py index 8384d1c61ab..71418724251 100644 --- a/src/otx/algorithms/detection/adapters/mmdet/models/assigners/__init__.py +++ b/src/otx/algorithms/detection/adapters/mmdet/models/assigners/__init__.py @@ -4,6 +4,5 @@ # from .custom_max_iou_assigner import CustomMaxIoUAssigner -from .xpu_atss_assigner import XPUATSSAssigner -__all__ = ["CustomMaxIoUAssigner", "XPUATSSAssigner"] +__all__ = ["CustomMaxIoUAssigner"] diff --git a/src/otx/algorithms/detection/adapters/mmdet/models/assigners/xpu_atss_assigner.py b/src/otx/algorithms/detection/adapters/mmdet/models/assigners/xpu_atss_assigner.py deleted file mode 100644 index 4f699828137..00000000000 --- a/src/otx/algorithms/detection/adapters/mmdet/models/assigners/xpu_atss_assigner.py +++ /dev/null @@ -1,212 +0,0 @@ -"""Custom assigner to workaround a bug in IPEX.""" -# Copyright (C) 2022 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -# Copyright (c) OpenMMLab. All rights reserved. - -import warnings - -import torch -from mmdet.core.bbox import AssignResult -from mmdet.core.bbox.assigners import ATSSAssigner -from mmdet.core.bbox.builder import BBOX_ASSIGNERS - - -@BBOX_ASSIGNERS.register_module() -class XPUATSSAssigner(ATSSAssigner): - """Assign a corresponding gt bbox or background to each bbox. - - Each proposals will be assigned with `0` or a positive integer - indicating the ground truth index. 
- - - 0: negative sample, no assigned gt - - positive integer: positive sample, index (1-based) of assigned gt - - If ``alpha`` is not None, it means that the dynamic cost - ATSSAssigner is adopted, which is currently only used in the DDOD. - - Args: - topk (float): number of bbox selected in each level - """ - - def assign( - self, - bboxes, - num_level_bboxes, - gt_bboxes, - gt_bboxes_ignore=None, - gt_labels=None, - cls_scores=None, - bbox_preds=None, - ): - """Assign gt to bboxes. - - The assignment is done in following steps - - 1. compute iou between all bbox (bbox of all pyramid levels) and gt - 2. compute center distance between all bbox and gt - 3. on each pyramid level, for each gt, select k bbox whose center - are closest to the gt center, so we total select k*l bbox as - candidates for each gt - 4. get corresponding iou for the these candidates, and compute the - mean and std, set mean + std as the iou threshold - 5. select these candidates whose iou are greater than or equal to - the threshold as positive - 6. limit the positive sample's center in gt - - If ``alpha`` is not None, and ``cls_scores`` and `bbox_preds` - are not None, the overlaps calculation in the first step - will also include dynamic cost, which is currently only used in - the DDOD. - - Args: - bboxes (Tensor): Bounding boxes to be assigned, shape(n, 4). - num_level_bboxes (List): num of bboxes in each level - gt_bboxes (Tensor): Groundtruth boxes, shape (k, 4). - gt_bboxes_ignore (Tensor, optional): Ground truth bboxes that are - labelled as `ignored`, e.g., crowd boxes in COCO. Default None. - gt_labels (Tensor, optional): Label of gt_bboxes, shape (k, ). - cls_scores (list[Tensor]): Classification scores for all scale - levels, each is a 4D-tensor, the channels number is - num_base_priors * num_classes. Default None. - bbox_preds (list[Tensor]): Box energies / deltas for all scale - levels, each is a 4D-tensor, the channels number is - num_base_priors * 4. Default None. - - Returns: - :obj:`AssignResult`: The assign result. - """ - INF = 100000000 - bboxes = bboxes[:, :4] - num_gt, num_bboxes = gt_bboxes.size(0), bboxes.size(0) - - message = ( - "Invalid alpha parameter because cls_scores or " - "bbox_preds are None. If you want to use the " - "cost-based ATSSAssigner, please set cls_scores, " - "bbox_preds and self.alpha at the same time. 
" - ) - - if self.alpha is None: - # ATSSAssigner - overlaps = self.iou_calculator(bboxes, gt_bboxes) - if cls_scores is not None or bbox_preds is not None: - warnings.warn(message) - else: - # Dynamic cost ATSSAssigner in DDOD - assert cls_scores is not None and bbox_preds is not None, message - - # compute cls cost for bbox and GT - cls_cost = torch.sigmoid(cls_scores[:, gt_labels]) - - # compute iou between all bbox and gt - overlaps = self.iou_calculator(bbox_preds, gt_bboxes) - - # make sure that we are in element-wise multiplication - assert cls_cost.shape == overlaps.shape - - # overlaps is actually a cost matrix - overlaps = cls_cost ** (1 - self.alpha) * overlaps**self.alpha - - # assign 0 by default - assigned_gt_inds = overlaps.new_full((num_bboxes,), 0, dtype=torch.long) - - if num_gt == 0 or num_bboxes == 0: - # No ground truth or boxes, return empty assignment - max_overlaps = overlaps.new_zeros((num_bboxes,)) - if num_gt == 0: - # No truth, assign everything to background - assigned_gt_inds[:] = 0 - if gt_labels is None: - assigned_labels = None - else: - assigned_labels = overlaps.new_full((num_bboxes,), -1, dtype=torch.long) - return AssignResult(num_gt, assigned_gt_inds, max_overlaps, labels=assigned_labels) - - # compute center distance between all bbox and gt - gt_cx = (gt_bboxes[:, 0] + gt_bboxes[:, 2]) / 2.0 - gt_cy = (gt_bboxes[:, 1] + gt_bboxes[:, 3]) / 2.0 - gt_points = torch.stack((gt_cx, gt_cy), dim=1) - - bboxes_cx = (bboxes[:, 0] + bboxes[:, 2]) / 2.0 - bboxes_cy = (bboxes[:, 1] + bboxes[:, 3]) / 2.0 - bboxes_points = torch.stack((bboxes_cx, bboxes_cy), dim=1) - - distances = (bboxes_points[:, None, :] - gt_points[None, :, :]).pow(2).sum(-1).sqrt() - - if ( - self.ignore_iof_thr > 0 - and gt_bboxes_ignore is not None - and gt_bboxes_ignore.numel() > 0 - and bboxes.numel() > 0 - ): - ignore_overlaps = self.iou_calculator(bboxes, gt_bboxes_ignore, mode="iof") - ignore_max_overlaps, _ = ignore_overlaps.max(dim=1) - ignore_idxs = ignore_max_overlaps > self.ignore_iof_thr - distances[ignore_idxs, :] = INF - assigned_gt_inds[ignore_idxs] = -1 - - # Selecting candidates based on the center distance - candidate_idxs = [] - start_idx = 0 - for level, bboxes_per_level in enumerate(num_level_bboxes): - # on each pyramid level, for each gt, - # select k bbox whose center are closest to the gt center - end_idx = start_idx + bboxes_per_level - distances_per_level = distances[start_idx:end_idx, :] - selectable_k = min(self.topk, bboxes_per_level) - - dim_1 = distances_per_level.shape[1] - if dim_1 == 1: - distances_per_level = distances_per_level.reshape(-1) - _, topk_idxs_per_level = distances_per_level.topk(selectable_k, dim=0, largest=False) - topk_idxs_per_level = topk_idxs_per_level.reshape(selectable_k, dim_1) - - candidate_idxs.append(topk_idxs_per_level + start_idx) - start_idx = end_idx - candidate_idxs = torch.cat(candidate_idxs, dim=0) - - # get corresponding iou for the these candidates, and compute the - # mean and std, set mean + std as the iou threshold - candidate_overlaps = overlaps[candidate_idxs, torch.arange(num_gt)] - overlaps_mean_per_gt = candidate_overlaps.mean(0) - overlaps_std_per_gt = candidate_overlaps.std(0) - overlaps_thr_per_gt = overlaps_mean_per_gt + overlaps_std_per_gt - - is_pos = candidate_overlaps >= overlaps_thr_per_gt[None, :] - - # limit the positive sample's center in gt - for gt_idx in range(num_gt): - candidate_idxs[:, gt_idx] += gt_idx * num_bboxes - ep_bboxes_cx = bboxes_cx.view(1, -1).expand(num_gt, 
num_bboxes).contiguous().view(-1) - ep_bboxes_cy = bboxes_cy.view(1, -1).expand(num_gt, num_bboxes).contiguous().view(-1) - candidate_idxs = candidate_idxs.view(-1) - - # calculate the left, top, right, bottom distance between positive - # bbox center and gt side - l_ = ep_bboxes_cx[candidate_idxs].view(-1, num_gt) - gt_bboxes[:, 0] - t_ = ep_bboxes_cy[candidate_idxs].view(-1, num_gt) - gt_bboxes[:, 1] - r_ = gt_bboxes[:, 2] - ep_bboxes_cx[candidate_idxs].view(-1, num_gt) - b_ = gt_bboxes[:, 3] - ep_bboxes_cy[candidate_idxs].view(-1, num_gt) - is_in_gts = torch.stack([l_, t_, r_, b_], dim=1).min(dim=1)[0] > 0.01 - - is_pos = is_pos & is_in_gts - - # if an anchor box is assigned to multiple gts, - # the one with the highest IoU will be selected. - overlaps_inf = torch.full_like(overlaps, -INF).t().contiguous().view(-1) - index = candidate_idxs.view(-1)[is_pos.view(-1)] - overlaps_inf[index] = overlaps.t().contiguous().view(-1)[index] - overlaps_inf = overlaps_inf.view(num_gt, -1).t() - - max_overlaps, argmax_overlaps = overlaps_inf.max(dim=1) - assigned_gt_inds[max_overlaps != -INF] = argmax_overlaps[max_overlaps != -INF] + 1 - - if gt_labels is not None: - assigned_labels = assigned_gt_inds.new_full((num_bboxes,), -1) - pos_inds = torch.nonzero(assigned_gt_inds > 0, as_tuple=False).squeeze() - if pos_inds.numel() > 0: - assigned_labels[pos_inds] = gt_labels[assigned_gt_inds[pos_inds] - 1] - else: - assigned_labels = None - return AssignResult(num_gt, assigned_gt_inds, max_overlaps, labels=assigned_labels) diff --git a/src/otx/algorithms/detection/configs/detection/mobilenetv2_atss/model.py b/src/otx/algorithms/detection/configs/detection/mobilenetv2_atss/model.py index fccdd79317b..1dfc05f19f5 100644 --- a/src/otx/algorithms/detection/configs/detection/mobilenetv2_atss/model.py +++ b/src/otx/algorithms/detection/configs/detection/mobilenetv2_atss/model.py @@ -63,7 +63,7 @@ ), ), train_cfg=dict( - assigner=dict(type="XPUATSSAssigner", topk=9), + assigner=dict(type="ATSSAssigner", topk=9), allowed_border=-1, pos_weight=-1, debug=False, diff --git a/src/otx/algorithms/detection/configs/detection/mobilenetv2_atss/semisl/model.py b/src/otx/algorithms/detection/configs/detection/mobilenetv2_atss/semisl/model.py index f8c4d834a82..c38fbcdd179 100644 --- a/src/otx/algorithms/detection/configs/detection/mobilenetv2_atss/semisl/model.py +++ b/src/otx/algorithms/detection/configs/detection/mobilenetv2_atss/semisl/model.py @@ -62,7 +62,7 @@ use_qfl=False, ), train_cfg=dict( - assigner=dict(type="XPUATSSAssigner", topk=9), + assigner=dict(type="ATSSAssigner", topk=9), allowed_border=-1, pos_weight=-1, debug=False, diff --git a/src/otx/algorithms/detection/configs/detection/resnext101_atss/model.py b/src/otx/algorithms/detection/configs/detection/resnext101_atss/model.py index 53f592fb229..6d02dd868ab 100644 --- a/src/otx/algorithms/detection/configs/detection/resnext101_atss/model.py +++ b/src/otx/algorithms/detection/configs/detection/resnext101_atss/model.py @@ -63,7 +63,7 @@ ), ), train_cfg=dict( - assigner=dict(type="XPUATSSAssigner", topk=9), + assigner=dict(type="ATSSAssigner", topk=9), allowed_border=-1, pos_weight=-1, debug=False, diff --git a/src/otx/algorithms/detection/configs/detection/resnext101_atss/semisl/model.py b/src/otx/algorithms/detection/configs/detection/resnext101_atss/semisl/model.py index 54dde9b8423..cfa32cc0d9e 100644 --- a/src/otx/algorithms/detection/configs/detection/resnext101_atss/semisl/model.py +++ 
b/src/otx/algorithms/detection/configs/detection/resnext101_atss/semisl/model.py @@ -67,7 +67,7 @@ ), ), train_cfg=dict( - assigner=dict(type="XPUATSSAssigner", topk=9), + assigner=dict(type="ATSSAssigner", topk=9), allowed_border=-1, pos_weight=-1, debug=False, From 113e24f46137829237da8ad2e25f73814005ad54 Mon Sep 17 00:00:00 2001 From: Vladislav Sovrasov Date: Fri, 23 Feb 2024 06:47:16 +0900 Subject: [PATCH 20/39] Fix linters --- src/otx/algorithms/action/adapters/openvino/task.py | 4 ++-- src/otx/algorithms/anomaly/tasks/openvino.py | 4 ++-- src/otx/algorithms/anomaly/tasks/train.py | 4 +++- .../classification/adapters/mmcls/nncf/registers.py | 3 ++- src/otx/algorithms/classification/adapters/openvino/task.py | 4 ++-- src/otx/algorithms/detection/adapters/mmdet/nncf/builder.py | 2 +- src/otx/algorithms/detection/adapters/openvino/task.py | 4 ++-- src/otx/algorithms/segmentation/adapters/openvino/task.py | 4 ++-- src/otx/algorithms/visual_prompting/tasks/openvino.py | 4 ++-- 9 files changed, 18 insertions(+), 15 deletions(-) diff --git a/src/otx/algorithms/action/adapters/openvino/task.py b/src/otx/algorithms/action/adapters/openvino/task.py index 6adf7e49a73..b08e4138093 100644 --- a/src/otx/algorithms/action/adapters/openvino/task.py +++ b/src/otx/algorithms/action/adapters/openvino/task.py @@ -22,14 +22,14 @@ from typing import Any, Dict, List, Optional, Tuple, Union from zipfile import ZipFile -import nncf import numpy as np import openvino.runtime as ov from mmcv.utils import ProgressBar -from nncf.common.quantization.structs import QuantizationPreset from openvino.model_api.adapters import OpenvinoAdapter, create_core from openvino.model_api.models import Model +import nncf +from nncf.common.quantization.structs import QuantizationPreset from otx.algorithms.action.adapters.openvino import ( ActionOVClsDataLoader, get_ovdataloader, diff --git a/src/otx/algorithms/anomaly/tasks/openvino.py b/src/otx/algorithms/anomaly/tasks/openvino.py index 922ff2b3fd9..c06128d7b96 100644 --- a/src/otx/algorithms/anomaly/tasks/openvino.py +++ b/src/otx/algorithms/anomaly/tasks/openvino.py @@ -22,15 +22,15 @@ from typing import Any, Dict, List, Optional, Tuple, Union from zipfile import ZipFile -import nncf import numpy as np import openvino.runtime as ov from addict import Dict as ADDict from anomalib.data.utils.transform import get_transforms -from nncf.common.quantization.structs import QuantizationPreset from omegaconf import OmegaConf from openvino.model_api.models import AnomalyDetection, AnomalyResult +import nncf +from nncf.common.quantization.structs import QuantizationPreset from otx.algorithms.anomaly.adapters.anomalib.config import get_anomalib_config from otx.algorithms.anomaly.configs.base.configuration import BaseAnomalyConfig from otx.algorithms.common.utils import embed_ir_model_data diff --git a/src/otx/algorithms/anomaly/tasks/train.py b/src/otx/algorithms/anomaly/tasks/train.py index 10e670dac92..1089f7203b7 100644 --- a/src/otx/algorithms/anomaly/tasks/train.py +++ b/src/otx/algorithms/anomaly/tasks/train.py @@ -103,7 +103,9 @@ def train( if config.trainer.precision == 16: plugins.append(MixedPrecisionXPUPlugin()) - self.trainer = Trainer(**config.trainer, logger=CSVLogger(self.project_path, name=""), callbacks=callbacks, plugins=plugins) + self.trainer = Trainer( + **config.trainer, logger=CSVLogger(self.project_path, name=""), callbacks=callbacks, plugins=plugins + ) self.trainer.fit(model=self.model, datamodule=datamodule) self.save_model(output_model) diff --git 
a/src/otx/algorithms/classification/adapters/mmcls/nncf/registers.py b/src/otx/algorithms/classification/adapters/mmcls/nncf/registers.py index c708a3a1267..4779c268129 100644 --- a/src/otx/algorithms/classification/adapters/mmcls/nncf/registers.py +++ b/src/otx/algorithms/classification/adapters/mmcls/nncf/registers.py @@ -6,9 +6,10 @@ from otx.algorithms.common.adapters.nncf.utils import is_nncf_enabled if is_nncf_enabled(): - from nncf.torch import register_module from timm.models.layers.conv2d_same import Conv2dSame + from nncf.torch import register_module + # Register custom modules. # Users of nncf should manually check every custom # layer with weights which should be compressed and diff --git a/src/otx/algorithms/classification/adapters/openvino/task.py b/src/otx/algorithms/classification/adapters/openvino/task.py index 135f8b20d98..40cf5a8d412 100644 --- a/src/otx/algorithms/classification/adapters/openvino/task.py +++ b/src/otx/algorithms/classification/adapters/openvino/task.py @@ -23,15 +23,15 @@ from typing import Any, List, Optional, Tuple, Union from zipfile import ZipFile -import nncf import numpy as np import openvino.runtime as ov from addict import Dict as ADDict -from nncf.common.quantization.structs import QuantizationPreset from openvino.model_api.adapters import OpenvinoAdapter, create_core from openvino.model_api.models import Model from openvino.model_api.models.utils import ClassificationResult +import nncf +from nncf.common.quantization.structs import QuantizationPreset from otx.algorithms.classification.configs import ClassificationConfig from otx.algorithms.classification.utils import ( get_cls_deploy_config, diff --git a/src/otx/algorithms/detection/adapters/mmdet/nncf/builder.py b/src/otx/algorithms/detection/adapters/mmdet/nncf/builder.py index 9befb61cbea..2a8d2a6dc04 100644 --- a/src/otx/algorithms/detection/adapters/mmdet/nncf/builder.py +++ b/src/otx/algorithms/detection/adapters/mmdet/nncf/builder.py @@ -41,8 +41,8 @@ def build_nncf_detector( # pylint: disable=too-many-locals,too-many-statements from mmdet.datasets import build_dataloader as mmdet_build_dataloader from mmdet.datasets import build_dataset as mmdet_build_dataset from mmdet.datasets.pipelines import Compose - from nncf.torch.dynamic_graph.io_handling import nncf_model_input + from nncf.torch.dynamic_graph.io_handling import nncf_model_input from otx.algorithms.common.adapters.mmcv.nncf.utils import ( get_fake_input, model_eval, diff --git a/src/otx/algorithms/detection/adapters/openvino/task.py b/src/otx/algorithms/detection/adapters/openvino/task.py index 0d9e9b380bb..165e0919d29 100644 --- a/src/otx/algorithms/detection/adapters/openvino/task.py +++ b/src/otx/algorithms/detection/adapters/openvino/task.py @@ -14,15 +14,15 @@ from zipfile import ZipFile import attr -import nncf import numpy as np import openvino.runtime as ov from addict import Dict as ADDict -from nncf.common.quantization.structs import QuantizationPreset from openvino.model_api.adapters import OpenvinoAdapter, create_core from openvino.model_api.models import ImageModel, Model from openvino.model_api.tilers import DetectionTiler, InstanceSegmentationTiler +import nncf +from nncf.common.quantization.structs import QuantizationPreset from otx.algorithms.common.utils import OTXOpenVinoDataLoader from otx.algorithms.common.utils.ir import check_if_quantized from otx.algorithms.common.utils.utils import get_default_async_reqs_num diff --git a/src/otx/algorithms/segmentation/adapters/openvino/task.py 
b/src/otx/algorithms/segmentation/adapters/openvino/task.py index 4aca3b9a7fe..ae41579b66b 100644 --- a/src/otx/algorithms/segmentation/adapters/openvino/task.py +++ b/src/otx/algorithms/segmentation/adapters/openvino/task.py @@ -23,15 +23,15 @@ from zipfile import ZipFile import attr -import nncf import numpy as np import openvino.runtime as ov from addict import Dict as ADDict -from nncf.common.quantization.structs import QuantizationPreset from openvino.model_api.adapters import OpenvinoAdapter, create_core from openvino.model_api.models import Model from openvino.model_api.models.utils import ImageResultWithSoftPrediction +import nncf +from nncf.common.quantization.structs import QuantizationPreset from otx.algorithms.common.utils import OTXOpenVinoDataLoader, get_default_async_reqs_num, read_py_config from otx.algorithms.common.utils.ir import check_if_quantized from otx.algorithms.segmentation.adapters.openvino import model_wrappers diff --git a/src/otx/algorithms/visual_prompting/tasks/openvino.py b/src/otx/algorithms/visual_prompting/tasks/openvino.py index 0f29f40d17d..7ed7f8492cc 100644 --- a/src/otx/algorithms/visual_prompting/tasks/openvino.py +++ b/src/otx/algorithms/visual_prompting/tasks/openvino.py @@ -27,14 +27,14 @@ from zipfile import ZipFile import attr -import nncf import numpy as np import openvino.runtime as ov from addict import Dict as ADDict -from nncf.common.quantization.structs import QuantizationPreset from openvino.model_api.adapters import OpenvinoAdapter, create_core from openvino.model_api.models import Model +import nncf +from nncf.common.quantization.structs import QuantizationPreset from otx.algorithms.common.utils import get_default_async_reqs_num, read_py_config from otx.algorithms.common.utils.ir import check_if_quantized from otx.algorithms.visual_prompting.adapters.openvino import model_wrappers From df13078329e54d8889f39aa32be402b90320271b Mon Sep 17 00:00:00 2001 From: Vladislav Sovrasov Date: Tue, 27 Feb 2024 10:32:25 +0900 Subject: [PATCH 21/39] Fix linters --- src/otx/algorithms/visual_prompting/tasks/openvino.py | 1 - 1 file changed, 1 deletion(-) diff --git a/src/otx/algorithms/visual_prompting/tasks/openvino.py b/src/otx/algorithms/visual_prompting/tasks/openvino.py index a957e7833f3..f91e43ab075 100644 --- a/src/otx/algorithms/visual_prompting/tasks/openvino.py +++ b/src/otx/algorithms/visual_prompting/tasks/openvino.py @@ -30,7 +30,6 @@ import attr import cv2 -import nncf import numpy as np import openvino.runtime as ov from addict import Dict as ADDict From 3b547f281404e8184f361959450f44d7be4a9a68 Mon Sep 17 00:00:00 2001 From: "Shin, Eunwoo" Date: Wed, 28 Feb 2024 02:24:31 +0000 Subject: [PATCH 22/39] align with pre-commit --- src/otx/algorithms/action/adapters/openvino/task.py | 4 ++-- src/otx/algorithms/anomaly/tasks/openvino.py | 4 ++-- .../classification/adapters/mmcls/nncf/registers.py | 3 +-- src/otx/algorithms/classification/adapters/openvino/task.py | 4 ++-- src/otx/algorithms/detection/adapters/mmdet/nncf/builder.py | 2 +- src/otx/algorithms/detection/adapters/openvino/task.py | 4 ++-- src/otx/algorithms/segmentation/adapters/openvino/task.py | 4 ++-- src/otx/algorithms/visual_prompting/tasks/openvino.py | 4 ++-- 8 files changed, 14 insertions(+), 15 deletions(-) diff --git a/src/otx/algorithms/action/adapters/openvino/task.py b/src/otx/algorithms/action/adapters/openvino/task.py index b08e4138093..6adf7e49a73 100644 --- a/src/otx/algorithms/action/adapters/openvino/task.py +++ 
b/src/otx/algorithms/action/adapters/openvino/task.py @@ -22,14 +22,14 @@ from typing import Any, Dict, List, Optional, Tuple, Union from zipfile import ZipFile +import nncf import numpy as np import openvino.runtime as ov from mmcv.utils import ProgressBar +from nncf.common.quantization.structs import QuantizationPreset from openvino.model_api.adapters import OpenvinoAdapter, create_core from openvino.model_api.models import Model -import nncf -from nncf.common.quantization.structs import QuantizationPreset from otx.algorithms.action.adapters.openvino import ( ActionOVClsDataLoader, get_ovdataloader, diff --git a/src/otx/algorithms/anomaly/tasks/openvino.py b/src/otx/algorithms/anomaly/tasks/openvino.py index c06128d7b96..922ff2b3fd9 100644 --- a/src/otx/algorithms/anomaly/tasks/openvino.py +++ b/src/otx/algorithms/anomaly/tasks/openvino.py @@ -22,15 +22,15 @@ from typing import Any, Dict, List, Optional, Tuple, Union from zipfile import ZipFile +import nncf import numpy as np import openvino.runtime as ov from addict import Dict as ADDict from anomalib.data.utils.transform import get_transforms +from nncf.common.quantization.structs import QuantizationPreset from omegaconf import OmegaConf from openvino.model_api.models import AnomalyDetection, AnomalyResult -import nncf -from nncf.common.quantization.structs import QuantizationPreset from otx.algorithms.anomaly.adapters.anomalib.config import get_anomalib_config from otx.algorithms.anomaly.configs.base.configuration import BaseAnomalyConfig from otx.algorithms.common.utils import embed_ir_model_data diff --git a/src/otx/algorithms/classification/adapters/mmcls/nncf/registers.py b/src/otx/algorithms/classification/adapters/mmcls/nncf/registers.py index 4779c268129..c708a3a1267 100644 --- a/src/otx/algorithms/classification/adapters/mmcls/nncf/registers.py +++ b/src/otx/algorithms/classification/adapters/mmcls/nncf/registers.py @@ -6,9 +6,8 @@ from otx.algorithms.common.adapters.nncf.utils import is_nncf_enabled if is_nncf_enabled(): - from timm.models.layers.conv2d_same import Conv2dSame - from nncf.torch import register_module + from timm.models.layers.conv2d_same import Conv2dSame # Register custom modules. 
# Users of nncf should manually check every custom diff --git a/src/otx/algorithms/classification/adapters/openvino/task.py b/src/otx/algorithms/classification/adapters/openvino/task.py index 40cf5a8d412..135f8b20d98 100644 --- a/src/otx/algorithms/classification/adapters/openvino/task.py +++ b/src/otx/algorithms/classification/adapters/openvino/task.py @@ -23,15 +23,15 @@ from typing import Any, List, Optional, Tuple, Union from zipfile import ZipFile +import nncf import numpy as np import openvino.runtime as ov from addict import Dict as ADDict +from nncf.common.quantization.structs import QuantizationPreset from openvino.model_api.adapters import OpenvinoAdapter, create_core from openvino.model_api.models import Model from openvino.model_api.models.utils import ClassificationResult -import nncf -from nncf.common.quantization.structs import QuantizationPreset from otx.algorithms.classification.configs import ClassificationConfig from otx.algorithms.classification.utils import ( get_cls_deploy_config, diff --git a/src/otx/algorithms/detection/adapters/mmdet/nncf/builder.py b/src/otx/algorithms/detection/adapters/mmdet/nncf/builder.py index 2a8d2a6dc04..9befb61cbea 100644 --- a/src/otx/algorithms/detection/adapters/mmdet/nncf/builder.py +++ b/src/otx/algorithms/detection/adapters/mmdet/nncf/builder.py @@ -41,8 +41,8 @@ def build_nncf_detector( # pylint: disable=too-many-locals,too-many-statements from mmdet.datasets import build_dataloader as mmdet_build_dataloader from mmdet.datasets import build_dataset as mmdet_build_dataset from mmdet.datasets.pipelines import Compose - from nncf.torch.dynamic_graph.io_handling import nncf_model_input + from otx.algorithms.common.adapters.mmcv.nncf.utils import ( get_fake_input, model_eval, diff --git a/src/otx/algorithms/detection/adapters/openvino/task.py b/src/otx/algorithms/detection/adapters/openvino/task.py index 165e0919d29..0d9e9b380bb 100644 --- a/src/otx/algorithms/detection/adapters/openvino/task.py +++ b/src/otx/algorithms/detection/adapters/openvino/task.py @@ -14,15 +14,15 @@ from zipfile import ZipFile import attr +import nncf import numpy as np import openvino.runtime as ov from addict import Dict as ADDict +from nncf.common.quantization.structs import QuantizationPreset from openvino.model_api.adapters import OpenvinoAdapter, create_core from openvino.model_api.models import ImageModel, Model from openvino.model_api.tilers import DetectionTiler, InstanceSegmentationTiler -import nncf -from nncf.common.quantization.structs import QuantizationPreset from otx.algorithms.common.utils import OTXOpenVinoDataLoader from otx.algorithms.common.utils.ir import check_if_quantized from otx.algorithms.common.utils.utils import get_default_async_reqs_num diff --git a/src/otx/algorithms/segmentation/adapters/openvino/task.py b/src/otx/algorithms/segmentation/adapters/openvino/task.py index ae41579b66b..4aca3b9a7fe 100644 --- a/src/otx/algorithms/segmentation/adapters/openvino/task.py +++ b/src/otx/algorithms/segmentation/adapters/openvino/task.py @@ -23,15 +23,15 @@ from zipfile import ZipFile import attr +import nncf import numpy as np import openvino.runtime as ov from addict import Dict as ADDict +from nncf.common.quantization.structs import QuantizationPreset from openvino.model_api.adapters import OpenvinoAdapter, create_core from openvino.model_api.models import Model from openvino.model_api.models.utils import ImageResultWithSoftPrediction -import nncf -from nncf.common.quantization.structs import QuantizationPreset from 
otx.algorithms.common.utils import OTXOpenVinoDataLoader, get_default_async_reqs_num, read_py_config from otx.algorithms.common.utils.ir import check_if_quantized from otx.algorithms.segmentation.adapters.openvino import model_wrappers diff --git a/src/otx/algorithms/visual_prompting/tasks/openvino.py b/src/otx/algorithms/visual_prompting/tasks/openvino.py index f91e43ab075..02faaa4ee3e 100644 --- a/src/otx/algorithms/visual_prompting/tasks/openvino.py +++ b/src/otx/algorithms/visual_prompting/tasks/openvino.py @@ -30,14 +30,14 @@ import attr import cv2 +import nncf import numpy as np import openvino.runtime as ov from addict import Dict as ADDict +from nncf.common.quantization.structs import QuantizationPreset from openvino.model_api.adapters import OpenvinoAdapter, create_core from openvino.model_api.models import Model -import nncf -from nncf.common.quantization.structs import QuantizationPreset from otx.algorithms.common.utils import get_default_async_reqs_num, read_py_config from otx.algorithms.common.utils.ir import check_if_quantized from otx.algorithms.visual_prompting.adapters.openvino import model_wrappers From ed1262868f5ffa14723989e32c4eda14bfe5e2dc Mon Sep 17 00:00:00 2001 From: Vlad Sovrasov Date: Thu, 29 Feb 2024 17:51:58 +0000 Subject: [PATCH 23/39] Fix uts --- .../unit/algorithms/action/adapters/mmaction/test_task.py | 4 ++++ .../adapters/mmaction/utils/test_action_det_eval_utils.py | 5 +++++ .../action/adapters/openvino/test_action_dataloader.py | 5 +++++ .../classification/adapters/mmcls/test_configurer.py | 5 ++++- .../algorithms/classification/adapters/mmcls/test_task.py | 7 +++++++ .../common/adapters/mmcv/utils/test_automatic_bs.py | 3 +++ .../adapters/mmdet/datasets/test_detection_dataset.py | 5 +++++ .../unit/algorithms/detection/adapters/mmdet/test_task.py | 7 +++++++ .../segmentation/adapters/mmseg/test_mmseg_configurer.py | 5 ++++- .../models/encoders/test_sam_prompt_encoder.py | 3 +++ tests/unit/cli/utils/test_report.py | 6 +++++- 11 files changed, 52 insertions(+), 3 deletions(-) diff --git a/tests/unit/algorithms/action/adapters/mmaction/test_task.py b/tests/unit/algorithms/action/adapters/mmaction/test_task.py index 8415f3152a7..8245d27d7cc 100644 --- a/tests/unit/algorithms/action/adapters/mmaction/test_task.py +++ b/tests/unit/algorithms/action/adapters/mmaction/test_task.py @@ -9,6 +9,7 @@ from typing import Any, Dict import numpy as np +from otx.algorithms.common.utils.utils import is_xpu_available import pytest import torch from mmaction.models.backbones.x3d import X3D @@ -51,6 +52,9 @@ DEFAULT_ACTION_CLS_DIR = os.path.join("src/otx/algorithms/action/configs/classification", "x3d") DEFAULT_ACTION_DET_DIR = os.path.join("src/otx/algorithms/action/configs/detection", "x3d_fast_rcnn") +if is_xpu_available(): + pytest.skip("Action task is not supported on XPU", allow_module_level=True) + class MockModule(nn.Module): """Mock class for nn.Module.""" diff --git a/tests/unit/algorithms/action/adapters/mmaction/utils/test_action_det_eval_utils.py b/tests/unit/algorithms/action/adapters/mmaction/utils/test_action_det_eval_utils.py index 7ae2c9af37c..164a048d45d 100644 --- a/tests/unit/algorithms/action/adapters/mmaction/utils/test_action_det_eval_utils.py +++ b/tests/unit/algorithms/action/adapters/mmaction/utils/test_action_det_eval_utils.py @@ -11,12 +11,17 @@ from otx.algorithms.action.adapters.mmaction.data import OTXActionDetDataset from otx.algorithms.action.adapters.mmaction.utils import det_eval +from otx.algorithms.common.utils.utils import 
is_xpu_available from otx.api.entities.id import ID from otx.api.entities.label import Domain, LabelEntity +import pytest from tests.test_suite.e2e_test_system import e2e_pytest_unit FULL_BOX = np.array([[0, 0, 1, 1]]) +if is_xpu_available(): + pytest.skip("Action task is not supported on XPU", allow_module_level=True) + class MockDataInfoProxy(OTXActionDetDataset._DataInfoProxy): """Mock class for data proxy in OTXActionDetDataset.""" diff --git a/tests/unit/algorithms/action/adapters/openvino/test_action_dataloader.py b/tests/unit/algorithms/action/adapters/openvino/test_action_dataloader.py index bba74cba290..67645c0e4fc 100644 --- a/tests/unit/algorithms/action/adapters/openvino/test_action_dataloader.py +++ b/tests/unit/algorithms/action/adapters/openvino/test_action_dataloader.py @@ -6,6 +6,7 @@ from copy import deepcopy from typing import List, Optional +from otx.algorithms.common.utils.utils import is_xpu_available import pytest @@ -36,6 +37,10 @@ ) +if is_xpu_available(): + pytest.skip("Action task is not supported on XPU", allow_module_level=True) + + class MockDatasetEntity(DatasetEntity): """Mock class for DatasetEntity.""" diff --git a/tests/unit/algorithms/classification/adapters/mmcls/test_configurer.py b/tests/unit/algorithms/classification/adapters/mmcls/test_configurer.py index ab513913749..ab22b3249b5 100644 --- a/tests/unit/algorithms/classification/adapters/mmcls/test_configurer.py +++ b/tests/unit/algorithms/classification/adapters/mmcls/test_configurer.py @@ -1,5 +1,6 @@ import copy import os +from otx.algorithms.common.utils.utils import is_xpu_available import pytest import tempfile @@ -129,7 +130,7 @@ def test_configure_device(self, mocker): config = copy.deepcopy(self.model_cfg) self.configurer.configure_device(config) assert config.distributed is False - assert config.device == "cpu" + assert config.device in ["cpu", "xpu"] mocker.patch( "torch.distributed.is_initialized", @@ -183,6 +184,8 @@ def test_configure_input_size(self, mocker, input_size, training): @e2e_pytest_unit def test_configure_fp16(self): + if is_xpu_available(): + pytest.skip("FP16 is not supported on XPU") model_cfg = copy.deepcopy(self.model_cfg) model_cfg.fp16 = {} model_cfg.optimizer_config.type = "OptimizerHook" diff --git a/tests/unit/algorithms/classification/adapters/mmcls/test_task.py b/tests/unit/algorithms/classification/adapters/mmcls/test_task.py index 72e0b5a23ab..dda75f16d97 100644 --- a/tests/unit/algorithms/classification/adapters/mmcls/test_task.py +++ b/tests/unit/algorithms/classification/adapters/mmcls/test_task.py @@ -9,6 +9,7 @@ from typing import Any, Dict import numpy as np +from otx.algorithms.common.utils.utils import is_xpu_available import pytest import torch from torch import nn @@ -205,6 +206,9 @@ def _mock_train_model(*args, **kwargs): num_gpu = 5 mock_torch = mocker.patch.object(config_utils, "torch") mock_torch.cuda.device_count.return_value = num_gpu + if is_xpu_available(): + mock_devcnt = mocker.patch.object(config_utils, "get_adaptive_num_workers") + mock_devcnt.return_value = num_cpu // num_gpu _config = ModelConfiguration(ClassificationConfig("header"), self.mc_cls_label_schema) output_model = ModelEntity(self.mc_cls_dataset, _config) @@ -478,6 +482,9 @@ def _mock_train_model(*args, **kwargs): num_cpu = 20 mock_multiprocessing = mocker.patch.object(config_utils, "multiprocessing") mock_multiprocessing.cpu_count.return_value = num_cpu + if is_xpu_available(): + mock_devcnt = mocker.patch.object(config_utils, "get_adaptive_num_workers") + 
mock_devcnt.return_value = 1 num_gpu = 5 mock_torch = mocker.patch.object(config_utils, "torch") mock_torch.cuda.device_count.return_value = num_gpu diff --git a/tests/unit/algorithms/common/adapters/mmcv/utils/test_automatic_bs.py b/tests/unit/algorithms/common/adapters/mmcv/utils/test_automatic_bs.py index d590b74faf2..8fd3122d5bc 100644 --- a/tests/unit/algorithms/common/adapters/mmcv/utils/test_automatic_bs.py +++ b/tests/unit/algorithms/common/adapters/mmcv/utils/test_automatic_bs.py @@ -1,3 +1,4 @@ +from otx.algorithms.common.utils.utils import is_xpu_available import pytest from math import sqrt @@ -68,6 +69,8 @@ def mock_dataset(mocker): def test_adapt_batch_size( mocker, mock_adapt_algo_cls, common_cfg, mock_dataset, not_increase, is_action_task, is_iter_based_runner ): + if is_xpu_available(): + pytest.skip("Adaptive batch size is not supported on XPU") # prepare mock_train_func = mocker.MagicMock() new_bs = DEFAULT_BS // 2 if not_increase else DEFAULT_BS + 2 diff --git a/tests/unit/algorithms/detection/adapters/mmdet/datasets/test_detection_dataset.py b/tests/unit/algorithms/detection/adapters/mmdet/datasets/test_detection_dataset.py index 2ebd37e247d..24b12886406 100644 --- a/tests/unit/algorithms/detection/adapters/mmdet/datasets/test_detection_dataset.py +++ b/tests/unit/algorithms/detection/adapters/mmdet/datasets/test_detection_dataset.py @@ -4,6 +4,7 @@ # SPDX-License-Identifier: Apache-2.0 # import numpy as np +from otx.algorithms.common.utils.utils import is_xpu_available import pytest from otx.algorithms.detection.adapters.mmdet.datasets.dataset import OTXDetDataset, get_annotation_mmdet_format @@ -115,6 +116,8 @@ def test_evaluate(self, task_type, domain, metric, logger) -> None: if task_type == TaskType.DETECTION: results = [[np.random.rand(1, 5)]] elif task_type == TaskType.INSTANCE_SEGMENTATION: + if is_xpu_available(): + pytest.skip("Subprocess failure in XPU environment") results = [ ( [np.random.rand(1, 5)] * len(otx_dataset.get_labels()), @@ -128,6 +131,8 @@ def test_evaluate(self, task_type, domain, metric, logger) -> None: @e2e_pytest_unit def test_mask_evaluate(self) -> None: """Test evaluate method for instance segmentation""" + if is_xpu_available(): + pytest.skip("Subprocess failure in XPU environment") otx_dataset, labels = self.dataset[TaskType.INSTANCE_SEGMENTATION] dataset = OTXDetDataset(otx_dataset, labels, self.pipeline) dataset.pipeline = MockPipeline() diff --git a/tests/unit/algorithms/detection/adapters/mmdet/test_task.py b/tests/unit/algorithms/detection/adapters/mmdet/test_task.py index 8700612bcae..b4dffa9127a 100644 --- a/tests/unit/algorithms/detection/adapters/mmdet/test_task.py +++ b/tests/unit/algorithms/detection/adapters/mmdet/test_task.py @@ -9,6 +9,7 @@ from typing import Any, Dict import numpy as np +from otx.algorithms.common.utils.utils import is_xpu_available import pytest import torch from torch import nn @@ -242,6 +243,9 @@ def _mock_train_detector_iseg(*args, **kwargs): num_gpu = 5 mock_torch = mocker.patch.object(config_utils, "torch") mock_torch.cuda.device_count.return_value = num_gpu + if is_xpu_available(): + mock_devcnt = mocker.patch.object(config_utils, "get_adaptive_num_workers") + mock_devcnt.return_value = num_cpu // num_gpu _config = ModelConfiguration(DetectionConfig(), self.det_label_schema) output_model = ModelEntity(self.det_dataset, _config) @@ -506,6 +510,9 @@ def _mock_train_detector_iseg(*args, **kwargs): num_gpu = 5 mock_torch = mocker.patch.object(config_utils, "torch") 
mock_torch.cuda.device_count.return_value = num_gpu + if is_xpu_available(): + mock_devcnt = mocker.patch.object(config_utils, "get_adaptive_num_workers") + mock_devcnt.return_value = 1 _config = ModelConfiguration(DetectionConfig(), self.det_label_schema) output_model = ModelEntity(self.det_dataset, _config) diff --git a/tests/unit/algorithms/segmentation/adapters/mmseg/test_mmseg_configurer.py b/tests/unit/algorithms/segmentation/adapters/mmseg/test_mmseg_configurer.py index 13a44846e15..55d036ab202 100644 --- a/tests/unit/algorithms/segmentation/adapters/mmseg/test_mmseg_configurer.py +++ b/tests/unit/algorithms/segmentation/adapters/mmseg/test_mmseg_configurer.py @@ -5,6 +5,7 @@ import copy import os +from otx.algorithms.common.utils.utils import is_xpu_available import pytest import tempfile @@ -129,7 +130,7 @@ def test_configure_device(self, mocker): config = copy.deepcopy(self.model_cfg) self.configurer.configure_device(config) assert config.distributed is False - assert config.device == "cpu" + assert config.device in ["cpu", "xpu"] mocker.patch( "torch.distributed.is_initialized", @@ -183,6 +184,8 @@ def test_configure_input_size(self, mocker, input_size, training): @e2e_pytest_unit def test_configure_fp16(self): + if is_xpu_available(): + pytest.skip("FP16 is not supported on XPU") model_cfg = copy.deepcopy(self.model_cfg) model_cfg.fp16 = {} self.configurer.configure_fp16(model_cfg) diff --git a/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/models/encoders/test_sam_prompt_encoder.py b/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/models/encoders/test_sam_prompt_encoder.py index 57dd43692e2..6430eca9e4a 100644 --- a/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/models/encoders/test_sam_prompt_encoder.py +++ b/tests/unit/algorithms/visual_prompting/adapters/pytorch_lightning/models/encoders/test_sam_prompt_encoder.py @@ -102,6 +102,9 @@ def test_get_batch_size( @pytest.mark.parametrize("device", ["cpu", "cuda"]) def test_get_device(self, device: str): """Test _get_device.""" + if device == "cuda" and not torch.cuda.is_available(): + pytest.skip("CUDA is not available") + self.prompt_encoder.point_embeddings.to(device) results = self.prompt_encoder._get_device() diff --git a/tests/unit/cli/utils/test_report.py b/tests/unit/cli/utils/test_report.py index d1431b98271..90b9a40a722 100644 --- a/tests/unit/cli/utils/test_report.py +++ b/tests/unit/cli/utils/test_report.py @@ -1,5 +1,6 @@ from pathlib import Path from pprint import pformat +from otx.algorithms.common.utils.utils import is_xpu_available from otx.api.entities.model_template import ModelTemplate from otx.cli.utils.report import ( @@ -33,7 +34,10 @@ def test_env_info_to_str(mocker): expected = "\tOTX: 1.2\n" mocker.patch("mmcv.utils.env.collect_env", return_value={"OTX": "1.2"}) result = env_info_to_str() - assert expected == result + if is_xpu_available(): + assert expected in result + else: + assert expected == result @e2e_pytest_unit From 81815191afdcdd9f2e66e6ffa49a0270abf4904e Mon Sep 17 00:00:00 2001 From: Eunwoo Shin Date: Wed, 6 Mar 2024 22:20:58 +0900 Subject: [PATCH 24/39] Resolve failed integration test w/ XPU (#3009) * import ipex in test code * change the way to move tensor to device in xpu * run sampling in deformable_attn on CPU * align with pre-commit * patch mmdeploy code * align with pre-commit --------- Co-authored-by: Your Name --- .../multi_scale_deformable_attn_pytorch.py | 42 ++++++++--- .../utils/_builder_build_data_parallel.py | 34 ++++----- 
.../adapters/mmdet/models/__init__.py | 13 +++- .../adapters/mmdet/models/patch_mmdeploy.py | 73 +++++++++++++++++++ tests/test_suite/run_test_command.py | 5 ++ 5 files changed, 135 insertions(+), 32 deletions(-) create mode 100644 src/otx/algorithms/detection/adapters/mmdet/models/patch_mmdeploy.py diff --git a/src/otx/algorithms/common/adapters/mmcv/ops/multi_scale_deformable_attn_pytorch.py b/src/otx/algorithms/common/adapters/mmcv/ops/multi_scale_deformable_attn_pytorch.py index 025f16f3287..cb37d284407 100644 --- a/src/otx/algorithms/common/adapters/mmcv/ops/multi_scale_deformable_attn_pytorch.py +++ b/src/otx/algorithms/common/adapters/mmcv/ops/multi_scale_deformable_attn_pytorch.py @@ -8,6 +8,10 @@ import torch.nn.functional as F from mmcv.ops import multi_scale_deform_attn +from otx.utils.logger import get_logger + +logger = get_logger() + def multi_scale_deformable_attn_pytorch( value: torch.Tensor, @@ -60,6 +64,9 @@ def multi_scale_deformable_attn_pytorch( return output.transpose(1, 2).contiguous() +_warning_custom_grid_sample = False + + def _custom_grid_sample(im: torch.Tensor, grid: torch.Tensor, align_corners: bool = False) -> torch.Tensor: """Custom patch for mmcv.ops.point_sample.bilinear_grid_sample. @@ -78,7 +85,18 @@ def _custom_grid_sample(im: torch.Tensor, grid: torch.Tensor, align_corners: boo Returns: torch.Tensor: A tensor with sampled points, shape (N, C, Hg, Wg) """ - device = im.device + ori_device = im.device + + if ori_device != "cpu": + global _warning_custom_grid_sample # noqa: PLW0603 + if not _warning_custom_grid_sample: + logger.warning( + "Sampling during 'multi_scale_deformable_attn_pytorch' is executed on CPU to avoid out of memory." + ) + _warning_custom_grid_sample = True + im = im.to("cpu") + grid = grid.to("cpu") + n, c, h, w = im.shape gn, gh, gw, _ = grid.shape assert n == gn @@ -114,14 +132,14 @@ def _custom_grid_sample(im: torch.Tensor, grid: torch.Tensor, align_corners: boo x0, x1, y0, y1 = x0 + 1, x1 + 1, y0 + 1, y1 + 1 # Clip coordinates to padded image size - x0 = torch.where(x0 < 0, torch.tensor(0).to(device), x0) - x0 = torch.where(x0 > padded_w - 1, torch.tensor(padded_w - 1).to(device), x0) - x1 = torch.where(x1 < 0, torch.tensor(0).to(device), x1) - x1 = torch.where(x1 > padded_w - 1, torch.tensor(padded_w - 1).to(device), x1) - y0 = torch.where(y0 < 0, torch.tensor(0).to(device), y0) - y0 = torch.where(y0 > padded_h - 1, torch.tensor(padded_h - 1).to(device), y0) - y1 = torch.where(y1 < 0, torch.tensor(0).to(device), y1) - y1 = torch.where(y1 > padded_h - 1, torch.tensor(padded_h - 1).to(device), y1) + x0 = torch.where(x0 < 0, torch.tensor(0).to("cpu"), x0) + x0 = torch.where(x0 > padded_w - 1, torch.tensor(padded_w - 1).to("cpu"), x0) + x1 = torch.where(x1 < 0, torch.tensor(0).to("cpu"), x1) + x1 = torch.where(x1 > padded_w - 1, torch.tensor(padded_w - 1).to("cpu"), x1) + y0 = torch.where(y0 < 0, torch.tensor(0).to("cpu"), y0) + y0 = torch.where(y0 > padded_h - 1, torch.tensor(padded_h - 1).to("cpu"), y0) + y1 = torch.where(y1 < 0, torch.tensor(0).to("cpu"), y1) + y1 = torch.where(y1 > padded_h - 1, torch.tensor(padded_h - 1).to("cpu"), y1) im_padded = im_padded.view(n, c, -1) @@ -135,7 +153,11 @@ def _custom_grid_sample(im: torch.Tensor, grid: torch.Tensor, align_corners: boo Ic = torch.gather(im_padded, 2, x1_y0) Id = torch.gather(im_padded, 2, x1_y1) - return (Ia * wa + Ib * wb + Ic * wc + Id * wd).reshape(n, c, gh, gw) + result = (Ia * wa + Ib * wb + Ic * wc + Id * wd).reshape(n, c, gh, gw) + + if ori_device != "cpu": + return 
result.to(ori_device) + return result multi_scale_deform_attn.multi_scale_deformable_attn_pytorch = multi_scale_deformable_attn_pytorch diff --git a/src/otx/algorithms/common/adapters/mmcv/utils/_builder_build_data_parallel.py b/src/otx/algorithms/common/adapters/mmcv/utils/_builder_build_data_parallel.py index f0dd316b0db..9f19ba25340 100644 --- a/src/otx/algorithms/common/adapters/mmcv/utils/_builder_build_data_parallel.py +++ b/src/otx/algorithms/common/adapters/mmcv/utils/_builder_build_data_parallel.py @@ -96,27 +96,19 @@ def scatter(self, inputs, kwargs, device_ids): inputs, kwargs = super().scatter(inputs, kwargs, [-1]) target_device = torch.device(f"xpu:{device_ids[0]}") - for x in inputs: - if isinstance(x, tuple): - for val in x: - if isinstance(val, dict): - for k in val: - if isinstance(val[k], torch.Tensor): - val[k] = val[k].to(target_device) - elif isinstance(val[k], list): - for i, item in enumerate(val[k]): - if isinstance(item, torch.Tensor): - val[k][i] = item.to(target_device) - - for x in kwargs: - if isinstance(x, dict): - for k in x: - if isinstance(x[k], torch.Tensor): - x[k] = x[k].to(target_device) - elif isinstance(x[k], list): - for i, item in enumerate(x[k]): - if isinstance(item, torch.Tensor): - x[k][i] = item.to(target_device) + def change_tensor_device(obj): + if isinstance(obj, list): + obj = list(map(change_tensor_device, obj)) + elif isinstance(obj, tuple): + obj = tuple(map(change_tensor_device, obj)) + elif isinstance(obj, dict): + obj = {key: change_tensor_device(val) for key, val in obj.items()} + elif isinstance(obj, torch.Tensor): + obj = obj.to(target_device) + return obj + + inputs = change_tensor_device(inputs) + kwargs = change_tensor_device(kwargs) return inputs, kwargs diff --git a/src/otx/algorithms/detection/adapters/mmdet/models/__init__.py b/src/otx/algorithms/detection/adapters/mmdet/models/__init__.py index c59b3e97b84..9efe5351905 100644 --- a/src/otx/algorithms/detection/adapters/mmdet/models/__init__.py +++ b/src/otx/algorithms/detection/adapters/mmdet/models/__init__.py @@ -3,6 +3,17 @@ # SPDX-License-Identifier: Apache-2.0 # -from . import assigners, backbones, dense_heads, detectors, heads, layers, losses, necks, roi_heads +from . import ( + assigners, + backbones, + dense_heads, + detectors, + heads, + layers, + losses, + necks, + patch_mmdeploy, # noqa: F401 + roi_heads, +) __all__ = ["assigners", "backbones", "dense_heads", "detectors", "heads", "layers", "losses", "necks", "roi_heads"] diff --git a/src/otx/algorithms/detection/adapters/mmdet/models/patch_mmdeploy.py b/src/otx/algorithms/detection/adapters/mmdet/models/patch_mmdeploy.py new file mode 100644 index 00000000000..32165c12dd9 --- /dev/null +++ b/src/otx/algorithms/detection/adapters/mmdet/models/patch_mmdeploy.py @@ -0,0 +1,73 @@ +"""Patch mmdeploy code.""" +# Copyright (C) 2022 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 +# + +import torch + +from otx.algorithms.common.adapters.mmdeploy.utils import is_mmdeploy_enabled + + +def _select_nms_index( + scores: torch.Tensor, boxes: torch.Tensor, nms_index: torch.Tensor, batch_size: int, keep_top_k: int = -1 +): + """Transform NMS output. + + Args: + scores (Tensor): The detection scores of shape + [N, num_classes, num_boxes]. + boxes (Tensor): The bounding boxes of shape [N, num_boxes, 4]. + nms_index (Tensor): NMS output of bounding boxes indexing. + batch_size (int): Batch size of the input image. + keep_top_k (int): Number of top K boxes to keep after nms. + Defaults to -1. 
+ + Returns: + tuple[Tensor, Tensor]: (dets, labels), `dets` of shape [N, num_det, 5] + and `labels` of shape [N, num_det]. + """ + batch_inds, cls_inds = nms_index[:, 0], nms_index[:, 1] + box_inds = nms_index[:, 2] + + # index by nms output + scores = scores[batch_inds, cls_inds, box_inds].unsqueeze(1) + boxes = boxes[batch_inds, box_inds, ...] + dets = torch.cat([boxes, scores], dim=1) + + # batch all + batched_dets = dets.unsqueeze(0).repeat(batch_size, 1, 1) + batch_template = torch.arange(0, batch_size, dtype=batch_inds.dtype, device=batch_inds.device) + batched_dets = batched_dets.where( + (batch_inds == batch_template.unsqueeze(1)).unsqueeze(-1), batched_dets.new_zeros(1) + ) + + batched_labels = cls_inds.unsqueeze(0).repeat(batch_size, 1) + batched_labels = batched_labels.where( + (batch_inds == batch_template.unsqueeze(1)), batched_labels.new_ones(1, dtype=batched_labels.dtype) * -1 + ) # this is the only line that differs from the original mmdeploy code + + N = batched_dets.shape[0] + + # expand tensor to eliminate [0, ...] tensor + batched_dets = torch.cat((batched_dets, batched_dets.new_zeros((N, 1, 5))), 1) + batched_labels = torch.cat((batched_labels, batched_labels.new_zeros((N, 1))), 1) + + # sort + is_use_topk = keep_top_k > 0 and (torch.onnx.is_in_onnx_export() or keep_top_k < batched_dets.shape[1]) + if is_use_topk: + _, topk_inds = batched_dets[:, :, -1].topk(keep_top_k, dim=1) + else: + _, topk_inds = batched_dets[:, :, -1].sort(dim=1, descending=True) + topk_batch_inds = torch.arange(batch_size, dtype=topk_inds.dtype, device=topk_inds.device).view(-1, 1) + batched_dets = batched_dets[topk_batch_inds, topk_inds, ...] + batched_labels = batched_labels[topk_batch_inds, topk_inds, ...] + + # slice and recover the tensor + return batched_dets, batched_labels + + +if is_mmdeploy_enabled(): + + from mmdeploy.codebase.mmdet.core.post_processing import bbox_nms + + bbox_nms.select_nms_index = _select_nms_index diff --git a/tests/test_suite/run_test_command.py b/tests/test_suite/run_test_command.py index 1cf2d440baa..f643b5eed52 100644 --- a/tests/test_suite/run_test_command.py +++ b/tests/test_suite/run_test_command.py @@ -23,6 +23,11 @@ from otx.cli.utils.nncf import get_number_of_fakequantizers_in_xml from tests.test_suite.e2e_test_system import e2e_pytest_component +try: + import intel_extension_for_pytorch +except ImportError: + pass + def get_template_rel_dir(template): return os.path.dirname(os.path.relpath(template.model_template_path)) From dd5640090f0cd8c5cae630cb955fb085719e748d Mon Sep 17 00:00:00 2001 From: kprokofi Date: Thu, 7 Mar 2024 01:28:45 +0900 Subject: [PATCH 25/39] added unit tests. 
need to debug on XPU --- .../adapters/mmcls/apis/train.py | 1 - .../adapters/anomalib/accelerators/xpu.py | 44 ++++++++ .../anomalib/plugins/xpu_precision.py | 48 +++++++++ .../anomalib/strategies/test_xpu_single.py | 27 +++++ .../adapters/mmcls/api/test_train.py | 69 ++++++++++++ .../mmcv/hooks/test_xpu_optimizer_hook.py | 16 +++ .../adapters/mmcv/utils/test_fp16_utils.py | 100 ++++++++++++++++++ .../torch/amp/test_xpu_grad_scaler.py | 38 +++++++ .../adapters/mmdet/api/test_train.py | 70 ++++++++++++ .../adapters/mmseg/api/test_train.py | 70 ++++++++++++ .../adapters/mmseg/test_mmseg_configurer.py | 1 - 11 files changed, 482 insertions(+), 2 deletions(-) create mode 100644 tests/unit/algorithms/anomaly/adapters/anomalib/accelerators/xpu.py create mode 100644 tests/unit/algorithms/anomaly/adapters/anomalib/plugins/xpu_precision.py create mode 100644 tests/unit/algorithms/anomaly/adapters/anomalib/strategies/test_xpu_single.py create mode 100644 tests/unit/algorithms/classification/adapters/mmcls/api/test_train.py create mode 100644 tests/unit/algorithms/common/adapters/mmcv/hooks/test_xpu_optimizer_hook.py create mode 100644 tests/unit/algorithms/common/adapters/mmcv/utils/test_fp16_utils.py create mode 100644 tests/unit/algorithms/common/adapters/torch/amp/test_xpu_grad_scaler.py create mode 100644 tests/unit/algorithms/detection/adapters/mmdet/api/test_train.py create mode 100644 tests/unit/algorithms/segmentation/adapters/mmseg/api/test_train.py diff --git a/src/otx/algorithms/classification/adapters/mmcls/apis/train.py b/src/otx/algorithms/classification/adapters/mmcls/apis/train.py index 50658c86d35..58ca23b26aa 100644 --- a/src/otx/algorithms/classification/adapters/mmcls/apis/train.py +++ b/src/otx/algorithms/classification/adapters/mmcls/apis/train.py @@ -63,7 +63,6 @@ def train_model(model, dataset, cfg, distributed=False, validate=False, timestam ) # The specific dataloader settings train_loader_cfg = {**loader_cfg, **cfg.data.get("train_dataloader", {})} - data_loaders = [build_dataloader(ds, **train_loader_cfg) for ds in dataset] fp16_cfg = cfg.get("fp16_", None) diff --git a/tests/unit/algorithms/anomaly/adapters/anomalib/accelerators/xpu.py b/tests/unit/algorithms/anomaly/adapters/anomalib/accelerators/xpu.py new file mode 100644 index 00000000000..9e507582528 --- /dev/null +++ b/tests/unit/algorithms/anomaly/adapters/anomalib/accelerators/xpu.py @@ -0,0 +1,44 @@ +import pytest +import torch +from otx.algorithms.anomaly.adapters.anomalib.accelerators import XPUAccelerator +from otx.algorithms.common.utils import is_xpu_available + + +@pytest.mark.skipif(not is_xpu_available(), reason="XPU is not available") +class TestXPUAccelerator: + @pytest.fixture + def accelerator(self): + return XPUAccelerator() + + def test_setup_device(self, accelerator): + device = torch.device("xpu") + accelerator.setup_device(device) + + def test_parse_devices(self, accelerator): + devices = [1, 2, 3] + parsed_devices = accelerator.parse_devices(devices) + assert isinstance(parsed_devices, list) + assert parsed_devices == devices + + def test_get_parallel_devices(self, accelerator): + devices = [1, 2, 3] + parallel_devices = accelerator.get_parallel_devices(devices) + assert isinstance(parallel_devices, list) + assert parallel_devices == [torch.device("xpu", idx) for idx in devices] + + def test_auto_device_count(self, accelerator): + count = accelerator.auto_device_count() + assert isinstance(count, int) + + def test_is_available(self, accelerator): + available = accelerator.is_available() + 
assert isinstance(available, bool) + assert available == is_xpu_available() + + def test_get_device_stats(self, accelerator): + device = torch.device("xpu") + stats = accelerator.get_device_stats(device) + assert isinstance(stats, dict) + + def test_teardown(self, accelerator): + accelerator.teardown() diff --git a/tests/unit/algorithms/anomaly/adapters/anomalib/plugins/xpu_precision.py b/tests/unit/algorithms/anomaly/adapters/anomalib/plugins/xpu_precision.py new file mode 100644 index 00000000000..8ca6b5b62d8 --- /dev/null +++ b/tests/unit/algorithms/anomaly/adapters/anomalib/plugins/xpu_precision.py @@ -0,0 +1,48 @@ +import pytest +import torch +from torch.optim import Optimizer +from otx.algorithms.anomaly.adapters.anomalib.plugins.xpu_precision import MixedPrecisionXPUPlugin + + +class TestMixedPrecisionXPUPlugin: + @pytest.fixture + def plugin(self): + return MixedPrecisionXPUPlugin() + + def test_init(self, plugin): + assert plugin.scaler is None + + def test_pre_backward(self, plugin, mocker): + tensor = torch.zeros(1) + module = mocker.MagicMock() + output = plugin.pre_backward(tensor, module) + assert output == tensor + + def test_optimizer_step_no_scaler(self, plugin, mocker): + optimizer = mocker.MagicMock(Optimizer) + model = mocker.MagicMock() + optimizer_idx = 0 + closure = mocker.MagicMock() + kwargs = {} + mock_optimizer_step = mocker.patch("otx.algorithms.anomaly.adapters.anomalib.plugins.xpu_precision.PrecisionPlugin.optimizer_step") + out = plugin.optimizer_step(optimizer, model, optimizer_idx, closure, **kwargs) + assert isinstance(out, mocker.MagicMock) + mock_optimizer_step.assert_called_once() + + def test_optimizer_step_with_scaler(self, plugin, mocker): + optimizer = mocker.MagicMock(Optimizer) + model = mocker.MagicMock() + optimizer_idx = 0 + closure = mocker.MagicMock() + plugin.scaler = mocker.MagicMock() + kwargs = {} + out = plugin.optimizer_step(optimizer, model, optimizer_idx, closure, **kwargs) + assert isinstance(out, mocker.MagicMock) + + def test_clip_gradients(self, plugin, mocker): + optimizer = mocker.MagicMock(Optimizer) + clip_val = 0.1 + gradient_clip_algorithm = "norm" + mock_clip_gradients = mocker.patch("otx.algorithms.anomaly.adapters.anomalib.plugins.xpu_precision.PrecisionPlugin.clip_gradients") + plugin.clip_gradients(optimizer, clip_val, gradient_clip_algorithm) + mock_clip_gradients.assert_called_once() diff --git a/tests/unit/algorithms/anomaly/adapters/anomalib/strategies/test_xpu_single.py b/tests/unit/algorithms/anomaly/adapters/anomalib/strategies/test_xpu_single.py new file mode 100644 index 00000000000..3d11390a308 --- /dev/null +++ b/tests/unit/algorithms/anomaly/adapters/anomalib/strategies/test_xpu_single.py @@ -0,0 +1,27 @@ +import pytest +import torch +import pytorch_lightning as pl +from otx.algorithms.anomaly.adapters.anomalib.strategies.xpu_single import SingleXPUStrategy +from otx.algorithms.common.utils.utils import is_xpu_available + + +@pytest.mark.skipif(not is_xpu_available(), reason="XPU is not available") +class TestSingleXPUStrategy: + def test_init(self): + strategy = SingleXPUStrategy(device="xpu:0") + assert strategy.device == "xpu:0" + assert strategy.accelerator is None + + def test_is_distributed(self): + strategy = SingleXPUStrategy(device="xpu:0") + assert not strategy.is_distributed + + def test_setup_optimizers(self): + strategy = SingleXPUStrategy(device="xpu:0") + trainer = pl.Trainer() + # Create mock optimizers and models for testing + optimizer = torch.optim.Adam(strategy.model.parameters(), 
lr=0.001) + model = torch.nn.Linear(10, 2) + trainer.model = model + strategy.setup_optimizers(trainer) + assert len(strategy.optimizers) == 1 diff --git a/tests/unit/algorithms/classification/adapters/mmcls/api/test_train.py b/tests/unit/algorithms/classification/adapters/mmcls/api/test_train.py new file mode 100644 index 00000000000..2d3adf3d606 --- /dev/null +++ b/tests/unit/algorithms/classification/adapters/mmcls/api/test_train.py @@ -0,0 +1,69 @@ +import pytest +from unittest import mock +from otx.algorithms.classification.adapters.mmcls.apis.train import train_model +import mmcv +from otx.algorithms.common.utils.utils import is_xpu_available + +@pytest.fixture +def mock_modules(mocker): + mocker.patch('otx.algorithms.classification.adapters.mmcls.apis.train.build_dataloader', return_value=mock.MagicMock()) + mocker.patch('otx.algorithms.classification.adapters.mmcls.apis.train.get_root_logger', return_value=mock.MagicMock()) + mocker.patch('otx.algorithms.classification.adapters.mmcls.apis.train.build_dataloader', return_value=mock.MagicMock()) + mocker.patch('otx.algorithms.classification.adapters.mmcls.apis.train.wrap_distributed_model', return_value=mock.MagicMock()) + mocker.patch('otx.algorithms.classification.adapters.mmcls.apis.train.wrap_non_distributed_model', return_value=mock.MagicMock()) + mocker.patch('otx.algorithms.classification.adapters.mmcls.apis.train.build_optimizer', return_value=mock.MagicMock()) + mocker.patch('otx.algorithms.classification.adapters.mmcls.apis.train.build_runner', return_value=mock.MagicMock()) + mocker.patch('otx.algorithms.classification.adapters.mmcls.apis.train.build_dataset', return_value=mock.MagicMock()) + mocker.patch('otx.algorithms.classification.adapters.mmcls.apis.train.build_dataloader', return_value=mock.MagicMock()) + mocker.patch('otx.algorithms.classification.adapters.mmcls.apis.train.build_dataloader', return_value=mock.MagicMock()) + mocker.patch('otx.algorithms.classification.adapters.mmcls.apis.train.build_dataloader', return_value=mock.MagicMock()) + mocker.patch('otx.algorithms.classification.adapters.mmcls.apis.train.DistEvalHook', return_value=mock.MagicMock()) + mocker.patch('otx.algorithms.classification.adapters.mmcls.apis.train.EvalHook', return_value=mock.MagicMock()) + +@pytest.fixture +def mmcv_cfg(): + return mmcv.Config({"gpu_ids" : [0], "seed": 42, "data": mock.MagicMock(), "device": "cuda", "optimizer": "SGD", "optimizer_config": {}, + "total_epochs": 1, "work_dir": "test", "lr_config": {}, "checkpoint_config": {}, "log_config": {}, "resume_from": False, "load_from": "", + "workflow": ""}) + +@pytest.fixture +def model(): + return mock.MagicMock() + +@pytest.fixture +def dataset(): + return mock.MagicMock() + +def test_train_model_single_dataset_no_validation(mock_modules, mmcv_cfg, model, dataset): + # Create mock inputs + _ = mock_modules + # Call the function + train_model(model, dataset, mmcv_cfg, validate=False) + +def test_train_model_multiple_datasets_distributed_training(mock_modules, mmcv_cfg, model, dataset): + # Create mock inputs + _ = mock_modules + # Call the function + train_model(model, [dataset, dataset], mmcv_cfg, distributed=True, validate=True) + +def test_train_model_specific_timestamp_and_cpu_device(mock_modules, mmcv_cfg, model, dataset): + # Create mock inputs + _ = mock_modules + timestamp = "2024-01-01" + device = "cpu" + mmcv_cfg.device = "cpu" + meta = {"info": "some_info"} + # Call the function + train_model(model, dataset, mmcv_cfg, timestamp=timestamp, device=device, meta=meta) 
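
Note on the XPU-gated test that follows: like the other XPU tests in this series, it relies on `is_xpu_available()` from `otx.algorithms.common.utils`. The helper's body is not shown in these patches; as a rough sketch, assuming only the public IPEX and PyTorch APIs (this is not necessarily the exact OTX implementation), detection reduces to checking that the `torch.xpu` namespace exists and reports a device:

import torch

try:
    # Importing IPEX is what registers the `torch.xpu` namespace; the import
    # is tolerated to fail because XPU-less environments will not have it.
    import intel_extension_for_pytorch  # noqa: F401
except ImportError:
    pass


def is_xpu_available() -> bool:
    # Sketch of an XPU availability check, mirroring how the tests use it.
    return hasattr(torch, "xpu") and torch.xpu.is_available()

This is why the CUDA-style tests and the XPU tests can live in one file: the skip condition evaluates safely on machines where IPEX is absent.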
+ +def test_train_model_xpu_device(mock_modules, mmcv_cfg, model, dataset): + # Create mock inputs + if not is_xpu_available(): + pytest.skip("skip test since xpu is not available") + + _ = mock_modules + device = "xpu" + mmcv_cfg.device = "xpu" + + # Call the function + train_model(model, dataset, mmcv_cfg, device=device) diff --git a/tests/unit/algorithms/common/adapters/mmcv/hooks/test_xpu_optimizer_hook.py b/tests/unit/algorithms/common/adapters/mmcv/hooks/test_xpu_optimizer_hook.py new file mode 100644 index 00000000000..0516deb3e62 --- /dev/null +++ b/tests/unit/algorithms/common/adapters/mmcv/hooks/test_xpu_optimizer_hook.py @@ -0,0 +1,16 @@ +import pytest +from otx.algorithms.common.utils.utils import is_xpu_available + + +def test_init(): + if not is_xpu_available(): + pytest.skip("XPU is not available") + + from otx.algorithms.common.adapters.mmcv.hooks.xpu_optimizer_hook import BFp16XPUOptimizerHook + + hook = BFp16XPUOptimizerHook(grad_clip=None, coalesce=True, bucket_size_mb=-1, loss_scale=512.0, distributed=True) + assert hook._scale_update_param is None # Check scale update param is None + assert hook.coalesce is True # Check coalesce is True + assert hook.bucket_size_mb == -1 # Check bucket size is -1 + assert hook.loss_scale == 512.0 # Check loss scale is 512.0 + assert hook.distributed is True # Check distributed is True diff --git a/tests/unit/algorithms/common/adapters/mmcv/utils/test_fp16_utils.py b/tests/unit/algorithms/common/adapters/mmcv/utils/test_fp16_utils.py new file mode 100644 index 00000000000..d3b73d7b112 --- /dev/null +++ b/tests/unit/algorithms/common/adapters/mmcv/utils/test_fp16_utils.py @@ -0,0 +1,100 @@ +# test_custom_auto_fp16.py + +import pytest +from unittest.mock import MagicMock, patch +from torch import nn, torch +from otx.algorithms.common.adapters.mmcv.utils.fp16_utils import custom_auto_fp16 +from otx.algorithms.common.adapters.mmcv.utils.fp16_utils import custom_force_fp32 +from otx.algorithms.common.utils import is_xpu_available + + +@pytest.fixture +def test_module(): + class TestModule(torch.nn.Module): + def __init__(self): + super(TestModule, self).__init__() + self.fp16_enabled = False + + @custom_auto_fp16() + def test_method_fp16(self, arg1, arg2): + return torch.tensor(arg1) + torch.tensor(arg2) + + @custom_auto_fp16(out_fp32=True) + def test_method_force_out_fp32(self, arg1, arg2): + return torch.tensor(arg1) + torch.tensor(arg2) + + @custom_force_fp32(out_fp16=False) + def test_func_force_fp16_to_fp32(self, arg1, arg2): + return torch.tensor(arg1) + torch.tensor(arg2) + + @custom_force_fp32(out_fp16=True) + def test_func_force_fp32_out_fp16(self, arg1, arg2): + return torch.tensor(arg1) + torch.tensor(arg2) + + def set_fp16(self, enabled): + self.fp16_enabled = enabled + + return TestModule() + + +class TestCustomAutoFP16: + def test_simple_apply(self, test_module): + test_func = test_module.test_method_fp16 + # assertion simple ints + assert test_func(5, 6) == 11 + # no fp16 enabled + assert test_func(torch.tensor(5.3), torch.tensor(8.3)).dtype == torch.float32 + + def test_fp16_enabled_true(self, test_module): + test_module.set_fp16(enabled = True) + test_func = test_module.test_method_fp16 + # check fp16 casting + assert test_func(torch.tensor(5.3), torch.tensor(8.3)).dtype == torch.float16 + + def test_out_fp32_true(self, test_module): + test_module.set_fp16(enabled = True) + test_func = test_module.test_method_force_out_fp32 + # cast back to fp32 + assert test_func(torch.tensor(5.3), torch.tensor(8.3)).dtype == torch.float32 
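
Taken together, the assertions in `TestCustomAutoFP16` above pin down the decorator's contract: arguments pass through untouched unless `fp16_enabled` is set, floating-point tensors are cast to half precision (bfloat16 on XPU, which the next test exercises), and `out_fp32=True` casts the result back to full precision. A minimal decorator satisfying that contract, using only stock PyTorch (an illustrative stand-in, not the actual `custom_auto_fp16` implementation), could look like:

import functools

import torch


def auto_fp16_sketch(out_fp32=False):
    # Hypothetical, simplified stand-in for custom_auto_fp16.
    def decorator(fn):
        @functools.wraps(fn)
        def wrapper(self, *args):
            if not getattr(self, "fp16_enabled", False):
                return fn(self, *args)  # fp16 disabled: no casting at all
            # XPU prefers bfloat16; CUDA/CPU use float16 (as the tests assert).
            xpu = hasattr(torch, "xpu") and torch.xpu.is_available()
            dst = torch.bfloat16 if xpu else torch.float16

            def cast(a):
                return a.to(dst) if isinstance(a, torch.Tensor) and a.is_floating_point() else a

            out = fn(self, *(cast(a) for a in args))
            # out_fp32=True restores full precision on the way out.
            return out.float() if out_fp32 and isinstance(out, torch.Tensor) else out

        return wrapper

    return decorator

Non-tensor arguments (the plain ints in `test_simple_apply`) are deliberately left alone, which is why `test_func(5, 6) == 11` holds regardless of the fp16 flag.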
+ + def test_fp16_enabled_xpu(self, test_module): + if not is_xpu_available(): + pytest.skip("XPU is not available") + # setup + test_module.set_fp16(enabled = True) + test_func = test_module.test_method_fp16 + # assertion + assert test_func(torch.tensor(5.3), torch.tensor(8.3)).dtype == torch.bfloat16 + + +class TestCustomForceFP32: + def test_simple_apply(self, test_module): + test_func = test_module.test_func_force_fp16_to_fp32 + # assertion simple ints + assert test_func(5, 6) == 11 + # no fp16 enabled + assert test_func(torch.tensor(5.3), torch.tensor(8.3)).dtype == torch.float32 + + def test_fp16_enabled_true(self, test_module): + test_module.set_fp16(enabled = True) + test_func = test_module.test_func_force_fp16_to_fp32 + output_type = test_func(torch.tensor(5.3, dtype=torch.float16), torch.tensor(8.3, dtype=torch.float16)).dtype + # check fp16 casting + assert output_type == torch.float32 + + def test_out_fp32_true(self, test_module): + test_module.set_fp16(enabled = True) + test_func = test_module.test_func_force_fp32_out_fp16 + output_type = test_func(torch.tensor(5.3, dtype=torch.float16), torch.tensor(8.3, dtype=torch.float16)).dtype + # cast back to fp32 + assert output_type == torch.float16 + + def test_fp16_enabled_xpu(self, test_module): + if not is_xpu_available(): + pytest.skip("XPU is not available") + # setup + test_module.set_fp16(enabled = True) + test_func = test_module.test_func_force_fp16_to_fp32 + output_type = test_func(torch.tensor(5.3, dtype=torch.bfloat16), torch.tensor(8.3, dtype=torch.bfloat16)).dtype + # assertion + assert output_type == torch.float32 diff --git a/tests/unit/algorithms/common/adapters/torch/amp/test_xpu_grad_scaler.py b/tests/unit/algorithms/common/adapters/torch/amp/test_xpu_grad_scaler.py new file mode 100644 index 00000000000..1e1eceea18d --- /dev/null +++ b/tests/unit/algorithms/common/adapters/torch/amp/test_xpu_grad_scaler.py @@ -0,0 +1,38 @@ +# test_xpu_grad_scaler.py + +import pytest +import torch +from otx.algorithms.common.utils import is_xpu_available +if is_xpu_available(): + from otx.algorithms.common.adapters.torch.amp.xpu_grad_scaler import XPUGradScaler + + +@pytest.mark.skipif(not is_xpu_available(), reason="XPU is not available") +class TestXPUGradScaler: + @pytest.fixture + def grad_scaler(self): + return XPUGradScaler() + + @pytest.fixture + def optimizer(self): + model = torch.nn.Linear(3, 3) + optimizer = torch.optim.SGD(model.parameters(), lr=0.1) + return optimizer + + def test_init(self, grad_scaler): + assert grad_scaler._enabled + assert grad_scaler._init_scale == 2.0**16 + assert grad_scaler._growth_factor == 2.0 + assert grad_scaler._backoff_factor == 0.5 + assert grad_scaler._growth_interval == 2000 + + def test_scale(self, grad_scaler): + outputs = torch.tensor([1.0, 2.0, 3.0]) + scaled_outputs = grad_scaler.scale(outputs) + assert torch.all(scaled_outputs == outputs) + + def test_unscale_grads(self, grad_scaler, optimizer): + inv_scale = 1.0 + found_inf = False + output = grad_scaler._unscale_grads_(optimizer, inv_scale, found_inf, allow_bf16=False) + print(output) diff --git a/tests/unit/algorithms/detection/adapters/mmdet/api/test_train.py b/tests/unit/algorithms/detection/adapters/mmdet/api/test_train.py new file mode 100644 index 00000000000..ae85053ec23 --- /dev/null +++ b/tests/unit/algorithms/detection/adapters/mmdet/api/test_train.py @@ -0,0 +1,70 @@ +import pytest +from unittest import mock +from otx.algorithms.detection.adapters.mmdet.apis.train import train_detector +import mmcv +import os +from 
otx.algorithms.common.utils.utils import is_xpu_available + +@pytest.fixture +def mock_modules(mocker): + mocker.patch('otx.algorithms.detection.adapters.mmdet.apis.train.build_dataloader', return_value=mock.MagicMock()) + mocker.patch('otx.algorithms.detection.adapters.mmdet.apis.train.get_root_logger', return_value=mock.MagicMock()) + mocker.patch('otx.algorithms.detection.adapters.mmdet.apis.train.build_dataloader', return_value=mock.MagicMock()) + mocker.patch('otx.algorithms.detection.adapters.mmdet.apis.train.build_dp', return_value=mock.MagicMock()) + mocker.patch('otx.algorithms.detection.adapters.mmdet.apis.train.build_ddp', return_value=mock.MagicMock()) + mocker.patch('otx.algorithms.detection.adapters.mmdet.apis.train.build_optimizer', return_value=mock.MagicMock()) + mocker.patch('otx.algorithms.detection.adapters.mmdet.apis.train.build_runner', return_value=mock.MagicMock()) + mocker.patch('otx.algorithms.detection.adapters.mmdet.apis.train.build_dataset', return_value=mock.MagicMock()) + mocker.patch('otx.algorithms.detection.adapters.mmdet.apis.train.build_dataloader', return_value=mock.MagicMock()) + mocker.patch('otx.algorithms.detection.adapters.mmdet.apis.train.build_dataloader', return_value=mock.MagicMock()) + mocker.patch('otx.algorithms.detection.adapters.mmdet.apis.train.build_dataloader', return_value=mock.MagicMock()) + mocker.patch('otx.algorithms.detection.adapters.mmdet.apis.train.DistEvalHook', return_value=mock.MagicMock()) + mocker.patch('otx.algorithms.detection.adapters.mmdet.apis.train.EvalHook', return_value=mock.MagicMock()) + +@pytest.fixture +def mmcv_cfg(): + return mmcv.Config({"gpu_ids" : [0], "seed": 42, "data": mock.MagicMock(), "device": "cuda", "optimizer": "Adam", "optimizer_config": {}, + "total_epochs": 1, "work_dir": "test", "lr_config": {}, "checkpoint_config": {}, "log_config": {}, "resume_from": False, "load_from": "", + "workflow": "", "log_level": 1, "total_iters": 1000}) + +@pytest.fixture +def model(): + return mock.MagicMock() + +@pytest.fixture +def dataset(): + return mock.MagicMock() + +def test_train_model_single_dataset_no_validation(mock_modules, mmcv_cfg, model, dataset): + # Create mock inputs + _ = mock_modules + # Call the function + train_detector(model, dataset, mmcv_cfg, validate=False) + +def test_train_model_multiple_datasets_distributed_training(mock_modules, mmcv_cfg, model, dataset): + # Create mock inputs + _ = mock_modules + os.environ["LOCAL_RANK"] = "0" + # Call the function + train_detector(model, [dataset, dataset], mmcv_cfg, distributed=True, validate=True) + +def test_train_model_specific_timestamp_and_cpu_device(mock_modules, mmcv_cfg, model, dataset): + # Create mock inputs + _ = mock_modules + timestamp = "2024-01-01" + mmcv_cfg.device = "cpu" + meta = {"info": "some_info"} + # Call the function + train_detector(model, dataset, mmcv_cfg, timestamp=timestamp, meta=meta) + +def test_train_model_xpu_device(mock_modules, mmcv_cfg, model, dataset): + # Create mock inputs + if not is_xpu_available(): + pytest.skip("skip test since xpu is not available") + + _ = mock_modules + device = "xpu" + mmcv_cfg.device = "xpu" + + # Call the function + train_detector(model, dataset, mmcv_cfg, device=device) diff --git a/tests/unit/algorithms/segmentation/adapters/mmseg/api/test_train.py b/tests/unit/algorithms/segmentation/adapters/mmseg/api/test_train.py new file mode 100644 index 00000000000..ad5bc2cb647 --- /dev/null +++ b/tests/unit/algorithms/segmentation/adapters/mmseg/api/test_train.py @@ -0,0 +1,70 @@ +import 
pytest +from unittest import mock +from otx.algorithms.segmentation.adapters.mmseg.apis.train import train_segmentor +import mmcv +import os +from otx.algorithms.common.utils.utils import is_xpu_available + +@pytest.fixture +def mock_modules(mocker): + mocker.patch('otx.algorithms.segmentation.adapters.mmseg.apis.train.build_dataloader', return_value=mock.MagicMock()) + mocker.patch('otx.algorithms.segmentation.adapters.mmseg.apis.train.get_root_logger', return_value=mock.MagicMock()) + mocker.patch('otx.algorithms.segmentation.adapters.mmseg.apis.train.build_dataloader', return_value=mock.MagicMock()) + mocker.patch('otx.algorithms.segmentation.adapters.mmseg.apis.train.build_dp', return_value=mock.MagicMock()) + mocker.patch('otx.algorithms.segmentation.adapters.mmseg.apis.train.build_ddp', return_value=mock.MagicMock()) + mocker.patch('otx.algorithms.segmentation.adapters.mmseg.apis.train.build_optimizer', return_value=mock.MagicMock()) + mocker.patch('otx.algorithms.segmentation.adapters.mmseg.apis.train.build_runner', return_value=mock.MagicMock()) + mocker.patch('otx.algorithms.segmentation.adapters.mmseg.apis.train.build_dataset', return_value=mock.MagicMock()) + mocker.patch('otx.algorithms.segmentation.adapters.mmseg.apis.train.build_dataloader', return_value=mock.MagicMock()) + mocker.patch('otx.algorithms.segmentation.adapters.mmseg.apis.train.build_dataloader', return_value=mock.MagicMock()) + mocker.patch('otx.algorithms.segmentation.adapters.mmseg.apis.train.build_dataloader', return_value=mock.MagicMock()) + mocker.patch('otx.algorithms.segmentation.adapters.mmseg.apis.train.DistEvalHook', return_value=mock.MagicMock()) + mocker.patch('otx.algorithms.segmentation.adapters.mmseg.apis.train.EvalHook', return_value=mock.MagicMock()) + +@pytest.fixture +def mmcv_cfg(): + return mmcv.Config({"gpu_ids" : [0], "seed": 42, "data": mock.MagicMock(), "device": "cuda", "optimizer": "Adam", "optimizer_config": {}, + "total_epochs": 1, "work_dir": "test", "lr_config": {}, "checkpoint_config": {}, "log_config": {}, "resume_from": False, "load_from": "", + "workflow": "", "log_level": 1, "total_iters": 1000}) + +@pytest.fixture +def model(): + return mock.MagicMock() + +@pytest.fixture +def dataset(): + return mock.MagicMock() + +def test_train_model_single_dataset_no_validation(mock_modules, mmcv_cfg, model, dataset): + # Create mock inputs + _ = mock_modules + # Call the function + train_segmentor(model, dataset, mmcv_cfg, validate=False) + +def test_train_model_multiple_datasets_distributed_training(mock_modules, mmcv_cfg, model, dataset): + # Create mock inputs + _ = mock_modules + os.environ["LOCAL_RANK"] = "0" + # Call the function + train_segmentor(model, [dataset, dataset], mmcv_cfg, distributed=True, validate=True) + +def test_train_model_specific_timestamp_and_cpu_device(mock_modules, mmcv_cfg, model, dataset): + # Create mock inputs + _ = mock_modules + timestamp = "2024-01-01" + mmcv_cfg.device = "cpu" + meta = {"info": "some_info"} + # Call the function + train_segmentor(model, dataset, mmcv_cfg, timestamp=timestamp, meta=meta) + +def test_train_model_xpu_device(mock_modules, mmcv_cfg, model, dataset): + # Create mock inputs + if not is_xpu_available(): + pytest.skip("skip test since xpu is not available") + + _ = mock_modules + device = "xpu" + mmcv_cfg.device = "xpu" + + # Call the function + train_segmentor(model, dataset, mmcv_cfg, device=device) diff --git a/tests/unit/algorithms/segmentation/adapters/mmseg/test_mmseg_configurer.py 
b/tests/unit/algorithms/segmentation/adapters/mmseg/test_mmseg_configurer.py index 55d036ab202..7cb1dbdc749 100644 --- a/tests/unit/algorithms/segmentation/adapters/mmseg/test_mmseg_configurer.py +++ b/tests/unit/algorithms/segmentation/adapters/mmseg/test_mmseg_configurer.py @@ -195,7 +195,6 @@ def test_configure_fp16(self): model_cfg.optimizer_config.type = "SAMOptimizerHook" self.configurer.configure_fp16(model_cfg) assert model_cfg.optimizer_config.type == "Fp16SAMOptimizerHook" - model_cfg.fp16 = {} model_cfg.optimizer_config.type = "DummyOptimizerHook" self.configurer.configure_fp16(model_cfg) From 18904e70ec61efe2e27dde8c83c03d5ae27bec3b Mon Sep 17 00:00:00 2001 From: kprokofi Date: Thu, 7 Mar 2024 02:10:33 +0900 Subject: [PATCH 26/39] refactor as class --- .../adapters/mmcls/api/test_train.py | 110 +++++++++--------- .../adapters/mmdet/api/test_train.py | 110 +++++++++--------- .../adapters/mmseg/api/test_train.py | 110 +++++++++--------- 3 files changed, 168 insertions(+), 162 deletions(-) diff --git a/tests/unit/algorithms/classification/adapters/mmcls/api/test_train.py b/tests/unit/algorithms/classification/adapters/mmcls/api/test_train.py index 2d3adf3d606..489a9fa3f15 100644 --- a/tests/unit/algorithms/classification/adapters/mmcls/api/test_train.py +++ b/tests/unit/algorithms/classification/adapters/mmcls/api/test_train.py @@ -2,68 +2,70 @@ from unittest import mock from otx.algorithms.classification.adapters.mmcls.apis.train import train_model import mmcv +import torch from otx.algorithms.common.utils.utils import is_xpu_available -@pytest.fixture -def mock_modules(mocker): - mocker.patch('otx.algorithms.classification.adapters.mmcls.apis.train.build_dataloader', return_value=mock.MagicMock()) - mocker.patch('otx.algorithms.classification.adapters.mmcls.apis.train.get_root_logger', return_value=mock.MagicMock()) - mocker.patch('otx.algorithms.classification.adapters.mmcls.apis.train.build_dataloader', return_value=mock.MagicMock()) - mocker.patch('otx.algorithms.classification.adapters.mmcls.apis.train.wrap_distributed_model', return_value=mock.MagicMock()) - mocker.patch('otx.algorithms.classification.adapters.mmcls.apis.train.wrap_non_distributed_model', return_value=mock.MagicMock()) - mocker.patch('otx.algorithms.classification.adapters.mmcls.apis.train.build_optimizer', return_value=mock.MagicMock()) - mocker.patch('otx.algorithms.classification.adapters.mmcls.apis.train.build_runner', return_value=mock.MagicMock()) - mocker.patch('otx.algorithms.classification.adapters.mmcls.apis.train.build_dataset', return_value=mock.MagicMock()) - mocker.patch('otx.algorithms.classification.adapters.mmcls.apis.train.build_dataloader', return_value=mock.MagicMock()) - mocker.patch('otx.algorithms.classification.adapters.mmcls.apis.train.build_dataloader', return_value=mock.MagicMock()) - mocker.patch('otx.algorithms.classification.adapters.mmcls.apis.train.build_dataloader', return_value=mock.MagicMock()) - mocker.patch('otx.algorithms.classification.adapters.mmcls.apis.train.DistEvalHook', return_value=mock.MagicMock()) - mocker.patch('otx.algorithms.classification.adapters.mmcls.apis.train.EvalHook', return_value=mock.MagicMock()) -@pytest.fixture -def mmcv_cfg(): - return mmcv.Config({"gpu_ids" : [0], "seed": 42, "data": mock.MagicMock(), "device": "cuda", "optimizer": "SGD", "optimizer_config": {}, - "total_epochs": 1, "work_dir": "test", "lr_config": {}, "checkpoint_config": {}, "log_config": {}, "resume_from": False, "load_from": "", - "workflow": ""}) +class TestTrainModel: + 
@pytest.fixture + def mock_modules(self, mocker): + mocker.patch('otx.algorithms.classification.adapters.mmcls.apis.train.build_dataloader', return_value=mock.MagicMock()) + mocker.patch('otx.algorithms.classification.adapters.mmcls.apis.train.get_root_logger', return_value=mock.MagicMock()) + mocker.patch('otx.algorithms.classification.adapters.mmcls.apis.train.build_dataloader', return_value=mock.MagicMock()) + mocker.patch('otx.algorithms.classification.adapters.mmcls.apis.train.wrap_distributed_model', return_value=mock.MagicMock()) + mocker.patch('otx.algorithms.classification.adapters.mmcls.apis.train.wrap_non_distributed_model', return_value=mock.MagicMock()) + mocker.patch('otx.algorithms.classification.adapters.mmcls.apis.train.build_optimizer', return_value=mock.MagicMock()) + mocker.patch('otx.algorithms.classification.adapters.mmcls.apis.train.build_runner', return_value=mock.MagicMock()) + mocker.patch('otx.algorithms.classification.adapters.mmcls.apis.train.build_dataset', return_value=mock.MagicMock()) + mocker.patch('otx.algorithms.classification.adapters.mmcls.apis.train.build_dataloader', return_value=mock.MagicMock()) + mocker.patch('otx.algorithms.classification.adapters.mmcls.apis.train.build_dataloader', return_value=mock.MagicMock()) + mocker.patch('otx.algorithms.classification.adapters.mmcls.apis.train.build_dataloader', return_value=mock.MagicMock()) + mocker.patch('otx.algorithms.classification.adapters.mmcls.apis.train.DistEvalHook', return_value=mock.MagicMock()) + mocker.patch('otx.algorithms.classification.adapters.mmcls.apis.train.EvalHook', return_value=mock.MagicMock()) -@pytest.fixture -def model(): - return mock.MagicMock() + @pytest.fixture + def mmcv_cfg(self): + return mmcv.Config({"gpu_ids" : [0], "seed": 42, "data": mock.MagicMock(), "device": "cpu", "optimizer": "SGD", "optimizer_config": {}, + "total_epochs": 1, "work_dir": "test", "lr_config": {}, "checkpoint_config": {}, "log_config": {}, "resume_from": False, "load_from": "", + "workflow": ""}) -@pytest.fixture -def dataset(): - return mock.MagicMock() + @pytest.fixture + def model(self): + return mock.MagicMock() -def test_train_model_single_dataset_no_validation(mock_modules, mmcv_cfg, model, dataset): - # Create mock inputs - _ = mock_modules - # Call the function - train_model(model, dataset, mmcv_cfg, validate=False) + @pytest.fixture + def dataset(self): + return mock.MagicMock() -def test_train_model_multiple_datasets_distributed_training(mock_modules, mmcv_cfg, model, dataset): - # Create mock inputs - _ = mock_modules - # Call the function - train_model(model, [dataset, dataset], mmcv_cfg, distributed=True, validate=True) + def test_train_model_single_dataset_no_validation(self, mock_modules, mmcv_cfg, model, dataset): + # Create mock inputs + _ = mock_modules + # Call the function + train_model(model, dataset, mmcv_cfg, validate=False) -def test_train_model_specific_timestamp_and_cpu_device(mock_modules, mmcv_cfg, model, dataset): - # Create mock inputs - _ = mock_modules - timestamp = "2024-01-01" - device = "cpu" - mmcv_cfg.device = "cpu" - meta = {"info": "some_info"} - # Call the function - train_model(model, dataset, mmcv_cfg, timestamp=timestamp, device=device, meta=meta) + def test_train_model_multiple_datasets_distributed_training(self, mock_modules, mmcv_cfg, model, dataset): + # Create mock inputs + _ = mock_modules + # Call the function + train_model(model, [dataset, dataset], mmcv_cfg, distributed=True, validate=True) -def test_train_model_xpu_device(mock_modules, 
mmcv_cfg, model, dataset): - # Create mock inputs - if not is_xpu_available(): - pytest.skip("skip test since xpu is not available") + @pytest.mark.skipif(is_xpu_available() or not torch.cuda.is_available(), reason="cuda is not available") + def test_train_model_specific_timestamp_and_cuda_device(self, mock_modules, mmcv_cfg, model, dataset): + # Create mock inputs + _ = mock_modules + timestamp = "2024-01-01" + device = "cuda" + mmcv_cfg.device = "cuda" + meta = {"info": "some_info"} + # Call the function + train_model(model, dataset, mmcv_cfg, timestamp=timestamp, device=device, meta=meta) - _ = mock_modules - device = "xpu" - mmcv_cfg.device = "xpu" + @pytest.mark.skipif(not is_xpu_available(), reason="xpu is not available") + def test_train_model_xpu_device(self, mock_modules, mmcv_cfg, model, dataset): + # Create mock inputs + _ = mock_modules + device = "xpu" + mmcv_cfg.device = "xpu" - # Call the function - train_model(model, dataset, mmcv_cfg, device=device) + # Call the function + train_model(model, dataset, mmcv_cfg, device=device) diff --git a/tests/unit/algorithms/detection/adapters/mmdet/api/test_train.py b/tests/unit/algorithms/detection/adapters/mmdet/api/test_train.py index ae85053ec23..e6a516fa61d 100644 --- a/tests/unit/algorithms/detection/adapters/mmdet/api/test_train.py +++ b/tests/unit/algorithms/detection/adapters/mmdet/api/test_train.py @@ -3,68 +3,70 @@ from otx.algorithms.detection.adapters.mmdet.apis.train import train_detector import mmcv import os +import torch from otx.algorithms.common.utils.utils import is_xpu_available -@pytest.fixture -def mock_modules(mocker): - mocker.patch('otx.algorithms.detection.adapters.mmdet.apis.train.build_dataloader', return_value=mock.MagicMock()) - mocker.patch('otx.algorithms.detection.adapters.mmdet.apis.train.get_root_logger', return_value=mock.MagicMock()) - mocker.patch('otx.algorithms.detection.adapters.mmdet.apis.train.build_dataloader', return_value=mock.MagicMock()) - mocker.patch('otx.algorithms.detection.adapters.mmdet.apis.train.build_dp', return_value=mock.MagicMock()) - mocker.patch('otx.algorithms.detection.adapters.mmdet.apis.train.build_ddp', return_value=mock.MagicMock()) - mocker.patch('otx.algorithms.detection.adapters.mmdet.apis.train.build_optimizer', return_value=mock.MagicMock()) - mocker.patch('otx.algorithms.detection.adapters.mmdet.apis.train.build_runner', return_value=mock.MagicMock()) - mocker.patch('otx.algorithms.detection.adapters.mmdet.apis.train.build_dataset', return_value=mock.MagicMock()) - mocker.patch('otx.algorithms.detection.adapters.mmdet.apis.train.build_dataloader', return_value=mock.MagicMock()) - mocker.patch('otx.algorithms.detection.adapters.mmdet.apis.train.build_dataloader', return_value=mock.MagicMock()) - mocker.patch('otx.algorithms.detection.adapters.mmdet.apis.train.build_dataloader', return_value=mock.MagicMock()) - mocker.patch('otx.algorithms.detection.adapters.mmdet.apis.train.DistEvalHook', return_value=mock.MagicMock()) - mocker.patch('otx.algorithms.detection.adapters.mmdet.apis.train.EvalHook', return_value=mock.MagicMock()) -@pytest.fixture -def mmcv_cfg(): - return mmcv.Config({"gpu_ids" : [0], "seed": 42, "data": mock.MagicMock(), "device": "cuda", "optimizer": "Adam", "optimizer_config": {}, - "total_epochs": 1, "work_dir": "test", "lr_config": {}, "checkpoint_config": {}, "log_config": {}, "resume_from": False, "load_from": "", - "workflow": "", "log_level": 1, "total_iters": 1000}) +class TestTrainDetector: + @pytest.fixture + def mock_modules(self, mocker): 
+ mocker.patch('otx.algorithms.detection.adapters.mmdet.apis.train.build_dataloader', return_value=mock.MagicMock()) + mocker.patch('otx.algorithms.detection.adapters.mmdet.apis.train.get_root_logger', return_value=mock.MagicMock()) + mocker.patch('otx.algorithms.detection.adapters.mmdet.apis.train.build_dataloader', return_value=mock.MagicMock()) + mocker.patch('otx.algorithms.detection.adapters.mmdet.apis.train.build_dp', return_value=mock.MagicMock()) + mocker.patch('otx.algorithms.detection.adapters.mmdet.apis.train.build_ddp', return_value=mock.MagicMock()) + mocker.patch('otx.algorithms.detection.adapters.mmdet.apis.train.build_optimizer', return_value=mock.MagicMock()) + mocker.patch('otx.algorithms.detection.adapters.mmdet.apis.train.build_runner', return_value=mock.MagicMock()) + mocker.patch('otx.algorithms.detection.adapters.mmdet.apis.train.build_dataset', return_value=mock.MagicMock()) + mocker.patch('otx.algorithms.detection.adapters.mmdet.apis.train.build_dataloader', return_value=mock.MagicMock()) + mocker.patch('otx.algorithms.detection.adapters.mmdet.apis.train.build_dataloader', return_value=mock.MagicMock()) + mocker.patch('otx.algorithms.detection.adapters.mmdet.apis.train.build_dataloader', return_value=mock.MagicMock()) + mocker.patch('otx.algorithms.detection.adapters.mmdet.apis.train.DistEvalHook', return_value=mock.MagicMock()) + mocker.patch('otx.algorithms.detection.adapters.mmdet.apis.train.EvalHook', return_value=mock.MagicMock()) -@pytest.fixture -def model(): - return mock.MagicMock() + @pytest.fixture + def mmcv_cfg(self): + return mmcv.Config({"gpu_ids" : [0], "seed": 42, "data": mock.MagicMock(), "device": "cpu", "optimizer": "Adam", "optimizer_config": {}, + "total_epochs": 1, "work_dir": "test", "lr_config": {}, "checkpoint_config": {}, "log_config": {}, "resume_from": False, "load_from": "", + "workflow": "", "log_level": 1, "total_iters": 1000}) -@pytest.fixture -def dataset(): - return mock.MagicMock() + @pytest.fixture + def model(self): + return mock.MagicMock() -def test_train_model_single_dataset_no_validation(mock_modules, mmcv_cfg, model, dataset): - # Create mock inputs - _ = mock_modules - # Call the function - train_detector(model, dataset, mmcv_cfg, validate=False) + @pytest.fixture + def dataset(self): + return mock.MagicMock() -def test_train_model_multiple_datasets_distributed_training(mock_modules, mmcv_cfg, model, dataset): - # Create mock inputs - _ = mock_modules - os.environ["LOCAL_RANK"] = "0" - # Call the function - train_detector(model, [dataset, dataset], mmcv_cfg, distributed=True, validate=True) + def test_train_model_single_dataset_no_validation(self, mock_modules, mmcv_cfg, model, dataset): + # Create mock inputs + _ = mock_modules + # Call the function + train_detector(model, dataset, mmcv_cfg, validate=False) -def test_train_model_specific_timestamp_and_cpu_device(mock_modules, mmcv_cfg, model, dataset): - # Create mock inputs - _ = mock_modules - timestamp = "2024-01-01" - mmcv_cfg.device = "cpu" - meta = {"info": "some_info"} - # Call the function - train_detector(model, dataset, mmcv_cfg, timestamp=timestamp, meta=meta) + def test_train_model_multiple_datasets_distributed_training(self, mock_modules, mmcv_cfg, model, dataset): + # Create mock inputs + _ = mock_modules + os.environ["LOCAL_RANK"] = "0" + # Call the function + train_detector(model, [dataset, dataset], mmcv_cfg, distributed=True, validate=True) -def test_train_model_xpu_device(mock_modules, mmcv_cfg, model, dataset): - # Create mock inputs - if not 
is_xpu_available(): - pytest.skip("skip test since xpu is not available") + @pytest.mark.skipif(is_xpu_available() or not torch.cuda.is_available(), reason="cuda is not available") + def test_train_model_specific_timestamp_and_cuda_device(self, mock_modules, mmcv_cfg, model, dataset): + # Create mock inputs + _ = mock_modules + timestamp = "2024-01-01" + mmcv_cfg.device = "cuda" + meta = {"info": "some_info"} + # Call the function + train_detector(model, dataset, mmcv_cfg, timestamp=timestamp, meta=meta) - _ = mock_modules - device = "xpu" - mmcv_cfg.device = "xpu" + @pytest.mark.skipif(not is_xpu_available(), reason="xpu is not available") + def test_train_model_xpu_device(self, mock_modules, mmcv_cfg, model, dataset): + # Create mock inputs + _ = mock_modules + device = "xpu" + mmcv_cfg.device = "xpu" - # Call the function - train_detector(model, dataset, mmcv_cfg, device=device) + # Call the function + train_detector(model, dataset, mmcv_cfg, device=device) diff --git a/tests/unit/algorithms/segmentation/adapters/mmseg/api/test_train.py b/tests/unit/algorithms/segmentation/adapters/mmseg/api/test_train.py index ad5bc2cb647..25767cd33da 100644 --- a/tests/unit/algorithms/segmentation/adapters/mmseg/api/test_train.py +++ b/tests/unit/algorithms/segmentation/adapters/mmseg/api/test_train.py @@ -3,68 +3,70 @@ from otx.algorithms.segmentation.adapters.mmseg.apis.train import train_segmentor import mmcv import os +import torch from otx.algorithms.common.utils.utils import is_xpu_available -@pytest.fixture -def mock_modules(mocker): - mocker.patch('otx.algorithms.segmentation.adapters.mmseg.apis.train.build_dataloader', return_value=mock.MagicMock()) - mocker.patch('otx.algorithms.segmentation.adapters.mmseg.apis.train.get_root_logger', return_value=mock.MagicMock()) - mocker.patch('otx.algorithms.segmentation.adapters.mmseg.apis.train.build_dataloader', return_value=mock.MagicMock()) - mocker.patch('otx.algorithms.segmentation.adapters.mmseg.apis.train.build_dp', return_value=mock.MagicMock()) - mocker.patch('otx.algorithms.segmentation.adapters.mmseg.apis.train.build_ddp', return_value=mock.MagicMock()) - mocker.patch('otx.algorithms.segmentation.adapters.mmseg.apis.train.build_optimizer', return_value=mock.MagicMock()) - mocker.patch('otx.algorithms.segmentation.adapters.mmseg.apis.train.build_runner', return_value=mock.MagicMock()) - mocker.patch('otx.algorithms.segmentation.adapters.mmseg.apis.train.build_dataset', return_value=mock.MagicMock()) - mocker.patch('otx.algorithms.segmentation.adapters.mmseg.apis.train.build_dataloader', return_value=mock.MagicMock()) - mocker.patch('otx.algorithms.segmentation.adapters.mmseg.apis.train.build_dataloader', return_value=mock.MagicMock()) - mocker.patch('otx.algorithms.segmentation.adapters.mmseg.apis.train.build_dataloader', return_value=mock.MagicMock()) - mocker.patch('otx.algorithms.segmentation.adapters.mmseg.apis.train.DistEvalHook', return_value=mock.MagicMock()) - mocker.patch('otx.algorithms.segmentation.adapters.mmseg.apis.train.EvalHook', return_value=mock.MagicMock()) -@pytest.fixture -def mmcv_cfg(): - return mmcv.Config({"gpu_ids" : [0], "seed": 42, "data": mock.MagicMock(), "device": "cuda", "optimizer": "Adam", "optimizer_config": {}, - "total_epochs": 1, "work_dir": "test", "lr_config": {}, "checkpoint_config": {}, "log_config": {}, "resume_from": False, "load_from": "", - "workflow": "", "log_level": 1, "total_iters": 1000}) +class TestTrainSegmentor: + @pytest.fixture + def mock_modules(self, mocker): + 
mocker.patch('otx.algorithms.segmentation.adapters.mmseg.apis.train.build_dataloader', return_value=mock.MagicMock()) + mocker.patch('otx.algorithms.segmentation.adapters.mmseg.apis.train.get_root_logger', return_value=mock.MagicMock()) + mocker.patch('otx.algorithms.segmentation.adapters.mmseg.apis.train.build_dataloader', return_value=mock.MagicMock()) + mocker.patch('otx.algorithms.segmentation.adapters.mmseg.apis.train.build_dp', return_value=mock.MagicMock()) + mocker.patch('otx.algorithms.segmentation.adapters.mmseg.apis.train.build_ddp', return_value=mock.MagicMock()) + mocker.patch('otx.algorithms.segmentation.adapters.mmseg.apis.train.build_optimizer', return_value=mock.MagicMock()) + mocker.patch('otx.algorithms.segmentation.adapters.mmseg.apis.train.build_runner', return_value=mock.MagicMock()) + mocker.patch('otx.algorithms.segmentation.adapters.mmseg.apis.train.build_dataset', return_value=mock.MagicMock()) + mocker.patch('otx.algorithms.segmentation.adapters.mmseg.apis.train.build_dataloader', return_value=mock.MagicMock()) + mocker.patch('otx.algorithms.segmentation.adapters.mmseg.apis.train.build_dataloader', return_value=mock.MagicMock()) + mocker.patch('otx.algorithms.segmentation.adapters.mmseg.apis.train.build_dataloader', return_value=mock.MagicMock()) + mocker.patch('otx.algorithms.segmentation.adapters.mmseg.apis.train.DistEvalHook', return_value=mock.MagicMock()) + mocker.patch('otx.algorithms.segmentation.adapters.mmseg.apis.train.EvalHook', return_value=mock.MagicMock()) -@pytest.fixture -def model(): - return mock.MagicMock() + @pytest.fixture + def mmcv_cfg(self): + return mmcv.Config({"gpu_ids" : [0], "seed": 42, "data": mock.MagicMock(), "device": "cpu", "optimizer": "Adam", "optimizer_config": {}, + "total_epochs": 1, "work_dir": "test", "lr_config": {}, "checkpoint_config": {}, "log_config": {}, "resume_from": False, "load_from": "", + "workflow": "", "log_level": 1, "total_iters": 1000}) -@pytest.fixture -def dataset(): - return mock.MagicMock() + @pytest.fixture + def model(self): + return mock.MagicMock() -def test_train_model_single_dataset_no_validation(mock_modules, mmcv_cfg, model, dataset): - # Create mock inputs - _ = mock_modules - # Call the function - train_segmentor(model, dataset, mmcv_cfg, validate=False) + @pytest.fixture + def dataset(self): + return mock.MagicMock() -def test_train_model_multiple_datasets_distributed_training(mock_modules, mmcv_cfg, model, dataset): - # Create mock inputs - _ = mock_modules - os.environ["LOCAL_RANK"] = "0" - # Call the function - train_segmentor(model, [dataset, dataset], mmcv_cfg, distributed=True, validate=True) + def test_train_model_single_dataset_no_validation(self, mock_modules, mmcv_cfg, model, dataset): + # Create mock inputs + _ = mock_modules + # Call the function + train_segmentor(model, dataset, mmcv_cfg, validate=False) -def test_train_model_specific_timestamp_and_cpu_device(mock_modules, mmcv_cfg, model, dataset): - # Create mock inputs - _ = mock_modules - timestamp = "2024-01-01" - mmcv_cfg.device = "cpu" - meta = {"info": "some_info"} - # Call the function - train_segmentor(model, dataset, mmcv_cfg, timestamp=timestamp, meta=meta) + def test_train_model_multiple_datasets_distributed_training(self, mock_modules, mmcv_cfg, model, dataset): + # Create mock inputs + _ = mock_modules + os.environ["LOCAL_RANK"] = "0" + # Call the function + train_segmentor(model, [dataset, dataset], mmcv_cfg, distributed=True, validate=True) -def test_train_model_xpu_device(mock_modules, mmcv_cfg, model, 
dataset): - # Create mock inputs - if not is_xpu_available(): - pytest.skip("skip test since xpu is not available") + @pytest.mark.skipif(is_xpu_available() or not torch.cuda.is_available(), reason="cuda is not available") + def test_train_model_specific_timestamp_and_cuda_device(self, mock_modules, mmcv_cfg, model, dataset): + # Create mock inputs + _ = mock_modules + timestamp = "2024-01-01" + mmcv_cfg.device = "cuda" + meta = {"info": "some_info"} + # Call the function + train_segmentor(model, dataset, mmcv_cfg, timestamp=timestamp, meta=meta) - _ = mock_modules - device = "xpu" - mmcv_cfg.device = "xpu" + @pytest.mark.skipif(not is_xpu_available(), reason="xpu is not available") + def test_train_model_xpu_device(self, mock_modules, mmcv_cfg, model, dataset): + # Create mock inputs + _ = mock_modules + device = "xpu" + mmcv_cfg.device = "xpu" - # Call the function - train_segmentor(model, dataset, mmcv_cfg, device=device) + # Call the function + train_segmentor(model, dataset, mmcv_cfg, device=device) From b913cb1d37fdc8f8fdba5045f2a103d6355d006d Mon Sep 17 00:00:00 2001 From: kprokofi Date: Thu, 7 Mar 2024 02:17:43 +0900 Subject: [PATCH 27/39] minor fix --- .../unit/algorithms/detection/adapters/mmdet/api/test_train.py | 3 +-- .../algorithms/segmentation/adapters/mmseg/api/test_train.py | 3 +-- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/tests/unit/algorithms/detection/adapters/mmdet/api/test_train.py b/tests/unit/algorithms/detection/adapters/mmdet/api/test_train.py index e6a516fa61d..3ef4f0317df 100644 --- a/tests/unit/algorithms/detection/adapters/mmdet/api/test_train.py +++ b/tests/unit/algorithms/detection/adapters/mmdet/api/test_train.py @@ -65,8 +65,7 @@ def test_train_model_specific_timestamp_and_cuda_device(self, mock_modules, mmcv def test_train_model_xpu_device(self, mock_modules, mmcv_cfg, model, dataset): # Create mock inputs _ = mock_modules - device = "xpu" mmcv_cfg.device = "xpu" # Call the function - train_detector(model, dataset, mmcv_cfg, device=device) + train_detector(model, dataset, mmcv_cfg) diff --git a/tests/unit/algorithms/segmentation/adapters/mmseg/api/test_train.py b/tests/unit/algorithms/segmentation/adapters/mmseg/api/test_train.py index 25767cd33da..318f7f7dd8a 100644 --- a/tests/unit/algorithms/segmentation/adapters/mmseg/api/test_train.py +++ b/tests/unit/algorithms/segmentation/adapters/mmseg/api/test_train.py @@ -65,8 +65,7 @@ def test_train_model_specific_timestamp_and_cuda_device(self, mock_modules, mmcv def test_train_model_xpu_device(self, mock_modules, mmcv_cfg, model, dataset): # Create mock inputs _ = mock_modules - device = "xpu" mmcv_cfg.device = "xpu" # Call the function - train_segmentor(model, dataset, mmcv_cfg, device=device) + train_segmentor(model, dataset, mmcv_cfg) From 78a0d874a47cf860a61173c6b5dbecda10c8a62c Mon Sep 17 00:00:00 2001 From: kprokofi Date: Thu, 7 Mar 2024 06:41:27 +0900 Subject: [PATCH 28/39] fix common tests --- .../mmcv/hooks/test_xpu_optimizer_hook.py | 7 ++----- .../adapters/mmcv/utils/test_fp16_utils.py | 18 ++++++------------ .../adapters/torch/amp/test_xpu_grad_scaler.py | 5 +++-- .../detection/adapters/mmdet/api/test_train.py | 1 - 4 files changed, 11 insertions(+), 20 deletions(-) diff --git a/tests/unit/algorithms/common/adapters/mmcv/hooks/test_xpu_optimizer_hook.py b/tests/unit/algorithms/common/adapters/mmcv/hooks/test_xpu_optimizer_hook.py index 0516deb3e62..f976e9b9a68 100644 --- a/tests/unit/algorithms/common/adapters/mmcv/hooks/test_xpu_optimizer_hook.py +++ 
b/tests/unit/algorithms/common/adapters/mmcv/hooks/test_xpu_optimizer_hook.py @@ -1,16 +1,13 @@ import pytest from otx.algorithms.common.utils.utils import is_xpu_available - +@pytest.mark.skipif(not is_xpu_available(), reason="XPU is not available") def test_init(): - if not is_xpu_available(): - pytest.skip("XPU is not available") - from otx.algorithms.common.adapters.mmcv.hooks.xpu_optimizer_hook import BFp16XPUOptimizerHook hook = BFp16XPUOptimizerHook(grad_clip=None, coalesce=True, bucket_size_mb=-1, loss_scale=512.0, distributed=True) assert hook._scale_update_param is None # Check scale update param is None assert hook.coalesce is True # Check coalesce is True assert hook.bucket_size_mb == -1 # Check bucket size is -1 - assert hook.loss_scale == 512.0 # Check loss scale is 512.0 + assert hook._init_scale == 512.0 # Check loss scale is 512.0 assert hook.distributed is True # Check distributed is True diff --git a/tests/unit/algorithms/common/adapters/mmcv/utils/test_fp16_utils.py b/tests/unit/algorithms/common/adapters/mmcv/utils/test_fp16_utils.py index d3b73d7b112..a8d659a9855 100644 --- a/tests/unit/algorithms/common/adapters/mmcv/utils/test_fp16_utils.py +++ b/tests/unit/algorithms/common/adapters/mmcv/utils/test_fp16_utils.py @@ -49,7 +49,10 @@ def test_fp16_enabled_true(self, test_module): test_module.set_fp16(enabled = True) test_func = test_module.test_method_fp16 # check fp16 casting - assert test_func(torch.tensor(5.3), torch.tensor(8.3)).dtype == torch.float16 + if not is_xpu_available(): + assert test_func(torch.tensor(5.3), torch.tensor(8.3)).dtype == torch.float16 + else: + assert test_func(torch.tensor(5.3), torch.tensor(8.3)).dtype == torch.bfloat16 def test_out_fp32_true(self, test_module): test_module.set_fp16(enabled = True) @@ -57,15 +60,6 @@ def test_out_fp32_true(self, test_module): # cast back to fp32 assert test_func(torch.tensor(5.3), torch.tensor(8.3)).dtype == torch.float32 - def test_fp16_enabled_xpu(self, test_module): - if not is_xpu_available(): - pytest.skip("XPU is not available") - # setup - test_module.set_fp16(enabled = True) - test_func = test_module.test_method_fp16 - # assertion - assert test_func(torch.tensor(5.3), torch.tensor(8.3)).dtype == torch.bfloat16 - class TestCustomForceFP32: def test_simple_apply(self, test_module): @@ -75,6 +69,7 @@ def test_simple_apply(self, test_module): # no fp16 enabled assert test_func(torch.tensor(5.3), torch.tensor(8.3)).dtype == torch.float32 + @pytest.mark.skipif(is_xpu_available(), reason="cuda is not available") def test_fp16_enabled_true(self, test_module): test_module.set_fp16(enabled = True) test_func = test_module.test_func_force_fp16_to_fp32 @@ -89,9 +84,8 @@ def test_out_fp32_true(self, test_module): # cast back to fp32 assert output_type == torch.float16 + @pytest.mark.skipif(not is_xpu_available(), reason="XPU is not available") def test_fp16_enabled_xpu(self, test_module): - if not is_xpu_available(): - pytest.skip("XPU is not available") # setup test_module.set_fp16(enabled = True) test_func = test_module.test_func_force_fp16_to_fp32 diff --git a/tests/unit/algorithms/common/adapters/torch/amp/test_xpu_grad_scaler.py b/tests/unit/algorithms/common/adapters/torch/amp/test_xpu_grad_scaler.py index 1e1eceea18d..cbd4cde5a00 100644 --- a/tests/unit/algorithms/common/adapters/torch/amp/test_xpu_grad_scaler.py +++ b/tests/unit/algorithms/common/adapters/torch/amp/test_xpu_grad_scaler.py @@ -27,7 +27,7 @@ def test_init(self, grad_scaler): assert grad_scaler._growth_interval == 2000 def test_scale(self, 
grad_scaler): - outputs = torch.tensor([1.0, 2.0, 3.0]) + outputs = torch.tensor([1.0, 2.0, 3.0], device="xpu:0") scaled_outputs = grad_scaler.scale(outputs) assert torch.all(scaled_outputs == outputs) @@ -35,4 +35,5 @@ def test_unscale_grads(self, grad_scaler, optimizer): inv_scale = 1.0 found_inf = False output = grad_scaler._unscale_grads_(optimizer, inv_scale, found_inf, allow_bf16=False) - print(output) + assert isinstance(output, dict) + assert not output diff --git a/tests/unit/algorithms/detection/adapters/mmdet/api/test_train.py b/tests/unit/algorithms/detection/adapters/mmdet/api/test_train.py index 3ef4f0317df..019c07552bf 100644 --- a/tests/unit/algorithms/detection/adapters/mmdet/api/test_train.py +++ b/tests/unit/algorithms/detection/adapters/mmdet/api/test_train.py @@ -66,6 +66,5 @@ def test_train_model_xpu_device(self, mock_modules, mmcv_cfg, model, dataset): # Create mock inputs _ = mock_modules mmcv_cfg.device = "xpu" - # Call the function train_detector(model, dataset, mmcv_cfg) From 7f6862c68c476122a1fa4cabbff4990d4e1f9aa2 Mon Sep 17 00:00:00 2001 From: kprokofi Date: Thu, 7 Mar 2024 06:51:18 +0900 Subject: [PATCH 29/39] fix common for XPU --- .../common/adapters/mmcv/hooks/test_xpu_optimizer_hook.py | 2 +- .../common/adapters/torch/amp/test_xpu_grad_scaler.py | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/tests/unit/algorithms/common/adapters/mmcv/hooks/test_xpu_optimizer_hook.py b/tests/unit/algorithms/common/adapters/mmcv/hooks/test_xpu_optimizer_hook.py index f976e9b9a68..4ce482fcb79 100644 --- a/tests/unit/algorithms/common/adapters/mmcv/hooks/test_xpu_optimizer_hook.py +++ b/tests/unit/algorithms/common/adapters/mmcv/hooks/test_xpu_optimizer_hook.py @@ -6,8 +6,8 @@ def test_init(): from otx.algorithms.common.adapters.mmcv.hooks.xpu_optimizer_hook import BFp16XPUOptimizerHook hook = BFp16XPUOptimizerHook(grad_clip=None, coalesce=True, bucket_size_mb=-1, loss_scale=512.0, distributed=True) - assert hook._scale_update_param is None # Check scale update param is None assert hook.coalesce is True # Check coalesce is True assert hook.bucket_size_mb == -1 # Check bucket size is -1 + assert hook._scale_update_param is 512.0 # Check scale update param is 512.0 assert hook._init_scale == 512.0 # Check loss scale is 512.0 assert hook.distributed is True # Check distributed is True diff --git a/tests/unit/algorithms/common/adapters/torch/amp/test_xpu_grad_scaler.py b/tests/unit/algorithms/common/adapters/torch/amp/test_xpu_grad_scaler.py index cbd4cde5a00..abd7c5d4642 100644 --- a/tests/unit/algorithms/common/adapters/torch/amp/test_xpu_grad_scaler.py +++ b/tests/unit/algorithms/common/adapters/torch/amp/test_xpu_grad_scaler.py @@ -29,7 +29,8 @@ def test_init(self, grad_scaler): def test_scale(self, grad_scaler): outputs = torch.tensor([1.0, 2.0, 3.0], device="xpu:0") scaled_outputs = grad_scaler.scale(outputs) - assert torch.all(scaled_outputs == outputs) + assert scaled_outputs.device.type == "xpu" + assert scaled_outputs == outputs * grad_scaler._scale def test_unscale_grads(self, grad_scaler, optimizer): inv_scale = 1.0 From ce21edb35ef7fb3055a4e58dbb6e2de5e84a872b Mon Sep 17 00:00:00 2001 From: kprokofi Date: Thu, 7 Mar 2024 06:55:29 +0900 Subject: [PATCH 30/39] minor --- .../common/adapters/mmcv/hooks/test_xpu_optimizer_hook.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/unit/algorithms/common/adapters/mmcv/hooks/test_xpu_optimizer_hook.py b/tests/unit/algorithms/common/adapters/mmcv/hooks/test_xpu_optimizer_hook.py 
b/tests/unit/algorithms/common/adapters/mmcv/hooks/test_xpu_optimizer_hook.py
index 4ce482fcb79..0857007def7 100644
--- a/tests/unit/algorithms/common/adapters/mmcv/hooks/test_xpu_optimizer_hook.py
+++ b/tests/unit/algorithms/common/adapters/mmcv/hooks/test_xpu_optimizer_hook.py
@@ -4,10 +4,11 @@
 @pytest.mark.skipif(not is_xpu_available(), reason="XPU is not available")
 def test_init():
     from otx.algorithms.common.adapters.mmcv.hooks.xpu_optimizer_hook import BFp16XPUOptimizerHook
+    from otx.algorithms.common.adapters.torch.amp import XPUGradScaler
 
     hook = BFp16XPUOptimizerHook(grad_clip=None, coalesce=True, bucket_size_mb=-1, loss_scale=512.0, distributed=True)
     assert hook.coalesce is True  # Check coalesce is True
     assert hook.bucket_size_mb == -1  # Check bucket size is -1
     assert hook._scale_update_param is 512.0  # Check scale update param is 512.0
-    assert hook._init_scale == 512.0  # Check loss scale is 512.0
     assert hook.distributed is True  # Check distributed is True
+    assert isinstance(hook.loss_scaler, XPUGradScaler)

From c36c259ba8f9257d243aeae4aa4fe5491d0f50bd Mon Sep 17 00:00:00 2001
From: kprokofi
Date: Thu, 7 Mar 2024 07:06:38 +0900
Subject: [PATCH 31/39] fix anomaly tests

---
 .../anomaly/adapters/anomalib/strategies/test_xpu_single.py | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/tests/unit/algorithms/anomaly/adapters/anomalib/strategies/test_xpu_single.py b/tests/unit/algorithms/anomaly/adapters/anomalib/strategies/test_xpu_single.py
index 3d11390a308..b076c08b5c8 100644
--- a/tests/unit/algorithms/anomaly/adapters/anomalib/strategies/test_xpu_single.py
+++ b/tests/unit/algorithms/anomaly/adapters/anomalib/strategies/test_xpu_single.py
@@ -9,7 +9,7 @@ class TestSingleXPUStrategy:
     def test_init(self):
         strategy = SingleXPUStrategy(device="xpu:0")
-        assert strategy.device == "xpu:0"
+        assert strategy._root_device == "xpu:0"
         assert strategy.accelerator is None
 
     def test_is_distributed(self):
@@ -20,8 +20,9 @@ def test_setup_optimizers(self):
         strategy = SingleXPUStrategy(device="xpu:0")
         trainer = pl.Trainer()
         # Create mock optimizers and models for testing
-        optimizer = torch.optim.Adam(strategy.model.parameters(), lr=0.001)
         model = torch.nn.Linear(10, 2)
+        strategy._optimizers = [torch.optim.Adam(model.parameters(), lr=0.001)]
+        strategy._model = model
         trainer.model = model
         strategy.setup_optimizers(trainer)
         assert len(strategy.optimizers) == 1

From 39f42161a82bc59ce97e40df64897c919fb2b27a Mon Sep 17 00:00:00 2001
From: Emily
Date: Wed, 6 Mar 2024 22:58:22 +0000
Subject: [PATCH 32/39] fix device type

---
 .../anomaly/adapters/anomalib/strategies/test_xpu_single.py | 2 +-
 .../common/adapters/torch/amp/test_xpu_grad_scaler.py       | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/tests/unit/algorithms/anomaly/adapters/anomalib/strategies/test_xpu_single.py b/tests/unit/algorithms/anomaly/adapters/anomalib/strategies/test_xpu_single.py
index b076c08b5c8..9a261c22e35 100644
--- a/tests/unit/algorithms/anomaly/adapters/anomalib/strategies/test_xpu_single.py
+++ b/tests/unit/algorithms/anomaly/adapters/anomalib/strategies/test_xpu_single.py
@@ -9,7 +9,7 @@ class TestSingleXPUStrategy:
     def test_init(self):
         strategy = SingleXPUStrategy(device="xpu:0")
-        assert strategy._root_device == "xpu:0"
+        assert strategy._root_device.type == "xpu"
         assert strategy.accelerator is None
 
     def test_is_distributed(self):
diff --git a/tests/unit/algorithms/common/adapters/torch/amp/test_xpu_grad_scaler.py b/tests/unit/algorithms/common/adapters/torch/amp/test_xpu_grad_scaler.py
index abd7c5d4642..6e453a0982b 100644
---
a/tests/unit/algorithms/common/adapters/torch/amp/test_xpu_grad_scaler.py +++ b/tests/unit/algorithms/common/adapters/torch/amp/test_xpu_grad_scaler.py @@ -30,7 +30,7 @@ def test_scale(self, grad_scaler): outputs = torch.tensor([1.0, 2.0, 3.0], device="xpu:0") scaled_outputs = grad_scaler.scale(outputs) assert scaled_outputs.device.type == "xpu" - assert scaled_outputs == outputs * grad_scaler._scale + assert torch.equal(scaled_outputs, outputs * grad_scaler._scale) def test_unscale_grads(self, grad_scaler, optimizer): inv_scale = 1.0 From 21225d07f98b04d4bec589a0c300cb680893fbaf Mon Sep 17 00:00:00 2001 From: kprokofi Date: Thu, 7 Mar 2024 07:36:58 +0900 Subject: [PATCH 33/39] add headers --- .../anomalib/strategies/xpu_single.py | 3 +- .../anomalib/accelerators/__init__.py | 5 ++ .../adapters/anomalib/accelerators/xpu.py | 6 ++ .../adapters/anomalib/plugins/__init__.py | 5 ++ .../anomalib/plugins/xpu_precision.py | 14 +++- .../adapters/anomalib/strategies/__init__.py | 5 ++ .../anomalib/strategies/test_xpu_single.py | 5 ++ .../adapters/mmcls/api/__init__.py | 5 ++ .../adapters/mmcls/api/test_train.py | 79 +++++++++++++++---- .../mmcv/hooks/test_xpu_optimizer_hook.py | 6 ++ .../common/adapters/mmcv/test_configurer.py | 6 ++ .../adapters/mmcv/utils/test_fp16_utils.py | 16 ++-- .../torch/amp/test_xpu_grad_scaler.py | 1 + .../detection/adapters/mmdet/api/__init__.py | 5 ++ .../adapters/mmdet/api/test_train.py | 69 ++++++++++++---- .../adapters/mmseg/api/__init__.py | 5 ++ .../adapters/mmseg/api/test_train.py | 75 ++++++++++++++---- tools/experiment.py | 3 +- 18 files changed, 255 insertions(+), 58 deletions(-) create mode 100644 tests/unit/algorithms/anomaly/adapters/anomalib/accelerators/__init__.py create mode 100644 tests/unit/algorithms/anomaly/adapters/anomalib/plugins/__init__.py create mode 100644 tests/unit/algorithms/anomaly/adapters/anomalib/strategies/__init__.py create mode 100644 tests/unit/algorithms/classification/adapters/mmcls/api/__init__.py create mode 100644 tests/unit/algorithms/detection/adapters/mmdet/api/__init__.py create mode 100644 tests/unit/algorithms/segmentation/adapters/mmseg/api/__init__.py diff --git a/src/otx/algorithms/anomaly/adapters/anomalib/strategies/xpu_single.py b/src/otx/algorithms/anomaly/adapters/anomalib/strategies/xpu_single.py index e211d3d2f42..3c00aeb6747 100644 --- a/src/otx/algorithms/anomaly/adapters/anomalib/strategies/xpu_single.py +++ b/src/otx/algorithms/anomaly/adapters/anomalib/strategies/xpu_single.py @@ -1,4 +1,5 @@ -"""Lightning strategy for single XPU devic.""" +"""Lightning strategy for single XPU device.""" + # Copyright (C) 2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # diff --git a/tests/unit/algorithms/anomaly/adapters/anomalib/accelerators/__init__.py b/tests/unit/algorithms/anomaly/adapters/anomalib/accelerators/__init__.py new file mode 100644 index 00000000000..3b508890f94 --- /dev/null +++ b/tests/unit/algorithms/anomaly/adapters/anomalib/accelerators/__init__.py @@ -0,0 +1,5 @@ +"""Test for otx.algorithms.anomaly.adapters.anomalib.accelerators""" + +# Copyright (C) 2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 +# diff --git a/tests/unit/algorithms/anomaly/adapters/anomalib/accelerators/xpu.py b/tests/unit/algorithms/anomaly/adapters/anomalib/accelerators/xpu.py index 9e507582528..4b89d101661 100644 --- a/tests/unit/algorithms/anomaly/adapters/anomalib/accelerators/xpu.py +++ b/tests/unit/algorithms/anomaly/adapters/anomalib/accelerators/xpu.py @@ -1,3 +1,9 @@ +"""Test for 
otx.algorithms.anomaly.adapters.anomalib.accelerators.xpu""" + +# Copyright (C) 2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 +# + import pytest import torch from otx.algorithms.anomaly.adapters.anomalib.accelerators import XPUAccelerator diff --git a/tests/unit/algorithms/anomaly/adapters/anomalib/plugins/__init__.py b/tests/unit/algorithms/anomaly/adapters/anomalib/plugins/__init__.py new file mode 100644 index 00000000000..e715eaf7d23 --- /dev/null +++ b/tests/unit/algorithms/anomaly/adapters/anomalib/plugins/__init__.py @@ -0,0 +1,5 @@ +"""Test for otx.algorithms.anomaly.adapters.anomalib.plugins""" + +# Copyright (C) 2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 +# diff --git a/tests/unit/algorithms/anomaly/adapters/anomalib/plugins/xpu_precision.py b/tests/unit/algorithms/anomaly/adapters/anomalib/plugins/xpu_precision.py index 8ca6b5b62d8..9671b4fec94 100644 --- a/tests/unit/algorithms/anomaly/adapters/anomalib/plugins/xpu_precision.py +++ b/tests/unit/algorithms/anomaly/adapters/anomalib/plugins/xpu_precision.py @@ -1,3 +1,9 @@ +"""Test for otx.algorithms.anomaly.adapters.anomalib.plugins.xpu_precision""" + +# Copyright (C) 2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 +# + import pytest import torch from torch.optim import Optimizer @@ -24,7 +30,9 @@ def test_optimizer_step_no_scaler(self, plugin, mocker): optimizer_idx = 0 closure = mocker.MagicMock() kwargs = {} - mock_optimizer_step = mocker.patch("otx.algorithms.anomaly.adapters.anomalib.plugins.xpu_precision.PrecisionPlugin.optimizer_step") + mock_optimizer_step = mocker.patch( + "otx.algorithms.anomaly.adapters.anomalib.plugins.xpu_precision.PrecisionPlugin.optimizer_step" + ) out = plugin.optimizer_step(optimizer, model, optimizer_idx, closure, **kwargs) assert isinstance(out, mocker.MagicMock) mock_optimizer_step.assert_called_once() @@ -43,6 +51,8 @@ def test_clip_gradients(self, plugin, mocker): optimizer = mocker.MagicMock(Optimizer) clip_val = 0.1 gradient_clip_algorithm = "norm" - mock_clip_gradients = mocker.patch("otx.algorithms.anomaly.adapters.anomalib.plugins.xpu_precision.PrecisionPlugin.clip_gradients") + mock_clip_gradients = mocker.patch( + "otx.algorithms.anomaly.adapters.anomalib.plugins.xpu_precision.PrecisionPlugin.clip_gradients" + ) plugin.clip_gradients(optimizer, clip_val, gradient_clip_algorithm) mock_clip_gradients.assert_called_once() diff --git a/tests/unit/algorithms/anomaly/adapters/anomalib/strategies/__init__.py b/tests/unit/algorithms/anomaly/adapters/anomalib/strategies/__init__.py new file mode 100644 index 00000000000..c718569f812 --- /dev/null +++ b/tests/unit/algorithms/anomaly/adapters/anomalib/strategies/__init__.py @@ -0,0 +1,5 @@ +"""Test for otx.algorithms.anomaly.adapters.anomalib.strategies""" + +# Copyright (C) 2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 +# diff --git a/tests/unit/algorithms/anomaly/adapters/anomalib/strategies/test_xpu_single.py b/tests/unit/algorithms/anomaly/adapters/anomalib/strategies/test_xpu_single.py index 9a261c22e35..97ff1989ed4 100644 --- a/tests/unit/algorithms/anomaly/adapters/anomalib/strategies/test_xpu_single.py +++ b/tests/unit/algorithms/anomaly/adapters/anomalib/strategies/test_xpu_single.py @@ -1,3 +1,8 @@ +"""Tests the XPU strategy.""" + +# Copyright (C) 2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + import pytest import torch import pytorch_lightning as pl diff --git a/tests/unit/algorithms/classification/adapters/mmcls/api/__init__.py 
b/tests/unit/algorithms/classification/adapters/mmcls/api/__init__.py new file mode 100644 index 00000000000..8d0d86fe233 --- /dev/null +++ b/tests/unit/algorithms/classification/adapters/mmcls/api/__init__.py @@ -0,0 +1,5 @@ +"""Test for otx.algorithms.classification.adapters.mmcls.api""" + +# Copyright (C) 2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 +# diff --git a/tests/unit/algorithms/classification/adapters/mmcls/api/test_train.py b/tests/unit/algorithms/classification/adapters/mmcls/api/test_train.py index 489a9fa3f15..60aa92eef84 100644 --- a/tests/unit/algorithms/classification/adapters/mmcls/api/test_train.py +++ b/tests/unit/algorithms/classification/adapters/mmcls/api/test_train.py @@ -1,3 +1,9 @@ +"""Test for otx.algorithms.classification.adapters.mmcls.apis.train""" + +# Copyright (C) 2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 +# + import pytest from unittest import mock from otx.algorithms.classification.adapters.mmcls.apis.train import train_model @@ -9,25 +15,66 @@ class TestTrainModel: @pytest.fixture def mock_modules(self, mocker): - mocker.patch('otx.algorithms.classification.adapters.mmcls.apis.train.build_dataloader', return_value=mock.MagicMock()) - mocker.patch('otx.algorithms.classification.adapters.mmcls.apis.train.get_root_logger', return_value=mock.MagicMock()) - mocker.patch('otx.algorithms.classification.adapters.mmcls.apis.train.build_dataloader', return_value=mock.MagicMock()) - mocker.patch('otx.algorithms.classification.adapters.mmcls.apis.train.wrap_distributed_model', return_value=mock.MagicMock()) - mocker.patch('otx.algorithms.classification.adapters.mmcls.apis.train.wrap_non_distributed_model', return_value=mock.MagicMock()) - mocker.patch('otx.algorithms.classification.adapters.mmcls.apis.train.build_optimizer', return_value=mock.MagicMock()) - mocker.patch('otx.algorithms.classification.adapters.mmcls.apis.train.build_runner', return_value=mock.MagicMock()) - mocker.patch('otx.algorithms.classification.adapters.mmcls.apis.train.build_dataset', return_value=mock.MagicMock()) - mocker.patch('otx.algorithms.classification.adapters.mmcls.apis.train.build_dataloader', return_value=mock.MagicMock()) - mocker.patch('otx.algorithms.classification.adapters.mmcls.apis.train.build_dataloader', return_value=mock.MagicMock()) - mocker.patch('otx.algorithms.classification.adapters.mmcls.apis.train.build_dataloader', return_value=mock.MagicMock()) - mocker.patch('otx.algorithms.classification.adapters.mmcls.apis.train.DistEvalHook', return_value=mock.MagicMock()) - mocker.patch('otx.algorithms.classification.adapters.mmcls.apis.train.EvalHook', return_value=mock.MagicMock()) + mocker.patch( + "otx.algorithms.classification.adapters.mmcls.apis.train.build_dataloader", return_value=mock.MagicMock() + ) + mocker.patch( + "otx.algorithms.classification.adapters.mmcls.apis.train.get_root_logger", return_value=mock.MagicMock() + ) + mocker.patch( + "otx.algorithms.classification.adapters.mmcls.apis.train.build_dataloader", return_value=mock.MagicMock() + ) + mocker.patch( + "otx.algorithms.classification.adapters.mmcls.apis.train.wrap_distributed_model", + return_value=mock.MagicMock(), + ) + mocker.patch( + "otx.algorithms.classification.adapters.mmcls.apis.train.wrap_non_distributed_model", + return_value=mock.MagicMock(), + ) + mocker.patch( + "otx.algorithms.classification.adapters.mmcls.apis.train.build_optimizer", return_value=mock.MagicMock() + ) + mocker.patch( + 
"otx.algorithms.classification.adapters.mmcls.apis.train.build_runner", return_value=mock.MagicMock() + ) + mocker.patch( + "otx.algorithms.classification.adapters.mmcls.apis.train.build_dataset", return_value=mock.MagicMock() + ) + mocker.patch( + "otx.algorithms.classification.adapters.mmcls.apis.train.build_dataloader", return_value=mock.MagicMock() + ) + mocker.patch( + "otx.algorithms.classification.adapters.mmcls.apis.train.build_dataloader", return_value=mock.MagicMock() + ) + mocker.patch( + "otx.algorithms.classification.adapters.mmcls.apis.train.build_dataloader", return_value=mock.MagicMock() + ) + mocker.patch( + "otx.algorithms.classification.adapters.mmcls.apis.train.DistEvalHook", return_value=mock.MagicMock() + ) + mocker.patch("otx.algorithms.classification.adapters.mmcls.apis.train.EvalHook", return_value=mock.MagicMock()) @pytest.fixture def mmcv_cfg(self): - return mmcv.Config({"gpu_ids" : [0], "seed": 42, "data": mock.MagicMock(), "device": "cpu", "optimizer": "SGD", "optimizer_config": {}, - "total_epochs": 1, "work_dir": "test", "lr_config": {}, "checkpoint_config": {}, "log_config": {}, "resume_from": False, "load_from": "", - "workflow": ""}) + return mmcv.Config( + { + "gpu_ids": [0], + "seed": 42, + "data": mock.MagicMock(), + "device": "cpu", + "optimizer": "SGD", + "optimizer_config": {}, + "total_epochs": 1, + "work_dir": "test", + "lr_config": {}, + "checkpoint_config": {}, + "log_config": {}, + "resume_from": False, + "load_from": "", + "workflow": "", + } + ) @pytest.fixture def model(self): diff --git a/tests/unit/algorithms/common/adapters/mmcv/hooks/test_xpu_optimizer_hook.py b/tests/unit/algorithms/common/adapters/mmcv/hooks/test_xpu_optimizer_hook.py index 0857007def7..dd13f4c6e81 100644 --- a/tests/unit/algorithms/common/adapters/mmcv/hooks/test_xpu_optimizer_hook.py +++ b/tests/unit/algorithms/common/adapters/mmcv/hooks/test_xpu_optimizer_hook.py @@ -1,6 +1,12 @@ +"""Test for XPU optimizer hook""" + +# Copyright (C) 2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + import pytest from otx.algorithms.common.utils.utils import is_xpu_available + @pytest.mark.skipif(not is_xpu_available(), reason="XPU is not available") def test_init(): from otx.algorithms.common.adapters.mmcv.hooks.xpu_optimizer_hook import BFp16XPUOptimizerHook diff --git a/tests/unit/algorithms/common/adapters/mmcv/test_configurer.py b/tests/unit/algorithms/common/adapters/mmcv/test_configurer.py index cbc7a410493..954dc37482c 100644 --- a/tests/unit/algorithms/common/adapters/mmcv/test_configurer.py +++ b/tests/unit/algorithms/common/adapters/mmcv/test_configurer.py @@ -1,3 +1,9 @@ +"""Test for otx.algorithms.common.adapters.mmcv.configurer""" + +# Copyright (C) 2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 +# + import pytest from mmcv.utils import Config from otx.algorithms.common.adapters.mmcv import configurer diff --git a/tests/unit/algorithms/common/adapters/mmcv/utils/test_fp16_utils.py b/tests/unit/algorithms/common/adapters/mmcv/utils/test_fp16_utils.py index a8d659a9855..da50590124b 100644 --- a/tests/unit/algorithms/common/adapters/mmcv/utils/test_fp16_utils.py +++ b/tests/unit/algorithms/common/adapters/mmcv/utils/test_fp16_utils.py @@ -1,4 +1,8 @@ -# test_custom_auto_fp16.py +"""Test for otx.algorithms.common.adapters.mmcv.utils.fp16_utils""" + +# Copyright (C) 2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 +# import pytest from unittest.mock import MagicMock, patch @@ -46,7 +50,7 @@ def test_simple_apply(self, test_module): 
assert test_func(torch.tensor(5.3), torch.tensor(8.3)).dtype == torch.float32 def test_fp16_enabled_true(self, test_module): - test_module.set_fp16(enabled = True) + test_module.set_fp16(enabled=True) test_func = test_module.test_method_fp16 # check fp16 casting if not is_xpu_available(): @@ -55,7 +59,7 @@ def test_fp16_enabled_true(self, test_module): assert test_func(torch.tensor(5.3), torch.tensor(8.3)).dtype == torch.bfloat16 def test_out_fp32_true(self, test_module): - test_module.set_fp16(enabled = True) + test_module.set_fp16(enabled=True) test_func = test_module.test_method_force_out_fp32 # cast back to fp32 assert test_func(torch.tensor(5.3), torch.tensor(8.3)).dtype == torch.float32 @@ -71,14 +75,14 @@ def test_simple_apply(self, test_module): @pytest.mark.skipif(is_xpu_available(), reason="cuda is not available") def test_fp16_enabled_true(self, test_module): - test_module.set_fp16(enabled = True) + test_module.set_fp16(enabled=True) test_func = test_module.test_func_force_fp16_to_fp32 output_type = test_func(torch.tensor(5.3, dtype=torch.float16), torch.tensor(8.3, dtype=torch.float16)).dtype # check fp16 casting assert output_type == torch.float32 def test_out_fp32_true(self, test_module): - test_module.set_fp16(enabled = True) + test_module.set_fp16(enabled=True) test_func = test_module.test_func_force_fp32_out_fp16 output_type = test_func(torch.tensor(5.3, dtype=torch.float16), torch.tensor(8.3, dtype=torch.float16)).dtype # cast back to fp32 @@ -87,7 +91,7 @@ def test_out_fp32_true(self, test_module): @pytest.mark.skipif(not is_xpu_available(), reason="XPU is not available") def test_fp16_enabled_xpu(self, test_module): # setup - test_module.set_fp16(enabled = True) + test_module.set_fp16(enabled=True) test_func = test_module.test_func_force_fp16_to_fp32 output_type = test_func(torch.tensor(5.3, dtype=torch.bfloat16), torch.tensor(8.3, dtype=torch.bfloat16)).dtype # assertion diff --git a/tests/unit/algorithms/common/adapters/torch/amp/test_xpu_grad_scaler.py b/tests/unit/algorithms/common/adapters/torch/amp/test_xpu_grad_scaler.py index 6e453a0982b..57e3a1e0ce1 100644 --- a/tests/unit/algorithms/common/adapters/torch/amp/test_xpu_grad_scaler.py +++ b/tests/unit/algorithms/common/adapters/torch/amp/test_xpu_grad_scaler.py @@ -3,6 +3,7 @@ import pytest import torch from otx.algorithms.common.utils import is_xpu_available + if is_xpu_available(): from otx.algorithms.common.adapters.torch.amp.xpu_grad_scaler import XPUGradScaler diff --git a/tests/unit/algorithms/detection/adapters/mmdet/api/__init__.py b/tests/unit/algorithms/detection/adapters/mmdet/api/__init__.py new file mode 100644 index 00000000000..b68d52460fd --- /dev/null +++ b/tests/unit/algorithms/detection/adapters/mmdet/api/__init__.py @@ -0,0 +1,5 @@ +"""Test for otx.algorithms.detection.adapters.mmdet.api""" + +# Copyright (C) 2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 +# diff --git a/tests/unit/algorithms/detection/adapters/mmdet/api/test_train.py b/tests/unit/algorithms/detection/adapters/mmdet/api/test_train.py index 019c07552bf..e3a95e2c52a 100644 --- a/tests/unit/algorithms/detection/adapters/mmdet/api/test_train.py +++ b/tests/unit/algorithms/detection/adapters/mmdet/api/test_train.py @@ -1,3 +1,9 @@ +"""Test for otx.algorithms.detection.adapters.mmdet.apis.train""" + +# Copyright (C) 2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 +# + import pytest from unittest import mock from otx.algorithms.detection.adapters.mmdet.apis.train import train_detector @@ -10,25 +16,56 @@ 
class TestTrainDetector: @pytest.fixture def mock_modules(self, mocker): - mocker.patch('otx.algorithms.detection.adapters.mmdet.apis.train.build_dataloader', return_value=mock.MagicMock()) - mocker.patch('otx.algorithms.detection.adapters.mmdet.apis.train.get_root_logger', return_value=mock.MagicMock()) - mocker.patch('otx.algorithms.detection.adapters.mmdet.apis.train.build_dataloader', return_value=mock.MagicMock()) - mocker.patch('otx.algorithms.detection.adapters.mmdet.apis.train.build_dp', return_value=mock.MagicMock()) - mocker.patch('otx.algorithms.detection.adapters.mmdet.apis.train.build_ddp', return_value=mock.MagicMock()) - mocker.patch('otx.algorithms.detection.adapters.mmdet.apis.train.build_optimizer', return_value=mock.MagicMock()) - mocker.patch('otx.algorithms.detection.adapters.mmdet.apis.train.build_runner', return_value=mock.MagicMock()) - mocker.patch('otx.algorithms.detection.adapters.mmdet.apis.train.build_dataset', return_value=mock.MagicMock()) - mocker.patch('otx.algorithms.detection.adapters.mmdet.apis.train.build_dataloader', return_value=mock.MagicMock()) - mocker.patch('otx.algorithms.detection.adapters.mmdet.apis.train.build_dataloader', return_value=mock.MagicMock()) - mocker.patch('otx.algorithms.detection.adapters.mmdet.apis.train.build_dataloader', return_value=mock.MagicMock()) - mocker.patch('otx.algorithms.detection.adapters.mmdet.apis.train.DistEvalHook', return_value=mock.MagicMock()) - mocker.patch('otx.algorithms.detection.adapters.mmdet.apis.train.EvalHook', return_value=mock.MagicMock()) + mocker.patch( + "otx.algorithms.detection.adapters.mmdet.apis.train.build_dataloader", return_value=mock.MagicMock() + ) + mocker.patch( + "otx.algorithms.detection.adapters.mmdet.apis.train.get_root_logger", return_value=mock.MagicMock() + ) + mocker.patch( + "otx.algorithms.detection.adapters.mmdet.apis.train.build_dataloader", return_value=mock.MagicMock() + ) + mocker.patch("otx.algorithms.detection.adapters.mmdet.apis.train.build_dp", return_value=mock.MagicMock()) + mocker.patch("otx.algorithms.detection.adapters.mmdet.apis.train.build_ddp", return_value=mock.MagicMock()) + mocker.patch( + "otx.algorithms.detection.adapters.mmdet.apis.train.build_optimizer", return_value=mock.MagicMock() + ) + mocker.patch("otx.algorithms.detection.adapters.mmdet.apis.train.build_runner", return_value=mock.MagicMock()) + mocker.patch("otx.algorithms.detection.adapters.mmdet.apis.train.build_dataset", return_value=mock.MagicMock()) + mocker.patch( + "otx.algorithms.detection.adapters.mmdet.apis.train.build_dataloader", return_value=mock.MagicMock() + ) + mocker.patch( + "otx.algorithms.detection.adapters.mmdet.apis.train.build_dataloader", return_value=mock.MagicMock() + ) + mocker.patch( + "otx.algorithms.detection.adapters.mmdet.apis.train.build_dataloader", return_value=mock.MagicMock() + ) + mocker.patch("otx.algorithms.detection.adapters.mmdet.apis.train.DistEvalHook", return_value=mock.MagicMock()) + mocker.patch("otx.algorithms.detection.adapters.mmdet.apis.train.EvalHook", return_value=mock.MagicMock()) @pytest.fixture def mmcv_cfg(self): - return mmcv.Config({"gpu_ids" : [0], "seed": 42, "data": mock.MagicMock(), "device": "cpu", "optimizer": "Adam", "optimizer_config": {}, - "total_epochs": 1, "work_dir": "test", "lr_config": {}, "checkpoint_config": {}, "log_config": {}, "resume_from": False, "load_from": "", - "workflow": "", "log_level": 1, "total_iters": 1000}) + return mmcv.Config( + { + "gpu_ids": [0], + "seed": 42, + "data": mock.MagicMock(), + "device": 
"cpu", + "optimizer": "Adam", + "optimizer_config": {}, + "total_epochs": 1, + "work_dir": "test", + "lr_config": {}, + "checkpoint_config": {}, + "log_config": {}, + "resume_from": False, + "load_from": "", + "workflow": "", + "log_level": 1, + "total_iters": 1000, + } + ) @pytest.fixture def model(self): diff --git a/tests/unit/algorithms/segmentation/adapters/mmseg/api/__init__.py b/tests/unit/algorithms/segmentation/adapters/mmseg/api/__init__.py new file mode 100644 index 00000000000..3189b1b1499 --- /dev/null +++ b/tests/unit/algorithms/segmentation/adapters/mmseg/api/__init__.py @@ -0,0 +1,5 @@ +"""Test for otx.algorithms.segmentation.adapters.mmseg.api""" + +# Copyright (C) 2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 +# diff --git a/tests/unit/algorithms/segmentation/adapters/mmseg/api/test_train.py b/tests/unit/algorithms/segmentation/adapters/mmseg/api/test_train.py index 318f7f7dd8a..99a93fac1f3 100644 --- a/tests/unit/algorithms/segmentation/adapters/mmseg/api/test_train.py +++ b/tests/unit/algorithms/segmentation/adapters/mmseg/api/test_train.py @@ -1,3 +1,9 @@ +"""Test for otx.algorithms.segmentation.adapters.mmseg.api.train""" + +# Copyright (C) 2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 +# + import pytest from unittest import mock from otx.algorithms.segmentation.adapters.mmseg.apis.train import train_segmentor @@ -10,25 +16,62 @@ class TestTrainSegmentor: @pytest.fixture def mock_modules(self, mocker): - mocker.patch('otx.algorithms.segmentation.adapters.mmseg.apis.train.build_dataloader', return_value=mock.MagicMock()) - mocker.patch('otx.algorithms.segmentation.adapters.mmseg.apis.train.get_root_logger', return_value=mock.MagicMock()) - mocker.patch('otx.algorithms.segmentation.adapters.mmseg.apis.train.build_dataloader', return_value=mock.MagicMock()) - mocker.patch('otx.algorithms.segmentation.adapters.mmseg.apis.train.build_dp', return_value=mock.MagicMock()) - mocker.patch('otx.algorithms.segmentation.adapters.mmseg.apis.train.build_ddp', return_value=mock.MagicMock()) - mocker.patch('otx.algorithms.segmentation.adapters.mmseg.apis.train.build_optimizer', return_value=mock.MagicMock()) - mocker.patch('otx.algorithms.segmentation.adapters.mmseg.apis.train.build_runner', return_value=mock.MagicMock()) - mocker.patch('otx.algorithms.segmentation.adapters.mmseg.apis.train.build_dataset', return_value=mock.MagicMock()) - mocker.patch('otx.algorithms.segmentation.adapters.mmseg.apis.train.build_dataloader', return_value=mock.MagicMock()) - mocker.patch('otx.algorithms.segmentation.adapters.mmseg.apis.train.build_dataloader', return_value=mock.MagicMock()) - mocker.patch('otx.algorithms.segmentation.adapters.mmseg.apis.train.build_dataloader', return_value=mock.MagicMock()) - mocker.patch('otx.algorithms.segmentation.adapters.mmseg.apis.train.DistEvalHook', return_value=mock.MagicMock()) - mocker.patch('otx.algorithms.segmentation.adapters.mmseg.apis.train.EvalHook', return_value=mock.MagicMock()) + mocker.patch( + "otx.algorithms.segmentation.adapters.mmseg.apis.train.build_dataloader", return_value=mock.MagicMock() + ) + mocker.patch( + "otx.algorithms.segmentation.adapters.mmseg.apis.train.get_root_logger", return_value=mock.MagicMock() + ) + mocker.patch( + "otx.algorithms.segmentation.adapters.mmseg.apis.train.build_dataloader", return_value=mock.MagicMock() + ) + mocker.patch("otx.algorithms.segmentation.adapters.mmseg.apis.train.build_dp", return_value=mock.MagicMock()) + 
mocker.patch("otx.algorithms.segmentation.adapters.mmseg.apis.train.build_ddp", return_value=mock.MagicMock()) + mocker.patch( + "otx.algorithms.segmentation.adapters.mmseg.apis.train.build_optimizer", return_value=mock.MagicMock() + ) + mocker.patch( + "otx.algorithms.segmentation.adapters.mmseg.apis.train.build_runner", return_value=mock.MagicMock() + ) + mocker.patch( + "otx.algorithms.segmentation.adapters.mmseg.apis.train.build_dataset", return_value=mock.MagicMock() + ) + mocker.patch( + "otx.algorithms.segmentation.adapters.mmseg.apis.train.build_dataloader", return_value=mock.MagicMock() + ) + mocker.patch( + "otx.algorithms.segmentation.adapters.mmseg.apis.train.build_dataloader", return_value=mock.MagicMock() + ) + mocker.patch( + "otx.algorithms.segmentation.adapters.mmseg.apis.train.build_dataloader", return_value=mock.MagicMock() + ) + mocker.patch( + "otx.algorithms.segmentation.adapters.mmseg.apis.train.DistEvalHook", return_value=mock.MagicMock() + ) + mocker.patch("otx.algorithms.segmentation.adapters.mmseg.apis.train.EvalHook", return_value=mock.MagicMock()) @pytest.fixture def mmcv_cfg(self): - return mmcv.Config({"gpu_ids" : [0], "seed": 42, "data": mock.MagicMock(), "device": "cpu", "optimizer": "Adam", "optimizer_config": {}, - "total_epochs": 1, "work_dir": "test", "lr_config": {}, "checkpoint_config": {}, "log_config": {}, "resume_from": False, "load_from": "", - "workflow": "", "log_level": 1, "total_iters": 1000}) + return mmcv.Config( + { + "gpu_ids": [0], + "seed": 42, + "data": mock.MagicMock(), + "device": "cpu", + "optimizer": "Adam", + "optimizer_config": {}, + "total_epochs": 1, + "work_dir": "test", + "lr_config": {}, + "checkpoint_config": {}, + "log_config": {}, + "resume_from": False, + "load_from": "", + "workflow": "", + "log_level": 1, + "total_iters": 1000, + } + ) @pytest.fixture def model(self): diff --git a/tools/experiment.py b/tools/experiment.py index 6a79aae7537..53dc0faeb37 100644 --- a/tools/experiment.py +++ b/tools/experiment.py @@ -22,10 +22,11 @@ from typing import Any, Dict, List, Optional, Tuple, Union import yaml -from otx.cli.tools.cli import main as otx_cli from rich.console import Console from rich.table import Table +from otx.cli.tools.cli import main as otx_cli + rich_console = Console() From d566b7edfb2e31ecbb8f478d4c8ed3393c49406d Mon Sep 17 00:00:00 2001 From: kprokofi Date: Thu, 7 Mar 2024 07:39:24 +0900 Subject: [PATCH 34/39] add headers --- .../common/adapters/torch/amp/test_xpu_grad_scaler.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/tests/unit/algorithms/common/adapters/torch/amp/test_xpu_grad_scaler.py b/tests/unit/algorithms/common/adapters/torch/amp/test_xpu_grad_scaler.py index 57e3a1e0ce1..107939f50eb 100644 --- a/tests/unit/algorithms/common/adapters/torch/amp/test_xpu_grad_scaler.py +++ b/tests/unit/algorithms/common/adapters/torch/amp/test_xpu_grad_scaler.py @@ -1,4 +1,8 @@ -# test_xpu_grad_scaler.py +"""Test for otx.algorithms.common.adapters.torch.amp.xpu_grad_scaler """ + +# Copyright (C) 2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 +# import pytest import torch From 6a45aad375ad6d156baf91963acb38536c2518b3 Mon Sep 17 00:00:00 2001 From: kprokofi Date: Thu, 7 Mar 2024 07:41:03 +0900 Subject: [PATCH 35/39] revert experiments back --- tools/experiment.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/tools/experiment.py b/tools/experiment.py index 53dc0faeb37..6a79aae7537 100644 --- a/tools/experiment.py +++ b/tools/experiment.py @@ -22,11 +22,10 
@@ from typing import Any, Dict, List, Optional, Tuple, Union import yaml +from otx.cli.tools.cli import main as otx_cli from rich.console import Console from rich.table import Table -from otx.cli.tools.cli import main as otx_cli - rich_console = Console() From 0d538ded04f835ce585a8793cdd34311ada036d3 Mon Sep 17 00:00:00 2001 From: Emily Chun Date: Thu, 7 Mar 2024 15:20:25 +0900 Subject: [PATCH 36/39] Omit e2e tests, which are not supported on xpu (#3048) * Omit e2e tests, which are not supported on xpu * Fix pre-commit issue --- tests/e2e/cli/action/test_action_classification.py | 4 ++++ tests/e2e/cli/action/test_action_detection.py | 4 ++++ .../test_api_xai_sanity_instance_segmentation.py | 4 ++++ .../cli/instance_segmentation/test_instance_segmentation.py | 4 ++++ tests/e2e/cli/instance_segmentation/test_tiling_instseg.py | 4 ++++ tests/e2e/cli/semantic_segmentation/test_segmentation.py | 4 ++++ tests/e2e/cli/visual_prompting/test_visual_prompting.py | 4 ++++ tests/e2e/cli/visual_prompting/test_zero_shot.py | 4 ++++ 8 files changed, 32 insertions(+) diff --git a/tests/e2e/cli/action/test_action_classification.py b/tests/e2e/cli/action/test_action_classification.py index 4b6210b94c5..eca4096d7b1 100644 --- a/tests/e2e/cli/action/test_action_classification.py +++ b/tests/e2e/cli/action/test_action_classification.py @@ -9,6 +9,7 @@ import pytest import torch +from otx.algorithms.common.utils.utils import is_xpu_available from otx.api.entities.model_template import parse_model_template from otx.cli.registry import Registry from tests.test_suite.e2e_test_system import e2e_pytest_component @@ -41,6 +42,9 @@ "4", ] +if is_xpu_available(): + pytest.skip("Action task is not supported on XPU", allow_module_level=True) + otx_dir = os.getcwd() MULTI_GPU_UNAVAILABLE = torch.cuda.device_count() <= 1 diff --git a/tests/e2e/cli/action/test_action_detection.py b/tests/e2e/cli/action/test_action_detection.py index 7a9a95ed77f..442bf60d878 100644 --- a/tests/e2e/cli/action/test_action_detection.py +++ b/tests/e2e/cli/action/test_action_detection.py @@ -9,6 +9,7 @@ import pytest import torch +from otx.algorithms.common.utils.utils import is_xpu_available from otx.api.entities.model_template import parse_model_template from otx.cli.registry import Registry from tests.test_suite.e2e_test_system import e2e_pytest_component @@ -41,6 +42,9 @@ "4", ] +if is_xpu_available(): + pytest.skip("Action detection task is not supported on XPU", allow_module_level=True) + otx_dir = os.getcwd() MULTI_GPU_UNAVAILABLE = torch.cuda.device_count() <= 1 diff --git a/tests/e2e/cli/instance_segmentation/test_api_xai_sanity_instance_segmentation.py b/tests/e2e/cli/instance_segmentation/test_api_xai_sanity_instance_segmentation.py index 0b6132b67db..00a019b3e04 100644 --- a/tests/e2e/cli/instance_segmentation/test_api_xai_sanity_instance_segmentation.py +++ b/tests/e2e/cli/instance_segmentation/test_api_xai_sanity_instance_segmentation.py @@ -8,6 +8,7 @@ import torch +from otx.algorithms.common.utils.utils import is_xpu_available from otx.algorithms.detection.adapters.mmdet.task import MMDetectionTask from otx.algorithms.detection.adapters.openvino.task import OpenVINODetectionTask from otx.algorithms.detection.configs.base import DetectionConfig @@ -36,6 +37,9 @@ assert_text_explain_all = "The number of saliency maps should be equal to the number of all classes." assert_text_explain_predicted = "The number of saliency maps should be equal to the number of predicted classes." 
+if is_xpu_available(): + pytest.skip("Instance segmentation task is not supported on XPU", allow_module_level=True) + class TestISegmXAIAPI: def _prepare_task_env(self, temp_dir, train=True, tile=False): diff --git a/tests/e2e/cli/instance_segmentation/test_instance_segmentation.py b/tests/e2e/cli/instance_segmentation/test_instance_segmentation.py index 3d5eb2a1d49..aec8858c4c7 100644 --- a/tests/e2e/cli/instance_segmentation/test_instance_segmentation.py +++ b/tests/e2e/cli/instance_segmentation/test_instance_segmentation.py @@ -9,6 +9,7 @@ import pytest import torch +from otx.algorithms.common.utils.utils import is_xpu_available from otx.api.entities.model_template import parse_model_template from otx.cli.registry import Registry from tests.test_suite.e2e_test_system import e2e_pytest_component @@ -74,6 +75,9 @@ "2", ] +if is_xpu_available(): + pytest.skip("Instance segmentation task is not supported on XPU", allow_module_level=True) + otx_dir = os.getcwd() iseg_config_root = Path("src/otx/algorithms/detection/configs/instance_segmentation") diff --git a/tests/e2e/cli/instance_segmentation/test_tiling_instseg.py b/tests/e2e/cli/instance_segmentation/test_tiling_instseg.py index 30e7aec80a2..830fc6cddaa 100644 --- a/tests/e2e/cli/instance_segmentation/test_tiling_instseg.py +++ b/tests/e2e/cli/instance_segmentation/test_tiling_instseg.py @@ -7,6 +7,7 @@ import pytest +from otx.algorithms.common.utils.utils import is_xpu_available from otx.api.entities.model_template import parse_model_template from otx.cli.registry import Registry from tests.test_suite.e2e_test_system import e2e_pytest_component @@ -62,6 +63,9 @@ "4", ] +if is_xpu_available(): + pytest.skip("Instance segmentation task is not supported on XPU", allow_module_level=True) + otx_dir = os.getcwd() TT_STABILITY_TESTS = os.environ.get("TT_STABILITY_TESTS", False) diff --git a/tests/e2e/cli/semantic_segmentation/test_segmentation.py b/tests/e2e/cli/semantic_segmentation/test_segmentation.py index d85698a1394..ead9ed6fb42 100644 --- a/tests/e2e/cli/semantic_segmentation/test_segmentation.py +++ b/tests/e2e/cli/semantic_segmentation/test_segmentation.py @@ -8,6 +8,7 @@ import pytest import torch +from otx.algorithms.common.utils.utils import is_xpu_available from otx.api.entities.model_template import parse_model_template from otx.cli.registry import Registry from tests.test_suite.e2e_test_system import e2e_pytest_component @@ -60,6 +61,9 @@ "4", ] +if is_xpu_available(): + pytest.skip("Semantic segmentation task is not supported on XPU", allow_module_level=True) + otx_dir = Path.cwd() MULTI_GPU_UNAVAILABLE = torch.cuda.device_count() <= 1 diff --git a/tests/e2e/cli/visual_prompting/test_visual_prompting.py b/tests/e2e/cli/visual_prompting/test_visual_prompting.py index b6c1190e1d0..33acf728223 100644 --- a/tests/e2e/cli/visual_prompting/test_visual_prompting.py +++ b/tests/e2e/cli/visual_prompting/test_visual_prompting.py @@ -9,6 +9,7 @@ import pytest +from otx.algorithms.common.utils.utils import is_xpu_available from otx.api.entities.model_template import parse_model_template from otx.cli.registry import Registry from tests.test_suite.e2e_test_system import e2e_pytest_component @@ -49,6 +50,9 @@ "4", ] +if is_xpu_available(): + pytest.skip("Visual prompting task is not supported on XPU", allow_module_level=True) + otx_dir = os.getcwd() TT_STABILITY_TESTS = os.environ.get("TT_STABILITY_TESTS", False) diff --git a/tests/e2e/cli/visual_prompting/test_zero_shot.py b/tests/e2e/cli/visual_prompting/test_zero_shot.py index 
c0fb4fdb5d3..153d4353766 100644 --- a/tests/e2e/cli/visual_prompting/test_zero_shot.py +++ b/tests/e2e/cli/visual_prompting/test_zero_shot.py @@ -9,6 +9,7 @@ import pytest +from otx.algorithms.common.utils.utils import is_xpu_available from otx.api.entities.model_template import parse_model_template from otx.cli.registry import Registry from tests.test_suite.e2e_test_system import e2e_pytest_component @@ -38,6 +39,9 @@ ], } +if is_xpu_available(): + pytest.skip("Zero shot visual prompting task is not supported on XPU", allow_module_level=True) + otx_dir = os.getcwd() TT_STABILITY_TESTS = os.environ.get("TT_STABILITY_TESTS", False) From 4edfd5239e11000b347b3652d63ac3bff5a226b6 Mon Sep 17 00:00:00 2001 From: "Shin, Eunwoo" Date: Fri, 8 Mar 2024 09:57:53 +0900 Subject: [PATCH 37/39] revert github workflow --- .github/workflows/docs.yml | 2 +- .github/workflows/docs_stable.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml index 4b9c131b4b6..b294c18c382 100644 --- a/.github/workflows/docs.yml +++ b/.github/workflows/docs.yml @@ -12,7 +12,7 @@ jobs: Build-Docs: runs-on: ubuntu-20.04 permissions: - pages: write + contents: write steps: - name: Checkout repository uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 diff --git a/.github/workflows/docs_stable.yml b/.github/workflows/docs_stable.yml index 6c095ad257e..cbddeda016c 100644 --- a/.github/workflows/docs_stable.yml +++ b/.github/workflows/docs_stable.yml @@ -11,7 +11,7 @@ jobs: Build-Docs: runs-on: ubuntu-20.04 permissions: - pages: write + contents: write steps: - name: Checkout repository uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 From a0b3bc66a6a9e965dd286e1c989625e1c189fd42 Mon Sep 17 00:00:00 2001 From: kprokofi Date: Mon, 11 Mar 2024 00:59:18 +0900 Subject: [PATCH 38/39] remove pytest.mark.skipif --- .../anomalib/strategies/xpu_single.py | 1 - .../adapters/torch/amp/xpu_grad_scaler.py | 8 ++++- .../adapters/anomalib/accelerators/xpu.py | 23 ++++++++++----- .../anomalib/strategies/test_xpu_single.py | 29 ++++++++++++++----- .../adapters/mmcls/api/test_train.py | 10 ++++--- .../mmcv/hooks/test_xpu_optimizer_hook.py | 12 ++++---- .../torch/amp/test_xpu_grad_scaler.py | 26 +++++++---------- .../adapters/mmdet/api/test_train.py | 12 +++++--- .../adapters/mmseg/api/test_train.py | 10 ++++--- tools/experiment.py | 3 +- 10 files changed, 82 insertions(+), 52 deletions(-) diff --git a/src/otx/algorithms/anomaly/adapters/anomalib/strategies/xpu_single.py b/src/otx/algorithms/anomaly/adapters/anomalib/strategies/xpu_single.py index 3c00aeb6747..014132a840e 100644 --- a/src/otx/algorithms/anomaly/adapters/anomalib/strategies/xpu_single.py +++ b/src/otx/algorithms/anomaly/adapters/anomalib/strategies/xpu_single.py @@ -30,7 +30,6 @@ def __init__( checkpoint_io: Optional[CheckpointIO] = None, precision_plugin: Optional[PrecisionPlugin] = None, ): - if not is_xpu_available(): raise MisconfigurationException("`SingleXPUStrategy` requires XPU devices to run") diff --git a/src/otx/algorithms/common/adapters/torch/amp/xpu_grad_scaler.py b/src/otx/algorithms/common/adapters/torch/amp/xpu_grad_scaler.py index f3994050cae..be37f003b78 100644 --- a/src/otx/algorithms/common/adapters/torch/amp/xpu_grad_scaler.py +++ b/src/otx/algorithms/common/adapters/torch/amp/xpu_grad_scaler.py @@ -7,7 +7,11 @@ from typing import List import torch -from intel_extension_for_pytorch.cpu.autocast._grad_scaler import _MultiDeviceReplicator + +from 
otx.algorithms.common.utils.utils import is_xpu_available + +if is_xpu_available(): + from intel_extension_for_pytorch.cpu.autocast._grad_scaler import _MultiDeviceReplicator from torch.cuda.amp.grad_scaler import GradScaler, _refresh_per_optimizer_state @@ -16,6 +20,8 @@ class XPUGradScaler(GradScaler): def __init__(self, init_scale=2.0**16, growth_factor=2.0, backoff_factor=0.5, growth_interval=2000, enabled=True): self._enabled = enabled + if not is_xpu_available(): + raise RuntimeError("XPU GradScaler requires XPU device.") if self._enabled: assert growth_factor > 1.0, "The growth factor must be > 1.0." diff --git a/tests/unit/algorithms/anomaly/adapters/anomalib/accelerators/xpu.py b/tests/unit/algorithms/anomaly/adapters/anomalib/accelerators/xpu.py index 4b89d101661..d277fc2c6dc 100644 --- a/tests/unit/algorithms/anomaly/adapters/anomalib/accelerators/xpu.py +++ b/tests/unit/algorithms/anomaly/adapters/anomalib/accelerators/xpu.py @@ -10,41 +10,50 @@ from otx.algorithms.common.utils import is_xpu_available -@pytest.mark.skipif(not is_xpu_available(), reason="XPU is not available") class TestXPUAccelerator: @pytest.fixture - def accelerator(self): - return XPUAccelerator() + def accelerator(self, mocker): + mock_torch = mocker.patch("otx.algorithms.anomaly.adapters.anomalib.accelerators.xpu.torch") + return XPUAccelerator(), mock_torch def test_setup_device(self, accelerator): + accelerator, mock_torch = accelerator device = torch.device("xpu") accelerator.setup_device(device) + assert mock_torch.xpu.set_device.called def test_parse_devices(self, accelerator): + accelerator, _ = accelerator devices = [1, 2, 3] parsed_devices = accelerator.parse_devices(devices) assert isinstance(parsed_devices, list) assert parsed_devices == devices - def test_get_parallel_devices(self, accelerator): + def test_get_parallel_devices(self, accelerator, mocker): + accelerator, _ = accelerator devices = [1, 2, 3] parallel_devices = accelerator.get_parallel_devices(devices) assert isinstance(parallel_devices, list) - assert parallel_devices == [torch.device("xpu", idx) for idx in devices] + assert all([isinstance(device, mocker.MagicMock) for device in parallel_devices]) - def test_auto_device_count(self, accelerator): + def test_auto_device_count(self, accelerator, mocker): + accelerator, mock_torch = accelerator count = accelerator.auto_device_count() - assert isinstance(count, int) + assert isinstance(count, mocker.MagicMock) + assert mock_torch.xpu.device_count.called def test_is_available(self, accelerator): + accelerator, _ = accelerator available = accelerator.is_available() assert isinstance(available, bool) assert available == is_xpu_available() def test_get_device_stats(self, accelerator): + accelerator, _ = accelerator device = torch.device("xpu") stats = accelerator.get_device_stats(device) assert isinstance(stats, dict) def test_teardown(self, accelerator): + accelerator, _ = accelerator accelerator.teardown() diff --git a/tests/unit/algorithms/anomaly/adapters/anomalib/strategies/test_xpu_single.py b/tests/unit/algorithms/anomaly/adapters/anomalib/strategies/test_xpu_single.py index 97ff1989ed4..f1e0de3204b 100644 --- a/tests/unit/algorithms/anomaly/adapters/anomalib/strategies/test_xpu_single.py +++ b/tests/unit/algorithms/anomaly/adapters/anomalib/strategies/test_xpu_single.py @@ -7,22 +7,37 @@ import torch import pytorch_lightning as pl from otx.algorithms.anomaly.adapters.anomalib.strategies.xpu_single import SingleXPUStrategy -from otx.algorithms.common.utils.utils import 
is_xpu_available +from pytorch_lightning.utilities.exceptions import MisconfigurationException -@pytest.mark.skipif(not is_xpu_available(), reason="XPU is not available") class TestSingleXPUStrategy: - def test_init(self): + def test_init(self, mocker): + with pytest.raises(MisconfigurationException): + strategy = SingleXPUStrategy(device="xpu:0") + mocked_is_xpu_available = mocker.patch( + "otx.algorithms.anomaly.adapters.anomalib.strategies.xpu_single.is_xpu_available", return_value=True + ) strategy = SingleXPUStrategy(device="xpu:0") + assert mocked_is_xpu_available.call_count == 1 assert strategy._root_device.type == "xpu" assert strategy.accelerator is None - def test_is_distributed(self): - strategy = SingleXPUStrategy(device="xpu:0") + @pytest.fixture + def strategy(self, mocker): + mocker.patch( + "otx.algorithms.anomaly.adapters.anomalib.strategies.xpu_single.is_xpu_available", return_value=True + ) + return SingleXPUStrategy(device="xpu:0") + + def test_is_distributed(self, strategy): assert not strategy.is_distributed - def test_setup_optimizers(self): - strategy = SingleXPUStrategy(device="xpu:0") + def test_setup_optimizers(self, strategy, mocker): + mocker.patch("otx.algorithms.anomaly.adapters.anomalib.strategies.xpu_single.torch") + mocker.patch( + "otx.algorithms.anomaly.adapters.anomalib.strategies.xpu_single.torch.xpu.optimize", + return_value=(mocker.MagicMock(), mocker.MagicMock()), + ) trainer = pl.Trainer() # Create mock optimizers and models for testing model = torch.nn.Linear(10, 2) diff --git a/tests/unit/algorithms/classification/adapters/mmcls/api/test_train.py b/tests/unit/algorithms/classification/adapters/mmcls/api/test_train.py index 60aa92eef84..26143b66807 100644 --- a/tests/unit/algorithms/classification/adapters/mmcls/api/test_train.py +++ b/tests/unit/algorithms/classification/adapters/mmcls/api/test_train.py @@ -96,7 +96,6 @@ def test_train_model_multiple_datasets_distributed_training(self, mock_modules, # Call the function train_model(model, [dataset, dataset], mmcv_cfg, distributed=True, validate=True) - @pytest.mark.skipif(is_xpu_available() or not torch.cuda.is_available(), reason="cuda is not available") def test_train_model_specific_timestamp_and_cuda_device(self, mock_modules, mmcv_cfg, model, dataset): # Create mock inputs _ = mock_modules @@ -107,12 +106,15 @@ def test_train_model_specific_timestamp_and_cuda_device(self, mock_modules, mmcv # Call the function train_model(model, dataset, mmcv_cfg, timestamp=timestamp, device=device, meta=meta) - @pytest.mark.skipif(not is_xpu_available(), reason="xpu is not available") - def test_train_model_xpu_device(self, mock_modules, mmcv_cfg, model, dataset): + def test_train_model_xpu_device(self, mock_modules, mmcv_cfg, model, dataset, mocker): # Create mock inputs _ = mock_modules device = "xpu" mmcv_cfg.device = "xpu" - + mocker.patch("otx.algorithms.classification.adapters.mmcls.apis.train.torch") + mocker.patch( + "otx.algorithms.classification.adapters.mmcls.apis.train.torch.xpu.optimize", + return_value=(mocker.MagicMock(), mocker.MagicMock()), + ) # Call the function train_model(model, dataset, mmcv_cfg, device=device) diff --git a/tests/unit/algorithms/common/adapters/mmcv/hooks/test_xpu_optimizer_hook.py b/tests/unit/algorithms/common/adapters/mmcv/hooks/test_xpu_optimizer_hook.py index dd13f4c6e81..0151ccff104 100644 --- a/tests/unit/algorithms/common/adapters/mmcv/hooks/test_xpu_optimizer_hook.py +++ b/tests/unit/algorithms/common/adapters/mmcv/hooks/test_xpu_optimizer_hook.py @@ -3,18 +3,16 
@@ # Copyright (C) 2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 -import pytest -from otx.algorithms.common.utils.utils import is_xpu_available - -@pytest.mark.skipif(not is_xpu_available(), reason="XPU is not available") -def test_init(): +def test_init(mocker): from otx.algorithms.common.adapters.mmcv.hooks.xpu_optimizer_hook import BFp16XPUOptimizerHook - from otx.algorithms.common.adapters.torch.amp import XPUGradScaler + mocker.patch( + "otx.algorithms.common.adapters.mmcv.hooks.xpu_optimizer_hook.XPUGradScaler", return_value=mocker.MagicMock() + ) hook = BFp16XPUOptimizerHook(grad_clip=None, coalesce=True, bucket_size_mb=-1, loss_scale=512.0, distributed=True) assert hook.coalesce is True # Check coalesce is True assert hook.bucket_size_mb == -1 # Check bucket size is -1 assert hook._scale_update_param is 512.0 # Check scale update param is 512.0 assert hook.distributed is True # Check distributed is True - assert isinstance(hook.loss_scaler, XPUGradScaler) + assert isinstance(hook.loss_scaler, mocker.MagicMock) diff --git a/tests/unit/algorithms/common/adapters/torch/amp/test_xpu_grad_scaler.py b/tests/unit/algorithms/common/adapters/torch/amp/test_xpu_grad_scaler.py index 107939f50eb..465834b9a37 100644 --- a/tests/unit/algorithms/common/adapters/torch/amp/test_xpu_grad_scaler.py +++ b/tests/unit/algorithms/common/adapters/torch/amp/test_xpu_grad_scaler.py @@ -6,16 +6,14 @@ import pytest import torch -from otx.algorithms.common.utils import is_xpu_available -if is_xpu_available(): - from otx.algorithms.common.adapters.torch.amp.xpu_grad_scaler import XPUGradScaler +from otx.algorithms.common.adapters.torch.amp.xpu_grad_scaler import XPUGradScaler -@pytest.mark.skipif(not is_xpu_available(), reason="XPU is not available") class TestXPUGradScaler: @pytest.fixture - def grad_scaler(self): + def grad_scaler(self, mocker): + mocker.patch("otx.algorithms.common.adapters.torch.amp.xpu_grad_scaler.is_xpu_available", return_value=True) return XPUGradScaler() @pytest.fixture @@ -31,15 +29,11 @@ def test_init(self, grad_scaler): assert grad_scaler._backoff_factor == 0.5 assert grad_scaler._growth_interval == 2000 - def test_scale(self, grad_scaler): - outputs = torch.tensor([1.0, 2.0, 3.0], device="xpu:0") + def test_scale(self, grad_scaler, mocker): + outputs = mocker.MagicMock(torch.Tensor) + outputs.device.type = "xpu" + outputs.device.index = 0 + grad_scaler._lazy_init_scale_growth_tracker = mocker.MagicMock() + grad_scaler._scale = mocker.MagicMock() scaled_outputs = grad_scaler.scale(outputs) - assert scaled_outputs.device.type == "xpu" - assert torch.equal(scaled_outputs, outputs * grad_scaler._scale) - - def test_unscale_grads(self, grad_scaler, optimizer): - inv_scale = 1.0 - found_inf = False - output = grad_scaler._unscale_grads_(optimizer, inv_scale, found_inf, allow_bf16=False) - assert isinstance(output, dict) - assert not output + assert isinstance(scaled_outputs.device.type, mocker.MagicMock) diff --git a/tests/unit/algorithms/detection/adapters/mmdet/api/test_train.py b/tests/unit/algorithms/detection/adapters/mmdet/api/test_train.py index e3a95e2c52a..1048cabbe2c 100644 --- a/tests/unit/algorithms/detection/adapters/mmdet/api/test_train.py +++ b/tests/unit/algorithms/detection/adapters/mmdet/api/test_train.py @@ -88,20 +88,24 @@ def test_train_model_multiple_datasets_distributed_training(self, mock_modules, # Call the function train_detector(model, [dataset, dataset], mmcv_cfg, distributed=True, validate=True) - @pytest.mark.skipif(is_xpu_available() or not 
torch.cuda.is_available(), reason="cuda is not available") - def test_train_model_specific_timestamp_and_cuda_device(self, mock_modules, mmcv_cfg, model, dataset): + def test_train_model_specific_timestamp_and_cuda_device(self, mock_modules, mmcv_cfg, model, dataset, mocker): # Create mock inputs _ = mock_modules timestamp = "2024-01-01" mmcv_cfg.device = "cuda" meta = {"info": "some_info"} + # Call the function train_detector(model, dataset, mmcv_cfg, timestamp=timestamp, meta=meta) - @pytest.mark.skipif(not is_xpu_available(), reason="xpu is not available") - def test_train_model_xpu_device(self, mock_modules, mmcv_cfg, model, dataset): + def test_train_model_xpu_device(self, mock_modules, mmcv_cfg, model, dataset, mocker): # Create mock inputs _ = mock_modules mmcv_cfg.device = "xpu" + mocker.patch("otx.algorithms.detection.adapters.mmdet.apis.train.torch") + mocker.patch( + "otx.algorithms.detection.adapters.mmdet.apis.train.torch.xpu.optimize", + return_value=(mocker.MagicMock(), mocker.MagicMock()), + ) # Call the function train_detector(model, dataset, mmcv_cfg) diff --git a/tests/unit/algorithms/segmentation/adapters/mmseg/api/test_train.py b/tests/unit/algorithms/segmentation/adapters/mmseg/api/test_train.py index 99a93fac1f3..8f70756764b 100644 --- a/tests/unit/algorithms/segmentation/adapters/mmseg/api/test_train.py +++ b/tests/unit/algorithms/segmentation/adapters/mmseg/api/test_train.py @@ -94,7 +94,6 @@ def test_train_model_multiple_datasets_distributed_training(self, mock_modules, # Call the function train_segmentor(model, [dataset, dataset], mmcv_cfg, distributed=True, validate=True) - @pytest.mark.skipif(is_xpu_available() or not torch.cuda.is_available(), reason="cuda is not available") def test_train_model_specific_timestamp_and_cuda_device(self, mock_modules, mmcv_cfg, model, dataset): # Create mock inputs _ = mock_modules @@ -104,11 +103,14 @@ def test_train_model_specific_timestamp_and_cuda_device(self, mock_modules, mmcv # Call the function train_segmentor(model, dataset, mmcv_cfg, timestamp=timestamp, meta=meta) - @pytest.mark.skipif(not is_xpu_available(), reason="xpu is not available") - def test_train_model_xpu_device(self, mock_modules, mmcv_cfg, model, dataset): + def test_train_model_xpu_device(self, mock_modules, mmcv_cfg, model, dataset, mocker): # Create mock inputs _ = mock_modules mmcv_cfg.device = "xpu" - + mocker.patch("otx.algorithms.segmentation.adapters.mmseg.apis.train.torch") + mocker.patch( + "otx.algorithms.segmentation.adapters.mmseg.apis.train.torch.xpu.optimize", + return_value=(mocker.MagicMock(), mocker.MagicMock()), + ) # Call the function train_segmentor(model, dataset, mmcv_cfg) diff --git a/tools/experiment.py b/tools/experiment.py index 6a79aae7537..53dc0faeb37 100644 --- a/tools/experiment.py +++ b/tools/experiment.py @@ -22,10 +22,11 @@ from typing import Any, Dict, List, Optional, Tuple, Union import yaml -from otx.cli.tools.cli import main as otx_cli from rich.console import Console from rich.table import Table +from otx.cli.tools.cli import main as otx_cli + rich_console = Console() From 76c5ce77b7344418eb6fdc91ebbacbd4a1b95e1b Mon Sep 17 00:00:00 2001 From: kprokofi Date: Mon, 11 Mar 2024 01:05:14 +0900 Subject: [PATCH 39/39] revert experiments back --- tools/experiment.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/tools/experiment.py b/tools/experiment.py index 53dc0faeb37..6a79aae7537 100644 --- a/tools/experiment.py +++ b/tools/experiment.py @@ -22,11 +22,10 @@ from typing import Any, Dict, List, 
Optional, Tuple, Union import yaml +from otx.cli.tools.cli import main as otx_cli from rich.console import Console from rich.table import Table -from otx.cli.tools.cli import main as otx_cli - rich_console = Console()
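
A note on the testing approach in PATCH 38/39: rather than skipping XPU tests on hosts without the hardware (pytest.mark.skipif(not is_xpu_available(), ...)), the suite patches is_xpu_available and the torch.xpu entry points with mocks so the XPU code paths are exercised on any CI machine. Below is a minimal, self-contained sketch of that pattern; the select_device helper is hypothetical and only pytest plus pytest-mock are assumed.

    """Minimal sketch of the mocker-based XPU test pattern (hypothetical names)."""

    import sys


    def is_xpu_available() -> bool:
        # Stand-in for otx.algorithms.common.utils.is_xpu_available;
        # on a CPU-only CI host this would report False.
        return False


    def select_device() -> str:
        """Pick the training device the way the train APIs do."""
        return "xpu" if is_xpu_available() else "cpu"


    def test_xpu_branch_without_hardware(mocker):
        # Patch the availability check instead of skipping the test, so the
        # XPU branch runs even where no XPU driver is installed.
        mocker.patch.object(sys.modules[__name__], "is_xpu_available", return_value=True)
        assert select_device() == "xpu"

The trade-off is that these unit tests no longer touch a real device, but they also no longer silently skip on CPU-only runners, which is what the reverted skipif decorators did.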
