From c8d0914218cafc21b24fc1a56ef8393bf579f341 Mon Sep 17 00:00:00 2001
From: Jaden Travnik
Date: Tue, 24 Nov 2020 14:28:27 -0500
Subject: [PATCH 01/70] First commit

---
 docs/misc/changelog.rst                       |   4 +
 multi_input_tests.py                          |  87 ++++++
 stable_baselines3/common/base_class.py        |   3 +-
 stable_baselines3/common/buffers.py           | 241 ++++++++++++++--
 stable_baselines3/common/multi_input_envs.py  | 210 ++++++++++++++
 .../common/on_policy_algorithm.py             |  61 +++-
 stable_baselines3/common/policies.py          | 264 +++++++++++++++---
 stable_baselines3/common/preprocessing.py     |  43 ++-
 stable_baselines3/common/torch_layers.py      | 146 ++++++++--
 stable_baselines3/common/type_aliases.py      |  21 +-
 stable_baselines3/common/utils.py             |  88 +++++-
 .../common/vec_env/vec_frame_stack.py         | 218 ++++++++++++---
 tests/test_her.py                             |  49 +++-
 13 files changed, 1253 insertions(+), 182 deletions(-)
 create mode 100644 multi_input_tests.py
 create mode 100644 stable_baselines3/common/multi_input_envs.py

diff --git a/docs/misc/changelog.rst b/docs/misc/changelog.rst
index b154e4976..e4912b2e1 100644
--- a/docs/misc/changelog.rst
+++ b/docs/misc/changelog.rst
@@ -8,6 +8,7 @@ Pre-Release 0.11.0a1 (WIP)
 Breaking Changes:
 ^^^^^^^^^^^^^^^^^
+- Temporarily breaks HER, which needs to be updated to use the new dictionary observations
 - ``evaluate_policy`` now returns rewards/episode lengths from a ``Monitor`` wrapper if one is present,
   this allows to return the unnormalized reward in the case of Atari games for instance.
 - Renamed ``common.vec_env.is_wrapped`` to ``common.vec_env.is_vecenv_wrapped`` to avoid confusion
@@ -19,6 +20,8 @@ New Features:
   automatic check for image spaces.
 - ``VecFrameStack`` now has a ``channels_order`` argument to tell if observations should be stacked
   on the first or last observation dimension (originally always stacked on last).
+- Added support for dictionary observations in both ``RolloutBuffer`` and ``ReplayBuffer`` (the ``ReplayBuffer`` support still needs to be tested)
+- Added simple 4x4 and 9-room multi-observation test environments
 - Added ``common.env_util.is_wrapped`` and ``common.env_util.unwrap_wrapper`` functions for checking/unwrapping
   an environment for specific wrapper.
 - Added ``env_is_wrapped()`` method for ``VecEnv`` to check if its environments are wrapped
 - Added ``monitor_kwargs`` parameter to ``make_vec_env`` and ``make_atari_env``
 - Wrap the environments automatically with a ``Monitor`` wrapper when possible.
+ Bug Fixes: ^^^^^^^^^^ - Fixed bug where code added VecTranspose on channel-first image environments (thanks @qxcv) diff --git a/multi_input_tests.py b/multi_input_tests.py new file mode 100644 index 000000000..e9936f0e3 --- /dev/null +++ b/multi_input_tests.py @@ -0,0 +1,87 @@ +import argparse +import gym +import numpy as np + +from stable_baselines3 import PPO, SAC +from stable_baselines3.common.policies import MultiInputActorCriticPolicy +from stable_baselines3.common.vec_env import ( + DummyVecEnv, + VecFrameStack, + VecTransposeImage, +) + +from stable_baselines3.common.multi_input_envs import ( + SimpleMultiObsEnv, + NineRoomMultiObsEnv, +) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser(description="Runs the multi_input_tests script") + parser.add_argument( + "--timesteps", + type=int, + default=30000, + help="Number of timesteps to train for (default: 20000)", + ) + parser.add_argument( + "--num_envs", + type=int, + default=10, + help="Number of environments to use (default: 10)", + ) + parser.add_argument( + "--frame_stacks", + type=int, + default=1, + help="Number of stacked frames to use (default: 4)", + ) + parser.add_argument( + "--room9", + action="store_true", + help="If true, uses more complex 9 room environment", + ) + args = parser.parse_args() + + ENV_CLS = NineRoomMultiObsEnv if args.room9 else SimpleMultiObsEnv + + make_env = lambda: ENV_CLS(random_start=True) + + env = DummyVecEnv([make_env for i in range(args.num_envs)]) + if args.frame_stacks > 1: + env = VecFrameStack(env, n_stack=args.frame_stacks) + + model = PPO(MultiInputActorCriticPolicy, env) + + model.learn(args.timesteps) + env.close() + print("Done training, starting testing") + + make_env = lambda: ENV_CLS(random_start=False) + test_env = DummyVecEnv([make_env]) + if args.frame_stacks > 1: + test_env = VecFrameStack(test_env, n_stack=args.frame_stacks) + + obs = test_env.reset() + num_episodes = 1 + trajectories = [[]] + i_step, i_episode = 0, 0 + while i_episode < num_episodes: + action, _states = model.predict(obs, deterministic=False) + obs, reward, done, info = test_env.step(action) + test_env.render() + trajectories[-1].append((test_env.get_attr("state")[0], action[0])) + + i_step += 1 + + if done[0]: + if info[0]["got_to_end"]: + print(f"Episode {i_episode} : Got to end in {i_step} steps") + else: + print(f"Episode {i_episode} : Did not get to end") + obs = test_env.reset() + i_step = 0 + trajectories.append([]) + i_episode += 1 + + test_env.close() diff --git a/stable_baselines3/common/base_class.py b/stable_baselines3/common/base_class.py index 32c1ce57c..9008c2aa4 100644 --- a/stable_baselines3/common/base_class.py +++ b/stable_baselines3/common/base_class.py @@ -197,7 +197,8 @@ def _wrap_env(env: GymEnv, verbose: int = 0, monitor_wrapper: bool = True) -> Ve env = VecTransposeImage(env) # check if wrapper for dict support is needed when using HER - if isinstance(env.observation_space, gym.spaces.dict.Dict): + if isinstance(env.observation_space, gym.spaces.dict.Dict) and \ + set(env.observation_space.spaces.keys()) == set(["observation", "desired_goal"]): env = ObsDictWrapper(env) return env diff --git a/stable_baselines3/common/buffers.py b/stable_baselines3/common/buffers.py index 8dbce4ccc..7d7cfb189 100644 --- a/stable_baselines3/common/buffers.py +++ b/stable_baselines3/common/buffers.py @@ -13,7 +13,12 @@ psutil = None from stable_baselines3.common.preprocessing import get_action_dim, get_obs_shape -from stable_baselines3.common.type_aliases import ReplayBufferSamples, 
RolloutBufferSamples +from stable_baselines3.common.type_aliases import ( + ReplayBufferSamples, + RolloutBufferSamples, + DictRolloutBufferSamples, + DictReplayBufferSamples, +) from stable_baselines3.common.vec_env import VecNormalize @@ -42,6 +47,7 @@ def __init__( self.observation_space = observation_space self.action_space = action_space self.obs_shape = get_obs_shape(observation_space) + self.is_dict_data = isinstance(self.observation_space, spaces.Dict) self.action_dim = get_action_dim(action_space) self.pos = 0 self.full = False @@ -130,14 +136,17 @@ def to_torch(self, array: np.ndarray, copy: bool = True) -> th.Tensor: @staticmethod def _normalize_obs( - obs: Union[np.ndarray, Dict[str, np.ndarray]], env: Optional[VecNormalize] = None + obs: Union[np.ndarray, Dict[str, np.ndarray]], + env: Optional[VecNormalize] = None, ) -> Union[np.ndarray, Dict[str, np.ndarray]]: if env is not None: return env.normalize_obs(obs) return obs @staticmethod - def _normalize_reward(reward: np.ndarray, env: Optional[VecNormalize] = None) -> np.ndarray: + def _normalize_reward( + reward: np.ndarray, env: Optional[VecNormalize] = None + ) -> np.ndarray: if env is not None: return env.normalize_reward(reward).astype(np.float32) return reward @@ -168,7 +177,9 @@ def __init__( n_envs: int = 1, optimize_memory_usage: bool = False, ): - super(ReplayBuffer, self).__init__(buffer_size, observation_space, action_space, device, n_envs=n_envs) + super(ReplayBuffer, self).__init__( + buffer_size, observation_space, action_space, device, n_envs=n_envs + ) assert n_envs == 1, "Replay buffer only support single environment for now" @@ -177,20 +188,59 @@ def __init__( mem_available = psutil.virtual_memory().available self.optimize_memory_usage = optimize_memory_usage - self.observations = np.zeros((self.buffer_size, self.n_envs) + self.obs_shape, dtype=observation_space.dtype) + + if self.is_dict_data: + self.observations = { + key: np.zeros((self.buffer_size, self.n_envs) + _obs_shape) + for key, _obs_shape in self.obs_shape.items() + } + else: + self.observations = np.zeros( + (self.buffer_size, self.n_envs) + self.obs_shape, + dtype=observation_space.dtype, + ) if optimize_memory_usage: # `observations` contains also the next observation self.next_observations = None else: - self.next_observations = np.zeros((self.buffer_size, self.n_envs) + self.obs_shape, dtype=observation_space.dtype) - self.actions = np.zeros((self.buffer_size, self.n_envs, self.action_dim), dtype=action_space.dtype) + if self.is_dict_data: + self.next_observations = { + key: np.zeros((self.buffer_size, self.n_envs) + _obs_shape) + for key, _obs_shape in self.obs_shape.items() + } + else: + self.next_observations = np.zeros( + (self.buffer_size, self.n_envs) + self.obs_shape, + dtype=observation_space.dtype, + ) + self.actions = np.zeros( + (self.buffer_size, self.n_envs, self.action_dim), dtype=action_space.dtype + ) self.rewards = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32) self.dones = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32) if psutil is not None: - total_memory_usage = self.observations.nbytes + self.actions.nbytes + self.rewards.nbytes + self.dones.nbytes + obs_nbytes = 0 + if self.is_dict_data: + for key, obs in self.observations.items(): + obs_nbytes += obs.nbytes + else: + obs_nbytes = self.observations.nbytes + + total_memory_usage = ( + obs_nbytes + + self.actions.nbytes + + self.rewards.nbytes + + self.dones.nbytes + ) if self.next_observations is not None: - total_memory_usage += 
self.next_observations.nbytes + next_obs_nbytes = 0 + if self.is_dict_data: + for key, obs in self.observations.items(): + next_obs_nbytes += obs.nbytes + else: + next_obs_nbytes = self.next_observations.nbytes + total_memory_usage += next_obs_nbytes if total_memory_usage > mem_available: # Convert to GB @@ -201,13 +251,40 @@ def __init__( f"replay buffer {total_memory_usage:.2f}GB > {mem_available:.2f}GB" ) - def add(self, obs: np.ndarray, next_obs: np.ndarray, action: np.ndarray, reward: np.ndarray, done: np.ndarray) -> None: + def add( + self, + obs: Union[np.ndarray, dict], + next_obs: np.ndarray, + action: np.ndarray, + reward: np.ndarray, + done: np.ndarray, + ) -> None: # Copy to avoid modification by reference - self.observations[self.pos] = np.array(obs).copy() + + if self.is_dict_data: + for key in self.observations.keys(): + self.observations[key][self.pos] = np.array(obs[key]).copy() + else: + self.observations[self.pos] = np.array(obs).copy() + if self.optimize_memory_usage: - self.observations[(self.pos + 1) % self.buffer_size] = np.array(next_obs).copy() + if self.is_dict_data: + for key in self.observations.keys(): + self.observations[key][ + (self.pos + 1) % self.buffer_size + ] = np.array(next_obs[key]).copy() + else: + self.observations[(self.pos + 1) % self.buffer_size] = np.array( + next_obs + ).copy() else: - self.next_observations[self.pos] = np.array(next_obs).copy() + if self.is_dict_data: + for key in self.next_observations.keys(): + self.next_observations[key][self.pos] = np.array( + next_obs[key] + ).copy() + else: + self.next_observations[self.pos] = np.array(next_obs).copy() self.actions[self.pos] = np.array(action).copy() self.rewards[self.pos] = np.array(reward).copy() @@ -218,7 +295,9 @@ def add(self, obs: np.ndarray, next_obs: np.ndarray, action: np.ndarray, reward: self.full = True self.pos = 0 - def sample(self, batch_size: int, env: Optional[VecNormalize] = None) -> ReplayBufferSamples: + def sample( + self, batch_size: int, env: Optional[VecNormalize] = None + ) -> ReplayBufferSamples: """ Sample elements from the replay buffer. 
Custom sampling when using memory efficient variant, @@ -235,16 +314,56 @@ def sample(self, batch_size: int, env: Optional[VecNormalize] = None) -> ReplayB # Do not sample the element with index `self.pos` as the transitions is invalid # (we use only one array to store `obs` and `next_obs`) if self.full: - batch_inds = (np.random.randint(1, self.buffer_size, size=batch_size) + self.pos) % self.buffer_size + batch_inds = ( + np.random.randint(1, self.buffer_size, size=batch_size) + self.pos + ) % self.buffer_size else: batch_inds = np.random.randint(0, self.pos, size=batch_size) return self._get_samples(batch_inds, env=env) - def _get_samples(self, batch_inds: np.ndarray, env: Optional[VecNormalize] = None) -> ReplayBufferSamples: + def _get_samples( + self, batch_inds: np.ndarray, env: Optional[VecNormalize] = None + ) -> ReplayBufferSamples: + + if self.is_dict_data: + if self.optimize_memory_usage: + next_obs = { + key: self.to_torch( + self._normalize_obs( + obs[(batch_inds + 1) % self.buffer_size, 0, :], + env, + ) + ) + for key, obs in self.observations.items() + } + else: + next_obs = { + key: self.to_torch(self._normalize_obs(obs[batch_inds, 0, :], env)) + for key, obs in self.next_observations.items() + } + + normalized_obs = { + key: self.to_torch(self._normalize_obs(obs[batch_inds, 0, :], env)) + for key, obs in self.observations.items() + } + return DictReplayBufferSamples( + observations=normalized_obs, + actions=self.to_torch(self.actions[batch_inds]), + next_observations=next_obs, + dones=self.to_torch(self.dones[batch_inds]), + returns=self.to_torch( + self._normalize_reward(self.rewards[batch_inds], env) + ), + ) + if self.optimize_memory_usage: - next_obs = self._normalize_obs(self.observations[(batch_inds + 1) % self.buffer_size, 0, :], env) + next_obs = self._normalize_obs( + self.observations[(batch_inds + 1) % self.buffer_size, 0, :], env + ) else: - next_obs = self._normalize_obs(self.next_observations[batch_inds, 0, :], env) + next_obs = self._normalize_obs( + self.next_observations[batch_inds, 0, :], env + ) data = ( self._normalize_obs(self.observations[batch_inds, 0, :], env), @@ -290,17 +409,36 @@ def __init__( n_envs: int = 1, ): - super(RolloutBuffer, self).__init__(buffer_size, observation_space, action_space, device, n_envs=n_envs) + super(RolloutBuffer, self).__init__( + buffer_size, observation_space, action_space, device, n_envs=n_envs + ) self.gae_lambda = gae_lambda self.gamma = gamma - self.observations, self.actions, self.rewards, self.advantages = None, None, None, None + self.observations, self.actions, self.rewards, self.advantages = ( + None, + None, + None, + None, + ) self.returns, self.dones, self.values, self.log_probs = None, None, None, None self.generator_ready = False self.reset() def reset(self) -> None: - self.observations = np.zeros((self.buffer_size, self.n_envs) + self.obs_shape, dtype=np.float32) - self.actions = np.zeros((self.buffer_size, self.n_envs, self.action_dim), dtype=np.float32) + + if self.is_dict_data: + self.observations = {} + for (key, obs_input_shape) in self.obs_shape.items(): + self.observations[key] = np.zeros( + (self.buffer_size, self.n_envs) + obs_input_shape, dtype=np.float32 + ) + else: + self.observations = np.zeros( + (self.buffer_size, self.n_envs) + self.obs_shape, dtype=np.float32 + ) + self.actions = np.zeros( + (self.buffer_size, self.n_envs, self.action_dim), dtype=np.float32 + ) self.rewards = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32) self.returns = np.zeros((self.buffer_size, 
self.n_envs), dtype=np.float32) self.dones = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32) @@ -310,7 +448,9 @@ def reset(self) -> None: self.generator_ready = False super(RolloutBuffer, self).reset() - def compute_returns_and_advantage(self, last_values: th.Tensor, dones: np.ndarray) -> None: + def compute_returns_and_advantage( + self, last_values: th.Tensor, dones: np.ndarray + ) -> None: """ Post-processing step: compute the returns (sum of discounted rewards) and GAE advantage. @@ -336,13 +476,25 @@ def compute_returns_and_advantage(self, last_values: th.Tensor, dones: np.ndarra else: next_non_terminal = 1.0 - self.dones[step + 1] next_values = self.values[step + 1] - delta = self.rewards[step] + self.gamma * next_values * next_non_terminal - self.values[step] - last_gae_lam = delta + self.gamma * self.gae_lambda * next_non_terminal * last_gae_lam + delta = ( + self.rewards[step] + + self.gamma * next_values * next_non_terminal + - self.values[step] + ) + last_gae_lam = ( + delta + self.gamma * self.gae_lambda * next_non_terminal * last_gae_lam + ) self.advantages[step] = last_gae_lam self.returns = self.advantages + self.values def add( - self, obs: np.ndarray, action: np.ndarray, reward: np.ndarray, done: np.ndarray, value: th.Tensor, log_prob: th.Tensor + self, + obs: Union[np.ndarray, dict], + action: np.ndarray, + reward: np.ndarray, + done: np.ndarray, + value: th.Tensor, + log_prob: th.Tensor, ) -> None: """ :param obs: Observation @@ -358,7 +510,11 @@ def add( # Reshape 0-d tensor to avoid error log_prob = log_prob.reshape(-1, 1) - self.observations[self.pos] = np.array(obs).copy() + if self.is_dict_data: + for key in self.observations.keys(): + self.observations[key][self.pos] = np.array(obs[key]).copy() + else: + self.observations[self.pos] = np.array(obs).copy() self.actions[self.pos] = np.array(action).copy() self.rewards[self.pos] = np.array(reward).copy() self.dones[self.pos] = np.array(done).copy() @@ -368,12 +524,22 @@ def add( if self.pos == self.buffer_size: self.full = True - def get(self, batch_size: Optional[int] = None) -> Generator[RolloutBufferSamples, None, None]: + def get( + self, batch_size: Optional[int] = None + ) -> Generator[RolloutBufferSamples, None, None]: assert self.full, "" indices = np.random.permutation(self.buffer_size * self.n_envs) # Prepare the data if not self.generator_ready: - for tensor in ["observations", "actions", "values", "log_probs", "advantages", "returns"]: + + _tensor_names = ["actions", "values", "log_probs", "advantages", "returns"] + if self.is_dict_data: + for key, obs in self.observations.items(): + self.observations[key] = self.swap_and_flatten(obs) + else: + _tensor_names.append("observations") + + for tensor in _tensor_names: self.__dict__[tensor] = self.swap_and_flatten(self.__dict__[tensor]) self.generator_ready = True @@ -386,7 +552,22 @@ def get(self, batch_size: Optional[int] = None) -> Generator[RolloutBufferSample yield self._get_samples(indices[start_idx : start_idx + batch_size]) start_idx += batch_size - def _get_samples(self, batch_inds: np.ndarray, env: Optional[VecNormalize] = None) -> RolloutBufferSamples: + def _get_samples( + self, batch_inds: np.ndarray, env: Optional[VecNormalize] = None + ) -> RolloutBufferSamples: + if self.is_dict_data: + return DictRolloutBufferSamples( + observations={ + key: self.to_torch(obs[batch_inds]) + for (key, obs) in self.observations.items() + }, + actions=self.to_torch(self.actions[batch_inds]), + old_values=self.to_torch(self.values[batch_inds].flatten()), 
+ old_log_prob=self.to_torch(self.log_probs[batch_inds].flatten()), + advantages=self.to_torch(self.advantages[batch_inds].flatten()), + returns=self.to_torch(self.returns[batch_inds].flatten()), + ) + data = ( self.observations[batch_inds], self.actions[batch_inds], diff --git a/stable_baselines3/common/multi_input_envs.py b/stable_baselines3/common/multi_input_envs.py new file mode 100644 index 000000000..495d50ced --- /dev/null +++ b/stable_baselines3/common/multi_input_envs.py @@ -0,0 +1,210 @@ +import gym +import numpy as np + + +class SimpleMultiObsEnv(gym.Env): + ## simple 4x4 grid world + # + # ____________ + # | 0 1 2 3| + # | 4|¯5¯¯6¯| 7| + # | 8|_9_10_|11| + # |12 13 14 15| + # ¯¯¯¯¯¯¯¯¯¯¯¯¯¯ + # start is 0 + # states 5, 6, 9, and 10 are blocked + # goal is 15 + # actions are = [left, down, right, up] + + # simple linear state env of 15 states but encoded with a vector and an image observation + # State Mapping + # State Vector Img + # 0 Vec * 0 Img * 0 + # 1 Vec * 0 Img * 1/3 + # 2 Vec * 0 Img * 2/3 + # 3 Vec * 0 Img * 3/3 + # 4 Vec * 1/3 Img * 0 + # 5 Vec * 1/3 Img * 1/3 + # 6 Vec * 1/3 Img * 2/3 + # 7 Vec * 2/3 Img * 0 + # 8 Vec * 2/3 Img * 1/3 + # 9 Vec * 2/3 Img * 2/3 + # 10 Vec * 2/3 Img * 3/3 + # 11 Vec * 3/3 Img * 0 + # 12 Vec * 3/3 Img * 1/3 + # 13 Vec * 3/3 Img * 2/3 + # 14 Vec * 3/3 Img * 3/3 + + def __init__(self, num_col=4, num_row=4, random_start=True, noise=0.0): + super(SimpleMultiObsEnv, self).__init__() + + self.vector_size = 5 + self.img_size = [1, 20, 20] + + self.random_start = random_start + self.action_space = gym.spaces.Discrete(3) + self.observation_space = gym.spaces.Dict( + spaces={ + "vec": gym.spaces.Box(0, 1, (self.vector_size,)), + "img": gym.spaces.Box(0, 255, self.img_size, dtype=np.uint8), + } + ) + self.count = 0 + self.max_count = 100 + self.log = "" + self.state = 0 + self.action2str = ["left", "down", "right", "up"] + self.noise = noise + self.init_possible_transitions() + + self.init_state_mapping(num_col, num_row) + + self.max_state = len(self.state_mapping) - 1 + + def random_upsample_img( + self, v_rng=(0, 255), initial_size=(4, 4), up_size=(20, 20) + ): + im = np.random.randint(v_rng[0], v_rng[1], initial_size, dtype=np.int32) + return np.array( + [ + [ + [ + im[int(initial_size[0] * r / up_size[0])][ + int(initial_size[1] * c / up_size[1]) + ] + for c in range(up_size[0]) + ] + for r in range(up_size[1]) + ] + ] + ).astype(np.int32) + + def init_state_mapping(self, num_col, num_row): + self.num_col = num_col + self.state_mapping = [] + + col_vecs = [np.random.random(self.vector_size) for i in range(num_col)] + row_imgs = [self.random_upsample_img() for j in range(num_row)] + for i in range(num_col): + for j in range(num_row): + self.state_mapping.append({"vec": col_vecs[i], "img": row_imgs[j]}) + + def get_state_mapping(self): + state_dict = self.state_mapping[self.state] + if self.noise > 0: + state_dict["vec"] += np.random.random(self.vector_size) * self.noise + img_noise = int(255 * self.noise) + state_dict["img"] += np.random.randint( + -img_noise, img_noise, (1, 20, 20), dtype=np.int32 + ) + state_dict["img"] = np.clip(state_dict["img"], 0, 255) + return state_dict + + def init_possible_transitions(self): + self.left_possible = [1, 2, 3, 13, 14, 15] + self.down_possible = [0, 4, 8, 3, 7, 11] + self.right_possible = [0, 1, 2, 12, 13, 14] + self.up_possible = [4, 8, 12, 7, 11, 15] + + def step(self, action): + self.count += 1 + + prev_state = self.state + + rwd = -0.1 + # define state transition + if self.state in self.left_possible 
and action == 0: # left + self.state -= 1 + elif self.state in self.down_possible and action == 1: # down + self.state += self.num_col + elif self.state in self.right_possible and action == 2: # right + self.state += 1 + elif self.state in self.up_possible and action == 3: # up + self.state -= self.num_col + + got_to_end = self.state == self.max_state + rwd = 1 if got_to_end else rwd + done = self.count > self.max_count or got_to_end + + self.log = f"Went {self.action2str[action]} in state {prev_state}, got to state {self.state}" + + return self.get_state_mapping(), rwd, done, {"got_to_end": got_to_end} + + def render(self, mode=None): + print(self.log) + + def reset(self): + self.count = 0 + if not self.random_start: + self.state = 0 + else: + self.state = np.random.randint(0, self.max_state) + return self.state_mapping[self.state] + + +class NineRoomMultiObsEnv(SimpleMultiObsEnv): + + ## 9 room grid world + # + # ____________________________________ + # | 0 1 2 | 3 4 5 | 6 7 8 | + # | 9 10 11 12 13 14 15 16 17 | + # | 18 19 20 | 21 22 23 | 24 25 26 | + # |¯¯¯¯ ¯¯¯|¯¯¯¯ ¯¯¯¯|¯¯¯¯ ¯¯¯¯| + # | 27 28 29 | 30 31 32 | 33 34 35 | + # | 36 37 38 39 40 41 42 43 44 | + # | 45 46 47 | 48 49 50 | 51 52 53 | + # |¯¯¯ ¯¯¯|¯¯¯¯ ¯¯¯¯|¯¯¯¯ ¯¯¯¯| + # | 54 55 56 | 57 58 59 | 60 61 62 | + # | 63 64 65 66 67 68 69 70 71 | + # | 72 73 74 | 75 76 77 | 78 79 80 | + # ¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯ + # start is 0 + # goal is 80 + # actions are = [left, down, right, up] + + def __init__(self, random_start=True, noise=0.0): + super(NineRoomMultiObsEnv, self).__init__( + 9, 9, random_start=random_start, noise=noise + ) + + def init_possible_transitions(self): + self.left_possible = ( + [1, 2, 4, 5, 7, 8] + + list(range(10, 18)) + + [19, 20, 22, 23, 25, 26] + + [28, 29, 31, 32, 34, 35] + + list(range(37, 45)) + + [46, 47, 49, 50, 52, 53] + + [55, 56, 58, 59, 61, 62] + + list(range(64, 72)) + + [73, 74, 76, 77, 79, 80] + ) + + self.down_possible = ( + list(range(18)) + + [19, 22, 25] + + list(range(27, 45)) + + [46, 49, 52] + + list(range(54, 72)) + ) + + self.right_possible = ( + [0, 1, 3, 4, 6, 7] + + list(range(9, 17)) + + [18, 19, 21, 22, 24, 25] + + [27, 28, 30, 31, 33, 34] + + list(range(36, 44)) + + [45, 46, 48, 49, 51, 52] + + [54, 55, 57, 58, 60, 61] + + list(range(63, 71)) + + [72, 73, 75, 76, 78, 79] + ) + + self.up_possible = ( + list(range(9, 27)) + + [28, 31, 34] + + list(range(36, 54)) + + [55, 58, 61] + + list(range(63, 81)) + ) diff --git a/stable_baselines3/common/on_policy_algorithm.py b/stable_baselines3/common/on_policy_algorithm.py index 9f7a66538..80b54f5bd 100644 --- a/stable_baselines3/common/on_policy_algorithm.py +++ b/stable_baselines3/common/on_policy_algorithm.py @@ -11,7 +11,7 @@ from stable_baselines3.common.callbacks import BaseCallback from stable_baselines3.common.policies import ActorCriticPolicy from stable_baselines3.common.type_aliases import GymEnv, MaybeCallback, Schedule -from stable_baselines3.common.utils import safe_mean +from stable_baselines3.common.utils import safe_mean, obs_as_tensor from stable_baselines3.common.vec_env import VecEnv @@ -121,7 +121,11 @@ def _setup_model(self) -> None: self.policy = self.policy.to(self.device) def collect_rollouts( - self, env: VecEnv, callback: BaseCallback, rollout_buffer: RolloutBuffer, n_rollout_steps: int + self, + env: VecEnv, + callback: BaseCallback, + rollout_buffer: RolloutBuffer, + n_rollout_steps: int, ) -> bool: """ Collect experiences using the current policy and fill a ``RolloutBuffer``. 
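
In practice this rollout collection is driven by ``learn()``; a condensed version of
``multi_input_tests.py`` above shows the new dictionary-observation pieces end to end
(every name used here is introduced by this patch)::

    from stable_baselines3 import PPO
    from stable_baselines3.common.multi_input_envs import SimpleMultiObsEnv
    from stable_baselines3.common.policies import MultiInputActorCriticPolicy
    from stable_baselines3.common.vec_env import DummyVecEnv

    # Each observation is a dict with a "vec" (Box of shape (5,)) and an
    # "img" (1x20x20 uint8) entry, so the policy needs the dict-aware
    # feature extraction provided by MultiInputActorCriticPolicy.
    env = DummyVecEnv([lambda: SimpleMultiObsEnv(random_start=True)])
    model = PPO(MultiInputActorCriticPolicy, env)
    model.learn(total_timesteps=1000)

    # predict() accepts the dict observation returned by the vectorized env
    obs = env.reset()
    action, _states = model.predict(obs, deterministic=True)
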
@@ -146,13 +150,17 @@ def collect_rollouts( callback.on_rollout_start() while n_steps < n_rollout_steps: - if self.use_sde and self.sde_sample_freq > 0 and n_steps % self.sde_sample_freq == 0: + if ( + self.use_sde + and self.sde_sample_freq > 0 + and n_steps % self.sde_sample_freq == 0 + ): # Sample a new noise matrix self.policy.reset_noise(env.num_envs) with th.no_grad(): - # Convert to pytorch tensor - obs_tensor = th.as_tensor(self._last_obs).to(self.device) + # Convert to pytorch tensor or to TensorDict + obs_tensor = obs_as_tensor(self._last_obs, self.device) actions, values, log_probs = self.policy.forward(obs_tensor) actions = actions.cpu().numpy() @@ -160,7 +168,9 @@ def collect_rollouts( clipped_actions = actions # Clip the actions to avoid out of bound error if isinstance(self.action_space, gym.spaces.Box): - clipped_actions = np.clip(actions, self.action_space.low, self.action_space.high) + clipped_actions = np.clip( + actions, self.action_space.low, self.action_space.high + ) new_obs, rewards, dones, infos = env.step(clipped_actions) @@ -177,13 +187,15 @@ def collect_rollouts( if isinstance(self.action_space, gym.spaces.Discrete): # Reshape in case of discrete action actions = actions.reshape(-1, 1) - rollout_buffer.add(self._last_obs, actions, rewards, self._last_dones, values, log_probs) + rollout_buffer.add( + self._last_obs, actions, rewards, self._last_dones, values, log_probs + ) self._last_obs = new_obs self._last_dones = dones with th.no_grad(): # Compute value for the last timestep - obs_tensor = th.as_tensor(new_obs).to(self.device) + obs_tensor = obs_as_tensor(new_obs, self.device) _, values, _ = self.policy.forward(obs_tensor) rollout_buffer.compute_returns_and_advantage(last_values=values, dones=dones) @@ -214,14 +226,23 @@ def learn( iteration = 0 total_timesteps, callback = self._setup_learn( - total_timesteps, eval_env, callback, eval_freq, n_eval_episodes, eval_log_path, reset_num_timesteps, tb_log_name + total_timesteps, + eval_env, + callback, + eval_freq, + n_eval_episodes, + eval_log_path, + reset_num_timesteps, + tb_log_name, ) callback.on_training_start(locals(), globals()) while self.num_timesteps < total_timesteps: - continue_training = self.collect_rollouts(self.env, callback, self.rollout_buffer, n_rollout_steps=self.n_steps) + continue_training = self.collect_rollouts( + self.env, callback, self.rollout_buffer, n_rollout_steps=self.n_steps + ) if continue_training is False: break @@ -234,11 +255,23 @@ def learn( fps = int(self.num_timesteps / (time.time() - self.start_time)) logger.record("time/iterations", iteration, exclude="tensorboard") if len(self.ep_info_buffer) > 0 and len(self.ep_info_buffer[0]) > 0: - logger.record("rollout/ep_rew_mean", safe_mean([ep_info["r"] for ep_info in self.ep_info_buffer])) - logger.record("rollout/ep_len_mean", safe_mean([ep_info["l"] for ep_info in self.ep_info_buffer])) + logger.record( + "rollout/ep_rew_mean", + safe_mean([ep_info["r"] for ep_info in self.ep_info_buffer]), + ) + logger.record( + "rollout/ep_len_mean", + safe_mean([ep_info["l"] for ep_info in self.ep_info_buffer]), + ) logger.record("time/fps", fps) - logger.record("time/time_elapsed", int(time.time() - self.start_time), exclude="tensorboard") - logger.record("time/total_timesteps", self.num_timesteps, exclude="tensorboard") + logger.record( + "time/time_elapsed", + int(time.time() - self.start_time), + exclude="tensorboard", + ) + logger.record( + "time/total_timesteps", self.num_timesteps, exclude="tensorboard" + ) 
logger.dump(step=self.num_timesteps) self.train() diff --git a/stable_baselines3/common/policies.py b/stable_baselines3/common/policies.py index 5c97431f9..c0d800e69 100644 --- a/stable_baselines3/common/policies.py +++ b/stable_baselines3/common/policies.py @@ -5,6 +5,8 @@ from functools import partial from typing import Any, Dict, List, Optional, Tuple, Type, Union +import copy + import gym import numpy as np import torch as th @@ -19,8 +21,20 @@ StateDependentNoiseDistribution, make_proba_distribution, ) -from stable_baselines3.common.preprocessing import get_action_dim, is_image_space, preprocess_obs -from stable_baselines3.common.torch_layers import BaseFeaturesExtractor, FlattenExtractor, MlpExtractor, NatureCNN, create_mlp +from stable_baselines3.common.preprocessing import ( + get_action_dim, + is_image_space, + preprocess_obs, +) +from stable_baselines3.common.torch_layers import ( + BaseFeaturesExtractor, + FlattenExtractor, + MlpExtractor, + NatureCNN, + create_mlp, + CombinedExtractor, +) + from stable_baselines3.common.type_aliases import Schedule from stable_baselines3.common.utils import get_device, is_vectorized_observation from stable_baselines3.common.vec_env import VecTransposeImage @@ -85,7 +99,9 @@ def forward(self, *args, **kwargs): del args, kwargs def _update_features_extractor( - self, net_kwargs: Dict[str, Any], features_extractor: Optional[BaseFeaturesExtractor] = None + self, + net_kwargs: Dict[str, Any], + features_extractor: Optional[BaseFeaturesExtractor] = None, ) -> Dict[str, Any]: """ Update the network keyword arguments and create a new features extractor object if needed. @@ -101,12 +117,19 @@ def _update_features_extractor( if features_extractor is None: # The features extractor is not shared, create a new one features_extractor = self.make_features_extractor() - net_kwargs.update(dict(features_extractor=features_extractor, features_dim=features_extractor.features_dim)) + net_kwargs.update( + dict( + features_extractor=features_extractor, + features_dim=features_extractor.features_dim, + ) + ) return net_kwargs def make_features_extractor(self) -> BaseFeaturesExtractor: """ Helper method to create a features extractor.""" - return self.features_extractor_class(self.observation_space, **self.features_extractor_kwargs) + return self.features_extractor_class( + self.observation_space, **self.features_extractor_kwargs + ) def extract_features(self, obs: th.Tensor) -> th.Tensor: """ @@ -116,7 +139,9 @@ def extract_features(self, obs: th.Tensor) -> th.Tensor: :return: """ assert self.features_extractor is not None, "No features extractor was set" - preprocessed_obs = preprocess_obs(obs, self.observation_space, normalize_images=self.normalize_images) + preprocessed_obs = preprocess_obs( + obs, self.observation_space, normalize_images=self.normalize_images + ) return self.features_extractor(preprocessed_obs) def _get_data(self) -> Dict[str, Any]: @@ -177,7 +202,9 @@ def load_from_vector(self, vector: np.ndarray) -> None: :param vector: """ - th.nn.utils.vector_to_parameters(th.FloatTensor(vector).to(self.device), self.parameters()) + th.nn.utils.vector_to_parameters( + th.FloatTensor(vector).to(self.device), self.parameters() + ) def parameters_to_vector(self) -> np.ndarray: """ @@ -185,7 +212,9 @@ def parameters_to_vector(self) -> np.ndarray: :return: """ - return th.nn.utils.parameters_to_vector(self.parameters()).detach().cpu().numpy() + return ( + th.nn.utils.parameters_to_vector(self.parameters()).detach().cpu().numpy() + ) class BasePolicy(BaseModel): @@ 
-225,7 +254,9 @@ def init_weights(module: nn.Module, gain: float = 1) -> None: module.bias.data.fill_(0.0) @abstractmethod - def _predict(self, observation: th.Tensor, deterministic: bool = False) -> th.Tensor: + def _predict( + self, observation: th.Tensor, deterministic: bool = False + ) -> th.Tensor: """ Get the action according to the policy for a given observation. @@ -260,16 +291,39 @@ def predict( # state = self.initial_state # if mask is None: # mask = [False for _ in range(self.n_envs)] - if isinstance(observation, dict): + # Need to check the observation if its a ObsDictWrapper + + if isinstance(self.observation_space, ObsDictWrapper): observation = ObsDictWrapper.convert_dict(observation) - else: - observation = np.array(observation) + elif isinstance(observation, dict): + # need to copy the dict as the dict in VecFrameStack will become a torch tensor + observation = copy.deepcopy(observation) + for key, obs in observation.items(): + obs_space = self.observation_space.spaces[key] + if is_image_space(obs_space): + obs = np.array(obs) + if not ( + obs.shape == obs_space.shape or obs.shape[1:] == obs_space.shape + ): + # Try to re-order the channels + transpose_obs = VecTransposeImage.transpose_image(obs) + if ( + transpose_obs.shape == obs_space.shape + or transpose_obs.shape[1:] == obs_space.shape + ): + observation = transpose_obs + else: + observation[key] = obs.reshape( + (-1,) + self.observation_space[key].shape + ) + + elif is_image_space(self.observation_space): + # Handle the different cases for images + # as PyTorch use channel first format - # Handle the different cases for images - # as PyTorch use channel first format - if is_image_space(self.observation_space): if not ( - observation.shape == self.observation_space.shape or observation.shape[1:] == self.observation_space.shape + observation.shape == self.observation_space.shape + or observation.shape[1:] == self.observation_space.shape ): # Try to re-order the channels transpose_obs = VecTransposeImage.transpose_image(observation) @@ -278,12 +332,18 @@ def predict( or transpose_obs.shape[1:] == self.observation_space.shape ): observation = transpose_obs + else: + observation = np.array(observation) vectorized_env = is_vectorized_observation(observation, self.observation_space) - observation = observation.reshape((-1,) + self.observation_space.shape) + if isinstance(observation, dict): + for key, obs in observation.items(): + observation[key] = th.as_tensor(observation[key]).to(self.device) + else: + observation = observation.reshape((-1,) + self.observation_space.shape) + observation = th.as_tensor(observation).to(self.device) - observation = th.as_tensor(observation).to(self.device) with th.no_grad(): actions = self._predict(observation, deterministic=deterministic) # Convert to numpy @@ -296,11 +356,15 @@ def predict( else: # Actions could be on arbitrary scale, so clip the actions to avoid # out of bound error (e.g. if sampling from a Gaussian distribution) - actions = np.clip(actions, self.action_space.low, self.action_space.high) + actions = np.clip( + actions, self.action_space.low, self.action_space.high + ) if not vectorized_env: if state is not None: - raise ValueError("Error: The environment must be vectorized when using recurrent policies.") + raise ValueError( + "Error: The environment must be vectorized when using recurrent policies." 
+ ) actions = actions[0] return actions, state @@ -409,7 +473,9 @@ def __init__( self.activation_fn = activation_fn self.ortho_init = ortho_init - self.features_extractor = features_extractor_class(self.observation_space, **self.features_extractor_kwargs) + self.features_extractor = features_extractor_class( + self.observation_space, **self.features_extractor_kwargs + ) self.features_dim = self.features_extractor.features_dim self.normalize_images = normalize_images @@ -430,7 +496,9 @@ def __init__( self.dist_kwargs = dist_kwargs # Action distribution - self.action_dist = make_proba_distribution(action_space, use_sde=use_sde, dist_kwargs=dist_kwargs) + self.action_dist = make_proba_distribution( + action_space, use_sde=use_sde, dist_kwargs=dist_kwargs + ) self._build(lr_schedule) @@ -465,7 +533,9 @@ def reset_noise(self, n_envs: int = 1) -> None: :param n_envs: """ - assert isinstance(self.action_dist, StateDependentNoiseDistribution), "reset_noise() is only available when using gSDE" + assert isinstance( + self.action_dist, StateDependentNoiseDistribution + ), "reset_noise() is only available when using gSDE" self.action_dist.sample_weights(self.log_std, batch_size=n_envs) def _build_mlp_extractor(self) -> None: @@ -477,7 +547,10 @@ def _build_mlp_extractor(self) -> None: # net_arch here is an empty list and mlp_extractor does not # really contain any layers (acts like an identity module). self.mlp_extractor = MlpExtractor( - self.features_dim, net_arch=self.net_arch, activation_fn=self.activation_fn, device=self.device + self.features_dim, + net_arch=self.net_arch, + activation_fn=self.activation_fn, + device=self.device, ) def _build(self, lr_schedule: Schedule) -> None: @@ -502,16 +575,26 @@ def _build(self, lr_schedule: Schedule) -> None: latent_dim=latent_dim_pi, log_std_init=self.log_std_init ) elif isinstance(self.action_dist, StateDependentNoiseDistribution): - latent_sde_dim = latent_dim_pi if self.sde_net_arch is None else latent_sde_dim + latent_sde_dim = ( + latent_dim_pi if self.sde_net_arch is None else latent_sde_dim + ) self.action_net, self.log_std = self.action_dist.proba_distribution_net( - latent_dim=latent_dim_pi, latent_sde_dim=latent_sde_dim, log_std_init=self.log_std_init + latent_dim=latent_dim_pi, + latent_sde_dim=latent_sde_dim, + log_std_init=self.log_std_init, ) elif isinstance(self.action_dist, CategoricalDistribution): - self.action_net = self.action_dist.proba_distribution_net(latent_dim=latent_dim_pi) + self.action_net = self.action_dist.proba_distribution_net( + latent_dim=latent_dim_pi + ) elif isinstance(self.action_dist, MultiCategoricalDistribution): - self.action_net = self.action_dist.proba_distribution_net(latent_dim=latent_dim_pi) + self.action_net = self.action_dist.proba_distribution_net( + latent_dim=latent_dim_pi + ) elif isinstance(self.action_dist, BernoulliDistribution): - self.action_net = self.action_dist.proba_distribution_net(latent_dim=latent_dim_pi) + self.action_net = self.action_dist.proba_distribution_net( + latent_dim=latent_dim_pi + ) else: raise NotImplementedError(f"Unsupported distribution '{self.action_dist}'.") @@ -533,9 +616,13 @@ def _build(self, lr_schedule: Schedule) -> None: module.apply(partial(self.init_weights, gain=gain)) # Setup optimizer with initial learning rate - self.optimizer = self.optimizer_class(self.parameters(), lr=lr_schedule(1), **self.optimizer_kwargs) + self.optimizer = self.optimizer_class( + self.parameters(), lr=lr_schedule(1), **self.optimizer_kwargs + ) - def forward(self, obs: th.Tensor, 
deterministic: bool = False) -> Tuple[th.Tensor, th.Tensor, th.Tensor]: + def forward( + self, obs: th.Tensor, deterministic: bool = False + ) -> Tuple[th.Tensor, th.Tensor, th.Tensor]: """ Forward pass in all the networks (actor and critic) @@ -546,7 +633,9 @@ def forward(self, obs: th.Tensor, deterministic: bool = False) -> Tuple[th.Tenso latent_pi, latent_vf, latent_sde = self._get_latent(obs) # Evaluate the values for the given observations values = self.value_net(latent_vf) - distribution = self._get_action_dist_from_latent(latent_pi, latent_sde=latent_sde) + distribution = self._get_action_dist_from_latent( + latent_pi, latent_sde=latent_sde + ) actions = distribution.get_actions(deterministic=deterministic) log_prob = distribution.log_prob(actions) return actions, values, log_prob @@ -570,7 +659,9 @@ def _get_latent(self, obs: th.Tensor) -> Tuple[th.Tensor, th.Tensor, th.Tensor]: latent_sde = self.sde_features_extractor(features) return latent_pi, latent_vf, latent_sde - def _get_action_dist_from_latent(self, latent_pi: th.Tensor, latent_sde: Optional[th.Tensor] = None) -> Distribution: + def _get_action_dist_from_latent( + self, latent_pi: th.Tensor, latent_sde: Optional[th.Tensor] = None + ) -> Distribution: """ Retrieve action distribution given the latent codes. @@ -592,11 +683,15 @@ def _get_action_dist_from_latent(self, latent_pi: th.Tensor, latent_sde: Optiona # Here mean_actions are the logits (before rounding to get the binary actions) return self.action_dist.proba_distribution(action_logits=mean_actions) elif isinstance(self.action_dist, StateDependentNoiseDistribution): - return self.action_dist.proba_distribution(mean_actions, self.log_std, latent_sde) + return self.action_dist.proba_distribution( + mean_actions, self.log_std, latent_sde + ) else: raise ValueError("Invalid action distribution") - def _predict(self, observation: th.Tensor, deterministic: bool = False) -> th.Tensor: + def _predict( + self, observation: th.Tensor, deterministic: bool = False + ) -> th.Tensor: """ Get the action according to the policy for a given observation. @@ -608,7 +703,9 @@ def _predict(self, observation: th.Tensor, deterministic: bool = False) -> th.Te distribution = self._get_action_dist_from_latent(latent_pi, latent_sde) return distribution.get_actions(deterministic=deterministic) - def evaluate_actions(self, obs: th.Tensor, actions: th.Tensor) -> Tuple[th.Tensor, th.Tensor, th.Tensor]: + def evaluate_actions( + self, obs: th.Tensor, actions: th.Tensor + ) -> Tuple[th.Tensor, th.Tensor, th.Tensor]: """ Evaluate actions according to the current policy, given the observations. @@ -700,6 +797,81 @@ def __init__( ) +class MultiInputActorCriticPolicy(ActorCriticPolicy): + """ + MultiInputActorClass policy class for actor-critic algorithms (has both policy and value prediction). + Used by A2C, PPO and the likes. + + :param observation_space: Observation space (Tuple) + :param action_space: Action space + :param lr_schedule: Learning rate schedule (could be constant) + :param net_arch: The specification of the policy and value networks. 
+ :param activation_fn: Activation function + :param ortho_init: Whether to use or not orthogonal initialization + :param use_sde: Whether to use State Dependent Exploration or not + :param log_std_init: Initial value for the log standard deviation + :param full_std: Whether to use (n_features x n_actions) parameters + for the std instead of only (n_features,) when using gSDE + :param sde_net_arch: Network architecture for extracting features + when using gSDE. If None, the latent features from the policy will be used. + Pass an empty list to use the states as features. + :param use_expln: Use ``expln()`` function instead of ``exp()`` to ensure + a positive standard deviation (cf paper). It allows to keep variance + above zero and prevent it from growing too fast. In practice, ``exp()`` is usually enough. + :param squash_output: Whether to squash the output using a tanh function, + this allows to ensure boundaries when using gSDE. + :param features_extractor_class: Uses the CombinedExtractor + :param features_extractor_kwargs: Keyword arguments + to pass to the feature extractor. + :param normalize_images: Whether to normalize images or not, + dividing by 255.0 (True by default) + :param optimizer_class: The optimizer to use, + ``th.optim.Adam`` by default + :param optimizer_kwargs: Additional keyword arguments, + excluding the learning rate, to pass to the optimizer + """ + + def __init__( + self, + observation_space: gym.spaces.Dict, + action_space: gym.spaces.Space, + lr_schedule: Callable, + net_arch: Optional[List[Union[int, Dict[str, List[int]]]]] = None, + activation_fn: Type[nn.Module] = nn.Tanh, + ortho_init: bool = True, + use_sde: bool = False, + log_std_init: float = 0.0, + full_std: bool = True, + sde_net_arch: Optional[List[int]] = None, + use_expln: bool = False, + squash_output: bool = False, + features_extractor_class: Type[BaseFeaturesExtractor] = CombinedExtractor, + features_extractor_kwargs: Optional[Dict[str, Any]] = None, + normalize_images: bool = True, + optimizer_class: Type[th.optim.Optimizer] = th.optim.Adam, + optimizer_kwargs: Optional[Dict[str, Any]] = None, + ): + super(MultiInputActorCriticPolicy, self).__init__( + observation_space, + action_space, + lr_schedule, + net_arch, + activation_fn, + ortho_init, + use_sde, + log_std_init, + full_std, + sde_net_arch, + use_expln, + squash_output, + features_extractor_class, + features_extractor_kwargs, + normalize_images, + optimizer_class, + optimizer_kwargs, + ) + + class ContinuousCritic(BaseModel): """ Critic network(s) for DDPG/SAC/TD3. @@ -791,7 +963,13 @@ def create_sde_features_extractor( # Special case: when using states as features (i.e. 
sde_net_arch is an empty list) # don't use any activation function sde_activation = activation_fn if len(sde_net_arch) > 0 else None - latent_sde_net = create_mlp(features_dim, -1, sde_net_arch, activation_fn=sde_activation, squash_output=False) + latent_sde_net = create_mlp( + features_dim, + -1, + sde_net_arch, + activation_fn=sde_activation, + squash_output=False, + ) latent_sde_dim = sde_net_arch[-1] if len(sde_net_arch) > 0 else features_dim sde_features_extractor = nn.Sequential(*latent_sde_net) return sde_features_extractor, latent_sde_dim @@ -800,7 +978,9 @@ def create_sde_features_extractor( _policy_registry = dict() # type: Dict[Type[BasePolicy], Dict[str, Type[BasePolicy]]] -def get_policy_from_name(base_policy_type: Type[BasePolicy], name: str) -> Type[BasePolicy]: +def get_policy_from_name( + base_policy_type: Type[BasePolicy], name: str +) -> Type[BasePolicy]: """ Returns the registered policy from the base type and name. See `register_policy` for registering policies and explanation. @@ -849,7 +1029,9 @@ def register_policy(name: str, policy: Type[BasePolicy]) -> None: sub_class = cls break if sub_class is None: - raise ValueError(f"Error: the policy {policy} is not of any known subclasses of BasePolicy!") + raise ValueError( + f"Error: the policy {policy} is not of any known subclasses of BasePolicy!" + ) if sub_class not in _policy_registry: _policy_registry[sub_class] = {} @@ -858,5 +1040,7 @@ def register_policy(name: str, policy: Type[BasePolicy]) -> None: # we try to register. If not so, # do not override and complain. if _policy_registry[sub_class][name] != policy: - raise ValueError(f"Error: the name {name} is already registered for a different policy, will not override.") + raise ValueError( + f"Error: the name {name} is already registered for a different policy, will not override." + ) _policy_registry[sub_class][name] = policy diff --git a/stable_baselines3/common/preprocessing.py b/stable_baselines3/common/preprocessing.py index 881970a04..9ae0c5367 100644 --- a/stable_baselines3/common/preprocessing.py +++ b/stable_baselines3/common/preprocessing.py @@ -20,11 +20,17 @@ def is_image_space_channels_first(observation_space: spaces.Box) -> bool: """ smallest_dimension = np.argmin(observation_space.shape).item() if smallest_dimension == 1: - warnings.warn("Treating image space as channels-last, while second dimension was smallest of the three.") + warnings.warn( + "Treating image space as channels-last, while second dimension was smallest of the three." + ) return smallest_dimension == 0 -def is_image_space(observation_space: spaces.Space, channels_last: bool = True, check_channels: bool = False) -> bool: +def is_image_space( + observation_space: spaces.Space, + channels_last: bool = True, + check_channels: bool = False, +) -> bool: """ Check if a observation space has the shape, limits and dtype of a valid image. @@ -61,7 +67,16 @@ def is_image_space(observation_space: spaces.Space, channels_last: bool = True, return False -def preprocess_obs(obs: th.Tensor, observation_space: spaces.Space, normalize_images: bool = True) -> th.Tensor: +def has_image_space(observation_space: spaces.Dict): + for key, subspace in observation_space.spaces.items(): + if is_image_space(subspace): + return True + return False + + +def preprocess_obs( + obs: th.Tensor, observation_space: spaces.Space, normalize_images: bool = True +) -> th.Tensor: """ Preprocess observation to be to a neural network. 
For images, it normalizes the values by dividing them by 255 (to have values in [0, 1]) @@ -86,7 +101,9 @@ def preprocess_obs(obs: th.Tensor, observation_space: spaces.Space, normalize_im # Tensor concatenation of one hot encodings of each Categorical sub-space return th.cat( [ - F.one_hot(obs_.long(), num_classes=int(observation_space.nvec[idx])).float() + F.one_hot( + obs_.long(), num_classes=int(observation_space.nvec[idx]) + ).float() for idx, obs_ in enumerate(th.split(obs.long(), 1, dim=1)) ], dim=-1, @@ -95,8 +112,15 @@ def preprocess_obs(obs: th.Tensor, observation_space: spaces.Space, normalize_im elif isinstance(observation_space, spaces.MultiBinary): return obs.float() + elif isinstance(observation_space, spaces.Dict): + for key, _obs in obs.items(): + obs[key] = _obs.float() + return obs + else: - raise NotImplementedError(f"Preprocessing not implemented for {observation_space}") + raise NotImplementedError( + f"Preprocessing not implemented for {observation_space}" + ) def get_obs_shape(observation_space: spaces.Space) -> Tuple[int, ...]: @@ -117,8 +141,15 @@ def get_obs_shape(observation_space: spaces.Space) -> Tuple[int, ...]: elif isinstance(observation_space, spaces.MultiBinary): # Number of binary features return (int(observation_space.n),) + elif isinstance(observation_space, spaces.Dict): + return { + key: subspace.shape for (key, subspace) in observation_space.spaces.items() + } + else: - raise NotImplementedError(f"{observation_space} observation space is not supported") + raise NotImplementedError( + f"{observation_space} observation space is not supported" + ) def get_flattened_obs_dim(observation_space: spaces.Space) -> int: diff --git a/stable_baselines3/common/torch_layers.py b/stable_baselines3/common/torch_layers.py index 165d37d8c..231ae64ed 100644 --- a/stable_baselines3/common/torch_layers.py +++ b/stable_baselines3/common/torch_layers.py @@ -6,6 +6,7 @@ from torch import nn from stable_baselines3.common.preprocessing import get_flattened_obs_dim, is_image_space +from stable_baselines3.common.type_aliases import TensorDict from stable_baselines3.common.utils import get_device @@ -40,7 +41,9 @@ class FlattenExtractor(BaseFeaturesExtractor): """ def __init__(self, observation_space: gym.Space): - super(FlattenExtractor, self).__init__(observation_space, get_flattened_obs_dim(observation_space)) + super(FlattenExtractor, self).__init__( + observation_space, get_flattened_obs_dim(observation_space) + ) self.flatten = nn.Flatten() def forward(self, observations: th.Tensor) -> th.Tensor: @@ -84,7 +87,9 @@ def __init__(self, observation_space: gym.spaces.Box, features_dim: int = 512): # Compute shape by doing one forward pass with th.no_grad(): - n_flatten = self.cnn(th.as_tensor(observation_space.sample()[None]).float()).shape[1] + n_flatten = self.cnn( + th.as_tensor(observation_space.sample()[None]).float() + ).shape[1] self.linear = nn.Sequential(nn.Linear(n_flatten, features_dim), nn.ReLU()) @@ -93,7 +98,11 @@ def forward(self, observations: th.Tensor) -> th.Tensor: def create_mlp( - input_dim: int, output_dim: int, net_arch: List[int], activation_fn: Type[nn.Module] = nn.ReLU, squash_output: bool = False + input_dim: int, + output_dim: int, + net_arch: List[int], + activation_fn: Type[nn.Module] = nn.ReLU, + squash_output: bool = False, ) -> List[nn.Module]: """ Create a multi layer perceptron (MLP), which is @@ -165,26 +174,37 @@ def __init__( super(MlpExtractor, self).__init__() device = get_device(device) shared_net, policy_net, value_net = [], [], [] - 
policy_only_layers = [] # Layer sizes of the network that only belongs to the policy network - value_only_layers = [] # Layer sizes of the network that only belongs to the value network + policy_only_layers = ( + [] + ) # Layer sizes of the network that only belongs to the policy network + value_only_layers = ( + [] + ) # Layer sizes of the network that only belongs to the value network last_layer_dim_shared = feature_dim # Iterate through the shared layers and build the shared parts of the network for idx, layer in enumerate(net_arch): if isinstance(layer, int): # Check that this is a shared layer - layer_size = layer # TODO: give layer a meaningful name - shared_net.append(nn.Linear(last_layer_dim_shared, layer_size)) + shared_net.append( + nn.Linear(last_layer_dim_shared, layer) + ) # add linear of size layer shared_net.append(activation_fn()) - last_layer_dim_shared = layer_size + last_layer_dim_shared = layer else: - assert isinstance(layer, dict), "Error: the net_arch list can only contain ints and dicts" + assert isinstance( + layer, dict + ), "Error: the net_arch list can only contain ints and dicts" if "pi" in layer: - assert isinstance(layer["pi"], list), "Error: net_arch[-1]['pi'] must contain a list of integers." + assert isinstance( + layer["pi"], list + ), "Error: net_arch[-1]['pi'] must contain a list of integers." policy_only_layers = layer["pi"] if "vf" in layer: - assert isinstance(layer["vf"], list), "Error: net_arch[-1]['vf'] must contain a list of integers." + assert isinstance( + layer["vf"], list + ), "Error: net_arch[-1]['vf'] must contain a list of integers." value_only_layers = layer["vf"] break # From here on the network splits up in policy and value network @@ -192,15 +212,21 @@ def __init__( last_layer_dim_vf = last_layer_dim_shared # Build the non-shared part of the network - for idx, (pi_layer_size, vf_layer_size) in enumerate(zip_longest(policy_only_layers, value_only_layers)): + for idx, (pi_layer_size, vf_layer_size) in enumerate( + zip_longest(policy_only_layers, value_only_layers) + ): if pi_layer_size is not None: - assert isinstance(pi_layer_size, int), "Error: net_arch[-1]['pi'] must only contain integers." + assert isinstance( + pi_layer_size, int + ), "Error: net_arch[-1]['pi'] must only contain integers." policy_net.append(nn.Linear(last_layer_dim_pi, pi_layer_size)) policy_net.append(activation_fn()) last_layer_dim_pi = pi_layer_size if vf_layer_size is not None: - assert isinstance(vf_layer_size, int), "Error: net_arch[-1]['vf'] must only contain integers." + assert isinstance( + vf_layer_size, int + ), "Error: net_arch[-1]['vf'] must only contain integers." 
value_net.append(nn.Linear(last_layer_dim_vf, vf_layer_size)) value_net.append(activation_fn()) last_layer_dim_vf = vf_layer_size @@ -224,7 +250,85 @@ def forward(self, features: th.Tensor) -> Tuple[th.Tensor, th.Tensor]: return self.policy_net(shared_latent), self.value_net(shared_latent) -def get_actor_critic_arch(net_arch: Union[List[int], Dict[str, List[int]]]) -> Tuple[List[int], List[int]]: +class CombinedExtractor(BaseFeaturesExtractor): + def __init__( + self, + observation_space: gym.spaces.Dict, + features_dim: int = 64, + cnn_output_dim: int = 64, + mlp_output_dim: int = 64, + mlp_net_arch: List[int] = [64, 64], + activation_fn: Type[nn.Module] = nn.ReLU, + comb_net_arch: List[int] = [64, 64], + ): + super(CombinedExtractor, self).__init__( + observation_space, features_dim=features_dim + ) + + extractors = {} + + total_concat_size = 0 + for (key, subspace) in observation_space.spaces.items(): + + if is_image_space(subspace): + n_input_channels = subspace.shape[0] + cnn = nn.Sequential( + nn.Conv2d(n_input_channels, 32, kernel_size=8, stride=4, padding=0), + nn.ReLU(), + nn.Conv2d(32, 64, kernel_size=4, stride=2, padding=0), + nn.ReLU(), + nn.Flatten(), + ) + + # TODO is this the best practice for finding out the size? + with th.no_grad(): + n_flatten = cnn( + th.as_tensor(subspace.sample()[None]).float() + ).shape[1] + + cnn_linear = nn.Sequential( + nn.Linear(n_flatten, cnn_output_dim), nn.ReLU() + ) + + extractors[key] = nn.Sequential(*(list(cnn) + list(cnn_linear))) + + total_concat_size += cnn_output_dim + + else: + extractors[key] = nn.Sequential( + *create_mlp( + subspace.shape[0], + mlp_output_dim, + mlp_net_arch, + activation_fn, + squash_output=False, + ) + ) + + total_concat_size += mlp_output_dim + + self.extractors = nn.ModuleDict(extractors) + + self.combined = nn.Sequential( + *create_mlp( + total_concat_size, + features_dim, + comb_net_arch, + activation_fn, + squash_output=False, + ) + ) + + def forward(self, observations: TensorDict) -> th.Tensor: + encoded_tensor_list = [ + extractor(observations[key]) for key, extractor in self.extractors.items() + ] + return self.combined(th.cat(encoded_tensor_list, dim=1)) + + +def get_actor_critic_arch( + net_arch: Union[List[int], Dict[str, List[int]]] +) -> Tuple[List[int], List[int]]: """ Get the actor and critic network architectures for off-policy actor-critic algorithms (SAC, TD3, DDPG). 
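
To make the behavior of ``CombinedExtractor`` above concrete, a small shape check
(sub-space shapes borrowed from ``SimpleMultiObsEnv``): the extractor builds a CNN
encoder for the image key and an MLP for the vector key, then passes the concatenated
encodings through the combined MLP, so the output width is ``features_dim``::

    import gym
    import numpy as np
    import torch as th

    from stable_baselines3.common.torch_layers import CombinedExtractor

    observation_space = gym.spaces.Dict(
        {
            "vec": gym.spaces.Box(0, 1, (5,)),
            "img": gym.spaces.Box(0, 255, (1, 20, 20), dtype=np.uint8),
        }
    )
    extractor = CombinedExtractor(observation_space)

    # A batch of 4 observations: one tensor per key, as produced by preprocess_obs
    observations = {
        "vec": th.rand(4, 5),
        "img": th.rand(4, 1, 20, 20) * 255,
    }
    features = extractor(observations)
    assert features.shape == (4, 64)  # features_dim defaults to 64
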
@@ -257,8 +361,14 @@ def get_actor_critic_arch(net_arch: Union[List[int], Dict[str, List[int]]]) -> T
 if isinstance(net_arch, list):
 actor_arch, critic_arch = net_arch, net_arch
 else:
- assert isinstance(net_arch, dict), "Error: the net_arch can only be a list of ints or a dict"
- assert "pi" in net_arch, "Error: no key 'pi' was provided in net_arch for the actor network"
- assert "qf" in net_arch, "Error: no key 'qf' was provided in net_arch for the critic network"
+ assert isinstance(
+ net_arch, dict
+ ), "Error: the net_arch can only be a list of ints or a dict"
+ assert (
+ "pi" in net_arch
+ ), "Error: no key 'pi' was provided in net_arch for the actor network"
+ assert (
+ "qf" in net_arch
+ ), "Error: no key 'qf' was provided in net_arch for the critic network"
 actor_arch, critic_arch = net_arch["pi"], net_arch["qf"]
 return actor_arch, critic_arch
diff --git a/stable_baselines3/common/type_aliases.py b/stable_baselines3/common/type_aliases.py
index 80cc354de..dde8f78b1 100644
--- a/stable_baselines3/common/type_aliases.py
+++ b/stable_baselines3/common/type_aliases.py
@@ -11,9 +11,9 @@
 GymEnv = Union[gym.Env, vec_env.VecEnv]
 GymObs = Union[Tuple, Dict[str, Any], np.ndarray, int]
 GymStepReturn = Tuple[GymObs, float, bool, Dict]
-TensorDict = Dict[str, th.Tensor]
+TensorDict = Dict[Union[str, int], th.Tensor]
 OptimizerStateDict = Dict[str, Any]
-MaybeCallback = Union[None, Callable, List[callbacks.BaseCallback], callbacks.BaseCallback]
+
 # A schedule takes the remaining progress as input
 # and outputs a scalar (e.g. learning rate, clip range, ...)
 Schedule = Callable[[float], float]
@@ -28,6 +28,15 @@ class RolloutBufferSamples(NamedTuple):
 returns: th.Tensor
 
 
+class DictRolloutBufferSamples(RolloutBufferSamples):
+ observations: TensorDict
+ actions: th.Tensor
+ old_values: th.Tensor
+ old_log_prob: th.Tensor
+ advantages: th.Tensor
+ returns: th.Tensor
+
+
 class ReplayBufferSamples(NamedTuple):
 observations: th.Tensor
 actions: th.Tensor
@@ -36,6 +45,14 @@ class ReplayBufferSamples(NamedTuple):
 rewards: th.Tensor
 
 
+class DictReplayBufferSamples(ReplayBufferSamples):
+ observations: TensorDict
+ actions: th.Tensor
+ next_observations: th.Tensor
+ dones: th.Tensor
+ rewards: th.Tensor
+
+
 class RolloutReturn(NamedTuple):
 episode_reward: float
 episode_timesteps: int
diff --git a/stable_baselines3/common/utils.py b/stable_baselines3/common/utils.py
index e8ca9f66c..50cc42227 100644
--- a/stable_baselines3/common/utils.py
+++ b/stable_baselines3/common/utils.py
@@ -3,7 +3,8 @@
 import random
 from collections import deque
 from itertools import zip_longest
-from typing import Iterable, Optional, Union
+from typing import Callable, Iterable, Optional, Union, Dict
+
 import gym
 import numpy as np
@@ -16,7 +17,7 @@
 SummaryWriter = None
 
 from stable_baselines3.common import logger
-from stable_baselines3.common.type_aliases import GymEnv, Schedule
+from stable_baselines3.common.type_aliases import GymEnv, TensorDict, Schedule
 
 
 def set_random_seed(seed: int, using_cuda: bool = False) -> None:
@@ -161,13 +162,20 @@ def get_latest_run_id(log_path: Optional[str] = None, log_name: str = "") -> int
 for path in glob.glob(f"{log_path}/{log_name}_[0-9]*"):
 file_name = path.split(os.sep)[-1]
 ext = file_name.split("_")[-1]
- if log_name == "_".join(file_name.split("_")[:-1]) and ext.isdigit() and int(ext) > max_run_id:
+ if (
+ log_name == "_".join(file_name.split("_")[:-1])
+ and ext.isdigit()
+ and int(ext) > max_run_id
+ ):
 max_run_id = int(ext)
 return max_run_id
 
 
 def configure_logger(
- 
verbose: int = 0, tensorboard_log: Optional[str] = None, tb_log_name: str = "", reset_num_timesteps: bool = True + verbose: int = 0, + tensorboard_log: Optional[str] = None, + tb_log_name: str = "", + reset_num_timesteps: bool = True, ) -> None: """ Configure the logger's outputs. @@ -190,7 +198,9 @@ def configure_logger( logger.configure(format_strings=[""]) -def check_for_correct_spaces(env: GymEnv, observation_space: gym.spaces.Space, action_space: gym.spaces.Space) -> None: +def check_for_correct_spaces( + env: GymEnv, observation_space: gym.spaces.Space, action_space: gym.spaces.Space +) -> None: """ Checks that the environment has same spaces as provided ones. Used by BaseAlgorithm to check if spaces match after loading the model with given env. @@ -203,12 +213,18 @@ def check_for_correct_spaces(env: GymEnv, observation_space: gym.spaces.Space, a :param action_space: Action space to check against """ if observation_space != env.observation_space: - raise ValueError(f"Observation spaces do not match: {observation_space} != {env.observation_space}") + raise ValueError( + f"Observation spaces do not match: {observation_space} != {env.observation_space}" + ) if action_space != env.action_space: - raise ValueError(f"Action spaces do not match: {action_space} != {env.action_space}") + raise ValueError( + f"Action spaces do not match: {action_space} != {env.action_space}" + ) -def is_vectorized_observation(observation: np.ndarray, observation_space: gym.spaces.Space) -> bool: +def is_vectorized_observation( + observation: np.ndarray, observation_space: gym.spaces.Space +) -> bool: """ For every observation type, detects and validates the shape, then returns whether or not the observation is vectorized. @@ -226,10 +242,14 @@ def is_vectorized_observation(observation: np.ndarray, observation_space: gym.sp raise ValueError( f"Error: Unexpected observation shape {observation.shape} for " + f"Box environment, please use {observation_space.shape} " - + "or (n_env, {}) for the observation shape.".format(", ".join(map(str, observation_space.shape))) + + "or (n_env, {}) for the observation shape.".format( + ", ".join(map(str, observation_space.shape)) + ) ) elif isinstance(observation_space, gym.spaces.Discrete): - if observation.shape == (): # A numpy array of a number, has shape empty tuple '()' + if ( + observation.shape == () + ): # A numpy array of a number, has shape empty tuple '()' return False elif len(observation.shape) == 1: return True @@ -238,11 +258,12 @@ def is_vectorized_observation(observation: np.ndarray, observation_space: gym.sp f"Error: Unexpected observation shape {observation.shape} for " + "Discrete environment, please use (1,) or (n_env, 1) for the observation shape." 
 )
-
 elif isinstance(observation_space, gym.spaces.MultiDiscrete):
 if observation.shape == (len(observation_space.nvec),):
 return False
- elif len(observation.shape) == 2 and observation.shape[1] == len(observation_space.nvec):
+ elif len(observation.shape) == 2 and observation.shape[1] == len(
+ observation_space.nvec
+ ):
 return True
 else:
 raise ValueError(
@@ -253,7 +274,9 @@ def is_vectorized_observation(observation: np.ndarray, observation_space: gym.sp
 elif isinstance(observation_space, gym.spaces.MultiBinary):
 if observation.shape == (observation_space.n,):
 return False
- elif len(observation.shape) == 2 and observation.shape[1] == observation_space.n:
+ elif (
+ len(observation.shape) == 2 and observation.shape[1] == observation_space.n
+ ):
 return True
 else:
 raise ValueError(
@@ -261,9 +284,29 @@ def is_vectorized_observation(observation: np.ndarray, observation_space: gym.sp
 + f"environment, please use ({observation_space.n},) or "
 + f"(n_env, {observation_space.n}) for the observation shape."
 )
+ elif isinstance(observation_space, gym.spaces.Dict):
+ for key, subspace in observation_space.spaces.items():
+ if observation[key].shape == subspace.shape:
+ return False
+
+ all_good = True
+
+ for key, subspace in observation_space.spaces.items():
+ if observation[key].shape[1:] != subspace.shape:
+ all_good = False
+ break
+
+ if all_good:
+ return True
+ else:
+ raise ValueError(
+ f"Error: Unexpected observation shape {observation[key].shape} for key '{key}' "
+ + f"of Dict environment, please use {subspace.shape} or (n_env, ...) for the observation shape."
+ )
 else:
 raise ValueError(
- "Error: Cannot determine if the observation is vectorized " + f" with the space type {observation_space}."
+ "Error: Cannot determine if the observation is vectorized "
+ + f" with the space type {observation_space}."
 )
 
 
@@ -296,7 +339,11 @@ def zip_strict(*iterables: Iterable) -> Iterable:
 yield combo
 
 
-def polyak_update(params: Iterable[th.nn.Parameter], target_params: Iterable[th.nn.Parameter], tau: float) -> None:
+def polyak_update(
+ params: Iterable[th.nn.Parameter],
+ target_params: Iterable[th.nn.Parameter],
+ tau: float,
+) -> None:
 """
 Perform a Polyak average update on ``target_params`` using ``params``:
 target parameters are slowly updated towards the main parameters.
@@ -317,3 +364,14 @@ def polyak_update(params: Iterable[th.nn.Parameter], target_params: Iterable[th.
 for param, target_param in zip_strict(params, target_params):
 target_param.data.mul_(1 - tau)
 th.add(target_param.data, param.data, alpha=tau, out=target_param.data)
+
+
+def obs_as_tensor(
+ obs: Union[np.ndarray, Dict[Union[str, int], np.ndarray]], device: th.device
+) -> Union[th.Tensor, TensorDict]:
+ if isinstance(obs, np.ndarray):
+ return th.as_tensor(obs).to(device)
+ elif isinstance(obs, dict):
+ return {key: th.as_tensor(_obs).to(device) for (key, _obs) in obs.items()}
+ else:
+ raise Exception(f"Unrecognized type of observation {type(obs)}")
diff --git a/stable_baselines3/common/vec_env/vec_frame_stack.py b/stable_baselines3/common/vec_env/vec_frame_stack.py
index ff9a79652..7087d237a 100644
--- a/stable_baselines3/common/vec_env/vec_frame_stack.py
+++ b/stable_baselines3/common/vec_env/vec_frame_stack.py
@@ -1,10 +1,14 @@
 import warnings
-from typing import Any, Dict, List, Optional, Tuple
+from typing import Any, Dict, List, Optional, Tuple, Union
 
 import numpy as np
 from gym import spaces
 
-from stable_baselines3.common.preprocessing import is_image_space, is_image_space_channels_first
+from stable_baselines3.common.preprocessing import (
+ is_image_space,
+ has_image_space,
+ is_image_space_channels_first,
+)
 from stable_baselines3.common.vec_env.base_vec_env import VecEnv, VecEnvWrapper
 
 
@@ -23,73 +27,199 @@ class VecFrameStack(VecEnvWrapper):
 If None, automatically detect channel to stack over in case of image observation or default to "last" (default).
 """
 
- def __init__(self, venv: VecEnv, n_stack: int, channels_order: Optional[str] = None):
+ def __init__(
+ self, venv: VecEnv, n_stack: int, channels_order: Optional[str] = None
+ ):
 self.venv = venv
 self.n_stack = n_stack
 wrapped_obs_space = venv.observation_space
- assert isinstance(wrapped_obs_space, spaces.Box), "VecFrameStack only work with gym.spaces.Box observation space"
+ if isinstance(wrapped_obs_space, spaces.Box):
+ (
+ self.channels_first,
+ self.stack_dimension,
+ self.stackedobs,
+ observation_space,
+ ) = self.compute_stacking(channels_order, wrapped_obs_space)
+
+ elif isinstance(wrapped_obs_space, spaces.Dict):
+ self.channels_first = {}
+ self.stack_dimension = {}
+ self.stackedobs = {}
+ space_dict = {}
+ for (key, subspace) in wrapped_obs_space.spaces.items():
+ assert isinstance(
+ subspace, spaces.Box
+ ), "VecFrameStack with gym.spaces.Dict only works with nested gym.spaces.Box"
+ (
+ self.channels_first[key],
+ self.stack_dimension[key],
+ self.stackedobs[key],
+ space_dict[key],
+ ) = self.compute_stacking(channels_order, subspace)
+ observation_space = spaces.Dict(spaces=space_dict)
+ else:
+ raise Exception(
+ "VecFrameStack only works with gym.spaces.Box and gym.spaces.Dict observation spaces"
+ )
+
+ VecEnvWrapper.__init__(self, venv, observation_space=observation_space)
+
+ def compute_stacking(self, channels_order, obs_space):
+ channels_first = False
 if channels_order is None:
 # Detect channel location automatically for images
- if is_image_space(wrapped_obs_space):
- self.channels_first = is_image_space_channels_first(wrapped_obs_space)
+ if is_image_space(obs_space):
+ channels_first = is_image_space_channels_first(obs_space)
 else:
 # Default behavior for non-image space, stack on the last axis
- self.channels_first = False
+ channels_first = False
 else:
- assert channels_order in {"last", "first"}, "`channels_order` must be one of following: 'last', 'first'"
+ assert channels_order in {
+ "last",
+ "first",
+ }, "`channels_order` must be one of following: 'last', 'first'"
 
- 
self.channels_first = channels_order == "first" + channels_first = channels_order == "first" # This includes the vec-env dimension (first) - self.stack_dimension = 1 if self.channels_first else -1 - repeat_axis = 0 if self.channels_first else -1 - low = np.repeat(wrapped_obs_space.low, self.n_stack, axis=repeat_axis) - high = np.repeat(wrapped_obs_space.high, self.n_stack, axis=repeat_axis) - self.stackedobs = np.zeros((venv.num_envs,) + low.shape, low.dtype) - observation_space = spaces.Box(low=low, high=high, dtype=venv.observation_space.dtype) - VecEnvWrapper.__init__(self, venv, observation_space=observation_space) + stack_dimension = 1 if channels_first else -1 + repeat_axis = 0 if channels_first else -1 + low = np.repeat(obs_space.low, self.n_stack, axis=repeat_axis) + high = np.repeat(obs_space.high, self.n_stack, axis=repeat_axis) + stackedobs = np.zeros((self.venv.num_envs,) + low.shape, low.dtype) + observation_space = spaces.Box(low=low, high=high, dtype=obs_space.dtype) + return channels_first, stack_dimension, stackedobs, observation_space + + def step_wait( + self, + ) -> Tuple[np.ndarray, np.ndarray, np.ndarray, List[Dict[str, Any]]]: - def step_wait(self) -> Tuple[np.ndarray, np.ndarray, np.ndarray, List[Dict[str, Any]]]: observations, rewards, dones, infos = self.venv.step_wait() - # Let pytype know that observation is not a dict - assert isinstance(observations, np.ndarray) - stack_ax_size = observations.shape[self.stack_dimension] - self.stackedobs = np.roll(self.stackedobs, shift=-stack_ax_size, axis=self.stack_dimension) - for i, done in enumerate(dones): - if done: - if "terminal_observation" in infos[i]: - old_terminal = infos[i]["terminal_observation"] - if self.channels_first: - new_terminal = np.concatenate( - (self.stackedobs[i, :-stack_ax_size, ...], old_terminal), axis=self.stack_dimension - ) + + if isinstance(self.venv.observation_space, spaces.Box): + stack_ax_size = observations.shape[self.stack_dimension] + self.stackedobs = np.roll( + self.stackedobs, shift=-stack_ax_size, axis=self.stack_dimension + ) + for i, done in enumerate(dones): + if done: + if "terminal_observation" in infos[i]: + old_terminal = infos[i]["terminal_observation"] + if self.channels_first: + new_terminal = np.concatenate( + ( + self.stackedobs[i, :-stack_ax_size, ...], + old_terminal, + ), + axis=self.stack_dimension, + ) + else: + new_terminal = np.concatenate( + ( + self.stackedobs[i, ..., :-stack_ax_size], + old_terminal, + ), + axis=self.stack_dimension, + ) + infos[i]["terminal_observation"] = new_terminal else: - new_terminal = np.concatenate( - (self.stackedobs[i, ..., :-stack_ax_size], old_terminal), axis=self.stack_dimension + warnings.warn( + "VecFrameStack wrapping a VecEnv without terminal_observation info" ) - infos[i]["terminal_observation"] = new_terminal + self.stackedobs[i] = 0 + if self.channels_first: + self.stackedobs[ + :, -observations.shape[self.stack_dimension] :, ... 
+ ] = observations
+ else:
+ self.stackedobs[
+ ..., -observations.shape[self.stack_dimension] :
+ ] = observations
+ elif isinstance(self.venv.observation_space, spaces.Dict):
+ for key in self.stackedobs.keys():
+ stack_ax_size = observations[key].shape[self.stack_dimension[key]]
+ self.stackedobs[key] = np.roll(
+ self.stackedobs[key],
+ shift=-stack_ax_size,
+ axis=self.stack_dimension[key],
+ )
+
+ for i, done in enumerate(dones):
+ if done:
+ if "terminal_observation" in infos[i]:
+ old_terminal = infos[i]["terminal_observation"][key]
+ if self.channels_first[key]:
+ # new_terminal = np.concatenate(
+ # (self.stackedobs[key][i, :-stack_ax_size, ...], old_terminal), axis=self.stack_dimension[key]
+ # )
+ # ValueError: all the input array dimensions for the concatenation axis must match exactly,
+ # but along dimension 0, the array at index 0 has size 6 and the array at index 1 has size 2
+ new_terminal = np.vstack(
+ (
+ self.stackedobs[key][i, :-stack_ax_size, ...],
+ old_terminal,
+ )
+ )
+ else:
+ new_terminal = np.concatenate(
+ (
+ self.stackedobs[key][i, ..., :-stack_ax_size],
+ old_terminal,
+ ),
+ axis=self.stack_dimension[key],
+ )
+ infos[i]["terminal_observation"][key] = new_terminal
+ else:
+ warnings.warn(
+ "VecFrameStack wrapping a VecEnv without terminal_observation info"
+ )
+ self.stackedobs[key][i] = 0
+ if self.channels_first[key]:
+ self.stackedobs[key][
+ :, -observations[key].shape[self.stack_dimension[key]] :, ...
+ ] = observations[key]
 else:
- warnings.warn("VecFrameStack wrapping a VecEnv without terminal_observation info")
- self.stackedobs[i] = 0
- if self.channels_first:
- self.stackedobs[:, -observations.shape[self.stack_dimension] :, ...] = observations
+ self.stackedobs[key][
+ ..., -observations[key].shape[self.stack_dimension[key]] :
+ ] = observations[key]
 else:
- self.stackedobs[..., -observations.shape[self.stack_dimension] :] = observations
+ raise Exception(
+ f"Unhandled observation type {type(self.venv.observation_space)}"
+ )
 
 return self.stackedobs, rewards, dones, infos
 
- def reset(self) -> np.ndarray:
+ def reset(self) -> Union[np.ndarray, Dict[str, np.ndarray]]:
 """
 Reset all environments
 """
- obs: np.ndarray = self.venv.reset() # pytype:disable=annotation-type-mismatch
- self.stackedobs[...] = 0
- if self.channels_first:
- self.stackedobs[:, -obs.shape[self.stack_dimension] :, ...] = obs
- else:
- self.stackedobs[..., -obs.shape[self.stack_dimension] :] = obs
+ observation = self.venv.reset() # pytype:disable=annotation-type-mismatch
+
+ if isinstance(self.venv.observation_space, spaces.Box):
+ self.stackedobs[...] = 0
+ if self.channels_first:
+ self.stackedobs[
+ :, -observation.shape[self.stack_dimension] :, ...
+ ] = observation
+ else:
+ self.stackedobs[
+ ..., -observation.shape[self.stack_dimension] :
+ ] = observation
+
+ elif isinstance(self.venv.observation_space, spaces.Dict):
+ for key, obs in observation.items():
+ self.stackedobs[key][...] = 0
+ if self.channels_first[key]:
+ self.stackedobs[key][
+ :, -obs.shape[self.stack_dimension[key]] :, ... 
+ ] = obs + else: + self.stackedobs[key][ + ..., -obs.shape[self.stack_dimension[key]] : + ] = obs + return self.stackedobs def close(self) -> None: diff --git a/tests/test_her.py b/tests/test_her.py index 5e9cccc07..a939cc5ec 100644 --- a/tests/test_her.py +++ b/tests/test_her.py @@ -129,7 +129,9 @@ def test_save_load(tmp_path, model_class, use_sde, online_sampling): params = deepcopy(model.policy.state_dict()) # Modify all parameters to be random values - random_params = dict((param_name, th.rand_like(param)) for param_name, param in params.items()) + random_params = dict( + (param_name, th.rand_like(param)) for param_name, param in params.items() + ) # Update model parameters with the new random values model.policy.load_state_dict(random_params) @@ -137,7 +139,9 @@ def test_save_load(tmp_path, model_class, use_sde, online_sampling): new_params = model.policy.state_dict() # Check that all params are different now for k in params: - assert not th.allclose(params[k], new_params[k]), "Parameters did not change as expected." + assert not th.allclose( + params[k], new_params[k] + ), "Parameters did not change as expected." params = new_params @@ -154,7 +158,9 @@ def test_save_load(tmp_path, model_class, use_sde, online_sampling): # Check that all params are the same as before save load procedure now for key in params: - assert th.allclose(params[key], new_params[key]), "Model parameters not the same after save and load." + assert th.allclose( + params[key], new_params[key] + ), "Model parameters not the same after save and load." # check if model still selects the same actions new_selected_actions, _ = model.predict(observations, deterministic=True) @@ -164,7 +170,9 @@ def test_save_load(tmp_path, model_class, use_sde, online_sampling): model.learn(total_timesteps=300) # Test that the change of parameters works - model = HER.load(str(tmp_path / "test_save.zip"), env=env, verbose=3, learning_rate=2.0) + model = HER.load( + str(tmp_path / "test_save.zip"), env=env, verbose=3, learning_rate=2.0 + ) assert model.model.learning_rate == 2.0 assert model.verbose == 3 @@ -172,8 +180,13 @@ def test_save_load(tmp_path, model_class, use_sde, online_sampling): os.remove(tmp_path / "test_save.zip") -@pytest.mark.parametrize("online_sampling, truncate_last_trajectory", [(False, None), (True, True), (True, False)]) -def test_save_load_replay_buffer(tmp_path, recwarn, online_sampling, truncate_last_trajectory): +@pytest.mark.parametrize( + "online_sampling, truncate_last_trajectory", + [(False, None), (True, True), (True, False)], +) +def test_save_load_replay_buffer( + tmp_path, recwarn, online_sampling, truncate_last_trajectory +): """ Test if 'save_replay_buffer' and 'load_replay_buffer' works correctly """ @@ -214,7 +227,9 @@ def test_save_load_replay_buffer(tmp_path, recwarn, online_sampling, truncate_la if truncate_last_trajectory: assert len(recwarn) == 1 warning = recwarn.pop(UserWarning) - assert "The last trajectory in the replay buffer will be truncated" in str(warning.message) + assert "The last trajectory in the replay buffer will be truncated" in str( + warning.message + ) else: assert len(recwarn) == 0 @@ -229,10 +244,12 @@ def test_save_load_replay_buffer(tmp_path, recwarn, online_sampling, truncate_la model.replay_buffer.buffer["next_obs"][:n_episodes_stored], ) assert np.allclose( - old_replay_buffer.buffer["action"][:n_episodes_stored], model.replay_buffer.buffer["action"][:n_episodes_stored] + old_replay_buffer.buffer["action"][:n_episodes_stored], + 
model.replay_buffer.buffer["action"][:n_episodes_stored],
 )
 assert np.allclose(
- old_replay_buffer.buffer["reward"][:n_episodes_stored], model.replay_buffer.buffer["reward"][:n_episodes_stored]
+ old_replay_buffer.buffer["reward"][:n_episodes_stored],
+ model.replay_buffer.buffer["reward"][:n_episodes_stored],
 )
 # we might change the last done of the last trajectory so we don't compare it
 assert np.allclose(
@@ -240,7 +257,9 @@ def test_save_load_replay_buffer(tmp_path, recwarn, online_sampling, truncate_la
 model.replay_buffer.buffer["done"][: n_episodes_stored - 1],
 )
 else:
- assert np.allclose(old_replay_buffer.observations, model.replay_buffer.observations)
+ assert np.allclose(
+ old_replay_buffer.observations, model.replay_buffer.observations
+ )
 assert np.allclose(old_replay_buffer.actions, model.replay_buffer.actions)
 assert np.allclose(old_replay_buffer.rewards, model.replay_buffer.rewards)
 assert np.allclose(old_replay_buffer.dones, model.replay_buffer.dones)
@@ -286,14 +305,20 @@ def test_get_max_episode_length():
 get_time_limit(dict_env, current_max_episode_length=None)
 
 default_length = 10
- assert get_time_limit(dict_env, current_max_episode_length=default_length) == default_length
+ assert (
+ get_time_limit(dict_env, current_max_episode_length=default_length)
+ == default_length
+ )
 
 env = gym.make("CartPole-v1")
 vec_env = DummyVecEnv([lambda: env])
 
 assert get_time_limit(vec_env, current_max_episode_length=None) == 500
 # Overwrite max_episode_steps
- assert get_time_limit(vec_env, current_max_episode_length=default_length) == default_length
+ assert (
+ get_time_limit(vec_env, current_max_episode_length=default_length)
+ == default_length
+ )
 
 # Set max_episode_steps to None
 env.spec.max_episode_steps = None

From b7f33865c227abb9d7df3070133f812d9e7cb7ba Mon Sep 17 00:00:00 2001
From: Jaden Travnik
Date: Tue, 24 Nov 2020 19:27:09 -0500
Subject: [PATCH 02/70] Fixing missing refs from a quick merge from master

---
 stable_baselines3/common/policies.py | 2 +-
 stable_baselines3/common/type_aliases.py | 3 +++
 2 files changed, 4 insertions(+), 1 deletion(-)

diff --git a/stable_baselines3/common/policies.py b/stable_baselines3/common/policies.py
index c0d800e69..5dc148b35 100644
--- a/stable_baselines3/common/policies.py
+++ b/stable_baselines3/common/policies.py
@@ -3,7 +3,7 @@
 import collections
 from abc import ABC, abstractmethod
 from functools import partial
-from typing import Any, Dict, List, Optional, Tuple, Type, Union
+from typing import Any, Dict, List, Optional, Tuple, Type, Union, Callable
 
 import copy
 
diff --git a/stable_baselines3/common/type_aliases.py b/stable_baselines3/common/type_aliases.py
index dde8f78b1..d84767543 100644
--- a/stable_baselines3/common/type_aliases.py
+++ b/stable_baselines3/common/type_aliases.py
@@ -13,6 +13,9 @@
 GymStepReturn = Tuple[GymObs, float, bool, Dict]
 TensorDict = Dict[Union[str, int], th.Tensor]
 OptimizerStateDict = Dict[str, Any]
+MaybeCallback = Union[
+ None, Callable, List[callbacks.BaseCallback], callbacks.BaseCallback
+]
 
 # A schedule takes the remaining progress as input
 # and outputs a scalar (e.g. learning rate, clip range, ...)
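Patch 01 above also introduced ``obs_as_tensor`` in ``stable_baselines3/common/utils.py`` so that rollout collection can move either a plain array or a dict of arrays onto the policy device with a single call. A small usage sketch (the ``img``/``vec`` key names are illustrative only):

    import numpy as np
    import torch as th

    from stable_baselines3.common.utils import obs_as_tensor

    device = th.device("cpu")
    # A plain ndarray comes back as a single tensor ...
    batch = obs_as_tensor(np.zeros((4, 3), dtype=np.float32), device)
    assert isinstance(batch, th.Tensor)
    # ... while a dict observation is converted key by key
    dict_batch = obs_as_tensor(
        {
            "img": np.zeros((4, 1, 20, 20), dtype=np.float32),
            "vec": np.zeros((4, 5), dtype=np.float32),
        },
        device,
    )
    assert set(dict_batch.keys()) == {"img", "vec"}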
From 21fecd3e88aa9c2fd9e00b049af7a0f1b897ab41 Mon Sep 17 00:00:00 2001 From: Antonin RAFFIN Date: Thu, 26 Nov 2020 19:14:08 +0100 Subject: [PATCH 03/70] Reformat --- stable_baselines3/common/base_class.py | 5 +- stable_baselines3/common/buffers.py | 111 ++++----------- stable_baselines3/common/multi_input_envs.py | 32 +---- .../common/on_policy_algorithm.py | 24 +--- stable_baselines3/common/policies.py | 126 +++++------------- stable_baselines3/common/preprocessing.py | 28 ++-- stable_baselines3/common/torch_layers.py | 76 +++-------- stable_baselines3/common/type_aliases.py | 4 +- stable_baselines3/common/utils.py | 46 ++----- .../common/vec_env/vec_frame_stack.py | 62 +++------ tests/test_her.py | 38 ++---- 11 files changed, 139 insertions(+), 413 deletions(-) diff --git a/stable_baselines3/common/base_class.py b/stable_baselines3/common/base_class.py index 9008c2aa4..4c337851e 100644 --- a/stable_baselines3/common/base_class.py +++ b/stable_baselines3/common/base_class.py @@ -197,8 +197,9 @@ def _wrap_env(env: GymEnv, verbose: int = 0, monitor_wrapper: bool = True) -> Ve env = VecTransposeImage(env) # check if wrapper for dict support is needed when using HER - if isinstance(env.observation_space, gym.spaces.dict.Dict) and \ - set(env.observation_space.spaces.keys()) == set(["observation", "desired_goal"]): + if isinstance(env.observation_space, gym.spaces.dict.Dict) and set(env.observation_space.spaces.keys()) == set( + ["observation", "desired_goal", "achieved_goal"] + ): env = ObsDictWrapper(env) return env diff --git a/stable_baselines3/common/buffers.py b/stable_baselines3/common/buffers.py index 7d7cfb189..9401632d0 100644 --- a/stable_baselines3/common/buffers.py +++ b/stable_baselines3/common/buffers.py @@ -14,10 +14,10 @@ from stable_baselines3.common.preprocessing import get_action_dim, get_obs_shape from stable_baselines3.common.type_aliases import ( + DictReplayBufferSamples, + DictRolloutBufferSamples, ReplayBufferSamples, RolloutBufferSamples, - DictRolloutBufferSamples, - DictReplayBufferSamples, ) from stable_baselines3.common.vec_env import VecNormalize @@ -144,9 +144,7 @@ def _normalize_obs( return obs @staticmethod - def _normalize_reward( - reward: np.ndarray, env: Optional[VecNormalize] = None - ) -> np.ndarray: + def _normalize_reward(reward: np.ndarray, env: Optional[VecNormalize] = None) -> np.ndarray: if env is not None: return env.normalize_reward(reward).astype(np.float32) return reward @@ -177,9 +175,7 @@ def __init__( n_envs: int = 1, optimize_memory_usage: bool = False, ): - super(ReplayBuffer, self).__init__( - buffer_size, observation_space, action_space, device, n_envs=n_envs - ) + super(ReplayBuffer, self).__init__(buffer_size, observation_space, action_space, device, n_envs=n_envs) assert n_envs == 1, "Replay buffer only support single environment for now" @@ -191,8 +187,7 @@ def __init__( if self.is_dict_data: self.observations = { - key: np.zeros((self.buffer_size, self.n_envs) + _obs_shape) - for key, _obs_shape in self.obs_shape.items() + key: np.zeros((self.buffer_size, self.n_envs) + _obs_shape) for key, _obs_shape in self.obs_shape.items() } else: self.observations = np.zeros( @@ -205,17 +200,14 @@ def __init__( else: if self.is_dict_data: self.next_observations = { - key: np.zeros((self.buffer_size, self.n_envs) + _obs_shape) - for key, _obs_shape in self.obs_shape.items() + key: np.zeros((self.buffer_size, self.n_envs) + _obs_shape) for key, _obs_shape in self.obs_shape.items() } else: self.next_observations = np.zeros( (self.buffer_size, 
self.n_envs) + self.obs_shape, dtype=observation_space.dtype, ) - self.actions = np.zeros( - (self.buffer_size, self.n_envs, self.action_dim), dtype=action_space.dtype - ) + self.actions = np.zeros((self.buffer_size, self.n_envs, self.action_dim), dtype=action_space.dtype) self.rewards = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32) self.dones = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32) @@ -227,12 +219,7 @@ def __init__( else: obs_nbytes = self.observations.nbytes - total_memory_usage = ( - obs_nbytes - + self.actions.nbytes - + self.rewards.nbytes - + self.dones.nbytes - ) + total_memory_usage = obs_nbytes + self.actions.nbytes + self.rewards.nbytes + self.dones.nbytes if self.next_observations is not None: next_obs_nbytes = 0 if self.is_dict_data: @@ -270,19 +257,13 @@ def add( if self.optimize_memory_usage: if self.is_dict_data: for key in self.observations.keys(): - self.observations[key][ - (self.pos + 1) % self.buffer_size - ] = np.array(next_obs[key]).copy() + self.observations[key][(self.pos + 1) % self.buffer_size] = np.array(next_obs[key]).copy() else: - self.observations[(self.pos + 1) % self.buffer_size] = np.array( - next_obs - ).copy() + self.observations[(self.pos + 1) % self.buffer_size] = np.array(next_obs).copy() else: if self.is_dict_data: for key in self.next_observations.keys(): - self.next_observations[key][self.pos] = np.array( - next_obs[key] - ).copy() + self.next_observations[key][self.pos] = np.array(next_obs[key]).copy() else: self.next_observations[self.pos] = np.array(next_obs).copy() @@ -295,9 +276,7 @@ def add( self.full = True self.pos = 0 - def sample( - self, batch_size: int, env: Optional[VecNormalize] = None - ) -> ReplayBufferSamples: + def sample(self, batch_size: int, env: Optional[VecNormalize] = None) -> ReplayBufferSamples: """ Sample elements from the replay buffer. 
Custom sampling when using memory efficient variant, @@ -314,16 +293,12 @@ def sample( # Do not sample the element with index `self.pos` as the transitions is invalid # (we use only one array to store `obs` and `next_obs`) if self.full: - batch_inds = ( - np.random.randint(1, self.buffer_size, size=batch_size) + self.pos - ) % self.buffer_size + batch_inds = (np.random.randint(1, self.buffer_size, size=batch_size) + self.pos) % self.buffer_size else: batch_inds = np.random.randint(0, self.pos, size=batch_size) return self._get_samples(batch_inds, env=env) - def _get_samples( - self, batch_inds: np.ndarray, env: Optional[VecNormalize] = None - ) -> ReplayBufferSamples: + def _get_samples(self, batch_inds: np.ndarray, env: Optional[VecNormalize] = None) -> ReplayBufferSamples: if self.is_dict_data: if self.optimize_memory_usage: @@ -343,27 +318,20 @@ def _get_samples( } normalized_obs = { - key: self.to_torch(self._normalize_obs(obs[batch_inds, 0, :], env)) - for key, obs in self.observations.items() + key: self.to_torch(self._normalize_obs(obs[batch_inds, 0, :], env)) for key, obs in self.observations.items() } return DictReplayBufferSamples( observations=normalized_obs, actions=self.to_torch(self.actions[batch_inds]), next_observations=next_obs, dones=self.to_torch(self.dones[batch_inds]), - returns=self.to_torch( - self._normalize_reward(self.rewards[batch_inds], env) - ), + returns=self.to_torch(self._normalize_reward(self.rewards[batch_inds], env)), ) if self.optimize_memory_usage: - next_obs = self._normalize_obs( - self.observations[(batch_inds + 1) % self.buffer_size, 0, :], env - ) + next_obs = self._normalize_obs(self.observations[(batch_inds + 1) % self.buffer_size, 0, :], env) else: - next_obs = self._normalize_obs( - self.next_observations[batch_inds, 0, :], env - ) + next_obs = self._normalize_obs(self.next_observations[batch_inds, 0, :], env) data = ( self._normalize_obs(self.observations[batch_inds, 0, :], env), @@ -409,9 +377,7 @@ def __init__( n_envs: int = 1, ): - super(RolloutBuffer, self).__init__( - buffer_size, observation_space, action_space, device, n_envs=n_envs - ) + super(RolloutBuffer, self).__init__(buffer_size, observation_space, action_space, device, n_envs=n_envs) self.gae_lambda = gae_lambda self.gamma = gamma self.observations, self.actions, self.rewards, self.advantages = ( @@ -429,16 +395,10 @@ def reset(self) -> None: if self.is_dict_data: self.observations = {} for (key, obs_input_shape) in self.obs_shape.items(): - self.observations[key] = np.zeros( - (self.buffer_size, self.n_envs) + obs_input_shape, dtype=np.float32 - ) + self.observations[key] = np.zeros((self.buffer_size, self.n_envs) + obs_input_shape, dtype=np.float32) else: - self.observations = np.zeros( - (self.buffer_size, self.n_envs) + self.obs_shape, dtype=np.float32 - ) - self.actions = np.zeros( - (self.buffer_size, self.n_envs, self.action_dim), dtype=np.float32 - ) + self.observations = np.zeros((self.buffer_size, self.n_envs) + self.obs_shape, dtype=np.float32) + self.actions = np.zeros((self.buffer_size, self.n_envs, self.action_dim), dtype=np.float32) self.rewards = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32) self.returns = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32) self.dones = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32) @@ -448,9 +408,7 @@ def reset(self) -> None: self.generator_ready = False super(RolloutBuffer, self).reset() - def compute_returns_and_advantage( - self, last_values: th.Tensor, dones: np.ndarray - ) -> None: + def 
compute_returns_and_advantage(self, last_values: th.Tensor, dones: np.ndarray) -> None: """ Post-processing step: compute the returns (sum of discounted rewards) and GAE advantage. @@ -476,14 +434,8 @@ def compute_returns_and_advantage( else: next_non_terminal = 1.0 - self.dones[step + 1] next_values = self.values[step + 1] - delta = ( - self.rewards[step] - + self.gamma * next_values * next_non_terminal - - self.values[step] - ) - last_gae_lam = ( - delta + self.gamma * self.gae_lambda * next_non_terminal * last_gae_lam - ) + delta = self.rewards[step] + self.gamma * next_values * next_non_terminal - self.values[step] + last_gae_lam = delta + self.gamma * self.gae_lambda * next_non_terminal * last_gae_lam self.advantages[step] = last_gae_lam self.returns = self.advantages + self.values @@ -524,9 +476,7 @@ def add( if self.pos == self.buffer_size: self.full = True - def get( - self, batch_size: Optional[int] = None - ) -> Generator[RolloutBufferSamples, None, None]: + def get(self, batch_size: Optional[int] = None) -> Generator[RolloutBufferSamples, None, None]: assert self.full, "" indices = np.random.permutation(self.buffer_size * self.n_envs) # Prepare the data @@ -552,15 +502,10 @@ def get( yield self._get_samples(indices[start_idx : start_idx + batch_size]) start_idx += batch_size - def _get_samples( - self, batch_inds: np.ndarray, env: Optional[VecNormalize] = None - ) -> RolloutBufferSamples: + def _get_samples(self, batch_inds: np.ndarray, env: Optional[VecNormalize] = None) -> RolloutBufferSamples: if self.is_dict_data: return DictRolloutBufferSamples( - observations={ - key: self.to_torch(obs[batch_inds]) - for (key, obs) in self.observations.items() - }, + observations={key: self.to_torch(obs[batch_inds]) for (key, obs) in self.observations.items()}, actions=self.to_torch(self.actions[batch_inds]), old_values=self.to_torch(self.values[batch_inds].flatten()), old_log_prob=self.to_torch(self.log_probs[batch_inds].flatten()), diff --git a/stable_baselines3/common/multi_input_envs.py b/stable_baselines3/common/multi_input_envs.py index 495d50ced..d0df1b78a 100644 --- a/stable_baselines3/common/multi_input_envs.py +++ b/stable_baselines3/common/multi_input_envs.py @@ -61,17 +61,13 @@ def __init__(self, num_col=4, num_row=4, random_start=True, noise=0.0): self.max_state = len(self.state_mapping) - 1 - def random_upsample_img( - self, v_rng=(0, 255), initial_size=(4, 4), up_size=(20, 20) - ): + def random_upsample_img(self, v_rng=(0, 255), initial_size=(4, 4), up_size=(20, 20)): im = np.random.randint(v_rng[0], v_rng[1], initial_size, dtype=np.int32) return np.array( [ [ [ - im[int(initial_size[0] * r / up_size[0])][ - int(initial_size[1] * c / up_size[1]) - ] + im[int(initial_size[0] * r / up_size[0])][int(initial_size[1] * c / up_size[1])] for c in range(up_size[0]) ] for r in range(up_size[1]) @@ -94,9 +90,7 @@ def get_state_mapping(self): if self.noise > 0: state_dict["vec"] += np.random.random(self.vector_size) * self.noise img_noise = int(255 * self.noise) - state_dict["img"] += np.random.randint( - -img_noise, img_noise, (1, 20, 20), dtype=np.int32 - ) + state_dict["img"] += np.random.randint(-img_noise, img_noise, (1, 20, 20), dtype=np.int32) state_dict["img"] = np.clip(state_dict["img"], 0, 255) return state_dict @@ -164,9 +158,7 @@ class NineRoomMultiObsEnv(SimpleMultiObsEnv): # actions are = [left, down, right, up] def __init__(self, random_start=True, noise=0.0): - super(NineRoomMultiObsEnv, self).__init__( - 9, 9, random_start=random_start, noise=noise - ) + 
super(NineRoomMultiObsEnv, self).__init__(9, 9, random_start=random_start, noise=noise) def init_possible_transitions(self): self.left_possible = ( @@ -181,13 +173,7 @@ def init_possible_transitions(self): + [73, 74, 76, 77, 79, 80] ) - self.down_possible = ( - list(range(18)) - + [19, 22, 25] - + list(range(27, 45)) - + [46, 49, 52] - + list(range(54, 72)) - ) + self.down_possible = list(range(18)) + [19, 22, 25] + list(range(27, 45)) + [46, 49, 52] + list(range(54, 72)) self.right_possible = ( [0, 1, 3, 4, 6, 7] @@ -201,10 +187,4 @@ def init_possible_transitions(self): + [72, 73, 75, 76, 78, 79] ) - self.up_possible = ( - list(range(9, 27)) - + [28, 31, 34] - + list(range(36, 54)) - + [55, 58, 61] - + list(range(63, 81)) - ) + self.up_possible = list(range(9, 27)) + [28, 31, 34] + list(range(36, 54)) + [55, 58, 61] + list(range(63, 81)) diff --git a/stable_baselines3/common/on_policy_algorithm.py b/stable_baselines3/common/on_policy_algorithm.py index 80b54f5bd..497de8136 100644 --- a/stable_baselines3/common/on_policy_algorithm.py +++ b/stable_baselines3/common/on_policy_algorithm.py @@ -11,7 +11,7 @@ from stable_baselines3.common.callbacks import BaseCallback from stable_baselines3.common.policies import ActorCriticPolicy from stable_baselines3.common.type_aliases import GymEnv, MaybeCallback, Schedule -from stable_baselines3.common.utils import safe_mean, obs_as_tensor +from stable_baselines3.common.utils import obs_as_tensor, safe_mean from stable_baselines3.common.vec_env import VecEnv @@ -150,11 +150,7 @@ def collect_rollouts( callback.on_rollout_start() while n_steps < n_rollout_steps: - if ( - self.use_sde - and self.sde_sample_freq > 0 - and n_steps % self.sde_sample_freq == 0 - ): + if self.use_sde and self.sde_sample_freq > 0 and n_steps % self.sde_sample_freq == 0: # Sample a new noise matrix self.policy.reset_noise(env.num_envs) @@ -168,9 +164,7 @@ def collect_rollouts( clipped_actions = actions # Clip the actions to avoid out of bound error if isinstance(self.action_space, gym.spaces.Box): - clipped_actions = np.clip( - actions, self.action_space.low, self.action_space.high - ) + clipped_actions = np.clip(actions, self.action_space.low, self.action_space.high) new_obs, rewards, dones, infos = env.step(clipped_actions) @@ -187,9 +181,7 @@ def collect_rollouts( if isinstance(self.action_space, gym.spaces.Discrete): # Reshape in case of discrete action actions = actions.reshape(-1, 1) - rollout_buffer.add( - self._last_obs, actions, rewards, self._last_dones, values, log_probs - ) + rollout_buffer.add(self._last_obs, actions, rewards, self._last_dones, values, log_probs) self._last_obs = new_obs self._last_dones = dones @@ -240,9 +232,7 @@ def learn( while self.num_timesteps < total_timesteps: - continue_training = self.collect_rollouts( - self.env, callback, self.rollout_buffer, n_rollout_steps=self.n_steps - ) + continue_training = self.collect_rollouts(self.env, callback, self.rollout_buffer, n_rollout_steps=self.n_steps) if continue_training is False: break @@ -269,9 +259,7 @@ def learn( int(time.time() - self.start_time), exclude="tensorboard", ) - logger.record( - "time/total_timesteps", self.num_timesteps, exclude="tensorboard" - ) + logger.record("time/total_timesteps", self.num_timesteps, exclude="tensorboard") logger.dump(step=self.num_timesteps) self.train() diff --git a/stable_baselines3/common/policies.py b/stable_baselines3/common/policies.py index 5dc148b35..561a951b2 100644 --- a/stable_baselines3/common/policies.py +++ b/stable_baselines3/common/policies.py @@ 
-1,11 +1,10 @@ """Policies: abstract base class and concrete implementations.""" import collections +import copy from abc import ABC, abstractmethod from functools import partial -from typing import Any, Dict, List, Optional, Tuple, Type, Union, Callable - -import copy +from typing import Any, Callable, Dict, List, Optional, Tuple, Type, Union import gym import numpy as np @@ -21,20 +20,15 @@ StateDependentNoiseDistribution, make_proba_distribution, ) -from stable_baselines3.common.preprocessing import ( - get_action_dim, - is_image_space, - preprocess_obs, -) +from stable_baselines3.common.preprocessing import get_action_dim, is_image_space, preprocess_obs from stable_baselines3.common.torch_layers import ( BaseFeaturesExtractor, + CombinedExtractor, FlattenExtractor, MlpExtractor, NatureCNN, create_mlp, - CombinedExtractor, ) - from stable_baselines3.common.type_aliases import Schedule from stable_baselines3.common.utils import get_device, is_vectorized_observation from stable_baselines3.common.vec_env import VecTransposeImage @@ -127,9 +121,7 @@ def _update_features_extractor( def make_features_extractor(self) -> BaseFeaturesExtractor: """ Helper method to create a features extractor.""" - return self.features_extractor_class( - self.observation_space, **self.features_extractor_kwargs - ) + return self.features_extractor_class(self.observation_space, **self.features_extractor_kwargs) def extract_features(self, obs: th.Tensor) -> th.Tensor: """ @@ -139,9 +131,7 @@ def extract_features(self, obs: th.Tensor) -> th.Tensor: :return: """ assert self.features_extractor is not None, "No features extractor was set" - preprocessed_obs = preprocess_obs( - obs, self.observation_space, normalize_images=self.normalize_images - ) + preprocessed_obs = preprocess_obs(obs, self.observation_space, normalize_images=self.normalize_images) return self.features_extractor(preprocessed_obs) def _get_data(self) -> Dict[str, Any]: @@ -202,9 +192,7 @@ def load_from_vector(self, vector: np.ndarray) -> None: :param vector: """ - th.nn.utils.vector_to_parameters( - th.FloatTensor(vector).to(self.device), self.parameters() - ) + th.nn.utils.vector_to_parameters(th.FloatTensor(vector).to(self.device), self.parameters()) def parameters_to_vector(self) -> np.ndarray: """ @@ -212,9 +200,7 @@ def parameters_to_vector(self) -> np.ndarray: :return: """ - return ( - th.nn.utils.parameters_to_vector(self.parameters()).detach().cpu().numpy() - ) + return th.nn.utils.parameters_to_vector(self.parameters()).detach().cpu().numpy() class BasePolicy(BaseModel): @@ -254,9 +240,7 @@ def init_weights(module: nn.Module, gain: float = 1) -> None: module.bias.data.fill_(0.0) @abstractmethod - def _predict( - self, observation: th.Tensor, deterministic: bool = False - ) -> th.Tensor: + def _predict(self, observation: th.Tensor, deterministic: bool = False) -> th.Tensor: """ Get the action according to the policy for a given observation. 
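The ``parameters_to_vector``/``load_from_vector`` pair reformatted above round-trips every policy parameter through a single flat numpy vector, which is convenient for black-box optimizers that perturb weights outside the computation graph. A short sketch, assuming ``model`` is any already-constructed SB3 algorithm (that variable is not from the patch):

    import numpy as np

    vector = model.policy.parameters_to_vector()  # flat numpy copy of all parameters
    noisy = vector + 0.01 * np.random.randn(*vector.shape)  # perturb out-of-graph
    model.policy.load_from_vector(noisy)  # write the perturbed weights back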
@@ -302,28 +286,20 @@ def predict( obs_space = self.observation_space.spaces[key] if is_image_space(obs_space): obs = np.array(obs) - if not ( - obs.shape == obs_space.shape or obs.shape[1:] == obs_space.shape - ): + if not (obs.shape == obs_space.shape or obs.shape[1:] == obs_space.shape): # Try to re-order the channels transpose_obs = VecTransposeImage.transpose_image(obs) - if ( - transpose_obs.shape == obs_space.shape - or transpose_obs.shape[1:] == obs_space.shape - ): + if transpose_obs.shape == obs_space.shape or transpose_obs.shape[1:] == obs_space.shape: observation = transpose_obs else: - observation[key] = obs.reshape( - (-1,) + self.observation_space[key].shape - ) + observation[key] = obs.reshape((-1,) + self.observation_space[key].shape) elif is_image_space(self.observation_space): # Handle the different cases for images # as PyTorch use channel first format if not ( - observation.shape == self.observation_space.shape - or observation.shape[1:] == self.observation_space.shape + observation.shape == self.observation_space.shape or observation.shape[1:] == self.observation_space.shape ): # Try to re-order the channels transpose_obs = VecTransposeImage.transpose_image(observation) @@ -356,15 +332,11 @@ def predict( else: # Actions could be on arbitrary scale, so clip the actions to avoid # out of bound error (e.g. if sampling from a Gaussian distribution) - actions = np.clip( - actions, self.action_space.low, self.action_space.high - ) + actions = np.clip(actions, self.action_space.low, self.action_space.high) if not vectorized_env: if state is not None: - raise ValueError( - "Error: The environment must be vectorized when using recurrent policies." - ) + raise ValueError("Error: The environment must be vectorized when using recurrent policies.") actions = actions[0] return actions, state @@ -473,9 +445,7 @@ def __init__( self.activation_fn = activation_fn self.ortho_init = ortho_init - self.features_extractor = features_extractor_class( - self.observation_space, **self.features_extractor_kwargs - ) + self.features_extractor = features_extractor_class(self.observation_space, **self.features_extractor_kwargs) self.features_dim = self.features_extractor.features_dim self.normalize_images = normalize_images @@ -496,9 +466,7 @@ def __init__( self.dist_kwargs = dist_kwargs # Action distribution - self.action_dist = make_proba_distribution( - action_space, use_sde=use_sde, dist_kwargs=dist_kwargs - ) + self.action_dist = make_proba_distribution(action_space, use_sde=use_sde, dist_kwargs=dist_kwargs) self._build(lr_schedule) @@ -533,9 +501,7 @@ def reset_noise(self, n_envs: int = 1) -> None: :param n_envs: """ - assert isinstance( - self.action_dist, StateDependentNoiseDistribution - ), "reset_noise() is only available when using gSDE" + assert isinstance(self.action_dist, StateDependentNoiseDistribution), "reset_noise() is only available when using gSDE" self.action_dist.sample_weights(self.log_std, batch_size=n_envs) def _build_mlp_extractor(self) -> None: @@ -575,26 +541,18 @@ def _build(self, lr_schedule: Schedule) -> None: latent_dim=latent_dim_pi, log_std_init=self.log_std_init ) elif isinstance(self.action_dist, StateDependentNoiseDistribution): - latent_sde_dim = ( - latent_dim_pi if self.sde_net_arch is None else latent_sde_dim - ) + latent_sde_dim = latent_dim_pi if self.sde_net_arch is None else latent_sde_dim self.action_net, self.log_std = self.action_dist.proba_distribution_net( latent_dim=latent_dim_pi, latent_sde_dim=latent_sde_dim, log_std_init=self.log_std_init, ) 
elif isinstance(self.action_dist, CategoricalDistribution): - self.action_net = self.action_dist.proba_distribution_net( - latent_dim=latent_dim_pi - ) + self.action_net = self.action_dist.proba_distribution_net(latent_dim=latent_dim_pi) elif isinstance(self.action_dist, MultiCategoricalDistribution): - self.action_net = self.action_dist.proba_distribution_net( - latent_dim=latent_dim_pi - ) + self.action_net = self.action_dist.proba_distribution_net(latent_dim=latent_dim_pi) elif isinstance(self.action_dist, BernoulliDistribution): - self.action_net = self.action_dist.proba_distribution_net( - latent_dim=latent_dim_pi - ) + self.action_net = self.action_dist.proba_distribution_net(latent_dim=latent_dim_pi) else: raise NotImplementedError(f"Unsupported distribution '{self.action_dist}'.") @@ -616,13 +574,9 @@ def _build(self, lr_schedule: Schedule) -> None: module.apply(partial(self.init_weights, gain=gain)) # Setup optimizer with initial learning rate - self.optimizer = self.optimizer_class( - self.parameters(), lr=lr_schedule(1), **self.optimizer_kwargs - ) + self.optimizer = self.optimizer_class(self.parameters(), lr=lr_schedule(1), **self.optimizer_kwargs) - def forward( - self, obs: th.Tensor, deterministic: bool = False - ) -> Tuple[th.Tensor, th.Tensor, th.Tensor]: + def forward(self, obs: th.Tensor, deterministic: bool = False) -> Tuple[th.Tensor, th.Tensor, th.Tensor]: """ Forward pass in all the networks (actor and critic) @@ -633,9 +587,7 @@ def forward( latent_pi, latent_vf, latent_sde = self._get_latent(obs) # Evaluate the values for the given observations values = self.value_net(latent_vf) - distribution = self._get_action_dist_from_latent( - latent_pi, latent_sde=latent_sde - ) + distribution = self._get_action_dist_from_latent(latent_pi, latent_sde=latent_sde) actions = distribution.get_actions(deterministic=deterministic) log_prob = distribution.log_prob(actions) return actions, values, log_prob @@ -659,9 +611,7 @@ def _get_latent(self, obs: th.Tensor) -> Tuple[th.Tensor, th.Tensor, th.Tensor]: latent_sde = self.sde_features_extractor(features) return latent_pi, latent_vf, latent_sde - def _get_action_dist_from_latent( - self, latent_pi: th.Tensor, latent_sde: Optional[th.Tensor] = None - ) -> Distribution: + def _get_action_dist_from_latent(self, latent_pi: th.Tensor, latent_sde: Optional[th.Tensor] = None) -> Distribution: """ Retrieve action distribution given the latent codes. @@ -683,15 +633,11 @@ def _get_action_dist_from_latent( # Here mean_actions are the logits (before rounding to get the binary actions) return self.action_dist.proba_distribution(action_logits=mean_actions) elif isinstance(self.action_dist, StateDependentNoiseDistribution): - return self.action_dist.proba_distribution( - mean_actions, self.log_std, latent_sde - ) + return self.action_dist.proba_distribution(mean_actions, self.log_std, latent_sde) else: raise ValueError("Invalid action distribution") - def _predict( - self, observation: th.Tensor, deterministic: bool = False - ) -> th.Tensor: + def _predict(self, observation: th.Tensor, deterministic: bool = False) -> th.Tensor: """ Get the action according to the policy for a given observation. 
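The ``forward`` method compacted above is the single call that drives on-policy rollout collection: one pass produces the sampled actions, the critic's value estimates, and the log-probabilities that later enter the policy-gradient loss. A sketch of the call, assuming ``policy`` is an ``ActorCriticPolicy`` and ``obs_tensor`` a batched observation tensor (both names are illustrative):

    import torch as th

    with th.no_grad():
        actions, values, log_probs = policy.forward(obs_tensor)
    # For a flat action space, roughly:
    # actions: (n_envs, action_dim), values: (n_envs, 1), log_probs: (n_envs,)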
@@ -703,9 +649,7 @@ def _predict( distribution = self._get_action_dist_from_latent(latent_pi, latent_sde) return distribution.get_actions(deterministic=deterministic) - def evaluate_actions( - self, obs: th.Tensor, actions: th.Tensor - ) -> Tuple[th.Tensor, th.Tensor, th.Tensor]: + def evaluate_actions(self, obs: th.Tensor, actions: th.Tensor) -> Tuple[th.Tensor, th.Tensor, th.Tensor]: """ Evaluate actions according to the current policy, given the observations. @@ -978,9 +922,7 @@ def create_sde_features_extractor( _policy_registry = dict() # type: Dict[Type[BasePolicy], Dict[str, Type[BasePolicy]]] -def get_policy_from_name( - base_policy_type: Type[BasePolicy], name: str -) -> Type[BasePolicy]: +def get_policy_from_name(base_policy_type: Type[BasePolicy], name: str) -> Type[BasePolicy]: """ Returns the registered policy from the base type and name. See `register_policy` for registering policies and explanation. @@ -1029,9 +971,7 @@ def register_policy(name: str, policy: Type[BasePolicy]) -> None: sub_class = cls break if sub_class is None: - raise ValueError( - f"Error: the policy {policy} is not of any known subclasses of BasePolicy!" - ) + raise ValueError(f"Error: the policy {policy} is not of any known subclasses of BasePolicy!") if sub_class not in _policy_registry: _policy_registry[sub_class] = {} @@ -1040,7 +980,5 @@ def register_policy(name: str, policy: Type[BasePolicy]) -> None: # we try to register. If not so, # do not override and complain. if _policy_registry[sub_class][name] != policy: - raise ValueError( - f"Error: the name {name} is already registered for a different policy, will not override." - ) + raise ValueError(f"Error: the name {name} is already registered for a different policy, will not override.") _policy_registry[sub_class][name] = policy diff --git a/stable_baselines3/common/preprocessing.py b/stable_baselines3/common/preprocessing.py index 9ae0c5367..bc6995f8d 100644 --- a/stable_baselines3/common/preprocessing.py +++ b/stable_baselines3/common/preprocessing.py @@ -1,5 +1,5 @@ import warnings -from typing import Tuple +from typing import Dict, Tuple, Union import numpy as np import torch as th @@ -20,9 +20,7 @@ def is_image_space_channels_first(observation_space: spaces.Box) -> bool: """ smallest_dimension = np.argmin(observation_space.shape).item() if smallest_dimension == 1: - warnings.warn( - "Treating image space as channels-last, while second dimension was smallest of the three." - ) + warnings.warn("Treating image space as channels-last, while second dimension was smallest of the three.") return smallest_dimension == 0 @@ -74,9 +72,7 @@ def has_image_space(observation_space: spaces.Dict): return False -def preprocess_obs( - obs: th.Tensor, observation_space: spaces.Space, normalize_images: bool = True -) -> th.Tensor: +def preprocess_obs(obs: th.Tensor, observation_space: spaces.Space, normalize_images: bool = True) -> th.Tensor: """ Preprocess observation to be to a neural network. 
For images, it normalizes the values by dividing them by 255 (to have values in [0, 1]) @@ -101,9 +97,7 @@ def preprocess_obs( # Tensor concatenation of one hot encodings of each Categorical sub-space return th.cat( [ - F.one_hot( - obs_.long(), num_classes=int(observation_space.nvec[idx]) - ).float() + F.one_hot(obs_.long(), num_classes=int(observation_space.nvec[idx])).float() for idx, obs_ in enumerate(th.split(obs.long(), 1, dim=1)) ], dim=-1, @@ -118,12 +112,10 @@ def preprocess_obs( return obs else: - raise NotImplementedError( - f"Preprocessing not implemented for {observation_space}" - ) + raise NotImplementedError(f"Preprocessing not implemented for {observation_space}") -def get_obs_shape(observation_space: spaces.Space) -> Tuple[int, ...]: +def get_obs_shape(observation_space: spaces.Space) -> Union[Tuple[int, ...], Dict[str, Tuple[int, ...]]]: """ Get the shape of the observation (useful for the buffers). @@ -142,14 +134,10 @@ def get_obs_shape(observation_space: spaces.Space) -> Tuple[int, ...]: # Number of binary features return (int(observation_space.n),) elif isinstance(observation_space, spaces.Dict): - return { - key: subspace.shape for (key, subspace) in observation_space.spaces.items() - } + return {key: subspace.shape for (key, subspace) in observation_space.spaces.items()} else: - raise NotImplementedError( - f"{observation_space} observation space is not supported" - ) + raise NotImplementedError(f"{observation_space} observation space is not supported") def get_flattened_obs_dim(observation_space: spaces.Space) -> int: diff --git a/stable_baselines3/common/torch_layers.py b/stable_baselines3/common/torch_layers.py index 231ae64ed..4212d9d1e 100644 --- a/stable_baselines3/common/torch_layers.py +++ b/stable_baselines3/common/torch_layers.py @@ -41,9 +41,7 @@ class FlattenExtractor(BaseFeaturesExtractor): """ def __init__(self, observation_space: gym.Space): - super(FlattenExtractor, self).__init__( - observation_space, get_flattened_obs_dim(observation_space) - ) + super(FlattenExtractor, self).__init__(observation_space, get_flattened_obs_dim(observation_space)) self.flatten = nn.Flatten() def forward(self, observations: th.Tensor) -> th.Tensor: @@ -87,9 +85,7 @@ def __init__(self, observation_space: gym.spaces.Box, features_dim: int = 512): # Compute shape by doing one forward pass with th.no_grad(): - n_flatten = self.cnn( - th.as_tensor(observation_space.sample()[None]).float() - ).shape[1] + n_flatten = self.cnn(th.as_tensor(observation_space.sample()[None]).float()).shape[1] self.linear = nn.Sequential(nn.Linear(n_flatten, features_dim), nn.ReLU()) @@ -174,37 +170,25 @@ def __init__( super(MlpExtractor, self).__init__() device = get_device(device) shared_net, policy_net, value_net = [], [], [] - policy_only_layers = ( - [] - ) # Layer sizes of the network that only belongs to the policy network - value_only_layers = ( - [] - ) # Layer sizes of the network that only belongs to the value network + policy_only_layers = [] # Layer sizes of the network that only belongs to the policy network + value_only_layers = [] # Layer sizes of the network that only belongs to the value network last_layer_dim_shared = feature_dim # Iterate through the shared layers and build the shared parts of the network for idx, layer in enumerate(net_arch): if isinstance(layer, int): # Check that this is a shared layer # TODO: give layer a meaningful name - shared_net.append( - nn.Linear(last_layer_dim_shared, layer) - ) # add linear of size layer + 
shared_net.append(nn.Linear(last_layer_dim_shared, layer)) # add linear of size layer shared_net.append(activation_fn()) last_layer_dim_shared = layer else: - assert isinstance( - layer, dict - ), "Error: the net_arch list can only contain ints and dicts" + assert isinstance(layer, dict), "Error: the net_arch list can only contain ints and dicts" if "pi" in layer: - assert isinstance( - layer["pi"], list - ), "Error: net_arch[-1]['pi'] must contain a list of integers." + assert isinstance(layer["pi"], list), "Error: net_arch[-1]['pi'] must contain a list of integers." policy_only_layers = layer["pi"] if "vf" in layer: - assert isinstance( - layer["vf"], list - ), "Error: net_arch[-1]['vf'] must contain a list of integers." + assert isinstance(layer["vf"], list), "Error: net_arch[-1]['vf'] must contain a list of integers." value_only_layers = layer["vf"] break # From here on the network splits up in policy and value network @@ -212,21 +196,15 @@ def __init__( last_layer_dim_vf = last_layer_dim_shared # Build the non-shared part of the network - for idx, (pi_layer_size, vf_layer_size) in enumerate( - zip_longest(policy_only_layers, value_only_layers) - ): + for idx, (pi_layer_size, vf_layer_size) in enumerate(zip_longest(policy_only_layers, value_only_layers)): if pi_layer_size is not None: - assert isinstance( - pi_layer_size, int - ), "Error: net_arch[-1]['pi'] must only contain integers." + assert isinstance(pi_layer_size, int), "Error: net_arch[-1]['pi'] must only contain integers." policy_net.append(nn.Linear(last_layer_dim_pi, pi_layer_size)) policy_net.append(activation_fn()) last_layer_dim_pi = pi_layer_size if vf_layer_size is not None: - assert isinstance( - vf_layer_size, int - ), "Error: net_arch[-1]['vf'] must only contain integers." + assert isinstance(vf_layer_size, int), "Error: net_arch[-1]['vf'] must only contain integers." value_net.append(nn.Linear(last_layer_dim_vf, vf_layer_size)) value_net.append(activation_fn()) last_layer_dim_vf = vf_layer_size @@ -261,9 +239,7 @@ def __init__( activation_fn: Type[nn.Module] = nn.ReLU, comb_net_arch: List[int] = [64, 64], ): - super(CombinedExtractor, self).__init__( - observation_space, features_dim=features_dim - ) + super(CombinedExtractor, self).__init__(observation_space, features_dim=features_dim) extractors = {} @@ -282,13 +258,9 @@ def __init__( # TODO is this the best practice for finding out the size? with th.no_grad(): - n_flatten = cnn( - th.as_tensor(subspace.sample()[None]).float() - ).shape[1] + n_flatten = cnn(th.as_tensor(subspace.sample()[None]).float()).shape[1] - cnn_linear = nn.Sequential( - nn.Linear(n_flatten, cnn_output_dim), nn.ReLU() - ) + cnn_linear = nn.Sequential(nn.Linear(n_flatten, cnn_output_dim), nn.ReLU()) extractors[key] = nn.Sequential(*(list(cnn) + list(cnn_linear))) @@ -320,15 +292,11 @@ def __init__( ) def forward(self, observations: TensorDict) -> th.Tensor: - encoded_tensor_list = [ - extractor(observations[key]) for key, extractor in self.extractors.items() - ] + encoded_tensor_list = [extractor(observations[key]) for key, extractor in self.extractors.items()] return self.combined(th.cat(encoded_tensor_list, dim=1)) -def get_actor_critic_arch( - net_arch: Union[List[int], Dict[str, List[int]]] -) -> Tuple[List[int], List[int]]: +def get_actor_critic_arch(net_arch: Union[List[int], Dict[str, List[int]]]) -> Tuple[List[int], List[int]]: """ Get the actor and critic network architectures for off-policy actor-critic algorithms (SAC, TD3, DDPG). 
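# Usage sketch (illustrative, not taken from the diff): the two net_arch
# formats accepted by get_actor_critic_arch(); the layer sizes are examples,
# not library defaults.
from stable_baselines3.common.torch_layers import get_actor_critic_arch

# A plain list is shared verbatim by the actor and the critic.
actor_arch, critic_arch = get_actor_critic_arch([256, 256])
assert actor_arch == critic_arch == [256, 256]

# A dict specifies them separately through the "pi" and "qf" keys.
actor_arch, critic_arch = get_actor_critic_arch({"pi": [64, 64], "qf": [400, 300]})
assert actor_arch == [64, 64] and critic_arch == [400, 300]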
@@ -361,14 +329,8 @@ def get_actor_critic_arch( if isinstance(net_arch, list): actor_arch, critic_arch = net_arch, net_arch else: - assert isinstance( - net_arch, dict - ), "Error: the net_arch can only contain be a list of ints or a dict" - assert ( - "pi" in net_arch - ), "Error: no key 'pi' was provided in net_arch for the actor network" - assert ( - "qf" in net_arch - ), "Error: no key 'qf' was provided in net_arch for the critic network" + assert isinstance(net_arch, dict), "Error: the net_arch can only contain be a list of ints or a dict" + assert "pi" in net_arch, "Error: no key 'pi' was provided in net_arch for the actor network" + assert "qf" in net_arch, "Error: no key 'qf' was provided in net_arch for the critic network" actor_arch, critic_arch = net_arch["pi"], net_arch["qf"] return actor_arch, critic_arch diff --git a/stable_baselines3/common/type_aliases.py b/stable_baselines3/common/type_aliases.py index d84767543..87dd01a90 100644 --- a/stable_baselines3/common/type_aliases.py +++ b/stable_baselines3/common/type_aliases.py @@ -13,9 +13,7 @@ GymStepReturn = Tuple[GymObs, float, bool, Dict] TensorDict = Dict[Union[str, int], th.Tensor] OptimizerStateDict = Dict[str, Any] -MaybeCallback = Union[ - None, Callable, List[callbacks.BaseCallback], callbacks.BaseCallback -] +MaybeCallback = Union[None, Callable, List[callbacks.BaseCallback], callbacks.BaseCallback] # A schedule takes the remaining progress as input # and ouputs a scalar (e.g. learning rate, clip range, ...) diff --git a/stable_baselines3/common/utils.py b/stable_baselines3/common/utils.py index 50cc42227..185efa360 100644 --- a/stable_baselines3/common/utils.py +++ b/stable_baselines3/common/utils.py @@ -3,8 +3,7 @@ import random from collections import deque from itertools import zip_longest -from typing import Callable, Iterable, Optional, Union, Dict - +from typing import Callable, Dict, Iterable, Optional, Union import gym import numpy as np @@ -17,7 +16,7 @@ SummaryWriter = None from stable_baselines3.common import logger -from stable_baselines3.common.type_aliases import GymEnv, TensorDict, Schedule +from stable_baselines3.common.type_aliases import GymEnv, Schedule, TensorDict def set_random_seed(seed: int, using_cuda: bool = False) -> None: @@ -162,11 +161,7 @@ def get_latest_run_id(log_path: Optional[str] = None, log_name: str = "") -> int for path in glob.glob(f"{log_path}/{log_name}_[0-9]*"): file_name = path.split(os.sep)[-1] ext = file_name.split("_")[-1] - if ( - log_name == "_".join(file_name.split("_")[:-1]) - and ext.isdigit() - and int(ext) > max_run_id - ): + if log_name == "_".join(file_name.split("_")[:-1]) and ext.isdigit() and int(ext) > max_run_id: max_run_id = int(ext) return max_run_id @@ -198,9 +193,7 @@ def configure_logger( logger.configure(format_strings=[""]) -def check_for_correct_spaces( - env: GymEnv, observation_space: gym.spaces.Space, action_space: gym.spaces.Space -) -> None: +def check_for_correct_spaces(env: GymEnv, observation_space: gym.spaces.Space, action_space: gym.spaces.Space) -> None: """ Checks that the environment has same spaces as provided ones. Used by BaseAlgorithm to check if spaces match after loading the model with given env. 
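# Usage sketch (illustrative): check_for_correct_spaces() is silent when the
# spaces match and raises ValueError otherwise; CartPole is just an example.
import gym
from stable_baselines3.common.utils import check_for_correct_spaces

env = gym.make("CartPole-v1")
check_for_correct_spaces(env, env.observation_space, env.action_space)  # passes
try:
    check_for_correct_spaces(env, env.observation_space, gym.spaces.Discrete(3))
except ValueError as e:
    print(e)  # "Action spaces do not match: Discrete(3) != Discrete(2)"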
@@ -213,18 +206,12 @@ def check_for_correct_spaces( :param action_space: Action space to check against """ if observation_space != env.observation_space: - raise ValueError( - f"Observation spaces do not match: {observation_space} != {env.observation_space}" - ) + raise ValueError(f"Observation spaces do not match: {observation_space} != {env.observation_space}") if action_space != env.action_space: - raise ValueError( - f"Action spaces do not match: {action_space} != {env.action_space}" - ) + raise ValueError(f"Action spaces do not match: {action_space} != {env.action_space}") -def is_vectorized_observation( - observation: np.ndarray, observation_space: gym.spaces.Space -) -> bool: +def is_vectorized_observation(observation: np.ndarray, observation_space: gym.spaces.Space) -> bool: """ For every observation type, detects and validates the shape, then returns whether or not the observation is vectorized. @@ -242,14 +229,10 @@ def is_vectorized_observation( raise ValueError( f"Error: Unexpected observation shape {observation.shape} for " + f"Box environment, please use {observation_space.shape} " - + "or (n_env, {}) for the observation shape.".format( - ", ".join(map(str, observation_space.shape)) - ) + + "or (n_env, {}) for the observation shape.".format(", ".join(map(str, observation_space.shape))) ) elif isinstance(observation_space, gym.spaces.Discrete): - if ( - observation.shape == () - ): # A numpy array of a number, has shape empty tuple '()' + if observation.shape == (): # A numpy array of a number, has shape empty tuple '()' return False elif len(observation.shape) == 1: return True @@ -261,9 +244,7 @@ def is_vectorized_observation( elif isinstance(observation_space, gym.spaces.MultiDiscrete): if observation.shape == (len(observation_space.nvec),): return False - elif len(observation.shape) == 2 and observation.shape[1] == len( - observation_space.nvec - ): + elif len(observation.shape) == 2 and observation.shape[1] == len(observation_space.nvec): return True else: raise ValueError( @@ -274,9 +255,7 @@ def is_vectorized_observation( elif isinstance(observation_space, gym.spaces.MultiBinary): if observation.shape == (observation_space.n,): return False - elif ( - len(observation.shape) == 2 and observation.shape[1] == observation_space.n - ): + elif len(observation.shape) == 2 and observation.shape[1] == observation_space.n: return True else: raise ValueError( @@ -305,8 +284,7 @@ def is_vectorized_observation( ) else: raise ValueError( - "Error: Cannot determine if the observation is vectorized " - + f" with the space type {observation_space}." + "Error: Cannot determine if the observation is vectorized " + f" with the space type {observation_space}." ) diff --git a/stable_baselines3/common/vec_env/vec_frame_stack.py b/stable_baselines3/common/vec_env/vec_frame_stack.py index 7087d237a..4feca8e8d 100644 --- a/stable_baselines3/common/vec_env/vec_frame_stack.py +++ b/stable_baselines3/common/vec_env/vec_frame_stack.py @@ -4,11 +4,7 @@ import numpy as np from gym import spaces -from stable_baselines3.common.preprocessing import ( - is_image_space, - has_image_space, - is_image_space_channels_first, -) +from stable_baselines3.common.preprocessing import has_image_space, is_image_space, is_image_space_channels_first from stable_baselines3.common.vec_env.base_vec_env import VecEnv, VecEnvWrapper @@ -27,9 +23,7 @@ class VecFrameStack(VecEnvWrapper): If None, automatically detect channel to stack over in case of image observation or default to "last" (default). 
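# Usage sketch (illustrative) for the dict support added here: stacking a
# Dict observation space, using the SimpleMultiObsEnv from this patch series.
from stable_baselines3.common.multi_input_envs import SimpleMultiObsEnv
from stable_baselines3.common.vec_env import DummyVecEnv, VecFrameStack

env = DummyVecEnv([lambda: SimpleMultiObsEnv(random_start=True)])
env = VecFrameStack(env, n_stack=4)  # channels_order=None -> auto-detected per key
obs = env.reset()
# Each key is stacked independently: image keys along the channel axis,
# flat keys along the last axis.
print({key: value.shape for key, value in obs.items()})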
""" - def __init__( - self, venv: VecEnv, n_stack: int, channels_order: Optional[str] = None - ): + def __init__(self, venv: VecEnv, n_stack: int, channels_order: Optional[str] = None): self.venv = venv self.n_stack = n_stack @@ -60,9 +54,7 @@ def __init__( ) = self.compute_stacking(channels_order, subspace) observation_space = spaces.Dict(spaces=space_dict) else: - raise Exception( - "VecFrameStack only works with gym.spaces.Box and gym.spaces.Dict observation spaces" - ) + raise Exception("VecFrameStack only works with gym.spaces.Box and gym.spaces.Dict observation spaces") VecEnvWrapper.__init__(self, venv, observation_space=observation_space) @@ -100,9 +92,7 @@ def step_wait( if isinstance(self.venv.observation_space, spaces.Box): stack_ax_size = observations.shape[self.stack_dimension] - self.stackedobs = np.roll( - self.stackedobs, shift=-stack_ax_size, axis=self.stack_dimension - ) + self.stackedobs = np.roll(self.stackedobs, shift=-stack_ax_size, axis=self.stack_dimension) for i, done in enumerate(dones): if done: if "terminal_observation" in infos[i]: @@ -125,18 +115,12 @@ def step_wait( ) infos[i]["terminal_observation"] = new_terminal else: - warnings.warn( - "VecFrameStack wrapping a VecEnv without terminal_observation info" - ) + warnings.warn("VecFrameStack wrapping a VecEnv without terminal_observation info") self.stackedobs[i] = 0 if self.channels_first: - self.stackedobs[ - :, -observations.shape[self.stack_dimension] :, ... - ] = observations + self.stackedobs[:, -observations.shape[self.stack_dimension] :, ...] = observations else: - self.stackedobs[ - ..., -observations.shape[self.stack_dimension] : - ] = observations + self.stackedobs[..., -observations.shape[self.stack_dimension] :] = observations elif isinstance(self.venv.observation_space, spaces.Dict): for key in self.stackedobs.keys(): stack_ax_size = observations[key].shape[self.stack_dimension[key]] @@ -172,22 +156,14 @@ def step_wait( ) infos[i]["terminal_observation"][key] = new_terminal else: - warnings.warn( - "VecFrameStack wrapping a VecEnv without terminal_observation info" - ) + warnings.warn("VecFrameStack wrapping a VecEnv without terminal_observation info") self.stackedobs[key][i] = 0 if self.channels_first: - self.stackedobs[key][ - :, -observations[key].shape[self.stack_dimension[key]] :, ... - ] = observations[key] + self.stackedobs[key][:, -observations[key].shape[self.stack_dimension[key]] :, ...] = observations[key] else: - self.stackedobs[key][ - ..., -observations[key].shape[self.stack_dimension] : - ] = observations[key] + self.stackedobs[key][..., -observations[key].shape[self.stack_dimension] :] = observations[key] else: - raise Exception( - f"Unhandled observation type {type(self.venv.observation_space)}" - ) + raise Exception(f"Unhandled observation type {type(self.venv.observation_space)}") return self.stackedobs, rewards, dones, infos @@ -200,25 +176,17 @@ def reset(self) -> Union[np.ndarray, Dict[str, np.ndarray]]: if isinstance(self.venv.observation_space, spaces.Box): self.stackedobs[...] = 0 if self.channels_first: - self.stackedobs[ - :, -observation.shape[self.stack_dimension] :, ... - ] = observation + self.stackedobs[:, -observation.shape[self.stack_dimension] :, ...] = observation else: - self.stackedobs[ - ..., -observation.shape[self.stack_dimension] : - ] = observation + self.stackedobs[..., -observation.shape[self.stack_dimension] :] = observation elif isinstance(self.venv.observation_space, spaces.Dict): for key, obs in observation.items(): self.stackedobs[key][...] 
= 0 if self.channels_first[key]: - self.stackedobs[key][ - :, -obs.shape[self.stack_dimension[key]] :, ... - ] = obs + self.stackedobs[key][:, -obs.shape[self.stack_dimension[key]] :, ...] = obs else: - self.stackedobs[key][ - ..., -obs.shape[self.stack_dimension[key]] : - ] = obs + self.stackedobs[key][..., -obs.shape[self.stack_dimension[key]] :] = obs return self.stackedobs diff --git a/tests/test_her.py b/tests/test_her.py index a939cc5ec..e94a479ad 100644 --- a/tests/test_her.py +++ b/tests/test_her.py @@ -129,9 +129,7 @@ def test_save_load(tmp_path, model_class, use_sde, online_sampling): params = deepcopy(model.policy.state_dict()) # Modify all parameters to be random values - random_params = dict( - (param_name, th.rand_like(param)) for param_name, param in params.items() - ) + random_params = dict((param_name, th.rand_like(param)) for param_name, param in params.items()) # Update model parameters with the new random values model.policy.load_state_dict(random_params) @@ -139,9 +137,7 @@ def test_save_load(tmp_path, model_class, use_sde, online_sampling): new_params = model.policy.state_dict() # Check that all params are different now for k in params: - assert not th.allclose( - params[k], new_params[k] - ), "Parameters did not change as expected." + assert not th.allclose(params[k], new_params[k]), "Parameters did not change as expected." params = new_params @@ -158,9 +154,7 @@ def test_save_load(tmp_path, model_class, use_sde, online_sampling): # Check that all params are the same as before save load procedure now for key in params: - assert th.allclose( - params[key], new_params[key] - ), "Model parameters not the same after save and load." + assert th.allclose(params[key], new_params[key]), "Model parameters not the same after save and load." 
# check if model still selects the same actions new_selected_actions, _ = model.predict(observations, deterministic=True) @@ -170,9 +164,7 @@ def test_save_load(tmp_path, model_class, use_sde, online_sampling): model.learn(total_timesteps=300) # Test that the change of parameters works - model = HER.load( - str(tmp_path / "test_save.zip"), env=env, verbose=3, learning_rate=2.0 - ) + model = HER.load(str(tmp_path / "test_save.zip"), env=env, verbose=3, learning_rate=2.0) assert model.model.learning_rate == 2.0 assert model.verbose == 3 @@ -184,9 +176,7 @@ def test_save_load(tmp_path, model_class, use_sde, online_sampling): "online_sampling, truncate_last_trajectory", [(False, None), (True, True), (True, False)], ) -def test_save_load_replay_buffer( - tmp_path, recwarn, online_sampling, truncate_last_trajectory -): +def test_save_load_replay_buffer(tmp_path, recwarn, online_sampling, truncate_last_trajectory): """ Test if 'save_replay_buffer' and 'load_replay_buffer' works correctly """ @@ -227,9 +217,7 @@ def test_save_load_replay_buffer( if truncate_last_trajectory: assert len(recwarn) == 1 warning = recwarn.pop(UserWarning) - assert "The last trajectory in the replay buffer will be truncated" in str( - warning.message - ) + assert "The last trajectory in the replay buffer will be truncated" in str(warning.message) else: assert len(recwarn) == 0 @@ -257,9 +245,7 @@ def test_save_load_replay_buffer( model.replay_buffer.buffer["done"][: n_episodes_stored - 1], ) else: - assert np.allclose( - old_replay_buffer.observations, model.replay_buffer.observations - ) + assert np.allclose(old_replay_buffer.observations, model.replay_buffer.observations) assert np.allclose(old_replay_buffer.actions, model.replay_buffer.actions) assert np.allclose(old_replay_buffer.rewards, model.replay_buffer.rewards) assert np.allclose(old_replay_buffer.dones, model.replay_buffer.dones) @@ -305,20 +291,14 @@ def test_get_max_episode_length(): get_time_limit(dict_env, current_max_episode_length=None) default_length = 10 - assert ( - get_time_limit(dict_env, current_max_episode_length=default_length) - == default_length - ) + assert get_time_limit(dict_env, current_max_episode_length=default_length) == default_length env = gym.make("CartPole-v1") vec_env = DummyVecEnv([lambda: env]) assert get_time_limit(vec_env, current_max_episode_length=None) == 500 # Overwrite max_episode_steps - assert ( - get_time_limit(vec_env, current_max_episode_length=default_length) - == default_length - ) + assert get_time_limit(vec_env, current_max_episode_length=default_length) == default_length # Set max_episode_steps to None env.spec.max_episode_steps = None From b2a1c14597a759473c6e6ec83926e220eec08268 Mon Sep 17 00:00:00 2001 From: Jaden Travnik Date: Thu, 26 Nov 2020 14:25:50 -0500 Subject: [PATCH 04/70] Adding DictBuffers --- stable_baselines3/common/buffers.py | 466 +++++++++++++----- .../common/off_policy_algorithm.py | 121 ++++- .../common/on_policy_algorithm.py | 32 +- 3 files changed, 465 insertions(+), 154 deletions(-) diff --git a/stable_baselines3/common/buffers.py b/stable_baselines3/common/buffers.py index 7d7cfb189..d11ba58de 100644 --- a/stable_baselines3/common/buffers.py +++ b/stable_baselines3/common/buffers.py @@ -47,7 +47,7 @@ def __init__( self.observation_space = observation_space self.action_space = action_space self.obs_shape = get_obs_shape(observation_space) - self.is_dict_data = isinstance(self.observation_space, spaces.Dict) + self.action_dim = get_action_dim(action_space) self.pos = 0 self.full = False @@ 
-189,30 +189,18 @@ def __init__( self.optimize_memory_usage = optimize_memory_usage - if self.is_dict_data: - self.observations = { - key: np.zeros((self.buffer_size, self.n_envs) + _obs_shape) - for key, _obs_shape in self.obs_shape.items() - } - else: - self.observations = np.zeros( - (self.buffer_size, self.n_envs) + self.obs_shape, - dtype=observation_space.dtype, - ) + self.observations = np.zeros( + (self.buffer_size, self.n_envs) + self.obs_shape, + dtype=observation_space.dtype, + ) if optimize_memory_usage: # `observations` contains also the next observation self.next_observations = None else: - if self.is_dict_data: - self.next_observations = { - key: np.zeros((self.buffer_size, self.n_envs) + _obs_shape) - for key, _obs_shape in self.obs_shape.items() - } - else: - self.next_observations = np.zeros( - (self.buffer_size, self.n_envs) + self.obs_shape, - dtype=observation_space.dtype, - ) + self.next_observations = np.zeros( + (self.buffer_size, self.n_envs) + self.obs_shape, + dtype=observation_space.dtype, + ) self.actions = np.zeros( (self.buffer_size, self.n_envs, self.action_dim), dtype=action_space.dtype ) @@ -220,12 +208,7 @@ def __init__( self.dones = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32) if psutil is not None: - obs_nbytes = 0 - if self.is_dict_data: - for key, obs in self.observations.items(): - obs_nbytes += obs.nbytes - else: - obs_nbytes = self.observations.nbytes + obs_nbytes = self.observations.nbytes total_memory_usage = ( obs_nbytes @@ -234,12 +217,7 @@ def __init__( + self.dones.nbytes ) if self.next_observations is not None: - next_obs_nbytes = 0 - if self.is_dict_data: - for key, obs in self.observations.items(): - next_obs_nbytes += obs.nbytes - else: - next_obs_nbytes = self.next_observations.nbytes + next_obs_nbytes = self.next_observations.nbytes total_memory_usage += next_obs_nbytes if total_memory_usage > mem_available: @@ -261,30 +239,14 @@ def add( ) -> None: # Copy to avoid modification by reference - if self.is_dict_data: - for key in self.observations.keys(): - self.observations[key][self.pos] = np.array(obs[key]).copy() - else: - self.observations[self.pos] = np.array(obs).copy() + self.observations[self.pos] = np.array(obs).copy() if self.optimize_memory_usage: - if self.is_dict_data: - for key in self.observations.keys(): - self.observations[key][ - (self.pos + 1) % self.buffer_size - ] = np.array(next_obs[key]).copy() - else: - self.observations[(self.pos + 1) % self.buffer_size] = np.array( - next_obs - ).copy() + self.observations[(self.pos + 1) % self.buffer_size] = np.array( + next_obs + ).copy() else: - if self.is_dict_data: - for key in self.next_observations.keys(): - self.next_observations[key][self.pos] = np.array( - next_obs[key] - ).copy() - else: - self.next_observations[self.pos] = np.array(next_obs).copy() + self.next_observations[self.pos] = np.array(next_obs).copy() self.actions[self.pos] = np.array(action).copy() self.rewards[self.pos] = np.array(reward).copy() @@ -325,37 +287,6 @@ def _get_samples( self, batch_inds: np.ndarray, env: Optional[VecNormalize] = None ) -> ReplayBufferSamples: - if self.is_dict_data: - if self.optimize_memory_usage: - next_obs = { - key: self.to_torch( - self._normalize_obs( - obs[(batch_inds + 1) % self.buffer_size, 0, :], - env, - ) - ) - for key, obs in self.observations.items() - } - else: - next_obs = { - key: self.to_torch(self._normalize_obs(obs[batch_inds, 0, :], env)) - for key, obs in self.next_observations.items() - } - - normalized_obs = { - key: 
self.to_torch(self._normalize_obs(obs[batch_inds, 0, :], env)) - for key, obs in self.observations.items() - } - return DictReplayBufferSamples( - observations=normalized_obs, - actions=self.to_torch(self.actions[batch_inds]), - next_observations=next_obs, - dones=self.to_torch(self.dones[batch_inds]), - returns=self.to_torch( - self._normalize_reward(self.rewards[batch_inds], env) - ), - ) - if self.optimize_memory_usage: next_obs = self._normalize_obs( self.observations[(batch_inds + 1) % self.buffer_size, 0, :], env @@ -426,16 +357,9 @@ def __init__( def reset(self) -> None: - if self.is_dict_data: - self.observations = {} - for (key, obs_input_shape) in self.obs_shape.items(): - self.observations[key] = np.zeros( - (self.buffer_size, self.n_envs) + obs_input_shape, dtype=np.float32 - ) - else: - self.observations = np.zeros( - (self.buffer_size, self.n_envs) + self.obs_shape, dtype=np.float32 - ) + self.observations = np.zeros( + (self.buffer_size, self.n_envs) + self.obs_shape, dtype=np.float32 + ) self.actions = np.zeros( (self.buffer_size, self.n_envs, self.action_dim), dtype=np.float32 ) @@ -510,11 +434,7 @@ def add( # Reshape 0-d tensor to avoid error log_prob = log_prob.reshape(-1, 1) - if self.is_dict_data: - for key in self.observations.keys(): - self.observations[key][self.pos] = np.array(obs[key]).copy() - else: - self.observations[self.pos] = np.array(obs).copy() + self.observations[self.pos] = np.array(obs).copy() self.actions[self.pos] = np.array(action).copy() self.rewards[self.pos] = np.array(reward).copy() self.dones[self.pos] = np.array(done).copy() @@ -532,12 +452,14 @@ def get( # Prepare the data if not self.generator_ready: - _tensor_names = ["actions", "values", "log_probs", "advantages", "returns"] - if self.is_dict_data: - for key, obs in self.observations.items(): - self.observations[key] = self.swap_and_flatten(obs) - else: - _tensor_names.append("observations") + _tensor_names = [ + "observations", + "actions", + "values", + "log_probs", + "advantages", + "returns", + ] for tensor in _tensor_names: self.__dict__[tensor] = self.swap_and_flatten(self.__dict__[tensor]) @@ -555,18 +477,6 @@ def get( def _get_samples( self, batch_inds: np.ndarray, env: Optional[VecNormalize] = None ) -> RolloutBufferSamples: - if self.is_dict_data: - return DictRolloutBufferSamples( - observations={ - key: self.to_torch(obs[batch_inds]) - for (key, obs) in self.observations.items() - }, - actions=self.to_torch(self.actions[batch_inds]), - old_values=self.to_torch(self.values[batch_inds].flatten()), - old_log_prob=self.to_torch(self.log_probs[batch_inds].flatten()), - advantages=self.to_torch(self.advantages[batch_inds].flatten()), - returns=self.to_torch(self.returns[batch_inds].flatten()), - ) data = ( self.observations[batch_inds], @@ -577,3 +487,323 @@ def _get_samples( self.returns[batch_inds].flatten(), ) return RolloutBufferSamples(*tuple(map(self.to_torch, data))) + + +class DictReplayBuffer(ReplayBuffer): + """ + Replay buffer used in off-policy algorithms like SAC/TD3. + + :param buffer_size: Max number of element in the buffer + :param observation_space: Observation space + :param action_space: Action space + :param device: + :param n_envs: Number of parallel environments + :param optimize_memory_usage: Enable a memory efficient variant + of the replay buffer which reduces by almost a factor two the memory used, + at a cost of more complexity. 
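# Sketch (illustrative spaces): the buffer below allocates one array per key,
# relying on get_obs_shape() now returning a dict of shapes for Dict spaces.
from gym import spaces
from stable_baselines3.common.preprocessing import get_obs_shape

obs_space = spaces.Dict({"img": spaces.Box(0, 255, (1, 20, 20)), "vec": spaces.Box(-1, 1, (5,))})
print(get_obs_shape(obs_space))  # {'img': (1, 20, 20), 'vec': (5,)}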
+        See https://github.com/DLR-RM/stable-baselines3/issues/37#issuecomment-637501195
+        and https://github.com/DLR-RM/stable-baselines3/pull/28#issuecomment-637559274
+    """
+
+    def __init__(
+        self,
+        buffer_size: int,
+        observation_space: spaces.Space,
+        action_space: spaces.Space,
+        device: Union[th.device, str] = "cpu",
+        n_envs: int = 1,
+        optimize_memory_usage: bool = False,
+    ):
+        super(BaseBuffer, self).__init__(
+            buffer_size, observation_space, action_space, device, n_envs=n_envs
+        )
+
+        assert n_envs == 1, "Replay buffer only supports a single environment for now"
+
+        # Check that the replay buffer can fit into the memory
+        if psutil is not None:
+            mem_available = psutil.virtual_memory().available
+
+        self.optimize_memory_usage = optimize_memory_usage
+
+        self.observations = {
+            key: np.zeros((self.buffer_size, self.n_envs) + _obs_shape)
+            for key, _obs_shape in self.obs_shape.items()
+        }
+        if optimize_memory_usage:
+            # `observations` contains also the next observation
+            self.next_observations = None
+        else:
+            self.next_observations = {
+                key: np.zeros((self.buffer_size, self.n_envs) + _obs_shape)
+                for key, _obs_shape in self.obs_shape.items()
+            }
+
+        self.actions = np.zeros(
+            (self.buffer_size, self.n_envs, self.action_dim), dtype=action_space.dtype
+        )
+        self.rewards = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)
+        self.dones = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)
+
+        if psutil is not None:
+            obs_nbytes = 0
+            for key, obs in self.observations.items():
+                obs_nbytes += obs.nbytes
+
+            total_memory_usage = (
+                obs_nbytes
+                + self.actions.nbytes
+                + self.rewards.nbytes
+                + self.dones.nbytes
+            )
+            if self.next_observations is not None:
+                next_obs_nbytes = 0
+                for key, obs in self.next_observations.items():
+                    next_obs_nbytes += obs.nbytes
+                total_memory_usage += next_obs_nbytes
+
+            if total_memory_usage > mem_available:
+                # Convert to GB
+                total_memory_usage /= 1e9
+                mem_available /= 1e9
+                warnings.warn(
+                    "This system apparently does not have enough memory to store the complete "
+                    f"replay buffer {total_memory_usage:.2f}GB > {mem_available:.2f}GB"
+                )
+
+    def add(
+        self,
+        obs: Union[np.ndarray, dict],
+        next_obs: np.ndarray,
+        action: np.ndarray,
+        reward: np.ndarray,
+        done: np.ndarray,
+    ) -> None:
+        # Copy to avoid modification by reference
+
+        for key in self.observations.keys():
+            self.observations[key][self.pos] = np.array(obs[key]).copy()
+
+        if self.optimize_memory_usage:
+            for key in self.observations.keys():
+                self.observations[key][(self.pos + 1) % self.buffer_size] = np.array(
+                    next_obs[key]
+                ).copy()
+        else:
+            for key in self.next_observations.keys():
+                self.next_observations[key][self.pos] = np.array(next_obs[key]).copy()
+
+        self.actions[self.pos] = np.array(action).copy()
+        self.rewards[self.pos] = np.array(reward).copy()
+        self.dones[self.pos] = np.array(done).copy()
+
+        self.pos += 1
+        if self.pos == self.buffer_size:
+            self.full = True
+            self.pos = 0
+
+    def sample(
+        self, batch_size: int, env: Optional[VecNormalize] = None
+    ) -> DictReplayBufferSamples:
+        """
+        Sample elements from the replay buffer.
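# Sketch (illustrative numbers) of the memory-efficient sampling rule used
# below: with a single shared array for obs/next_obs, index `self.pos` holds
# a half-written transition, so offsets 1..buffer_size-1 can never select it.
import numpy as np

buffer_size, pos, batch_size = 8, 3, 5
batch_inds = (np.random.randint(1, buffer_size, size=batch_size) + pos) % buffer_size
assert pos not in batch_inds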
+        Custom sampling when using the memory-efficient variant,
+        as we should not sample the element with index `self.pos`.
+        See https://github.com/DLR-RM/stable-baselines3/pull/28#issuecomment-637559274
+
+        :param batch_size: Number of elements to sample
+        :param env: associated gym VecEnv
+            to normalize the observations/rewards when sampling
+        :return:
+        """
+        if not self.optimize_memory_usage:
+            return super().sample(batch_size=batch_size, env=env)
+        # Do not sample the element with index `self.pos` as the transition is invalid
+        # (we use only one array to store `obs` and `next_obs`)
+        if self.full:
+            batch_inds = (
+                np.random.randint(1, self.buffer_size, size=batch_size) + self.pos
+            ) % self.buffer_size
+        else:
+            batch_inds = np.random.randint(0, self.pos, size=batch_size)
+        return self._get_samples(batch_inds, env=env)
+
+    def _get_samples(
+        self, batch_inds: np.ndarray, env: Optional[VecNormalize] = None
+    ) -> DictReplayBufferSamples:
+
+        if self.optimize_memory_usage:
+            next_obs = {
+                key: self.to_torch(
+                    self._normalize_obs(
+                        obs[(batch_inds + 1) % self.buffer_size, 0, :],
+                        env,
+                    )
+                )
+                for key, obs in self.observations.items()
+            }
+        else:
+            next_obs = {
+                key: self.to_torch(self._normalize_obs(obs[batch_inds, 0, :], env))
+                for key, obs in self.next_observations.items()
+            }
+
+        normalized_obs = {
+            key: self.to_torch(self._normalize_obs(obs[batch_inds, 0, :], env))
+            for key, obs in self.observations.items()
+        }
+
+        return DictReplayBufferSamples(
+            observations=normalized_obs,
+            actions=self.to_torch(self.actions[batch_inds]),
+            next_observations=next_obs,
+            dones=self.to_torch(self.dones[batch_inds]),
+            returns=self.to_torch(
+                self._normalize_reward(self.rewards[batch_inds], env)
+            ),
+        )
+
+
+class DictRolloutBuffer(RolloutBuffer):
+    """
+    Rollout buffer used in on-policy algorithms like A2C/PPO.
+    It corresponds to ``buffer_size`` transitions collected
+    using the current policy.
+    This experience will be discarded after the policy update.
+    In order to use the PPO objective, we also store the current value of each state
+    and the log probability of each taken action.
+
+    The term rollout here refers to the model-free notion and should not
+    be confused with the concept of rollout used in model-based RL or planning.
+    Hence, it is only involved in policy and value function training but not action selection.
+
+    :param buffer_size: Max number of elements in the buffer
+    :param observation_space: Observation space
+    :param action_space: Action space
+    :param device:
+    :param gae_lambda: Factor for trade-off of bias vs variance for Generalized Advantage Estimator
+        Equivalent to classic advantage when set to 1.
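# Sketch (illustrative, single environment, simplified index conventions) of
# the GAE recursion behind compute_returns_and_advantage(); with
# gae_lambda=1 it telescopes into the classic advantage mentioned above.
import numpy as np

def gae(rewards, values, dones, last_value, gamma=0.99, gae_lambda=0.95):
    # rewards, values, dones: 1d numpy arrays of equal length
    advantages = np.zeros_like(rewards)
    last_gae = 0.0
    for t in reversed(range(len(rewards))):
        next_value = last_value if t == len(rewards) - 1 else values[t + 1]
        non_terminal = 1.0 - dones[t]
        delta = rewards[t] + gamma * next_value * non_terminal - values[t]
        last_gae = delta + gamma * gae_lambda * non_terminal * last_gae
        advantages[t] = last_gae
    return advantages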
+ :param gamma: Discount factor + :param n_envs: Number of parallel environments + """ + + def __init__( + self, + buffer_size: int, + observation_space: spaces.Space, + action_space: spaces.Space, + device: Union[th.device, str] = "cpu", + gae_lambda: float = 1, + gamma: float = 0.99, + n_envs: int = 1, + ): + + super(RolloutBuffer, self).__init__( + buffer_size, observation_space, action_space, device, n_envs=n_envs + ) + self.gae_lambda = gae_lambda + self.gamma = gamma + self.observations, self.actions, self.rewards, self.advantages = ( + None, + None, + None, + None, + ) + self.returns, self.dones, self.values, self.log_probs = None, None, None, None + self.generator_ready = False + self.reset() + + def reset(self) -> None: + + self.observations = {} + for (key, obs_input_shape) in self.obs_shape.items(): + self.observations[key] = np.zeros( + (self.buffer_size, self.n_envs) + obs_input_shape, dtype=np.float32 + ) + self.actions = np.zeros( + (self.buffer_size, self.n_envs, self.action_dim), dtype=np.float32 + ) + self.rewards = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32) + self.returns = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32) + self.dones = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32) + self.values = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32) + self.log_probs = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32) + self.advantages = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32) + self.generator_ready = False + super(RolloutBuffer, self).reset() + + def add( + self, + obs: Union[np.ndarray, dict], + action: np.ndarray, + reward: np.ndarray, + done: np.ndarray, + value: th.Tensor, + log_prob: th.Tensor, + ) -> None: + """ + :param obs: Observation + :param action: Action + :param reward: + :param done: End of episode signal. + :param value: estimated value of the current state + following the current policy. + :param log_prob: log probability of the action + following the current policy. 
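# Round-trip sketch (illustrative toy spaces, zero values/log-probs): add()
# fills the per-key arrays, compute_returns_and_advantage() fills returns,
# and get() yields DictRolloutBufferSamples with one torch tensor per key.
import numpy as np
import torch as th
from gym import spaces
from stable_baselines3.common.buffers import DictRolloutBuffer

buffer = DictRolloutBuffer(
    2, spaces.Dict({"vec": spaces.Box(-1, 1, (4,))}), spaces.Discrete(3), n_envs=1
)
for _ in range(2):
    buffer.add(
        {"vec": np.zeros((1, 4), dtype=np.float32)},
        np.array([[1]]),
        np.array([0.0]),
        np.array([False]),
        th.zeros(1),
        th.zeros(1),
    )
buffer.compute_returns_and_advantage(th.zeros(1), np.array([False]))
for rollout_data in buffer.get(batch_size=2):
    print(rollout_data.observations["vec"].shape)  # torch.Size([2, 4])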
+ """ + if len(log_prob.shape) == 0: + # Reshape 0-d tensor to avoid error + log_prob = log_prob.reshape(-1, 1) + + for key in self.observations.keys(): + self.observations[key][self.pos] = np.array(obs[key]).copy() + self.actions[self.pos] = np.array(action).copy() + self.rewards[self.pos] = np.array(reward).copy() + self.dones[self.pos] = np.array(done).copy() + self.values[self.pos] = value.clone().cpu().numpy().flatten() + self.log_probs[self.pos] = log_prob.clone().cpu().numpy() + self.pos += 1 + if self.pos == self.buffer_size: + self.full = True + + def get( + self, batch_size: Optional[int] = None + ) -> Generator[DictRolloutBufferSamples, None, None]: + assert self.full, "" + indices = np.random.permutation(self.buffer_size * self.n_envs) + # Prepare the data + if not self.generator_ready: + + for key, obs in self.observations.items(): + self.observations[key] = self.swap_and_flatten(obs) + + _tensor_names = ["actions", "values", "log_probs", "advantages", "returns"] + + for tensor in _tensor_names: + self.__dict__[tensor] = self.swap_and_flatten(self.__dict__[tensor]) + self.generator_ready = True + + # Return everything, don't create minibatches + if batch_size is None: + batch_size = self.buffer_size * self.n_envs + + start_idx = 0 + while start_idx < self.buffer_size * self.n_envs: + yield self._get_samples(indices[start_idx : start_idx + batch_size]) + start_idx += batch_size + + def _get_samples( + self, batch_inds: np.ndarray, env: Optional[VecNormalize] = None + ) -> RolloutBufferSamples: + + return DictRolloutBufferSamples( + observations={ + key: self.to_torch(obs[batch_inds]) + for (key, obs) in self.observations.items() + }, + actions=self.to_torch(self.actions[batch_inds]), + old_values=self.to_torch(self.values[batch_inds].flatten()), + old_log_prob=self.to_torch(self.log_probs[batch_inds].flatten()), + advantages=self.to_torch(self.advantages[batch_inds].flatten()), + returns=self.to_torch(self.returns[batch_inds].flatten()), + ) \ No newline at end of file diff --git a/stable_baselines3/common/off_policy_algorithm.py b/stable_baselines3/common/off_policy_algorithm.py index 45456105d..726be7443 100644 --- a/stable_baselines3/common/off_policy_algorithm.py +++ b/stable_baselines3/common/off_policy_algorithm.py @@ -10,12 +10,17 @@ from stable_baselines3.common import logger from stable_baselines3.common.base_class import BaseAlgorithm -from stable_baselines3.common.buffers import ReplayBuffer +from stable_baselines3.common.buffers import ReplayBuffer, DictReplayBuffer from stable_baselines3.common.callbacks import BaseCallback from stable_baselines3.common.noise import ActionNoise from stable_baselines3.common.policies import BasePolicy from stable_baselines3.common.save_util import load_from_pkl, save_to_pkl -from stable_baselines3.common.type_aliases import GymEnv, MaybeCallback, RolloutReturn, Schedule +from stable_baselines3.common.type_aliases import ( + GymEnv, + MaybeCallback, + RolloutReturn, + Schedule, +) from stable_baselines3.common.utils import safe_mean from stable_baselines3.common.vec_env import VecEnv @@ -154,13 +159,23 @@ def __init__( def _setup_model(self) -> None: self._setup_lr_schedule() self.set_random_seed(self.seed) - self.replay_buffer = ReplayBuffer( - self.buffer_size, - self.observation_space, - self.action_space, - self.device, - optimize_memory_usage=self.optimize_memory_usage, - ) + + if isinstance(self.observation_space, gym.spaces.Dict): + self.replay_buffer = DictReplayBuffer( + self.buffer_size, + self.observation_space, + 
self.action_space, + self.device, + optimize_memory_usage=self.optimize_memory_usage, + ) + else: + self.replay_buffer = ReplayBuffer( + self.buffer_size, + self.observation_space, + self.action_space, + self.device, + optimize_memory_usage=self.optimize_memory_usage, + ) self.policy = self.policy_class( self.observation_space, self.action_space, @@ -169,7 +184,9 @@ def _setup_model(self) -> None: ) self.policy = self.policy.to(self.device) - def save_replay_buffer(self, path: Union[str, pathlib.Path, io.BufferedIOBase]) -> None: + def save_replay_buffer( + self, path: Union[str, pathlib.Path, io.BufferedIOBase] + ) -> None: """ Save the replay buffer as a pickle file. @@ -179,14 +196,18 @@ def save_replay_buffer(self, path: Union[str, pathlib.Path, io.BufferedIOBase]) assert self.replay_buffer is not None, "The replay buffer is not defined" save_to_pkl(path, self.replay_buffer, self.verbose) - def load_replay_buffer(self, path: Union[str, pathlib.Path, io.BufferedIOBase]) -> None: + def load_replay_buffer( + self, path: Union[str, pathlib.Path, io.BufferedIOBase] + ) -> None: """ Load a replay buffer from a pickle file. :param path: Path to the pickled replay buffer. """ self.replay_buffer = load_from_pkl(path, self.verbose) - assert isinstance(self.replay_buffer, ReplayBuffer), "The replay buffer must inherit from ReplayBuffer class" + assert isinstance( + self.replay_buffer, ReplayBuffer + ), "The replay buffer must inherit from ReplayBuffer class" def _setup_learn( self, @@ -224,7 +245,14 @@ def _setup_learn( self.replay_buffer.dones[pos] = True return super()._setup_learn( - total_timesteps, eval_env, callback, eval_freq, n_eval_episodes, log_path, reset_num_timesteps, tb_log_name + total_timesteps, + eval_env, + callback, + eval_freq, + n_eval_episodes, + log_path, + reset_num_timesteps, + tb_log_name, ) def learn( @@ -241,7 +269,14 @@ def learn( ) -> "OffPolicyAlgorithm": total_timesteps, callback = self._setup_learn( - total_timesteps, eval_env, callback, eval_freq, n_eval_episodes, eval_log_path, reset_num_timesteps, tb_log_name + total_timesteps, + eval_env, + callback, + eval_freq, + n_eval_episodes, + eval_log_path, + reset_num_timesteps, + tb_log_name, ) callback.on_training_start(locals(), globals()) @@ -265,7 +300,11 @@ def learn( if self.num_timesteps > 0 and self.num_timesteps > self.learning_starts: # If no `gradient_steps` is specified, # do as many gradients steps as steps performed during the rollout - gradient_steps = self.gradient_steps if self.gradient_steps > 0 else rollout.episode_timesteps + gradient_steps = ( + self.gradient_steps + if self.gradient_steps > 0 + else rollout.episode_timesteps + ) self.train(batch_size=self.batch_size, gradient_steps=gradient_steps) callback.on_training_end() @@ -297,7 +336,9 @@ def _sample_action( The two differs when the action space is not normalized (bounds are not [-1, 1]). 
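# Sketch (illustrative bounds): the relation between `buffer_action` (the
# squashed policy output in [-1, 1], which is what gets stored) and the
# rescaled action actually sent to the environment.
import numpy as np
from gym import spaces

action_space = spaces.Box(low=0.0, high=10.0, shape=(1,), dtype=np.float32)
buffer_action = np.array([[-0.5]], dtype=np.float32)  # stored in the replay buffer
low, high = action_space.low, action_space.high
env_action = low + 0.5 * (buffer_action + 1.0) * (high - low)
print(env_action)  # [[2.5]], what env.step() receives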
""" # Select action randomly or according to policy - if self.num_timesteps < learning_starts and not (self.use_sde and self.use_sde_at_warmup): + if self.num_timesteps < learning_starts and not ( + self.use_sde and self.use_sde_at_warmup + ): # Warmup phase unscaled_action = np.array([self.action_space.sample()]) else: @@ -330,10 +371,20 @@ def _dump_logs(self) -> None: fps = int(self.num_timesteps / (time.time() - self.start_time)) logger.record("time/episodes", self._episode_num, exclude="tensorboard") if len(self.ep_info_buffer) > 0 and len(self.ep_info_buffer[0]) > 0: - logger.record("rollout/ep_rew_mean", safe_mean([ep_info["r"] for ep_info in self.ep_info_buffer])) - logger.record("rollout/ep_len_mean", safe_mean([ep_info["l"] for ep_info in self.ep_info_buffer])) + logger.record( + "rollout/ep_rew_mean", + safe_mean([ep_info["r"] for ep_info in self.ep_info_buffer]), + ) + logger.record( + "rollout/ep_len_mean", + safe_mean([ep_info["l"] for ep_info in self.ep_info_buffer]), + ) logger.record("time/fps", fps) - logger.record("time/time_elapsed", int(time.time() - self.start_time), exclude="tensorboard") + logger.record( + "time/time_elapsed", + int(time.time() - self.start_time), + exclude="tensorboard", + ) logger.record("time/total timesteps", self.num_timesteps, exclude="tensorboard") if self.use_sde: logger.record("train/std", (self.actor.get_std()).mean().item()) @@ -398,12 +449,18 @@ def collect_rollouts( while not done: - if self.use_sde and self.sde_sample_freq > 0 and total_steps % self.sde_sample_freq == 0: + if ( + self.use_sde + and self.sde_sample_freq > 0 + and total_steps % self.sde_sample_freq == 0 + ): # Sample a new noise matrix self.actor.reset_noise() # Select action randomly or according to policy - action, buffer_action = self._sample_action(learning_starts, action_noise) + action, buffer_action = self._sample_action( + learning_starts, action_noise + ) # Rescale and perform action new_obs, reward, done, infos = env.step(action) @@ -416,7 +473,9 @@ def collect_rollouts( callback.update_locals(locals()) # Only stop training if return value is False, not when it is None. 
if callback.on_step() is False: - return RolloutReturn(0.0, total_steps, total_episodes, continue_training=False) + return RolloutReturn( + 0.0, total_steps, total_episodes, continue_training=False + ) episode_reward += reward @@ -431,16 +490,24 @@ def collect_rollouts( reward_ = self._vec_normalize_env.get_original_reward() else: # Avoid changing the original ones - self._last_original_obs, new_obs_, reward_ = self._last_obs, new_obs, reward + self._last_original_obs, new_obs_, reward_ = ( + self._last_obs, + new_obs, + reward, + ) - replay_buffer.add(self._last_original_obs, new_obs_, buffer_action, reward_, done) + replay_buffer.add( + self._last_original_obs, new_obs_, buffer_action, reward_, done + ) self._last_obs = new_obs # Save the unnormalized observation if self._vec_normalize_env is not None: self._last_original_obs = new_obs_ - self._update_current_progress_remaining(self.num_timesteps, self._total_timesteps) + self._update_current_progress_remaining( + self.num_timesteps, self._total_timesteps + ) # For DQN, check if the target network should be updated # and update the exploration schedule @@ -468,4 +535,6 @@ def collect_rollouts( callback.on_rollout_end() - return RolloutReturn(mean_reward, total_steps, total_episodes, continue_training) + return RolloutReturn( + mean_reward, total_steps, total_episodes, continue_training + ) diff --git a/stable_baselines3/common/on_policy_algorithm.py b/stable_baselines3/common/on_policy_algorithm.py index 80b54f5bd..86a162211 100644 --- a/stable_baselines3/common/on_policy_algorithm.py +++ b/stable_baselines3/common/on_policy_algorithm.py @@ -7,7 +7,7 @@ from stable_baselines3.common import logger from stable_baselines3.common.base_class import BaseAlgorithm -from stable_baselines3.common.buffers import RolloutBuffer +from stable_baselines3.common.buffers import RolloutBuffer, DictRolloutBuffer from stable_baselines3.common.callbacks import BaseCallback from stable_baselines3.common.policies import ActorCriticPolicy from stable_baselines3.common.type_aliases import GymEnv, MaybeCallback, Schedule @@ -102,15 +102,27 @@ def _setup_model(self) -> None: self._setup_lr_schedule() self.set_random_seed(self.seed) - self.rollout_buffer = RolloutBuffer( - self.n_steps, - self.observation_space, - self.action_space, - self.device, - gamma=self.gamma, - gae_lambda=self.gae_lambda, - n_envs=self.n_envs, - ) + if isinstance(self.observation_space, gym.spaces.Dict): + self.rollout_buffer = DictRolloutBuffer( + self.n_steps, + self.observation_space, + self.action_space, + self.device, + gamma=self.gamma, + gae_lambda=self.gae_lambda, + n_envs=self.n_envs, + ) + else: + self.rollout_buffer = RolloutBuffer( + self.n_steps, + self.observation_space, + self.action_space, + self.device, + gamma=self.gamma, + gae_lambda=self.gae_lambda, + n_envs=self.n_envs, + ) + self.policy = self.policy_class( self.observation_space, self.action_space, From 8a04e61829cc0b54e55196882840aaea65eab095 Mon Sep 17 00:00:00 2001 From: Antonin RAFFIN Date: Fri, 27 Nov 2020 12:09:06 +0100 Subject: [PATCH 05/70] Reformat --- stable_baselines3/common/buffers.py | 51 ++++--------------- .../common/off_policy_algorithm.py | 9 +--- .../common/on_policy_algorithm.py | 2 +- stable_baselines3/common/policies.py | 6 +-- 4 files changed, 14 insertions(+), 54 deletions(-) diff --git a/stable_baselines3/common/buffers.py b/stable_baselines3/common/buffers.py index c3e89dfbf..f677a31c5 100644 --- a/stable_baselines3/common/buffers.py +++ b/stable_baselines3/common/buffers.py @@ -136,8 +136,7 
@@ def to_torch(self, array: np.ndarray, copy: bool = True) -> th.Tensor: @staticmethod def _normalize_obs( - obs: Union[np.ndarray, Dict[str, np.ndarray]], - env: Optional[VecNormalize] = None, + obs: Union[np.ndarray, Dict[str, np.ndarray]], env: Optional[VecNormalize] = None ) -> Union[np.ndarray, Dict[str, np.ndarray]]: if env is not None: return env.normalize_obs(obs) @@ -185,19 +184,13 @@ def __init__( self.optimize_memory_usage = optimize_memory_usage - self.observations = np.zeros( - (self.buffer_size, self.n_envs) + self.obs_shape, - dtype=observation_space.dtype, - ) + self.observations = np.zeros((self.buffer_size, self.n_envs) + self.obs_shape, dtype=observation_space.dtype) if optimize_memory_usage: # `observations` contains also the next observation self.next_observations = None else: - self.next_observations = np.zeros( - (self.buffer_size, self.n_envs) + self.obs_shape, - dtype=observation_space.dtype, - ) + self.next_observations = np.zeros((self.buffer_size, self.n_envs) + self.obs_shape, dtype=observation_space.dtype) self.actions = np.zeros((self.buffer_size, self.n_envs, self.action_dim), dtype=action_space.dtype) @@ -205,12 +198,10 @@ def __init__( self.dones = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32) if psutil is not None: - obs_nbytes = self.observations.nbytes + total_memory_usage = self.observations.nbytes + self.actions.nbytes + self.rewards.nbytes + self.dones.nbytes - total_memory_usage = obs_nbytes + self.actions.nbytes + self.rewards.nbytes + self.dones.nbytes if self.next_observations is not None: - next_obs_nbytes = self.next_observations.nbytes - total_memory_usage += next_obs_nbytes + total_memory_usage += self.next_observations.nbytes if total_memory_usage > mem_available: # Convert to GB @@ -222,12 +213,7 @@ def __init__( ) def add( - self, - obs: Union[np.ndarray, dict], - next_obs: np.ndarray, - action: np.ndarray, - reward: np.ndarray, - done: np.ndarray, + self, obs: Union[np.ndarray, dict], next_obs: np.ndarray, action: np.ndarray, reward: np.ndarray, done: np.ndarray ) -> None: # Copy to avoid modification by reference @@ -324,12 +310,7 @@ def __init__( super(RolloutBuffer, self).__init__(buffer_size, observation_space, action_space, device, n_envs=n_envs) self.gae_lambda = gae_lambda self.gamma = gamma - self.observations, self.actions, self.rewards, self.advantages = ( - None, - None, - None, - None, - ) + self.observations, self.actions, self.rewards, self.advantages = None, None, None, None self.returns, self.dones, self.values, self.log_probs = None, None, None, None self.generator_ready = False self.reset() @@ -417,14 +398,7 @@ def get(self, batch_size: Optional[int] = None) -> Generator[RolloutBufferSample # Prepare the data if not self.generator_ready: - _tensor_names = [ - "observations", - "actions", - "values", - "log_probs", - "advantages", - "returns", - ] + _tensor_names = ["observations", "actions", "values", "log_probs", "advantages", "returns"] for tensor in _tensor_names: self.__dict__[tensor] = self.swap_and_flatten(self.__dict__[tensor]) @@ -641,12 +615,7 @@ def __init__( super(RolloutBuffer, self).__init__(buffer_size, observation_space, action_space, device, n_envs=n_envs) self.gae_lambda = gae_lambda self.gamma = gamma - self.observations, self.actions, self.rewards, self.advantages = ( - None, - None, - None, - None, - ) + self.observations, self.actions, self.rewards, self.advantages = None, None, None, None self.returns, self.dones, self.values, self.log_probs = None, None, None, None 
self.generator_ready = False self.reset() @@ -654,7 +623,7 @@ def __init__( def reset(self) -> None: self.observations = {} - for (key, obs_input_shape) in self.obs_shape.items(): + for key, obs_input_shape in self.obs_shape.items(): self.observations[key] = np.zeros((self.buffer_size, self.n_envs) + obs_input_shape, dtype=np.float32) self.actions = np.zeros((self.buffer_size, self.n_envs, self.action_dim), dtype=np.float32) self.rewards = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32) diff --git a/stable_baselines3/common/off_policy_algorithm.py b/stable_baselines3/common/off_policy_algorithm.py index b93611012..9aa51b707 100644 --- a/stable_baselines3/common/off_policy_algorithm.py +++ b/stable_baselines3/common/off_policy_algorithm.py @@ -10,17 +10,12 @@ from stable_baselines3.common import logger from stable_baselines3.common.base_class import BaseAlgorithm -from stable_baselines3.common.buffers import ReplayBuffer, DictReplayBuffer +from stable_baselines3.common.buffers import DictReplayBuffer, ReplayBuffer from stable_baselines3.common.callbacks import BaseCallback from stable_baselines3.common.noise import ActionNoise from stable_baselines3.common.policies import BasePolicy from stable_baselines3.common.save_util import load_from_pkl, save_to_pkl -from stable_baselines3.common.type_aliases import ( - GymEnv, - MaybeCallback, - RolloutReturn, - Schedule, -) +from stable_baselines3.common.type_aliases import GymEnv, MaybeCallback, RolloutReturn, Schedule from stable_baselines3.common.utils import safe_mean from stable_baselines3.common.vec_env import VecEnv diff --git a/stable_baselines3/common/on_policy_algorithm.py b/stable_baselines3/common/on_policy_algorithm.py index 9e636a5e8..98e217249 100644 --- a/stable_baselines3/common/on_policy_algorithm.py +++ b/stable_baselines3/common/on_policy_algorithm.py @@ -7,7 +7,7 @@ from stable_baselines3.common import logger from stable_baselines3.common.base_class import BaseAlgorithm -from stable_baselines3.common.buffers import RolloutBuffer, DictRolloutBuffer +from stable_baselines3.common.buffers import DictRolloutBuffer, RolloutBuffer from stable_baselines3.common.callbacks import BaseCallback from stable_baselines3.common.policies import ActorCriticPolicy from stable_baselines3.common.type_aliases import GymEnv, MaybeCallback, Schedule diff --git a/stable_baselines3/common/policies.py b/stable_baselines3/common/policies.py index ff9cd3f95..561a951b2 100644 --- a/stable_baselines3/common/policies.py +++ b/stable_baselines3/common/policies.py @@ -20,11 +20,7 @@ StateDependentNoiseDistribution, make_proba_distribution, ) -from stable_baselines3.common.preprocessing import ( - get_action_dim, - is_image_space, - preprocess_obs, -) +from stable_baselines3.common.preprocessing import get_action_dim, is_image_space, preprocess_obs from stable_baselines3.common.torch_layers import ( BaseFeaturesExtractor, CombinedExtractor, From 86b3c14dfb3c4159413757b3dec71ae81ee6b8de Mon Sep 17 00:00:00 2001 From: Antonin RAFFIN Date: Fri, 27 Nov 2020 12:23:54 +0100 Subject: [PATCH 06/70] Minor reformat --- .../common/on_policy_algorithm.py | 25 +++---------------- stable_baselines3/common/policies.py | 7 +----- 2 files changed, 5 insertions(+), 27 deletions(-) diff --git a/stable_baselines3/common/on_policy_algorithm.py b/stable_baselines3/common/on_policy_algorithm.py index 98e217249..59336c14f 100644 --- a/stable_baselines3/common/on_policy_algorithm.py +++ b/stable_baselines3/common/on_policy_algorithm.py @@ -230,14 +230,7 @@ def learn( 
iteration = 0 total_timesteps, callback = self._setup_learn( - total_timesteps, - eval_env, - callback, - eval_freq, - n_eval_episodes, - eval_log_path, - reset_num_timesteps, - tb_log_name, + total_timesteps, eval_env, callback, eval_freq, n_eval_episodes, eval_log_path, reset_num_timesteps, tb_log_name ) callback.on_training_start(locals(), globals()) @@ -257,20 +250,10 @@ def learn( fps = int(self.num_timesteps / (time.time() - self.start_time)) logger.record("time/iterations", iteration, exclude="tensorboard") if len(self.ep_info_buffer) > 0 and len(self.ep_info_buffer[0]) > 0: - logger.record( - "rollout/ep_rew_mean", - safe_mean([ep_info["r"] for ep_info in self.ep_info_buffer]), - ) - logger.record( - "rollout/ep_len_mean", - safe_mean([ep_info["l"] for ep_info in self.ep_info_buffer]), - ) + logger.record("rollout/ep_rew_mean", safe_mean([ep_info["r"] for ep_info in self.ep_info_buffer])) + logger.record("rollout/ep_len_mean", safe_mean([ep_info["l"] for ep_info in self.ep_info_buffer])) logger.record("time/fps", fps) - logger.record( - "time/time_elapsed", - int(time.time() - self.start_time), - exclude="tensorboard", - ) + logger.record("time/time_elapsed", int(time.time() - self.start_time), exclude="tensorboard") logger.record("time/total_timesteps", self.num_timesteps, exclude="tensorboard") logger.dump(step=self.num_timesteps) diff --git a/stable_baselines3/common/policies.py b/stable_baselines3/common/policies.py index 561a951b2..5aec4a137 100644 --- a/stable_baselines3/common/policies.py +++ b/stable_baselines3/common/policies.py @@ -111,12 +111,7 @@ def _update_features_extractor( if features_extractor is None: # The features extractor is not shared, create a new one features_extractor = self.make_features_extractor() - net_kwargs.update( - dict( - features_extractor=features_extractor, - features_dim=features_extractor.features_dim, - ) - ) + net_kwargs.update(dict(features_extractor=features_extractor, features_dim=features_extractor.features_dim)) return net_kwargs def make_features_extractor(self) -> BaseFeaturesExtractor: From f60d43922a3d5f99ae2f932dc997217179048650 Mon Sep 17 00:00:00 2001 From: Jaden Travnik Date: Fri, 27 Nov 2020 13:14:09 -0500 Subject: [PATCH 07/70] added slow dict test. Added SACMultiInputPolicy for future. 
Added private static image transpose helper to common policy --- multi_input_tests.py | 87 ---------------- stable_baselines3/common/buffers.py | 8 +- stable_baselines3/common/multi_input_envs.py | 5 +- stable_baselines3/common/policies.py | 32 +++--- stable_baselines3/ppo/policies.py | 9 +- stable_baselines3/sac/policies.py | 102 +++++++++++++++++-- tests/test_dict_env.py | 39 +++++++ 7 files changed, 166 insertions(+), 116 deletions(-) delete mode 100644 multi_input_tests.py create mode 100644 tests/test_dict_env.py diff --git a/multi_input_tests.py b/multi_input_tests.py deleted file mode 100644 index e0cca9214..000000000 --- a/multi_input_tests.py +++ /dev/null @@ -1,87 +0,0 @@ -import argparse -import gym -import numpy as np - -from stable_baselines3 import PPO, SAC -from stable_baselines3.common.policies import MultiInputActorCriticPolicy -from stable_baselines3.common.vec_env import ( - DummyVecEnv, - VecFrameStack, - VecTransposeImage, -) - -from stable_baselines3.common.multi_input_envs import ( - SimpleMultiObsEnv, - NineRoomMultiObsEnv, -) - - -if __name__ == "__main__": - parser = argparse.ArgumentParser(description="Runs the multi_input_tests script") - parser.add_argument( - "--timesteps", - type=int, - default=3000, - help="Number of timesteps to train for (default: 3000)", - ) - parser.add_argument( - "--num_envs", - type=int, - default=10, - help="Number of environments to use (default: 10)", - ) - parser.add_argument( - "--frame_stacks", - type=int, - default=1, - help="Number of stacked frames to use (default: 4)", - ) - parser.add_argument( - "--room9", - action="store_true", - help="If true, uses more complex 9 room environment", - ) - args = parser.parse_args() - - ENV_CLS = NineRoomMultiObsEnv if args.room9 else SimpleMultiObsEnv - - make_env = lambda: ENV_CLS(random_start=True) - - env = DummyVecEnv([make_env for i in range(args.num_envs)]) - if args.frame_stacks > 1: - env = VecFrameStack(env, n_stack=args.frame_stacks) - - model = PPO(MultiInputActorCriticPolicy, env) - - model.learn(args.timesteps) - env.close() - print("Done training, starting testing") - - make_env = lambda: ENV_CLS(random_start=False) - test_env = DummyVecEnv([make_env]) - if args.frame_stacks > 1: - test_env = VecFrameStack(test_env, n_stack=args.frame_stacks) - - obs = test_env.reset() - num_episodes = 1 - trajectories = [[]] - i_step, i_episode = 0, 0 - while i_episode < num_episodes: - action, _states = model.predict(obs, deterministic=False) - obs, reward, done, info = test_env.step(action) - test_env.render() - trajectories[-1].append((test_env.get_attr("state")[0], action[0])) - - i_step += 1 - - if done[0]: - if info[0]["got_to_end"]: - print(f"Episode {i_episode} : Got to end in {i_step} steps") - else: - print(f"Episode {i_episode} : Did not get to end") - obs = test_env.reset() - i_step = 0 - trajectories.append([]) - i_episode += 1 - - test_env.close() diff --git a/stable_baselines3/common/buffers.py b/stable_baselines3/common/buffers.py index c3e89dfbf..29423e7dc 100644 --- a/stable_baselines3/common/buffers.py +++ b/stable_baselines3/common/buffers.py @@ -476,7 +476,7 @@ def __init__( n_envs: int = 1, optimize_memory_usage: bool = False, ): - super(BaseBuffer, self).__init__(buffer_size, observation_space, action_space, device, n_envs=n_envs) + super(ReplayBuffer, self).__init__(buffer_size, observation_space, action_space, device, n_envs=n_envs) assert n_envs == 1, "Replay buffer only support single environment for now" @@ -484,6 +484,10 @@ def __init__( if psutil is not None: 
mem_available = psutil.virtual_memory().available + if optimize_memory_usage: + optimize_memory_usage = False + # disabling as this adds quite a bit of complexity + # https://github.com/DLR-RM/stable-baselines3/pull/243#discussion_r531535702 self.optimize_memory_usage = optimize_memory_usage self.observations = { @@ -600,7 +604,7 @@ def _get_samples(self, batch_inds: np.ndarray, env: Optional[VecNormalize] = Non actions=self.to_torch(self.actions[batch_inds]), next_observations=next_obs, dones=self.to_torch(self.dones[batch_inds]), - returns=self.to_torch(self._normalize_reward(self.rewards[batch_inds], env)), + rewards=self.to_torch(self._normalize_reward(self.rewards[batch_inds], env)), ) diff --git a/stable_baselines3/common/multi_input_envs.py b/stable_baselines3/common/multi_input_envs.py index d0df1b78a..a5a376ed4 100644 --- a/stable_baselines3/common/multi_input_envs.py +++ b/stable_baselines3/common/multi_input_envs.py @@ -90,7 +90,7 @@ def get_state_mapping(self): if self.noise > 0: state_dict["vec"] += np.random.random(self.vector_size) * self.noise img_noise = int(255 * self.noise) - state_dict["img"] += np.random.randint(-img_noise, img_noise, (1, 20, 20), dtype=np.int32) + state_dict["img"] += np.random.randint(-img_noise, img_noise, tuple(self.img_size), dtype=np.int32) state_dict["img"] = np.clip(state_dict["img"], 0, 255) return state_dict @@ -101,6 +101,9 @@ def init_possible_transitions(self): self.up_possible = [4, 8, 12, 7, 11, 15] def step(self, action): + + action = int(action) + self.count += 1 prev_state = self.state diff --git a/stable_baselines3/common/policies.py b/stable_baselines3/common/policies.py index ff9cd3f95..d671ec64f 100644 --- a/stable_baselines3/common/policies.py +++ b/stable_baselines3/common/policies.py @@ -35,6 +35,7 @@ ) from stable_baselines3.common.type_aliases import Schedule from stable_baselines3.common.utils import get_device, is_vectorized_observation +from stable_baselines3.common.env_util import is_wrapped from stable_baselines3.common.vec_env import VecTransposeImage from stable_baselines3.common.vec_env.obs_dict_wrapper import ObsDictWrapper @@ -256,6 +257,16 @@ def _predict(self, observation: th.Tensor, deterministic: bool = False) -> th.Te :return: Taken action according to the policy """ + @staticmethod + def try_transpose_img_observation(obs: np.ndarray, observation_space: gym.spaces.Space): + obs = np.array(obs) + if not (obs.shape == observation_space.shape or obs.shape[1:] == observation_space.shape): + # Try to re-order the channels + transpose_obs = VecTransposeImage.transpose_image(obs) + if transpose_obs.shape == observation_space.shape or transpose_obs.shape[1:] == observation_space.shape: + obs = transpose_obs + return obs + def predict( self, observation: np.ndarray, @@ -281,7 +292,7 @@ def predict( # mask = [False for _ in range(self.n_envs)] # Need to check the observation if its a ObsDictWrapper - if isinstance(self.observation_space, ObsDictWrapper): + if is_wrapped(self.observation_space, ObsDictWrapper): observation = ObsDictWrapper.convert_dict(observation) elif isinstance(observation, dict): # need to copy the dict as the dict in VecFrameStack will become a torch tensor @@ -289,29 +300,14 @@ def predict( for key, obs in observation.items(): obs_space = self.observation_space.spaces[key] if is_image_space(obs_space): - obs = np.array(obs) - if not (obs.shape == obs_space.shape or obs.shape[1:] == obs_space.shape): - # Try to re-order the channels - transpose_obs = VecTransposeImage.transpose_image(obs) - if 
transpose_obs.shape == obs_space.shape or transpose_obs.shape[1:] == obs_space.shape: - observation = transpose_obs + obs = BasePolicy.try_transpose_img_observation(obs, obs_space) else: observation[key] = obs.reshape((-1,) + self.observation_space[key].shape) elif is_image_space(self.observation_space): # Handle the different cases for images # as PyTorch use channel first format - - if not ( - observation.shape == self.observation_space.shape or observation.shape[1:] == self.observation_space.shape - ): - # Try to re-order the channels - transpose_obs = VecTransposeImage.transpose_image(observation) - if ( - transpose_obs.shape == self.observation_space.shape - or transpose_obs.shape[1:] == self.observation_space.shape - ): - observation = transpose_obs + observation = BasePolicy.try_transpose_img_observation(observation, self.observation_space) else: observation = np.array(observation) diff --git a/stable_baselines3/ppo/policies.py b/stable_baselines3/ppo/policies.py index 7d21de8bf..01d33cdc4 100644 --- a/stable_baselines3/ppo/policies.py +++ b/stable_baselines3/ppo/policies.py @@ -1,9 +1,16 @@ # This file is here just to define MlpPolicy/CnnPolicy # that work for PPO -from stable_baselines3.common.policies import ActorCriticCnnPolicy, ActorCriticPolicy, register_policy +from stable_baselines3.common.policies import ( + register_policy, + ActorCriticPolicy, + ActorCriticCnnPolicy, + MultiInputActorCriticPolicy, +) MlpPolicy = ActorCriticPolicy CnnPolicy = ActorCriticCnnPolicy +MultiInputPolicy = MultiInputActorCriticPolicy register_policy("MlpPolicy", ActorCriticPolicy) register_policy("CnnPolicy", ActorCriticCnnPolicy) +register_policy("MultiInputPolicy", MultiInputPolicy) diff --git a/stable_baselines3/sac/policies.py b/stable_baselines3/sac/policies.py index 8ba5897a4..3c37c8ffb 100644 --- a/stable_baselines3/sac/policies.py +++ b/stable_baselines3/sac/policies.py @@ -4,13 +4,22 @@ import torch as th from torch import nn -from stable_baselines3.common.distributions import SquashedDiagGaussianDistribution, StateDependentNoiseDistribution -from stable_baselines3.common.policies import BasePolicy, ContinuousCritic, create_sde_features_extractor, register_policy +from stable_baselines3.common.distributions import ( + SquashedDiagGaussianDistribution, + StateDependentNoiseDistribution, +) +from stable_baselines3.common.policies import ( + BasePolicy, + ContinuousCritic, + create_sde_features_extractor, + register_policy, +) from stable_baselines3.common.preprocessing import get_action_dim from stable_baselines3.common.torch_layers import ( BaseFeaturesExtractor, FlattenExtractor, NatureCNN, + CombinedExtractor, create_mlp, get_actor_critic_arch, ) @@ -93,15 +102,22 @@ def __init__( latent_sde_dim = last_layer_dim # Separate features extractor for gSDE if sde_net_arch is not None: - self.sde_features_extractor, latent_sde_dim = create_sde_features_extractor( - features_dim, sde_net_arch, activation_fn - ) + ( + self.sde_features_extractor, + latent_sde_dim, + ) = create_sde_features_extractor(features_dim, sde_net_arch, activation_fn) self.action_dist = StateDependentNoiseDistribution( - action_dim, full_std=full_std, use_expln=use_expln, learn_features=True, squash_output=True + action_dim, + full_std=full_std, + use_expln=use_expln, + learn_features=True, + squash_output=True, ) self.mu, self.log_std = self.action_dist.proba_distribution_net( - latent_dim=last_layer_dim, latent_sde_dim=latent_sde_dim, log_std_init=log_std_init + latent_dim=last_layer_dim, + 
latent_sde_dim=latent_sde_dim, + log_std_init=log_std_init, ) # Avoid numerical issues by limiting the mean of the Gaussian # to be in [-clip_mean, clip_mean] @@ -435,5 +451,77 @@ def __init__( ) +class MultiInputSACPolicy(SACPolicy): + """ + Policy class (with both actor and critic) for SAC. + + :param observation_space: Observation space + :param action_space: Action space + :param lr_schedule: Learning rate schedule (could be constant) + :param net_arch: The specification of the policy and value networks. + :param activation_fn: Activation function + :param use_sde: Whether to use State Dependent Exploration or not + :param log_std_init: Initial value for the log standard deviation + :param sde_net_arch: Network architecture for extracting features + when using gSDE. If None, the latent features from the policy will be used. + Pass an empty list to use the states as features. + :param use_expln: Use ``expln()`` function instead of ``exp()`` when using gSDE to ensure + a positive standard deviation (cf paper). It allows to keep variance + above zero and prevent it from growing too fast. In practice, ``exp()`` is usually enough. + :param clip_mean: Clip the mean output when using gSDE to avoid numerical instability. + :param features_extractor_class: Features extractor to use. + :param normalize_images: Whether to normalize images or not, + dividing by 255.0 (True by default) + :param optimizer_class: The optimizer to use, + ``th.optim.Adam`` by default + :param optimizer_kwargs: Additional keyword arguments, + excluding the learning rate, to pass to the optimizer + :param n_critics: Number of critic networks to create. + :param share_features_extractor: Whether to share or not the features extractor + between the actor and the critic (this saves computation time) + """ + + def __init__( + self, + observation_space: gym.spaces.Space, + action_space: gym.spaces.Space, + lr_schedule: Schedule, + net_arch: Optional[Union[List[int], Dict[str, List[int]]]] = None, + activation_fn: Type[nn.Module] = nn.ReLU, + use_sde: bool = False, + log_std_init: float = -3, + sde_net_arch: Optional[List[int]] = None, + use_expln: bool = False, + clip_mean: float = 2.0, + features_extractor_class: Type[BaseFeaturesExtractor] = CombinedExtractor, + features_extractor_kwargs: Optional[Dict[str, Any]] = None, + normalize_images: bool = True, + optimizer_class: Type[th.optim.Optimizer] = th.optim.Adam, + optimizer_kwargs: Optional[Dict[str, Any]] = None, + n_critics: int = 2, + share_features_extractor: bool = True, + ): + super(MultiInputSACPolicy, self).__init__( + observation_space, + action_space, + lr_schedule, + net_arch, + activation_fn, + use_sde, + log_std_init, + sde_net_arch, + use_expln, + clip_mean, + features_extractor_class, + features_extractor_kwargs, + normalize_images, + optimizer_class, + optimizer_kwargs, + n_critics, + share_features_extractor, + ) + + register_policy("MlpPolicy", MlpPolicy) register_policy("CnnPolicy", CnnPolicy) +register_policy("MultiInputPolicy", MultiInputSACPolicy) diff --git a/tests/test_dict_env.py b/tests/test_dict_env.py new file mode 100644 index 000000000..0dcdfbf49 --- /dev/null +++ b/tests/test_dict_env.py @@ -0,0 +1,39 @@ +import gym +import numpy as np +import pytest + +from stable_baselines3 import PPO, SAC, TD3, DQN +from stable_baselines3.common.vec_env import ( + DummyVecEnv, + VecFrameStack, + VecTransposeImage, +) + +from stable_baselines3.common.multi_input_envs import ( + SimpleMultiObsEnv, + NineRoomMultiObsEnv, +) +from 
stable_baselines3.common.evaluation import evaluate_policy + + +@pytest.mark.slow +@pytest.mark.parametrize("model_class", [PPO]) # , SAC, TD3, DQN]) +def test_dict_spaces(model_class): + """ + Additional tests for PPO/SAC/TD3/DQN to check observation space support + for Dictionary spaces using MultiInputPolicy. + """ + make_env = lambda: SimpleMultiObsEnv(random_start=True) + env = DummyVecEnv([make_env]) + # env = VecFrameStack(env, n_stack=2) + + model = model_class( + "MultiInputPolicy", + env, + gamma=0.5, + seed=1, + policy_kwargs=dict(net_arch=[64]), + ) + model.learn(total_timesteps=500) + + evaluate_policy(model, env, n_eval_episodes=5, warn=False) From da1de6e4a0e15db16433a34a93b57ffa78c12ed2 Mon Sep 17 00:00:00 2001 From: Jaden Travnik Date: Fri, 27 Nov 2020 14:48:05 -0500 Subject: [PATCH 08/70] Ran black on buffers --- stable_baselines3/common/buffers.py | 48 +++++++++++++++++++++++------ 1 file changed, 39 insertions(+), 9 deletions(-) diff --git a/stable_baselines3/common/buffers.py b/stable_baselines3/common/buffers.py index da65dd16b..eddebc1e9 100644 --- a/stable_baselines3/common/buffers.py +++ b/stable_baselines3/common/buffers.py @@ -136,7 +136,8 @@ def to_torch(self, array: np.ndarray, copy: bool = True) -> th.Tensor: @staticmethod def _normalize_obs( - obs: Union[np.ndarray, Dict[str, np.ndarray]], env: Optional[VecNormalize] = None + obs: Union[np.ndarray, Dict[str, np.ndarray]], + env: Optional[VecNormalize] = None, ) -> Union[np.ndarray, Dict[str, np.ndarray]]: if env is not None: return env.normalize_obs(obs) @@ -184,13 +185,19 @@ def __init__( self.optimize_memory_usage = optimize_memory_usage - self.observations = np.zeros((self.buffer_size, self.n_envs) + self.obs_shape, dtype=observation_space.dtype) + self.observations = np.zeros( + (self.buffer_size, self.n_envs) + self.obs_shape, + dtype=observation_space.dtype, + ) if optimize_memory_usage: # `observations` contains also the next observation self.next_observations = None else: - self.next_observations = np.zeros((self.buffer_size, self.n_envs) + self.obs_shape, dtype=observation_space.dtype) + self.next_observations = np.zeros( + (self.buffer_size, self.n_envs) + self.obs_shape, + dtype=observation_space.dtype, + ) self.actions = np.zeros((self.buffer_size, self.n_envs, self.action_dim), dtype=action_space.dtype) @@ -213,7 +220,12 @@ def __init__( ) def add( - self, obs: Union[np.ndarray, dict], next_obs: np.ndarray, action: np.ndarray, reward: np.ndarray, done: np.ndarray + self, + obs: Union[np.ndarray, dict], + next_obs: np.ndarray, + action: np.ndarray, + reward: np.ndarray, + done: np.ndarray, ) -> None: # Copy to avoid modification by reference @@ -310,7 +322,12 @@ def __init__( super(RolloutBuffer, self).__init__(buffer_size, observation_space, action_space, device, n_envs=n_envs) self.gae_lambda = gae_lambda self.gamma = gamma - self.observations, self.actions, self.rewards, self.advantages = None, None, None, None + self.observations, self.actions, self.rewards, self.advantages = ( + None, + None, + None, + None, + ) self.returns, self.dones, self.values, self.log_probs = None, None, None, None self.generator_ready = False self.reset() @@ -398,7 +415,14 @@ def get(self, batch_size: Optional[int] = None) -> Generator[RolloutBufferSample # Prepare the data if not self.generator_ready: - _tensor_names = ["observations", "actions", "values", "log_probs", "advantages", "returns"] + _tensor_names = [ + "observations", + "actions", + "values", + "log_probs", + "advantages", + "returns", + ] for tensor in 
_tensor_names: self.__dict__[tensor] = self.swap_and_flatten(self.__dict__[tensor]) @@ -427,7 +451,7 @@ def _get_samples(self, batch_inds: np.ndarray, env: Optional[VecNormalize] = Non class DictReplayBuffer(ReplayBuffer): """ - Replay buffer used in off-policy algorithms like SAC/TD3. + Dict Replay buffer used in off-policy algorithms like SAC/TD3. :param buffer_size: Max number of element in the buffer :param observation_space: Observation space @@ -439,6 +463,7 @@ class DictReplayBuffer(ReplayBuffer): at a cost of more complexity. See https://github.com/DLR-RM/stable-baselines3/issues/37#issuecomment-637501195 and https://github.com/DLR-RM/stable-baselines3/pull/28#issuecomment-637559274 + Disabled for now (see https://github.com/DLR-RM/stable-baselines3/pull/243#discussion_r531535702) """ def __init__( @@ -584,7 +609,7 @@ def _get_samples(self, batch_inds: np.ndarray, env: Optional[VecNormalize] = Non class DictRolloutBuffer(RolloutBuffer): """ - Rollout buffer used in on-policy algorithms like A2C/PPO. + Dict Rollout buffer used in on-policy algorithms like A2C/PPO. It corresponds to ``buffer_size`` transitions collected using the current policy. This experience will be discarded after the policy update. @@ -619,7 +644,12 @@ def __init__( super(RolloutBuffer, self).__init__(buffer_size, observation_space, action_space, device, n_envs=n_envs) self.gae_lambda = gae_lambda self.gamma = gamma - self.observations, self.actions, self.rewards, self.advantages = None, None, None, None + self.observations, self.actions, self.rewards, self.advantages = ( + None, + None, + None, + None, + ) self.returns, self.dones, self.values, self.log_probs = None, None, None, None self.generator_ready = False self.reset() From 51249da802fad6ab9c589e9ed52f1ab37a6a0d9b Mon Sep 17 00:00:00 2001 From: Jaden Travnik Date: Fri, 27 Nov 2020 16:26:47 -0500 Subject: [PATCH 09/70] Ran isort --- stable_baselines3/common/policies.py | 2 +- stable_baselines3/ppo/policies.py | 4 ++-- stable_baselines3/sac/policies.py | 14 +++----------- tests/test_dict_env.py | 14 +++----------- 4 files changed, 9 insertions(+), 25 deletions(-) diff --git a/stable_baselines3/common/policies.py b/stable_baselines3/common/policies.py index 5e8d4c56a..39a28a9a3 100644 --- a/stable_baselines3/common/policies.py +++ b/stable_baselines3/common/policies.py @@ -20,6 +20,7 @@ StateDependentNoiseDistribution, make_proba_distribution, ) +from stable_baselines3.common.env_util import is_wrapped from stable_baselines3.common.preprocessing import get_action_dim, is_image_space, preprocess_obs from stable_baselines3.common.torch_layers import ( BaseFeaturesExtractor, @@ -31,7 +32,6 @@ ) from stable_baselines3.common.type_aliases import Schedule from stable_baselines3.common.utils import get_device, is_vectorized_observation -from stable_baselines3.common.env_util import is_wrapped from stable_baselines3.common.vec_env import VecTransposeImage from stable_baselines3.common.vec_env.obs_dict_wrapper import ObsDictWrapper diff --git a/stable_baselines3/ppo/policies.py b/stable_baselines3/ppo/policies.py index 01d33cdc4..7427cfc4a 100644 --- a/stable_baselines3/ppo/policies.py +++ b/stable_baselines3/ppo/policies.py @@ -1,10 +1,10 @@ # This file is here just to define MlpPolicy/CnnPolicy # that work for PPO from stable_baselines3.common.policies import ( - register_policy, - ActorCriticPolicy, ActorCriticCnnPolicy, + ActorCriticPolicy, MultiInputActorCriticPolicy, + register_policy, ) MlpPolicy = ActorCriticPolicy diff --git 
a/stable_baselines3/sac/policies.py b/stable_baselines3/sac/policies.py index 3c37c8ffb..7bb24c965 100644 --- a/stable_baselines3/sac/policies.py +++ b/stable_baselines3/sac/policies.py @@ -4,22 +4,14 @@ import torch as th from torch import nn -from stable_baselines3.common.distributions import ( - SquashedDiagGaussianDistribution, - StateDependentNoiseDistribution, -) -from stable_baselines3.common.policies import ( - BasePolicy, - ContinuousCritic, - create_sde_features_extractor, - register_policy, -) +from stable_baselines3.common.distributions import SquashedDiagGaussianDistribution, StateDependentNoiseDistribution +from stable_baselines3.common.policies import BasePolicy, ContinuousCritic, create_sde_features_extractor, register_policy from stable_baselines3.common.preprocessing import get_action_dim from stable_baselines3.common.torch_layers import ( BaseFeaturesExtractor, + CombinedExtractor, FlattenExtractor, NatureCNN, - CombinedExtractor, create_mlp, get_actor_critic_arch, ) diff --git a/tests/test_dict_env.py b/tests/test_dict_env.py index 0dcdfbf49..a50d5852d 100644 --- a/tests/test_dict_env.py +++ b/tests/test_dict_env.py @@ -2,18 +2,10 @@ import numpy as np import pytest -from stable_baselines3 import PPO, SAC, TD3, DQN -from stable_baselines3.common.vec_env import ( - DummyVecEnv, - VecFrameStack, - VecTransposeImage, -) - -from stable_baselines3.common.multi_input_envs import ( - SimpleMultiObsEnv, - NineRoomMultiObsEnv, -) +from stable_baselines3 import DQN, PPO, SAC, TD3 from stable_baselines3.common.evaluation import evaluate_policy +from stable_baselines3.common.multi_input_envs import NineRoomMultiObsEnv, SimpleMultiObsEnv +from stable_baselines3.common.vec_env import DummyVecEnv, VecFrameStack, VecTransposeImage @pytest.mark.slow From 761a67f41558c66ca99af86a4dbc6a83e0941c3f Mon Sep 17 00:00:00 2001 From: Jaden Travnik Date: Mon, 30 Nov 2020 16:10:51 -0500 Subject: [PATCH 10/70] Adding StackedObservations classes used within VecStackEnvs wrappers. Made test_dict_env shorter and removed slow --- stable_baselines3/common/buffers.py | 3 + .../common/stacked_observations.py | 206 ++++++++++++++++++ .../common/vec_env/vec_frame_stack.py | 162 ++------------ tests/test_dict_env.py | 12 +- 4 files changed, 237 insertions(+), 146 deletions(-) create mode 100644 stable_baselines3/common/stacked_observations.py diff --git a/stable_baselines3/common/buffers.py b/stable_baselines3/common/buffers.py index eddebc1e9..e74dafd4b 100644 --- a/stable_baselines3/common/buffers.py +++ b/stable_baselines3/common/buffers.py @@ -452,6 +452,7 @@ def _get_samples(self, batch_inds: np.ndarray, env: Optional[VecNormalize] = Non class DictReplayBuffer(ReplayBuffer): """ Dict Replay buffer used in off-policy algorithms like SAC/TD3. + Extends the ReplayBuffer to use dictionary observations :param buffer_size: Max number of element in the buffer :param observation_space: Observation space @@ -610,6 +611,8 @@ def _get_samples(self, batch_inds: np.ndarray, env: Optional[VecNormalize] = Non class DictRolloutBuffer(RolloutBuffer): """ Dict Rollout buffer used in on-policy algorithms like A2C/PPO. + Extends the RolloutBuffer to use dictionary observations + It corresponds to ``buffer_size`` transitions collected using the current policy. This experience will be discarded after the policy update. 
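The docstring hunks above describe ``DictReplayBuffer``/``DictRolloutBuffer`` as extensions of the flat buffers to dictionary observations. The core storage idea is one NumPy array per observation key, with every key written and sampled at the same indices. A minimal sketch of that scheme, with shapes and the insert/sample cycle invented purely for illustration (this is not code from the patch):

    import numpy as np

    buffer_size, n_envs = 8, 2
    obs_shapes = {"vec": (5,), "img": (1, 20, 20)}  # hypothetical subspace shapes

    # One array per key, mirroring the (buffer_size, n_envs) + obs_shape
    # layout the Dict buffers allocate for each subspace.
    observations = {key: np.zeros((buffer_size, n_envs) + shape, dtype=np.float32) for key, shape in obs_shapes.items()}

    # Adding a transition writes each key at the same position...
    pos = 0
    new_obs = {"vec": np.ones((n_envs, 5)), "img": np.ones((n_envs, 1, 20, 20))}
    for key in observations:
        observations[key][pos] = np.array(new_obs[key]).copy()

    # ...and sampling gathers each key with the same batch indices.
    batch_inds = np.array([0])
    batch = {key: arr[batch_inds] for key, arr in observations.items()}
    print(batch["vec"].shape, batch["img"].shape)  # (1, 2, 5) (1, 2, 1, 20, 20)
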
diff --git a/stable_baselines3/common/stacked_observations.py b/stable_baselines3/common/stacked_observations.py new file mode 100644 index 000000000..ee6b33d75 --- /dev/null +++ b/stable_baselines3/common/stacked_observations.py @@ -0,0 +1,206 @@ +import warnings +from typing import Any, Dict, List, Optional, Tuple, Union + +import numpy as np +from gym import spaces + +from stable_baselines3.common.preprocessing import ( + has_image_space, + is_image_space, + is_image_space_channels_first, +) + + +class StackedObservations: + """ + Frame stacking wrapper for data. + + Dimension to stack over is either first (channels-first) or + last (channels-last), which is detected automatically using + ``common.preprocessing.is_image_space_channels_first`` if + observation is an image space. + + :param num_envs: number of environments + :param n_stack: Number of frames to stack + :param channels_order: If "first", stack on first image dimension. If "last", stack on last dimension. + If None, automatically detect channel to stack over in case of image observation or default to "last" (default). + """ + + def __init__( + self, + num_envs: int, + n_stack: int, + observation_space: spaces.Space, + channels_order: Optional[str] = None, + ): + + self.n_stack = n_stack + ( + self.channels_first, + self.stack_dimension, + self.stackedobs, + self.repeat_axis, + ) = self.compute_stacking(num_envs, n_stack, observation_space, channels_order) + + @staticmethod + def compute_stacking( + num_envs: int, + n_stack: int, + observation_space: spaces.Space, + channels_order: Optional[str] = None, + ): + channels_first = False + if channels_order is None: + # Detect channel location automatically for images + if is_image_space(observation_space): + channels_first = is_image_space_channels_first(observation_space) + else: + # Default behavior for non-image space, stack on the last axis + channels_first = False + else: + assert channels_order in { + "last", + "first", + }, "`channels_order` must be one of following: 'last', 'first'" + + channels_first = channels_order == "first" + + # This includes the vec-env dimension (first) + stack_dimension = 1 if channels_first else -1 + repeat_axis = 0 if channels_first else -1 + low = np.repeat(observation_space.low, n_stack, axis=repeat_axis) + stackedobs = np.zeros((num_envs,) + low.shape, low.dtype) + return channels_first, stack_dimension, stackedobs, repeat_axis + + def stack_observation_space(self, observation_space): + low = np.repeat(observation_space.low, self.n_stack, axis=self.repeat_axis) + high = np.repeat(observation_space.high, self.n_stack, axis=self.repeat_axis) + return spaces.Box(low=low, high=high, dtype=observation_space.dtype) + + def reset(self, observation): + self.stackedobs[...] = 0 + if self.channels_first: + self.stackedobs[:, -observation.shape[self.stack_dimension] :, ...] 
= observation + else: + self.stackedobs[..., -observation.shape[self.stack_dimension] :] = observation + return self.stackedobs + + def update(self, observations, dones, infos): + stack_ax_size = observations.shape[self.stack_dimension] + self.stackedobs = np.roll(self.stackedobs, shift=-stack_ax_size, axis=self.stack_dimension) + for i, done in enumerate(dones): + if done: + if "terminal_observation" in infos[i]: + old_terminal = infos[i]["terminal_observation"] + if self.channels_first: + new_terminal = np.concatenate( + ( + self.stackedobs[i, :-stack_ax_size, ...], + old_terminal, + ), + axis=self.stack_dimension, + ) + else: + new_terminal = np.concatenate( + ( + self.stackedobs[i, ..., :-stack_ax_size], + old_terminal, + ), + axis=self.stack_dimension, + ) + infos[i]["terminal_observation"] = new_terminal + else: + warnings.warn("VecFrameStack wrapping a VecEnv without terminal_observation info") + self.stackedobs[i] = 0 + if self.channels_first: + self.stackedobs[:, -observations.shape[self.stack_dimension] :, ...] = observations + else: + self.stackedobs[..., -observations.shape[self.stack_dimension] :] = observations + return self.stackedobs, infos + + +class StackedDictObservations(StackedObservations): + """ + Frame stacking wrapper for dictionary data. + + Dimension to stack over is either first (channels-first) or + last (channels-last), which is detected automatically using + ``common.preprocessing.is_image_space_channels_first`` if + observation is an image space. + + :param num_envs: number of environments + :param n_stack: Number of frames to stack + :param channels_order: If "first", stack on first image dimension. If "last", stack on last dimension. + If None, automatically detect channel to stack over in case of image observation or default to "last" (default). + """ + + def __init__(self, num_envs, n_stack, observation_space, channels_order): + self.n_stack = n_stack + self.channels_first = {} + self.stack_dimension = {} + self.stackedobs = {} + self.repeat_axis = {} + + for (key, subspace) in observation_space.spaces.items(): + assert isinstance(subspace, spaces.Box), "StackedDictObservations only works with nested gym.spaces.Box" + ( + self.channels_first[key], + self.stack_dimension[key], + self.stackedobs[key], + self.repeat_axis[key], + ) = self.compute_stacking(num_envs, n_stack, subspace, channels_order) + + def stack_observation_space(self, observation_space): + spaces_dict = {} + for key, subspace in observation_space.spaces.items(): + low = np.repeat(subspace.low, self.n_stack, axis=self.repeat_axis[key]) + high = np.repeat(subspace.high, self.n_stack, axis=self.repeat_axis[key]) + spaces_dict[key] = spaces.Box(low=low, high=high, dtype=subspace.dtype) + return spaces.Dict(spaces=spaces_dict) + + def reset(self, observation): + for key, obs in observation.items(): + self.stackedobs[key][...] = 0 + if self.channels_first[key]: + self.stackedobs[key][:, -obs.shape[self.stack_dimension[key]] :, ...] 
= obs
+            else:
+                self.stackedobs[key][..., -obs.shape[self.stack_dimension[key]] :] = obs
+        return self.stackedobs
+
+    def update(self, observations, dones, infos):
+        for key in self.stackedobs.keys():
+            stack_ax_size = observations[key].shape[self.stack_dimension[key]]
+            self.stackedobs[key] = np.roll(
+                self.stackedobs[key],
+                shift=-stack_ax_size,
+                axis=self.stack_dimension[key],
+            )
+
+            for i, done in enumerate(dones):
+                if done:
+                    if "terminal_observation" in infos[i]:
+                        old_terminal = infos[i]["terminal_observation"][key]
+                        if self.channels_first[key]:
+                            new_terminal = np.vstack(
+                                (
+                                    self.stackedobs[key][i, :-stack_ax_size, ...],
+                                    old_terminal,
+                                )
+                            )
+                        else:
+                            new_terminal = np.concatenate(
+                                (
+                                    self.stackedobs[key][i, ..., :-stack_ax_size],
+                                    old_terminal,
+                                ),
+                                axis=self.stack_dimension[key],
+                            )
+                        infos[i]["terminal_observation"][key] = new_terminal
+                    else:
+                        warnings.warn("VecFrameStack wrapping a VecEnv without terminal_observation info")
+                    self.stackedobs[key][i] = 0
+            if self.channels_first[key]:
+                self.stackedobs[key][:, -observations[key].shape[self.stack_dimension[key]] :, ...] = observations[key]
+            else:
+                self.stackedobs[key][..., -observations[key].shape[self.stack_dimension[key]] :] = observations[key]
+        return self.stackedobs, infos
diff --git a/stable_baselines3/common/vec_env/vec_frame_stack.py b/stable_baselines3/common/vec_env/vec_frame_stack.py
index 4feca8e8d..ac05eca65 100644
--- a/stable_baselines3/common/vec_env/vec_frame_stack.py
+++ b/stable_baselines3/common/vec_env/vec_frame_stack.py
@@ -4,18 +4,24 @@
 import numpy as np
 from gym import spaces
 
-from stable_baselines3.common.preprocessing import has_image_space, is_image_space, is_image_space_channels_first
+from stable_baselines3.common.preprocessing import (
+    has_image_space,
+    is_image_space,
+    is_image_space_channels_first,
+)
 from stable_baselines3.common.vec_env.base_vec_env import VecEnv, VecEnvWrapper
 
+from stable_baselines3.common.stacked_observations import (
+    StackedObservations,
+    StackedDictObservations,
+)
+
 
 class VecFrameStack(VecEnvWrapper):
     """
     Frame stacking wrapper for vectorized environment. Designed for image observations.
 
-    Dimension to stack over is either first (channels-first) or
-    last (channels-last), which is detected automatically using
-    ``common.preprocessing.is_image_space_channels_first`` if
-    observation is an image space.
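Both ``update()`` methods in stacked_observations.py above implement the frame stack as a rolling window: ``np.roll`` shifts the oldest frame off one end of the stacking axis, then the newest observation overwrites the freed slot. A self-contained toy sketch of that pattern (all sizes invented: 2 envs, frame length 3, stack of 4, stacking on the last axis):

    import numpy as np

    num_envs, frame_size, n_stack = 2, 3, 4
    stackedobs = np.zeros((num_envs, frame_size * n_stack))

    def update(stackedobs, observations):
        # Shift everything toward the front so the oldest frame drops out...
        stackedobs = np.roll(stackedobs, shift=-frame_size, axis=-1)
        # ...then overwrite the freed tail slot with the newest observation,
        # the same roll-and-assign pattern used by update() above.
        stackedobs[..., -frame_size:] = observations
        return stackedobs

    for step in range(3):
        obs = np.full((num_envs, frame_size), step + 1.0)
        stackedobs = update(stackedobs, obs)

    print(stackedobs[0])  # [0. 0. 0. 1. 1. 1. 2. 2. 2. 3. 3. 3.]
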
+ Uses the StackedObservations class, or StackedDictObservations depending on the observations space :param venv: the vectorized environment to wrap :param n_stack: Number of frames to stack @@ -30,142 +36,26 @@ def __init__(self, venv: VecEnv, n_stack: int, channels_order: Optional[str] = N wrapped_obs_space = venv.observation_space if isinstance(wrapped_obs_space, spaces.Box): - ( - self.channels_first, - self.stack_dimension, - self.stackedobs, - observation_space, - ) = self.compute_stacking(channels_order, wrapped_obs_space) + self.stackedobs = StackedObservations(venv.num_envs, n_stack, wrapped_obs_space, channels_order) elif isinstance(wrapped_obs_space, spaces.Dict): - self.channels_first = {} - self.stack_dimension = {} - self.stackedobs = {} - space_dict = {} - for (key, subspace) in wrapped_obs_space.spaces.items(): - assert isinstance( - subspace, spaces.Box - ), "VecFrameStack with gym.spaces.Dict only works with nested gym.spaces.Box" - ( - self.channels_first[key], - self.stack_dimension[key], - self.stackedobs[key], - space_dict[key], - ) = self.compute_stacking(channels_order, subspace) - observation_space = spaces.Dict(spaces=space_dict) + self.stackedobs = StackedDictObservations(venv.num_envs, n_stack, wrapped_obs_space, channels_order) + else: raise Exception("VecFrameStack only works with gym.spaces.Box and gym.spaces.Dict observation spaces") + observation_space = self.stackedobs.stack_observation_space(wrapped_obs_space) VecEnvWrapper.__init__(self, venv, observation_space=observation_space) - def compute_stacking(self, channels_order, obs_space): - channels_first = False - if channels_order is None: - # Detect channel location automatically for images - if is_image_space(obs_space): - channels_first = is_image_space_channels_first(obs_space) - else: - # Default behavior for non-image space, stack on the last axis - channels_first = False - else: - assert channels_order in { - "last", - "first", - }, "`channels_order` must be one of following: 'last', 'first'" - - channels_first = channels_order == "first" - - # This includes the vec-env dimension (first) - stack_dimension = 1 if channels_first else -1 - repeat_axis = 0 if channels_first else -1 - low = np.repeat(obs_space.low, self.n_stack, axis=repeat_axis) - high = np.repeat(obs_space.high, self.n_stack, axis=repeat_axis) - stackedobs = np.zeros((self.venv.num_envs,) + low.shape, low.dtype) - observation_space = spaces.Box(low=low, high=high, dtype=obs_space.dtype) - return channels_first, stack_dimension, stackedobs, observation_space - def step_wait( self, ) -> Tuple[np.ndarray, np.ndarray, np.ndarray, List[Dict[str, Any]]]: observations, rewards, dones, infos = self.venv.step_wait() - if isinstance(self.venv.observation_space, spaces.Box): - stack_ax_size = observations.shape[self.stack_dimension] - self.stackedobs = np.roll(self.stackedobs, shift=-stack_ax_size, axis=self.stack_dimension) - for i, done in enumerate(dones): - if done: - if "terminal_observation" in infos[i]: - old_terminal = infos[i]["terminal_observation"] - if self.channels_first: - new_terminal = np.concatenate( - ( - self.stackedobs[i, :-stack_ax_size, ...], - old_terminal, - ), - axis=self.stack_dimension, - ) - else: - new_terminal = np.concatenate( - ( - self.stackedobs[i, ..., :-stack_ax_size], - old_terminal, - ), - axis=self.stack_dimension, - ) - infos[i]["terminal_observation"] = new_terminal - else: - warnings.warn("VecFrameStack wrapping a VecEnv without terminal_observation info") - self.stackedobs[i] = 0 - if 
self.channels_first: - self.stackedobs[:, -observations.shape[self.stack_dimension] :, ...] = observations - else: - self.stackedobs[..., -observations.shape[self.stack_dimension] :] = observations - elif isinstance(self.venv.observation_space, spaces.Dict): - for key in self.stackedobs.keys(): - stack_ax_size = observations[key].shape[self.stack_dimension[key]] - self.stackedobs[key] = np.roll( - self.stackedobs[key], - shift=-stack_ax_size, - axis=self.stack_dimension[key], - ) - - for i, done in enumerate(dones): - if done: - if "terminal_observation" in infos[i]: - old_terminal = infos[i]["terminal_observation"][key] - if self.channels_first[key]: - # new_terminal = np.concatenate( - # (self.stackedobs[key][i, :-stack_ax_size, ...], old_terminal), axis=self.stack_dimension[key] - # ) - # ValueError: all the input array dimensions for the concatenation axis must match exactly, - # but along dimension 0, the array at index 0 has size 6 and the array at index 1 has size 2 - new_terminal = np.vstack( - ( - self.stackedobs[key][i, :-stack_ax_size, ...], - old_terminal, - ) - ) - else: - new_terminal = np.concatenate( - ( - self.stackedobs[key][i, ..., :-stack_ax_size], - old_terminal, - ), - axis=self.stack_dimension[key], - ) - infos[i]["terminal_observation"][key] = new_terminal - else: - warnings.warn("VecFrameStack wrapping a VecEnv without terminal_observation info") - self.stackedobs[key][i] = 0 - if self.channels_first: - self.stackedobs[key][:, -observations[key].shape[self.stack_dimension[key]] :, ...] = observations[key] - else: - self.stackedobs[key][..., -observations[key].shape[self.stack_dimension] :] = observations[key] - else: - raise Exception(f"Unhandled observation type {type(self.venv.observation_space)}") + observations, infos = self.stackedobs.update(observations, dones, infos) - return self.stackedobs, rewards, dones, infos + return observations, rewards, dones, infos def reset(self) -> Union[np.ndarray, Dict[str, np.ndarray]]: """ @@ -173,22 +63,8 @@ def reset(self) -> Union[np.ndarray, Dict[str, np.ndarray]]: """ observation = self.venv.reset() # pytype:disable=annotation-type-mismatch - if isinstance(self.venv.observation_space, spaces.Box): - self.stackedobs[...] = 0 - if self.channels_first: - self.stackedobs[:, -observation.shape[self.stack_dimension] :, ...] = observation - else: - self.stackedobs[..., -observation.shape[self.stack_dimension] :] = observation - - elif isinstance(self.venv.observation_space, spaces.Dict): - for key, obs in observation.items(): - self.stackedobs[key][...] = 0 - if self.channels_first[key]: - self.stackedobs[key][:, -obs.shape[self.stack_dimension[key]] :, ...] 
= obs - else: - self.stackedobs[key][..., -obs.shape[self.stack_dimension[key]] :] = obs - - return self.stackedobs + observation = self.stackedobs.reset(observation) + return observation def close(self) -> None: self.venv.close() diff --git a/tests/test_dict_env.py b/tests/test_dict_env.py index a50d5852d..c79e9f680 100644 --- a/tests/test_dict_env.py +++ b/tests/test_dict_env.py @@ -4,11 +4,17 @@ from stable_baselines3 import DQN, PPO, SAC, TD3 from stable_baselines3.common.evaluation import evaluate_policy -from stable_baselines3.common.multi_input_envs import NineRoomMultiObsEnv, SimpleMultiObsEnv -from stable_baselines3.common.vec_env import DummyVecEnv, VecFrameStack, VecTransposeImage +from stable_baselines3.common.multi_input_envs import ( + NineRoomMultiObsEnv, + SimpleMultiObsEnv, +) +from stable_baselines3.common.vec_env import ( + DummyVecEnv, + VecFrameStack, + VecTransposeImage, +) -@pytest.mark.slow @pytest.mark.parametrize("model_class", [PPO]) # , SAC, TD3, DQN]) def test_dict_spaces(model_class): """ From 3cb69f5781c8ddfa126a32217d0877225efa4cde Mon Sep 17 00:00:00 2001 From: Jaden Travnik Date: Mon, 30 Nov 2020 16:16:23 -0500 Subject: [PATCH 11/70] Running isort :facepalm --- stable_baselines3/common/stacked_observations.py | 6 +----- stable_baselines3/common/vec_env/vec_frame_stack.py | 12 ++---------- tests/test_dict_env.py | 11 ++--------- 3 files changed, 5 insertions(+), 24 deletions(-) diff --git a/stable_baselines3/common/stacked_observations.py b/stable_baselines3/common/stacked_observations.py index ee6b33d75..d936c5605 100644 --- a/stable_baselines3/common/stacked_observations.py +++ b/stable_baselines3/common/stacked_observations.py @@ -4,11 +4,7 @@ import numpy as np from gym import spaces -from stable_baselines3.common.preprocessing import ( - has_image_space, - is_image_space, - is_image_space_channels_first, -) +from stable_baselines3.common.preprocessing import has_image_space, is_image_space, is_image_space_channels_first class StackedObservations: diff --git a/stable_baselines3/common/vec_env/vec_frame_stack.py b/stable_baselines3/common/vec_env/vec_frame_stack.py index ac05eca65..50d7da3e7 100644 --- a/stable_baselines3/common/vec_env/vec_frame_stack.py +++ b/stable_baselines3/common/vec_env/vec_frame_stack.py @@ -4,18 +4,10 @@ import numpy as np from gym import spaces -from stable_baselines3.common.preprocessing import ( - has_image_space, - is_image_space, - is_image_space_channels_first, -) +from stable_baselines3.common.preprocessing import has_image_space, is_image_space, is_image_space_channels_first +from stable_baselines3.common.stacked_observations import StackedDictObservations, StackedObservations from stable_baselines3.common.vec_env.base_vec_env import VecEnv, VecEnvWrapper -from stable_baselines3.common.stacked_observations import ( - StackedObservations, - StackedDictObservations, -) - class VecFrameStack(VecEnvWrapper): """ diff --git a/tests/test_dict_env.py b/tests/test_dict_env.py index c79e9f680..250d0065b 100644 --- a/tests/test_dict_env.py +++ b/tests/test_dict_env.py @@ -4,15 +4,8 @@ from stable_baselines3 import DQN, PPO, SAC, TD3 from stable_baselines3.common.evaluation import evaluate_policy -from stable_baselines3.common.multi_input_envs import ( - NineRoomMultiObsEnv, - SimpleMultiObsEnv, -) -from stable_baselines3.common.vec_env import ( - DummyVecEnv, - VecFrameStack, - VecTransposeImage, -) +from stable_baselines3.common.multi_input_envs import NineRoomMultiObsEnv, SimpleMultiObsEnv +from stable_baselines3.common.vec_env 
import DummyVecEnv, VecFrameStack, VecTransposeImage @pytest.mark.parametrize("model_class", [PPO]) # , SAC, TD3, DQN]) From 82fe425ef1bf4ad0073949995b421e71ffdcf4f8 Mon Sep 17 00:00:00 2001 From: Antonin RAFFIN Date: Tue, 1 Dec 2020 13:06:44 +0100 Subject: [PATCH 12/70] Fixed typing issues --- stable_baselines3/common/buffers.py | 17 ++++++++++++----- stable_baselines3/common/policies.py | 6 +++--- .../common/vec_env/vec_frame_stack.py | 2 +- tests/test_dict_env.py | 1 + 4 files changed, 17 insertions(+), 9 deletions(-) diff --git a/stable_baselines3/common/buffers.py b/stable_baselines3/common/buffers.py index e74dafd4b..693ccd5f3 100644 --- a/stable_baselines3/common/buffers.py +++ b/stable_baselines3/common/buffers.py @@ -476,8 +476,9 @@ def __init__( n_envs: int = 1, optimize_memory_usage: bool = False, ): - super(ReplayBuffer, self).__init__(buffer_size, observation_space, action_space, device, n_envs=n_envs) + super(DictReplayBuffer, self).__init__(buffer_size, observation_space, action_space, device, n_envs=n_envs) + assert isinstance(self.obs_shape, dict), "DictReplayBuffer must be used with Dict obs space only" assert n_envs == 1, "Replay buffer only support single environment for now" # Check that the replay buffer can fit into the memory @@ -568,7 +569,10 @@ def sample(self, batch_size: int, env: Optional[VecNormalize] = None) -> DictRep :return: """ if not self.optimize_memory_usage: - return super().sample(batch_size=batch_size, env=env) + upper_bound = self.buffer_size if self.full else self.pos + batch_inds = np.random.randint(0, upper_bound, size=batch_size) + return self._get_samples(batch_inds, env=env) + # Do not sample the element with index `self.pos` as the transitions is invalid # (we use only one array to store `obs` and `next_obs`) if self.full: @@ -644,7 +648,10 @@ def __init__( n_envs: int = 1, ): - super(RolloutBuffer, self).__init__(buffer_size, observation_space, action_space, device, n_envs=n_envs) + super(DictRolloutBuffer, self).__init__(buffer_size, observation_space, action_space, device, n_envs=n_envs) + + assert isinstance(self.obs_shape, dict), "DictRolloutBuffer must be used with Dict obs space only" + self.gae_lambda = gae_lambda self.gamma = gamma self.observations, self.actions, self.rewards, self.advantages = ( @@ -658,7 +665,7 @@ def __init__( self.reset() def reset(self) -> None: - + assert isinstance(self.obs_shape, dict), "DictRolloutBuffer must be used with Dict obs space only" self.observations = {} for key, obs_input_shape in self.obs_shape.items(): self.observations[key] = np.zeros((self.buffer_size, self.n_envs) + obs_input_shape, dtype=np.float32) @@ -730,7 +737,7 @@ def get(self, batch_size: Optional[int] = None) -> Generator[DictRolloutBufferSa yield self._get_samples(indices[start_idx : start_idx + batch_size]) start_idx += batch_size - def _get_samples(self, batch_inds: np.ndarray, env: Optional[VecNormalize] = None) -> RolloutBufferSamples: + def _get_samples(self, batch_inds: np.ndarray, env: Optional[VecNormalize] = None) -> DictRolloutBufferSamples: return DictRolloutBufferSamples( observations={key: self.to_torch(obs[batch_inds]) for (key, obs) in self.observations.items()}, diff --git a/stable_baselines3/common/policies.py b/stable_baselines3/common/policies.py index 39a28a9a3..34e1c9126 100644 --- a/stable_baselines3/common/policies.py +++ b/stable_baselines3/common/policies.py @@ -20,7 +20,6 @@ StateDependentNoiseDistribution, make_proba_distribution, ) -from stable_baselines3.common.env_util import is_wrapped from 
stable_baselines3.common.preprocessing import get_action_dim, is_image_space, preprocess_obs
 from stable_baselines3.common.torch_layers import (
     BaseFeaturesExtractor,
@@ -260,7 +259,7 @@ def try_transpose_img_observation(obs: np.ndarray, observation_space: gym.spaces
 
     def predict(
         self,
-        observation: np.ndarray,
+        observation: Union[np.ndarray, Dict[str, np.ndarray]],
         state: Optional[np.ndarray] = None,
         mask: Optional[np.ndarray] = None,
         deterministic: bool = False,
@@ -283,7 +282,8 @@ def predict(
         # mask = [False for _ in range(self.n_envs)]
 
         # Need to check the observation if its a ObsDictWrapper
-        if is_wrapped(self.observation_space, ObsDictWrapper):
+        # Special Case for GoalEnv (using HER normally)
+        if isinstance(observation, dict) and set(observation.keys()) == set(["observation", "desired_goal", "achieved_goal"]):
             observation = ObsDictWrapper.convert_dict(observation)
         elif isinstance(observation, dict):
             # need to copy the dict as the dict in VecFrameStack will become a torch tensor
diff --git a/stable_baselines3/common/vec_env/vec_frame_stack.py b/stable_baselines3/common/vec_env/vec_frame_stack.py
index 50d7da3e7..1653010c3 100644
--- a/stable_baselines3/common/vec_env/vec_frame_stack.py
+++ b/stable_baselines3/common/vec_env/vec_frame_stack.py
@@ -41,7 +41,7 @@ def __init__(self, venv: VecEnv, n_stack: int, channels_order: Optional[str] = N
 
     def step_wait(
         self,
-    ) -> Tuple[np.ndarray, np.ndarray, np.ndarray, List[Dict[str, Any]]]:
+    ) -> Tuple[Union[np.ndarray, Dict[str, np.ndarray]], np.ndarray, np.ndarray, List[Dict[str, Any]]]:
 
         observations, rewards, dones, infos = self.venv.step_wait()
 
diff --git a/tests/test_dict_env.py b/tests/test_dict_env.py
index 250d0065b..fbc0b395b 100644
--- a/tests/test_dict_env.py
+++ b/tests/test_dict_env.py
@@ -23,6 +23,7 @@ def test_dict_spaces(model_class):
         env,
         gamma=0.5,
         seed=1,
+        n_steps=250,
         policy_kwargs=dict(net_arch=[64]),
     )
     model.learn(total_timesteps=500)

From 201799d40f5930fe49ae76b2b7815d5f1ecd0cd6 Mon Sep 17 00:00:00 2001
From: Jaden Travnik
Date: Tue, 1 Dec 2020 12:13:07 -0500
Subject: [PATCH 13/70] Adding docstrings and typing. Using util for moving
 data to device.

---
 docs/misc/changelog.rst                      |   4 +-
 stable_baselines3/common/multi_input_envs.py | 174 ++++++++++++------
 .../common/off_policy_algorithm.py           |  26 +--
 stable_baselines3/common/policies.py         |  26 ++-
 stable_baselines3/common/preprocessing.py    |  16 +-
 .../common/stacked_observations.py           |  63 ++++++-
 stable_baselines3/common/torch_layers.py     |  14 ++
 stable_baselines3/common/utils.py            |   6 +
 tests/test_dict_env.py                       |   2 +-
 9 files changed, 232 insertions(+), 99 deletions(-)

diff --git a/docs/misc/changelog.rst b/docs/misc/changelog.rst
index e4912b2e1..5b599e3ba 100644
--- a/docs/misc/changelog.rst
+++ b/docs/misc/changelog.rst
@@ -8,7 +8,6 @@ Pre-Release 0.11.0a1 (WIP)
 
 Breaking Changes:
 ^^^^^^^^^^^^^^^^^
-- Breaks HER as HER needs to be updated to use the new dictionary observations
 - ``evaluate_policy`` now returns rewards/episode lengths from a ``Monitor`` wrapper if one is present,
   this allows to return the unnormalized reward in the case of Atari games for instance.
 - Renamed ``common.vec_env.is_wrapped`` to ``common.vec_env.is_vecenv_wrapped`` to avoid confusion
@@ -20,7 +19,8 @@ New Features:
   automatic check for image spaces.
 - ``VecFrameStack`` now has a ``channels_order`` argument to tell if observations should be stacked
   on the first or last observation dimension (originally always stacked on last).
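Putting the changelog entries above together, the intended end-to-end usage at this point in the series looks roughly like the sketch below. It is assembled from the APIs added so far (mirroring tests/test_dict_env.py), not taken verbatim from the patch:

    from stable_baselines3 import PPO
    from stable_baselines3.common.multi_input_envs import SimpleMultiObsEnv
    from stable_baselines3.common.vec_env import DummyVecEnv, VecFrameStack

    # Dict observations ({"vec": ..., "img": ...}) from the toy env are
    # stacked per key; channels_order picks the stacking axis for image
    # subspaces ("first" or "last"), per the changelog entry above.
    env = DummyVecEnv([lambda: SimpleMultiObsEnv(random_start=True)])
    env = VecFrameStack(env, n_stack=4, channels_order="last")

    # "MultiInputPolicy" is the alias registered for PPO earlier in the
    # series; it routes each key of the observation dict through its own
    # sub-extractor inside CombinedExtractor.
    model = PPO("MultiInputPolicy", env, n_steps=250)
    model.learn(total_timesteps=500)
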
-- Add support for dictionary observations in both RolloutBuffer (need to be tested in ReplayBuffer) +- Add ``DictRolloutBuffer`` to support dictionary observations (need to be tested in ReplayBuffer) +- Add ``StackedObservations`` and ``StackedDictObservations`` that are used within ``VecFrameStack`` - Added simple 4x4 and 9room test environments - Added ``common.env_util.is_wrapped`` and ``common.env_util.unwrap_wrapper`` functions for checking/unwrapping an environment for specific wrapper. diff --git a/stable_baselines3/common/multi_input_envs.py b/stable_baselines3/common/multi_input_envs.py index a5a376ed4..432ec2b9b 100644 --- a/stable_baselines3/common/multi_input_envs.py +++ b/stable_baselines3/common/multi_input_envs.py @@ -1,41 +1,40 @@ +from typing import Dict, Iterable, Union + import gym import numpy as np class SimpleMultiObsEnv(gym.Env): - ## simple 4x4 grid world - # - # ____________ - # | 0 1 2 3| - # | 4|¯5¯¯6¯| 7| - # | 8|_9_10_|11| - # |12 13 14 15| - # ¯¯¯¯¯¯¯¯¯¯¯¯¯¯ - # start is 0 - # states 5, 6, 9, and 10 are blocked - # goal is 15 - # actions are = [left, down, right, up] - - # simple linear state env of 15 states but encoded with a vector and an image observation - # State Mapping - # State Vector Img - # 0 Vec * 0 Img * 0 - # 1 Vec * 0 Img * 1/3 - # 2 Vec * 0 Img * 2/3 - # 3 Vec * 0 Img * 3/3 - # 4 Vec * 1/3 Img * 0 - # 5 Vec * 1/3 Img * 1/3 - # 6 Vec * 1/3 Img * 2/3 - # 7 Vec * 2/3 Img * 0 - # 8 Vec * 2/3 Img * 1/3 - # 9 Vec * 2/3 Img * 2/3 - # 10 Vec * 2/3 Img * 3/3 - # 11 Vec * 3/3 Img * 0 - # 12 Vec * 3/3 Img * 1/3 - # 13 Vec * 3/3 Img * 2/3 - # 14 Vec * 3/3 Img * 3/3 - - def __init__(self, num_col=4, num_row=4, random_start=True, noise=0.0): + """ + Base class for GridWorld-based MultiObs Environments 4x4 grid world + + + ____________ + | 0 1 2 3| + | 4|¯5¯¯6¯| 7| + | 8|_9_10_|11| + |12 13 14 15| + ¯¯¯¯¯¯¯¯¯¯¯¯¯¯ + start is 0 + states 5, 6, 9, and 10 are blocked + goal is 15 + actions are = [left, down, right, up] + + simple linear state env of 15 states but encoded with a vector and an image observation + + :param num_col: Number of columns in the grid + :param num_row: Number of rows in the grid + :param random_start: If true, agent starts in random position + :param noise: Noise added to the observations + """ + + def __init__( + self, + num_col: int = 4, + num_row: int = 4, + random_start: bool = True, + noise: float = 0.0, + ): super(SimpleMultiObsEnv, self).__init__() self.vector_size = 5 @@ -61,7 +60,20 @@ def __init__(self, num_col=4, num_row=4, random_start=True, noise=0.0): self.max_state = len(self.state_mapping) - 1 - def random_upsample_img(self, v_rng=(0, 255), initial_size=(4, 4), up_size=(20, 20)): + def random_upsample_img( + self, + v_rng: Iterable = (0, 255), + initial_size: Iterable = (4, 4), + up_size: Iterable = (20, 20), + ) -> np.ndarray: + """ + Generated a random image and upsample it + + :param v_rng: The range of values for the img + :param initial_size: The initial size of the image to generate + :param up_size: The size of the upsample + :return: upsampled img + """ im = np.random.randint(v_rng[0], v_rng[1], initial_size, dtype=np.int32) return np.array( [ @@ -75,7 +87,13 @@ def random_upsample_img(self, v_rng=(0, 255), initial_size=(4, 4), up_size=(20, ] ).astype(np.int32) - def init_state_mapping(self, num_col, num_row): + def init_state_mapping(self, num_col: int, num_row: int) -> None: + """ + Initializes the state_mapping array which holds the observation values for each state + + :param num_col: + :param num_row: + """ 
self.num_col = num_col self.state_mapping = [] @@ -85,7 +103,11 @@ def init_state_mapping(self, num_col, num_row): for j in range(num_row): self.state_mapping.append({"vec": col_vecs[i], "img": row_imgs[j]}) - def get_state_mapping(self): + def get_state_mapping(self) -> Dict: + """ + Uses the state to get the observation mapping and applies noise if there is any + :return: observation dict {'vec': ..., 'img': ...} + """ state_dict = self.state_mapping[self.state] if self.noise > 0: state_dict["vec"] += np.random.random(self.vector_size) * self.noise @@ -94,14 +116,35 @@ def get_state_mapping(self): state_dict["img"] = np.clip(state_dict["img"], 0, 255) return state_dict - def init_possible_transitions(self): + def init_possible_transitions(self) -> None: + """ + Initializes the transitions of the environment + The environment exploits the cardinal directions of the grid by noting that + they correspond to simple addition and subtraction from the cell id within the grid + + - up => means moving up a row => means subtracting the length of a column + - down => means moving down a row => means adding the length of a column + - left => means moving left by one => means subtracting 1 + - right => means moving right by one => means adding 1 + + Thus one only needs to specify in which states each action is possible + in order to define the transitions of the environment + """ self.left_possible = [1, 2, 3, 13, 14, 15] self.down_possible = [0, 4, 8, 3, 7, 11] self.right_possible = [0, 1, 2, 12, 13, 14] self.up_possible = [4, 8, 12, 7, 11, 15] - def step(self, action): + def step(self, action: Union[int, float]): + """ + Run one timestep of the environment's dynamics. When end of + episode is reached, you are responsible for calling `reset()` + to reset this environment's state. + Accepts an action and returns a tuple (observation, reward, done, info). + :param action: + :return: tuple (observation, reward, done, info). 
+ """ action = int(action) self.count += 1 @@ -127,10 +170,17 @@ def step(self, action): return self.get_state_mapping(), rwd, done, {"got_to_end": got_to_end} - def render(self, mode=None): + def render(self, mode: str = None) -> None: + """ + Prints the log of the environment + """ print(self.log) - def reset(self): + def reset(self) -> None: + """ + Resets the environment state and step count and returns reset observation + :return: observation dict {'vec': ..., 'img': ...} + """ self.count = 0 if not self.random_start: self.state = 0 @@ -140,30 +190,32 @@ def reset(self): class NineRoomMultiObsEnv(SimpleMultiObsEnv): - - ## 9 room grid world - # - # ____________________________________ - # | 0 1 2 | 3 4 5 | 6 7 8 | - # | 9 10 11 12 13 14 15 16 17 | - # | 18 19 20 | 21 22 23 | 24 25 26 | - # |¯¯¯¯ ¯¯¯|¯¯¯¯ ¯¯¯¯|¯¯¯¯ ¯¯¯¯| - # | 27 28 29 | 30 31 32 | 33 34 35 | - # | 36 37 38 39 40 41 42 43 44 | - # | 45 46 47 | 48 49 50 | 51 52 53 | - # |¯¯¯ ¯¯¯|¯¯¯¯ ¯¯¯¯|¯¯¯¯ ¯¯¯¯| - # | 54 55 56 | 57 58 59 | 60 61 62 | - # | 63 64 65 66 67 68 69 70 71 | - # | 72 73 74 | 75 76 77 | 78 79 80 | - # ¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯ - # start is 0 - # goal is 80 - # actions are = [left, down, right, up] - - def __init__(self, random_start=True, noise=0.0): + """ + Extension of the SimpleMultiObsEnv to a 9 room grid world + ____________________________________ + | 0 1 2 | 3 4 5 | 6 7 8 | + | 9 10 11 12 13 14 15 16 17 | + | 18 19 20 | 21 22 23 | 24 25 26 | + |¯¯¯¯ ¯¯¯|¯¯¯¯ ¯¯¯¯|¯¯¯¯ ¯¯¯¯| + | 27 28 29 | 30 31 32 | 33 34 35 | + | 36 37 38 39 40 41 42 43 44 | + | 45 46 47 | 48 49 50 | 51 52 53 | + |¯¯¯ ¯¯¯|¯¯¯¯ ¯¯¯¯|¯¯¯¯ ¯¯¯¯| + | 54 55 56 | 57 58 59 | 60 61 62 | + | 63 64 65 66 67 68 69 70 71 | + | 72 73 74 | 75 76 77 | 78 79 80 | + ¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯ + :param random_start: If true, agent starts in random position + :param noise: Noise added to the observations + """ + + def __init__(self, random_start: bool = True, noise: float = 0.0): super(NineRoomMultiObsEnv, self).__init__(9, 9, random_start=random_start, noise=noise) def init_possible_transitions(self): + """ + Initializes the state_mapping array which holds the observation values for each state + """ self.left_possible = ( [1, 2, 4, 5, 7, 8] + list(range(10, 18)) diff --git a/stable_baselines3/common/off_policy_algorithm.py b/stable_baselines3/common/off_policy_algorithm.py index 9aa51b707..7d03fe448 100644 --- a/stable_baselines3/common/off_policy_algorithm.py +++ b/stable_baselines3/common/off_policy_algorithm.py @@ -155,22 +155,16 @@ def _setup_model(self) -> None: self._setup_lr_schedule() self.set_random_seed(self.seed) - if isinstance(self.observation_space, gym.spaces.Dict): - self.replay_buffer = DictReplayBuffer( - self.buffer_size, - self.observation_space, - self.action_space, - self.device, - optimize_memory_usage=self.optimize_memory_usage, - ) - else: - self.replay_buffer = ReplayBuffer( - self.buffer_size, - self.observation_space, - self.action_space, - self.device, - optimize_memory_usage=self.optimize_memory_usage, - ) + BUFFER_CLS = DictReplayBuffer if isinstance(self.observation_space, gym.spaces.Dict) else ReplayBuffer + + self.replay_buffer = BUFFER_CLS( + self.buffer_size, + self.observation_space, + self.action_space, + self.device, + optimize_memory_usage=self.optimize_memory_usage, + ) + self.policy = self.policy_class( self.observation_space, self.action_space, diff --git a/stable_baselines3/common/policies.py b/stable_baselines3/common/policies.py index 39a28a9a3..de134f984 100644 --- 
a/stable_baselines3/common/policies.py +++ b/stable_baselines3/common/policies.py @@ -31,7 +31,7 @@ create_mlp, ) from stable_baselines3.common.type_aliases import Schedule -from stable_baselines3.common.utils import get_device, is_vectorized_observation +from stable_baselines3.common.utils import get_device, is_vectorized_observation, obs_as_tensor from stable_baselines3.common.vec_env import VecTransposeImage from stable_baselines3.common.vec_env.obs_dict_wrapper import ObsDictWrapper @@ -112,7 +112,12 @@ def _update_features_extractor( if features_extractor is None: # The features extractor is not shared, create a new one features_extractor = self.make_features_extractor() - net_kwargs.update(dict(features_extractor=features_extractor, features_dim=features_extractor.features_dim)) + net_kwargs.update( + dict( + features_extractor=features_extractor, + features_dim=features_extractor.features_dim, + ) + ) return net_kwargs def make_features_extractor(self) -> BaseFeaturesExtractor: @@ -249,7 +254,14 @@ def _predict(self, observation: th.Tensor, deterministic: bool = False) -> th.Te """ @staticmethod - def try_transpose_img_observation(obs: np.ndarray, observation_space: gym.spaces.Space): + def try_transpose_img_observation(obs: np.ndarray, observation_space: gym.spaces.Space) -> np.ndarray: + """ + Try to transpose an observation such that it matches the shape of the observation_space + + :param obs: + :param observation_space: + :return: (potentially transposed) observation + """ obs = np.array(obs) if not (obs.shape == observation_space.shape or obs.shape[1:] == observation_space.shape): # Try to re-order the channels @@ -304,12 +316,10 @@ def predict( vectorized_env = is_vectorized_observation(observation, self.observation_space) - if isinstance(observation, dict): - for key, obs in observation.items(): - observation[key] = th.as_tensor(observation[key]).to(self.device) - else: + if not isinstance(observation, dict): observation = observation.reshape((-1,) + self.observation_space.shape) - observation = th.as_tensor(observation).to(self.device) + + observation = obs_as_tensor(observation, self.device) with th.no_grad(): actions = self._predict(observation, deterministic=deterministic) diff --git a/stable_baselines3/common/preprocessing.py b/stable_baselines3/common/preprocessing.py index bc6995f8d..9b5a92787 100644 --- a/stable_baselines3/common/preprocessing.py +++ b/stable_baselines3/common/preprocessing.py @@ -65,14 +65,22 @@ def is_image_space( return False -def has_image_space(observation_space: spaces.Dict): +def has_image_space(observation_space: spaces.Dict) -> bool: + """ + Check if a Dict observation space has an image space within its subspaces + + :param observation_space: + :return: + """ for key, subspace in observation_space.spaces.items(): if is_image_space(subspace): return True return False -def preprocess_obs(obs: th.Tensor, observation_space: spaces.Space, normalize_images: bool = True) -> th.Tensor: +def preprocess_obs( + obs: th.Tensor, observation_space: spaces.Space, normalize_images: bool = True +) -> Union[th.Tensor, Dict[str, th.Tensor]]: """ Preprocess observation to be to a neural network. 
For images, it normalizes the values by dividing them by 255 (to have values in [0, 1]) @@ -115,7 +123,9 @@ def preprocess_obs(obs: th.Tensor, observation_space: spaces.Space, normalize_im raise NotImplementedError(f"Preprocessing not implemented for {observation_space}") -def get_obs_shape(observation_space: spaces.Space) -> Union[Tuple[int, ...], Dict[str, Tuple[int, ...]]]: +def get_obs_shape( + observation_space: spaces.Space, +) -> Union[Tuple[int, ...], Dict[str, Tuple[int, ...]]]: """ Get the shape of the observation (useful for the buffers). diff --git a/stable_baselines3/common/stacked_observations.py b/stable_baselines3/common/stacked_observations.py index d936c5605..bd292bfb5 100644 --- a/stable_baselines3/common/stacked_observations.py +++ b/stable_baselines3/common/stacked_observations.py @@ -44,7 +44,15 @@ def compute_stacking( n_stack: int, observation_space: spaces.Space, channels_order: Optional[str] = None, - ): + ) -> Tuple: + """ + Calculates the parameters in order to stack observations + :param num_envs: + :param n_stack: + :param observation_space: + :param channels_order: + :return: tuple of channels_first, stack_dimension, stackedobs, repeat_axis + """ channels_first = False if channels_order is None: # Detect channel location automatically for images @@ -68,12 +76,21 @@ def compute_stacking( stackedobs = np.zeros((num_envs,) + low.shape, low.dtype) return channels_first, stack_dimension, stackedobs, repeat_axis - def stack_observation_space(self, observation_space): + def stack_observation_space(self, observation_space: spaces.Box) -> spaces.Box: + """ + Given an observation space, returns a new observation space with stacked observations + :return: + """ low = np.repeat(observation_space.low, self.n_stack, axis=self.repeat_axis) high = np.repeat(observation_space.high, self.n_stack, axis=self.repeat_axis) return spaces.Box(low=low, high=high, dtype=observation_space.dtype) - def reset(self, observation): + def reset(self, observation: np.ndarray) -> np.ndarray: + """ + Resets the stackedobs, adds the reset observation to the stack, and returns the stack + :param observation: + :return: + """ self.stackedobs[...] = 0 if self.channels_first: self.stackedobs[:, -observation.shape[self.stack_dimension] :, ...] = observation @@ -81,7 +98,14 @@ def reset(self, observation): self.stackedobs[..., -observation.shape[self.stack_dimension] :] = observation return self.stackedobs - def update(self, observations, dones, infos): + def update(self, observations: np.ndarray, dones: np.ndarray, infos: Dict) -> Tuple[np.ndarray, Dict]: + """ + Adds the observations to the stack and uses the dones to update the infos. + :param observations: + :param dones: + :param infos: + :return: tuple of the stacked observations and the updated infos + """ stack_ax_size = observations.shape[self.stack_dimension] self.stackedobs = np.roll(self.stackedobs, shift=-stack_ax_size, axis=self.stack_dimension) for i, done in enumerate(dones): @@ -130,7 +154,13 @@ class StackedDictObservations(StackedObservations): If None, automatically detect channel to stack over in case of image observation or default to "last" (default). 
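    For instance (assumed shapes, for illustration only): with channels_order="first"
    an image subspace of shape (1, 20, 20) stacks to (n_stack, 20, 20), while with
    channels_order="last" a subspace of shape (20, 20, 1) stacks to (20, 20, n_stack).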
""" - def __init__(self, num_envs, n_stack, observation_space, channels_order): + def __init__( + self, + num_envs: int, + n_stack: int, + observation_space: spaces.Dict, + channels_order: Optional[str] = None, + ): self.n_stack = n_stack self.channels_first = {} self.stack_dimension = {} @@ -146,7 +176,12 @@ def __init__(self, num_envs, n_stack, observation_space, channels_order): self.repeat_axis[key], ) = self.compute_stacking(num_envs, n_stack, subspace, channels_order) - def stack_observation_space(self, observation_space): + def stack_observation_space(self, observation_space: spaces.Dict) -> spaces.Dict: + """ + Returns the stacked verson of a Dict observation space + :param observation_space: + :return: stacked observation space + """ spaces_dict = {} for key, subspace in observation_space.spaces.items(): low = np.repeat(subspace.low, self.n_stack, axis=self.repeat_axis[key]) @@ -154,7 +189,12 @@ def stack_observation_space(self, observation_space): spaces_dict[key] = spaces.Box(low=low, high=high, dtype=subspace.dtype) return spaces.Dict(spaces=spaces_dict) - def reset(self, observation): + def reset(self, observation: Dict) -> Dict: + """ + Resets the stacked observations, adds the reset observation to the stack, and returns the stack + :param observation: + :return: stacked observations + """ for key, obs in observation.items(): self.stackedobs[key][...] = 0 if self.channels_first[key]: @@ -163,7 +203,14 @@ def reset(self, observation): self.stackedobs[key][..., -obs.shape[self.stack_dimension[key]] :] = obs return self.stackedobs - def update(self, observations, dones, infos): + def update(self, observations: Dict, dones: np.ndarray, infos: Dict) -> Tuple[Dict, Dict]: + """ + Adds the observations to the stack and uses the dones to update the infos. + :param observations: + :param dones: + :param infos: + :return: tuple of the stacked observations and the updated infos + """ for key in self.stackedobs.keys(): stack_ax_size = observations[key].shape[self.stack_dimension[key]] self.stackedobs[key] = np.roll( diff --git a/stable_baselines3/common/torch_layers.py b/stable_baselines3/common/torch_layers.py index 4212d9d1e..688bf0fc6 100644 --- a/stable_baselines3/common/torch_layers.py +++ b/stable_baselines3/common/torch_layers.py @@ -229,6 +229,20 @@ def forward(self, features: th.Tensor) -> Tuple[th.Tensor, th.Tensor]: class CombinedExtractor(BaseFeaturesExtractor): + """ + Combined feature extractor for Dict observation spaces. + Builds a feature extractor for each key of the space + + :param observation_space: + :param features_dim: Number of features extracted. + This corresponds to the number of unit for the last layer. 
+    :param cnn_output_dim: Number of features to output from each cnn submodule
+    :param mlp_output_dim: Number of features to output from each mlp submodule
+    :param mlp_net_arch: Architecture of each mlp network module
+    :param activation_fn: The activation function to use within each mlp
+    :param comb_net_arch: Architecture of the combined network module which calculates the final feature extracted
+    """
+
     def __init__(
         self,
         observation_space: gym.spaces.Dict,
diff --git a/stable_baselines3/common/utils.py b/stable_baselines3/common/utils.py
index 185efa360..e9a9107fb 100644
--- a/stable_baselines3/common/utils.py
+++ b/stable_baselines3/common/utils.py
@@ -347,6 +347,12 @@ def polyak_update(
 def obs_as_tensor(
     obs: Union[np.ndarray, Dict[Union[str, int], np.ndarray]], device: th.device
 ) -> Union[th.tensor, TensorDict]:
+    """
+    Moves the observation to the given device
+    :param obs:
+    :param device:
+    :return:
+    """
     if isinstance(obs, np.ndarray):
         return th.as_tensor(obs).to(device)
     elif isinstance(obs, dict):
diff --git a/tests/test_dict_env.py b/tests/test_dict_env.py
index 250d0065b..812aae56e 100644
--- a/tests/test_dict_env.py
+++ b/tests/test_dict_env.py
@@ -16,7 +16,7 @@ def test_dict_spaces(model_class):
     """
     make_env = lambda: SimpleMultiObsEnv(random_start=True)
     env = DummyVecEnv([make_env])
-    # env = VecFrameStack(env, n_stack=2)
+    env = VecFrameStack(env, n_stack=2)

     model = model_class(
         "MultiInputPolicy",

From 683bbf2d96d0252f6906342f6f085dfca08633fb Mon Sep 17 00:00:00 2001
From: Jaden Travnik
Date: Tue, 1 Dec 2020 13:08:21 -0500
Subject: [PATCH 14/70] Fixed trailing commas

---
 stable_baselines3/common/base_class.py   | 10 +----
 stable_baselines3/common/buffers.py      | 40 +++---------------
 .../common/off_policy_algorithm.py       | 40 +++---------------
 .../common/on_policy_algorithm.py        | 31 +++++---------
 stable_baselines3/common/policies.py     | 19 ++-------
 .../common/stacked_observations.py       | 42 ++++---------------
 stable_baselines3/common/torch_layers.py | 16 +------
 stable_baselines3/sac/policies.py        | 16 ++-----
 tests/test_dict_env.py                   |  8 +---
 tests/test_her.py                        | 20 ++-------
 10 files changed, 46 insertions(+), 196 deletions(-)

diff --git a/stable_baselines3/common/base_class.py b/stable_baselines3/common/base_class.py
index 4c337851e..05a0cf6ea 100644
--- a/stable_baselines3/common/base_class.py
+++ b/stable_baselines3/common/base_class.py
@@ -261,15 +261,7 @@ def _excluded_save_params(self) -> List[str]:

         :return: List of parameters that should be excluded from being saved with pickle.
""" - return [ - "policy", - "device", - "env", - "eval_env", - "replay_buffer", - "rollout_buffer", - "_vec_normalize_env", - ] + return ["policy", "device", "env", "eval_env", "replay_buffer", "rollout_buffer", "_vec_normalize_env"] def _get_torch_save_params(self) -> Tuple[List[str], List[str]]: """ diff --git a/stable_baselines3/common/buffers.py b/stable_baselines3/common/buffers.py index e74dafd4b..6fd3aaf3e 100644 --- a/stable_baselines3/common/buffers.py +++ b/stable_baselines3/common/buffers.py @@ -185,19 +185,13 @@ def __init__( self.optimize_memory_usage = optimize_memory_usage - self.observations = np.zeros( - (self.buffer_size, self.n_envs) + self.obs_shape, - dtype=observation_space.dtype, - ) + self.observations = np.zeros((self.buffer_size, self.n_envs) + self.obs_shape, dtype=observation_space.dtype) if optimize_memory_usage: # `observations` contains also the next observation self.next_observations = None else: - self.next_observations = np.zeros( - (self.buffer_size, self.n_envs) + self.obs_shape, - dtype=observation_space.dtype, - ) + self.next_observations = np.zeros((self.buffer_size, self.n_envs) + self.obs_shape, dtype=observation_space.dtype) self.actions = np.zeros((self.buffer_size, self.n_envs, self.action_dim), dtype=action_space.dtype) @@ -322,12 +316,7 @@ def __init__( super(RolloutBuffer, self).__init__(buffer_size, observation_space, action_space, device, n_envs=n_envs) self.gae_lambda = gae_lambda self.gamma = gamma - self.observations, self.actions, self.rewards, self.advantages = ( - None, - None, - None, - None, - ) + self.observations, self.actions, self.rewards, self.advantages = (None, None, None, None) self.returns, self.dones, self.values, self.log_probs = None, None, None, None self.generator_ready = False self.reset() @@ -415,14 +404,7 @@ def get(self, batch_size: Optional[int] = None) -> Generator[RolloutBufferSample # Prepare the data if not self.generator_ready: - _tensor_names = [ - "observations", - "actions", - "values", - "log_probs", - "advantages", - "returns", - ] + _tensor_names = ["observations", "actions", "values", "log_probs", "advantages", "returns"] for tensor in _tensor_names: self.__dict__[tensor] = self.swap_and_flatten(self.__dict__[tensor]) @@ -581,12 +563,7 @@ def _get_samples(self, batch_inds: np.ndarray, env: Optional[VecNormalize] = Non if self.optimize_memory_usage: next_obs = { - key: self.to_torch( - self._normalize_obs( - obs[(batch_inds + 1) % self.buffer_size, 0, :], - env, - ) - ) + key: self.to_torch(self._normalize_obs(obs[(batch_inds + 1) % self.buffer_size, 0, :], env)) for key, obs in self.observations.items() } else: @@ -647,12 +624,7 @@ def __init__( super(RolloutBuffer, self).__init__(buffer_size, observation_space, action_space, device, n_envs=n_envs) self.gae_lambda = gae_lambda self.gamma = gamma - self.observations, self.actions, self.rewards, self.advantages = ( - None, - None, - None, - None, - ) + self.observations, self.actions, self.rewards, self.advantages = (None, None, None, None) self.returns, self.dones, self.values, self.log_probs = None, None, None, None self.generator_ready = False self.reset() diff --git a/stable_baselines3/common/off_policy_algorithm.py b/stable_baselines3/common/off_policy_algorithm.py index 7d03fe448..a98a89ebf 100644 --- a/stable_baselines3/common/off_policy_algorithm.py +++ b/stable_baselines3/common/off_policy_algorithm.py @@ -228,14 +228,7 @@ def _setup_learn( self.replay_buffer.dones[pos] = True return super()._setup_learn( - total_timesteps, - eval_env, - 
callback, - eval_freq, - n_eval_episodes, - log_path, - reset_num_timesteps, - tb_log_name, + total_timesteps, eval_env, callback, eval_freq, n_eval_episodes, log_path, reset_num_timesteps, tb_log_name ) def learn( @@ -252,14 +245,7 @@ def learn( ) -> "OffPolicyAlgorithm": total_timesteps, callback = self._setup_learn( - total_timesteps, - eval_env, - callback, - eval_freq, - n_eval_episodes, - eval_log_path, - reset_num_timesteps, - tb_log_name, + total_timesteps, eval_env, callback, eval_freq, n_eval_episodes, eval_log_path, reset_num_timesteps, tb_log_name ) callback.on_training_start(locals(), globals()) @@ -348,20 +334,10 @@ def _dump_logs(self) -> None: fps = int(self.num_timesteps / (time.time() - self.start_time)) logger.record("time/episodes", self._episode_num, exclude="tensorboard") if len(self.ep_info_buffer) > 0 and len(self.ep_info_buffer[0]) > 0: - logger.record( - "rollout/ep_rew_mean", - safe_mean([ep_info["r"] for ep_info in self.ep_info_buffer]), - ) - logger.record( - "rollout/ep_len_mean", - safe_mean([ep_info["l"] for ep_info in self.ep_info_buffer]), - ) + logger.record("rollout/ep_rew_mean", safe_mean([ep_info["r"] for ep_info in self.ep_info_buffer])) + logger.record("rollout/ep_len_mean", safe_mean([ep_info["l"] for ep_info in self.ep_info_buffer])) logger.record("time/fps", fps) - logger.record( - "time/time_elapsed", - int(time.time() - self.start_time), - exclude="tensorboard", - ) + logger.record("time/time_elapsed", int(time.time() - self.start_time), exclude="tensorboard") logger.record("time/total timesteps", self.num_timesteps, exclude="tensorboard") if self.use_sde: logger.record("train/std", (self.actor.get_std()).mean().item()) @@ -459,11 +435,7 @@ def collect_rollouts( reward_ = self._vec_normalize_env.get_original_reward() else: # Avoid changing the original ones - self._last_original_obs, new_obs_, reward_ = ( - self._last_obs, - new_obs, - reward, - ) + self._last_original_obs, new_obs_, reward_ = (self._last_obs, new_obs, reward) replay_buffer.add(self._last_original_obs, new_obs_, buffer_action, reward_, done) diff --git a/stable_baselines3/common/on_policy_algorithm.py b/stable_baselines3/common/on_policy_algorithm.py index 59336c14f..ebf17b4bb 100644 --- a/stable_baselines3/common/on_policy_algorithm.py +++ b/stable_baselines3/common/on_policy_algorithm.py @@ -102,26 +102,17 @@ def _setup_model(self) -> None: self._setup_lr_schedule() self.set_random_seed(self.seed) - if isinstance(self.observation_space, gym.spaces.Dict): - self.rollout_buffer = DictRolloutBuffer( - self.n_steps, - self.observation_space, - self.action_space, - self.device, - gamma=self.gamma, - gae_lambda=self.gae_lambda, - n_envs=self.n_envs, - ) - else: - self.rollout_buffer = RolloutBuffer( - self.n_steps, - self.observation_space, - self.action_space, - self.device, - gamma=self.gamma, - gae_lambda=self.gae_lambda, - n_envs=self.n_envs, - ) + BUFFER_CLS = DictRolloutBuffer if isinstance(self.observation_space, gym.spaces.Dict) else RolloutBuffer + + self.rollout_buffer = BUFFER_CLS( + self.n_steps, + self.observation_space, + self.action_space, + self.device, + gamma=self.gamma, + gae_lambda=self.gae_lambda, + n_envs=self.n_envs, + ) self.policy = self.policy_class( self.observation_space, diff --git a/stable_baselines3/common/policies.py b/stable_baselines3/common/policies.py index de134f984..9ab44827b 100644 --- a/stable_baselines3/common/policies.py +++ b/stable_baselines3/common/policies.py @@ -112,12 +112,7 @@ def _update_features_extractor( if features_extractor is 
None: # The features extractor is not shared, create a new one features_extractor = self.make_features_extractor() - net_kwargs.update( - dict( - features_extractor=features_extractor, - features_dim=features_extractor.features_dim, - ) - ) + net_kwargs.update(dict(features_extractor=features_extractor, features_dim=features_extractor.features_dim)) return net_kwargs def make_features_extractor(self) -> BaseFeaturesExtractor: @@ -544,9 +539,7 @@ def _build(self, lr_schedule: Schedule) -> None: elif isinstance(self.action_dist, StateDependentNoiseDistribution): latent_sde_dim = latent_dim_pi if self.sde_net_arch is None else latent_sde_dim self.action_net, self.log_std = self.action_dist.proba_distribution_net( - latent_dim=latent_dim_pi, - latent_sde_dim=latent_sde_dim, - log_std_init=self.log_std_init, + latent_dim=latent_dim_pi, latent_sde_dim=latent_sde_dim, log_std_init=self.log_std_init ) elif isinstance(self.action_dist, CategoricalDistribution): self.action_net = self.action_dist.proba_distribution_net(latent_dim=latent_dim_pi) @@ -908,13 +901,7 @@ def create_sde_features_extractor( # Special case: when using states as features (i.e. sde_net_arch is an empty list) # don't use any activation function sde_activation = activation_fn if len(sde_net_arch) > 0 else None - latent_sde_net = create_mlp( - features_dim, - -1, - sde_net_arch, - activation_fn=sde_activation, - squash_output=False, - ) + latent_sde_net = create_mlp(features_dim, -1, sde_net_arch, activation_fn=sde_activation, squash_output=False) latent_sde_dim = sde_net_arch[-1] if len(sde_net_arch) > 0 else features_dim sde_features_extractor = nn.Sequential(*latent_sde_net) return sde_features_extractor, latent_sde_dim diff --git a/stable_baselines3/common/stacked_observations.py b/stable_baselines3/common/stacked_observations.py index bd292bfb5..a5df9fe0f 100644 --- a/stable_baselines3/common/stacked_observations.py +++ b/stable_baselines3/common/stacked_observations.py @@ -31,12 +31,9 @@ def __init__( ): self.n_stack = n_stack - ( - self.channels_first, - self.stack_dimension, - self.stackedobs, - self.repeat_axis, - ) = self.compute_stacking(num_envs, n_stack, observation_space, channels_order) + (self.channels_first, self.stack_dimension, self.stackedobs, self.repeat_axis) = self.compute_stacking( + num_envs, n_stack, observation_space, channels_order + ) @staticmethod def compute_stacking( @@ -62,10 +59,7 @@ def compute_stacking( # Default behavior for non-image space, stack on the last axis channels_first = False else: - assert channels_order in { - "last", - "first", - }, "`channels_order` must be one of following: 'last', 'first'" + assert channels_order in {"last", "first"}, "`channels_order` must be one of following: 'last', 'first'" channels_first = channels_order == "first" @@ -114,18 +108,12 @@ def update(self, observations: np.ndarray, dones: np.ndarray, infos: Dict) -> Tu old_terminal = infos[i]["terminal_observation"] if self.channels_first: new_terminal = np.concatenate( - ( - self.stackedobs[i, :-stack_ax_size, ...], - old_terminal, - ), + (self.stackedobs[i, :-stack_ax_size, ...], old_terminal), axis=self.stack_dimension, ) else: new_terminal = np.concatenate( - ( - self.stackedobs[i, ..., :-stack_ax_size], - old_terminal, - ), + (self.stackedobs[i, ..., :-stack_ax_size], old_terminal), axis=self.stack_dimension, ) infos[i]["terminal_observation"] = new_terminal @@ -213,29 +201,17 @@ def update(self, observations: Dict, dones: np.ndarray, infos: Dict) -> Tuple[Di """ for key in self.stackedobs.keys(): 
stack_ax_size = observations[key].shape[self.stack_dimension[key]] - self.stackedobs[key] = np.roll( - self.stackedobs[key], - shift=-stack_ax_size, - axis=self.stack_dimension[key], - ) + self.stackedobs[key] = np.roll(self.stackedobs[key], shift=-stack_ax_size, axis=self.stack_dimension[key]) for i, done in enumerate(dones): if done: if "terminal_observation" in infos[i]: old_terminal = infos[i]["terminal_observation"][key] if self.channels_first[key]: - new_terminal = np.vstack( - ( - self.stackedobs[key][i, :-stack_ax_size, ...], - old_terminal, - ) - ) + new_terminal = np.vstack((self.stackedobs[key][i, :-stack_ax_size, ...], old_terminal)) else: new_terminal = np.concatenate( - ( - self.stackedobs[key][i, ..., :-stack_ax_size], - old_terminal, - ), + (self.stackedobs[key][i, ..., :-stack_ax_size], old_terminal), axis=self.stack_dimension[key], ) infos[i]["terminal_observation"][key] = new_terminal diff --git a/stable_baselines3/common/torch_layers.py b/stable_baselines3/common/torch_layers.py index 688bf0fc6..7ddfda136 100644 --- a/stable_baselines3/common/torch_layers.py +++ b/stable_baselines3/common/torch_layers.py @@ -282,13 +282,7 @@ def __init__( else: extractors[key] = nn.Sequential( - *create_mlp( - subspace.shape[0], - mlp_output_dim, - mlp_net_arch, - activation_fn, - squash_output=False, - ) + *create_mlp(subspace.shape[0], mlp_output_dim, mlp_net_arch, activation_fn, squash_output=False) ) total_concat_size += mlp_output_dim @@ -296,13 +290,7 @@ def __init__( self.extractors = nn.ModuleDict(extractors) self.combined = nn.Sequential( - *create_mlp( - total_concat_size, - features_dim, - comb_net_arch, - activation_fn, - squash_output=False, - ) + *create_mlp(total_concat_size, features_dim, comb_net_arch, activation_fn, squash_output=False) ) def forward(self, observations: TensorDict) -> th.Tensor: diff --git a/stable_baselines3/sac/policies.py b/stable_baselines3/sac/policies.py index 7bb24c965..7b88deee1 100644 --- a/stable_baselines3/sac/policies.py +++ b/stable_baselines3/sac/policies.py @@ -100,16 +100,10 @@ def __init__( ) = create_sde_features_extractor(features_dim, sde_net_arch, activation_fn) self.action_dist = StateDependentNoiseDistribution( - action_dim, - full_std=full_std, - use_expln=use_expln, - learn_features=True, - squash_output=True, + action_dim, full_std=full_std, use_expln=use_expln, learn_features=True, squash_output=True ) self.mu, self.log_std = self.action_dist.proba_distribution_net( - latent_dim=last_layer_dim, - latent_sde_dim=latent_sde_dim, - log_std_init=log_std_init, + latent_dim=last_layer_dim, latent_sde_dim=latent_sde_dim, log_std_init=log_std_init ) # Avoid numerical issues by limiting the mean of the Gaussian # to be in [-clip_mean, clip_mean] @@ -290,11 +284,7 @@ def __init__( self.actor_kwargs.update(sde_kwargs) self.critic_kwargs = self.net_args.copy() self.critic_kwargs.update( - { - "n_critics": n_critics, - "net_arch": critic_arch, - "share_features_extractor": share_features_extractor, - } + {"n_critics": n_critics, "net_arch": critic_arch, "share_features_extractor": share_features_extractor} ) self.actor, self.actor_target = None, None diff --git a/tests/test_dict_env.py b/tests/test_dict_env.py index 812aae56e..163c4068c 100644 --- a/tests/test_dict_env.py +++ b/tests/test_dict_env.py @@ -18,13 +18,7 @@ def test_dict_spaces(model_class): env = DummyVecEnv([make_env]) env = VecFrameStack(env, n_stack=2) - model = model_class( - "MultiInputPolicy", - env, - gamma=0.5, - seed=1, - policy_kwargs=dict(net_arch=[64]), - ) + 
model = model_class("MultiInputPolicy", env, gamma=0.5, seed=1, policy_kwargs=dict(net_arch=[64])) model.learn(total_timesteps=500) evaluate_policy(model, env, n_eval_episodes=5, warn=False) diff --git a/tests/test_her.py b/tests/test_her.py index e94a479ad..814a3500b 100644 --- a/tests/test_her.py +++ b/tests/test_her.py @@ -44,14 +44,7 @@ def test_her(model_class, online_sampling): @pytest.mark.parametrize( "goal_selection_strategy", - [ - "final", - "episode", - "future", - GoalSelectionStrategy.FINAL, - GoalSelectionStrategy.EPISODE, - GoalSelectionStrategy.FUTURE, - ], + ["final", "episode", "future", GoalSelectionStrategy.FINAL, GoalSelectionStrategy.EPISODE, GoalSelectionStrategy.FUTURE], ) @pytest.mark.parametrize("online_sampling", [True, False]) def test_goal_selection_strategy(goal_selection_strategy, online_sampling): @@ -172,10 +165,7 @@ def test_save_load(tmp_path, model_class, use_sde, online_sampling): os.remove(tmp_path / "test_save.zip") -@pytest.mark.parametrize( - "online_sampling, truncate_last_trajectory", - [(False, None), (True, True), (True, False)], -) +@pytest.mark.parametrize("online_sampling, truncate_last_trajectory", [(False, None), (True, True), (True, False)]) def test_save_load_replay_buffer(tmp_path, recwarn, online_sampling, truncate_last_trajectory): """ Test if 'save_replay_buffer' and 'load_replay_buffer' works correctly @@ -232,12 +222,10 @@ def test_save_load_replay_buffer(tmp_path, recwarn, online_sampling, truncate_la model.replay_buffer.buffer["next_obs"][:n_episodes_stored], ) assert np.allclose( - old_replay_buffer.buffer["action"][:n_episodes_stored], - model.replay_buffer.buffer["action"][:n_episodes_stored], + old_replay_buffer.buffer["action"][:n_episodes_stored], model.replay_buffer.buffer["action"][:n_episodes_stored] ) assert np.allclose( - old_replay_buffer.buffer["reward"][:n_episodes_stored], - model.replay_buffer.buffer["reward"][:n_episodes_stored], + old_replay_buffer.buffer["reward"][:n_episodes_stored], model.replay_buffer.buffer["reward"][:n_episodes_stored] ) # we might change the last done of the last trajectory so we don't compare it assert np.allclose( From 15ceb358e1f8065698e23a2d91d3823092885424 Mon Sep 17 00:00:00 2001 From: Antonin RAFFIN Date: Wed, 2 Dec 2020 11:48:33 +0100 Subject: [PATCH 15/70] Fix types --- docs/Makefile | 3 +- stable_baselines3/common/multi_input_envs.py | 52 +++++++++++-------- .../common/stacked_observations.py | 38 +++++++++----- stable_baselines3/common/utils.py | 2 +- 4 files changed, 58 insertions(+), 37 deletions(-) diff --git a/docs/Makefile b/docs/Makefile index 47f98cd72..5a4c19e51 100644 --- a/docs/Makefile +++ b/docs/Makefile @@ -2,6 +2,7 @@ # # You can set these variables from the command line. +# For debug: SPHINXOPTS = -nWT --keep-going -vvv SPHINXOPTS = -W # make warnings fatal SPHINXBUILD = sphinx-build SPHINXPROJ = StableBaselines @@ -17,4 +18,4 @@ help: # Catch-all target: route all unknown targets to Sphinx using the new # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 
 %: Makefile
-	@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
\ No newline at end of file
+	@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
diff --git a/stable_baselines3/common/multi_input_envs.py b/stable_baselines3/common/multi_input_envs.py
index 432ec2b9b..c78f0e72a 100644
--- a/stable_baselines3/common/multi_input_envs.py
+++ b/stable_baselines3/common/multi_input_envs.py
@@ -1,8 +1,10 @@
-from typing import Dict, Iterable, Union
+from typing import Dict, Tuple, Union

 import gym
 import numpy as np

+from stable_baselines3.common.type_aliases import GymStepReturn
+

 class SimpleMultiObsEnv(gym.Env):
     """
@@ -62,27 +64,27 @@ def __init__(

     def random_upsample_img(
         self,
-        v_rng: Iterable = (0, 255),
-        initial_size: Iterable = (4, 4),
-        up_size: Iterable = (20, 20),
+        value_range: Tuple[int, int] = (0, 255),
+        initial_size: Tuple[int, int] = (4, 4),
+        up_size: Tuple[int, int] = (20, 20),
     ) -> np.ndarray:
         """
-        Generated a random image and upsample it
+        Generates a random image and upsamples it.

-        :param v_rng: The range of values for the img
+        :param value_range: The range of values for the image
         :param initial_size: The initial size of the image to generate
         :param up_size: The size of the upsample
         :return: upsampled img
         """
-        im = np.random.randint(v_rng[0], v_rng[1], initial_size, dtype=np.int32)
+        im = np.random.randint(value_range[0], value_range[1], initial_size, dtype=np.int32)
         return np.array(
             [
                 [
                     [
-                        im[int(initial_size[0] * r / up_size[0])][int(initial_size[1] * c / up_size[1])]
-                        for c in range(up_size[0])
+                        im[int(initial_size[0] * row / up_size[0])][int(initial_size[1] * col / up_size[1])]
+                        for col in range(up_size[0])
                     ]
-                    for r in range(up_size[1])
+                    for row in range(up_size[1])
                 ]
             ]
         ).astype(np.int32)
@@ -91,8 +93,8 @@ def init_state_mapping(self, num_col: int, num_row: int) -> None:
         """
         Initializes the state_mapping array which holds the observation values for each state

-        :param num_col:
-        :param num_row:
+        :param num_col: Number of columns.
+        :param num_row: Number of rows.
         """
         self.num_col = num_col
         self.state_mapping = []
@@ -103,9 +105,10 @@ def init_state_mapping(self, num_col: int, num_row: int) -> None:
         for j in range(num_row):
             self.state_mapping.append({"vec": col_vecs[i], "img": row_imgs[j]})

-    def get_state_mapping(self) -> Dict:
+    def get_state_mapping(self) -> Dict[str, np.ndarray]:
         """
-        Uses the state to get the observation mapping and applies noise if there is any
+        Uses the state to get the observation mapping and applies noise if there is any.
+
         :return: observation dict {'vec': ..., 'img': ...}
         """
         state_dict = self.state_mapping[self.state]
@@ -135,7 +138,7 @@ def init_possible_transitions(self) -> None:
         self.right_possible = [0, 1, 2, 12, 13, 14]
         self.up_possible = [4, 8, 12, 7, 11, 15]

-    def step(self, action: Union[int, float]):
+    def step(self, action: Union[int, float]) -> GymStepReturn:
         """
         Run one timestep of the environment's dynamics.
When end of episode is reached, you are responsible for calling `reset()` @@ -151,7 +154,7 @@ def step(self, action: Union[int, float]): prev_state = self.state - rwd = -0.1 + reward = -0.1 # define state transition if self.state in self.left_possible and action == 0: # left self.state -= 1 @@ -163,22 +166,25 @@ def step(self, action: Union[int, float]): self.state -= self.num_col got_to_end = self.state == self.max_state - rwd = 1 if got_to_end else rwd + reward = 1 if got_to_end else reward done = self.count > self.max_count or got_to_end self.log = f"Went {self.action2str[action]} in state {prev_state}, got to state {self.state}" - return self.get_state_mapping(), rwd, done, {"got_to_end": got_to_end} + return self.get_state_mapping(), reward, done, {"got_to_end": got_to_end} - def render(self, mode: str = None) -> None: + def render(self, mode: str = "human") -> None: """ - Prints the log of the environment + Prints the log of the environment. + + :param mode: """ print(self.log) - def reset(self) -> None: + def reset(self) -> Dict[str, np.ndarray]: """ - Resets the environment state and step count and returns reset observation + Resets the environment state and step count and returns reset observation. + :return: observation dict {'vec': ..., 'img': ...} """ self.count = 0 @@ -212,7 +218,7 @@ class NineRoomMultiObsEnv(SimpleMultiObsEnv): def __init__(self, random_start: bool = True, noise: float = 0.0): super(NineRoomMultiObsEnv, self).__init__(9, 9, random_start=random_start, noise=noise) - def init_possible_transitions(self): + def init_possible_transitions(self) -> None: """ Initializes the state_mapping array which holds the observation values for each state """ diff --git a/stable_baselines3/common/stacked_observations.py b/stable_baselines3/common/stacked_observations.py index a5df9fe0f..9c6486eee 100644 --- a/stable_baselines3/common/stacked_observations.py +++ b/stable_baselines3/common/stacked_observations.py @@ -1,13 +1,13 @@ import warnings -from typing import Any, Dict, List, Optional, Tuple, Union +from typing import Any, Dict, List, Optional, Tuple import numpy as np from gym import spaces -from stable_baselines3.common.preprocessing import has_image_space, is_image_space, is_image_space_channels_first +from stable_baselines3.common.preprocessing import is_image_space, is_image_space_channels_first -class StackedObservations: +class StackedObservations(object): """ Frame stacking wrapper for data. 
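The rolling-stack mechanics that the ``update`` methods in this file implement can be
summarized with a small NumPy-only sketch (shapes here are illustrative assumptions,
not taken from this patch):

    import numpy as np

    # One env, 3 stacked frames of a 2-dim vector observation (channels-last).
    n_envs, n_stack, obs_dim = 1, 3, 2
    stackedobs = np.zeros((n_envs, n_stack * obs_dim))

    def update(stackedobs: np.ndarray, observation: np.ndarray) -> np.ndarray:
        # Shift the old frames left by one frame width, then overwrite the
        # tail with the newest observation, mirroring StackedObservations.update.
        stackedobs = np.roll(stackedobs, shift=-obs_dim, axis=-1)
        stackedobs[..., -obs_dim:] = observation
        return stackedobs

    for step in range(3):
        stackedobs = update(stackedobs, np.full((n_envs, obs_dim), step + 1.0))
    # stackedobs == [[1., 1., 2., 2., 3., 3.]] -- the oldest frame comes first.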
@@ -31,17 +31,18 @@ def __init__( ): self.n_stack = n_stack - (self.channels_first, self.stack_dimension, self.stackedobs, self.repeat_axis) = self.compute_stacking( + self.channels_first, self.stack_dimension, self.stackedobs, self.repeat_axis = self.compute_stacking( num_envs, n_stack, observation_space, channels_order ) + super().__init__() @staticmethod def compute_stacking( num_envs: int, n_stack: int, - observation_space: spaces.Space, + observation_space: spaces.Box, channels_order: Optional[str] = None, - ) -> Tuple: + ) -> Tuple[bool, int, np.ndarray, int]: """ Calculates the parameters in order to stack observations :param num_envs: @@ -82,6 +83,7 @@ def stack_observation_space(self, observation_space: spaces.Box) -> spaces.Box: def reset(self, observation: np.ndarray) -> np.ndarray: """ Resets the stackedobs, adds the reset observation to the stack, and returns the stack + :param observation: :return: """ @@ -92,9 +94,15 @@ def reset(self, observation: np.ndarray) -> np.ndarray: self.stackedobs[..., -observation.shape[self.stack_dimension] :] = observation return self.stackedobs - def update(self, observations: np.ndarray, dones: np.ndarray, infos: Dict) -> Tuple[np.ndarray, Dict]: + def update( + self, + observations: np.ndarray, + dones: np.ndarray, + infos: List[Dict[str, Any]], + ) -> Tuple[np.ndarray, List[Dict[str, Any]]]: """ Adds the observations to the stack and uses the dones to update the infos. + :param observations: :param dones: :param infos: @@ -155,7 +163,8 @@ def __init__( self.stackedobs = {} self.repeat_axis = {} - for (key, subspace) in observation_space.spaces.items(): + for key, subspace in observation_space.spaces.items(): + # assert isinstance(key, str), f"The key {key} of the observation space is not a string" assert isinstance(subspace, spaces.Box), "StackedDictObservations only works with nested gym.spaces.Box" ( self.channels_first[key], @@ -177,7 +186,7 @@ def stack_observation_space(self, observation_space: spaces.Dict) -> spaces.Dict spaces_dict[key] = spaces.Box(low=low, high=high, dtype=subspace.dtype) return spaces.Dict(spaces=spaces_dict) - def reset(self, observation: Dict) -> Dict: + def reset(self, observation: Dict[str, np.ndarray]) -> Dict[str, np.ndarray]: """ Resets the stacked observations, adds the reset observation to the stack, and returns the stack :param observation: @@ -191,7 +200,12 @@ def reset(self, observation: Dict) -> Dict: self.stackedobs[key][..., -obs.shape[self.stack_dimension[key]] :] = obs return self.stackedobs - def update(self, observations: Dict, dones: np.ndarray, infos: Dict) -> Tuple[Dict, Dict]: + def update( + self, + observations: Dict[str, np.ndarray], + dones: np.ndarray, + infos: List[Dict[str, Any]], + ) -> Tuple[Dict[str, np.ndarray], List[Dict[str, Any]]]: """ Adds the observations to the stack and uses the dones to update the infos. :param observations: @@ -219,7 +233,7 @@ def update(self, observations: Dict, dones: np.ndarray, infos: Dict) -> Tuple[Di warnings.warn("VecFrameStack wrapping a VecEnv without terminal_observation info") self.stackedobs[key][i] = 0 if self.channels_first: - self.stackedobs[key][:, -observations[key].shape[self.stack_dimension[key]] :, ...] = observations[key] + self.stackedobs[key][:, -stack_ax_size:, ...] 
= observations[key]
             else:
-                self.stackedobs[key][..., -observations[key].shape[self.stack_dimension] :] = observations[key]
+                self.stackedobs[key][..., -stack_ax_size:] = observations[key]
         return self.stackedobs, infos
diff --git a/stable_baselines3/common/utils.py b/stable_baselines3/common/utils.py
index e9a9107fb..deb19c712 100644
--- a/stable_baselines3/common/utils.py
+++ b/stable_baselines3/common/utils.py
@@ -346,7 +346,7 @@ def polyak_update(

 def obs_as_tensor(
     obs: Union[np.ndarray, Dict[Union[str, int], np.ndarray]], device: th.device
-) -> Union[th.tensor, TensorDict]:
+) -> Union[th.Tensor, TensorDict]:
     """
     Moves the observation to the given device
     :param obs:

From f9cab8a84062332acb35f367d9041f2f3e31a5d2 Mon Sep 17 00:00:00 2001
From: Antonin RAFFIN
Date: Wed, 2 Dec 2020 11:56:00 +0100
Subject: [PATCH 16/70] Minor edits

---
 stable_baselines3/common/policies.py             | 2 +-
 stable_baselines3/common/stacked_observations.py | 1 -
 2 files changed, 1 insertion(+), 2 deletions(-)

diff --git a/stable_baselines3/common/policies.py b/stable_baselines3/common/policies.py
index 8c8517e77..18f0572d7 100644
--- a/stable_baselines3/common/policies.py
+++ b/stable_baselines3/common/policies.py
@@ -289,7 +289,7 @@ def predict(
             #    mask = [False for _ in range(self.n_envs)]

         # Need to check the observation if its a ObsDictWrapper
-        # Special Case for GoalEnd (using HER normally)
+        # Special Case for GoalEnv (using HER normally)
         if isinstance(observation, dict) and set(observation.keys()) == set(["observation", "desired_goal", "achieved_goal"]):
             observation = ObsDictWrapper.convert_dict(observation)
         elif isinstance(observation, dict):
diff --git a/stable_baselines3/common/stacked_observations.py b/stable_baselines3/common/stacked_observations.py
index 9c6486eee..355be0aa2 100644
--- a/stable_baselines3/common/stacked_observations.py
+++ b/stable_baselines3/common/stacked_observations.py
@@ -164,7 +164,6 @@ def __init__(
         self.repeat_axis = {}

         for key, subspace in observation_space.spaces.items():
-            # assert isinstance(key, str), f"The key {key} of the observation space is not a string"
             assert isinstance(subspace, spaces.Box), "StackedDictObservations only works with nested gym.spaces.Box"
             (
                 self.channels_first[key],

From 5b178f423ee8db23ef564191effad00928fd2ef3 Mon Sep 17 00:00:00 2001
From: Antonin RAFFIN
Date: Wed, 2 Dec 2020 12:08:07 +0100
Subject: [PATCH 17/70] Avoid duplicating code

---
 stable_baselines3/common/buffers.py | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

diff --git a/stable_baselines3/common/buffers.py b/stable_baselines3/common/buffers.py
index 0636742db..dd6e87538 100644
--- a/stable_baselines3/common/buffers.py
+++ b/stable_baselines3/common/buffers.py
@@ -551,9 +551,7 @@ def sample(self, batch_size: int, env: Optional[VecNormalize] = None) -> DictRep
         :return:
         """
         if not self.optimize_memory_usage:
-            upper_bound = self.buffer_size if self.full else self.pos
-            batch_inds = np.random.randint(0, upper_bound, size=batch_size)
-            return self._get_samples(batch_inds, env=env)
+            return super(ReplayBuffer, self).sample(batch_size=batch_size, env=env)

         # Do not sample the element with index `self.pos` as the transitions is invalid
         # (we use only one array to store `obs` and `next_obs`)

From d692027ac658370cd9875f06ab60b97831a7e69f Mon Sep 17 00:00:00 2001
From: Antonin RAFFIN
Date: Wed, 2 Dec 2020 12:42:49 +0100
Subject: [PATCH 18/70] Fix calls to parents

---
 stable_baselines3/common/buffers.py | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git 
a/stable_baselines3/common/buffers.py b/stable_baselines3/common/buffers.py
index dd6e87538..4e9e42208 100644
--- a/stable_baselines3/common/buffers.py
+++ b/stable_baselines3/common/buffers.py
@@ -316,7 +316,7 @@ def __init__(
         super(RolloutBuffer, self).__init__(buffer_size, observation_space, action_space, device, n_envs=n_envs)
         self.gae_lambda = gae_lambda
         self.gamma = gamma
-        self.observations, self.actions, self.rewards, self.advantages = (None, None, None, None)
+        self.observations, self.actions, self.rewards, self.advantages = None, None, None, None
         self.returns, self.dones, self.values, self.log_probs = None, None, None, None
         self.generator_ready = False
         self.reset()
@@ -458,7 +458,7 @@ def __init__(
         n_envs: int = 1,
         optimize_memory_usage: bool = False,
     ):
-        super(DictReplayBuffer, self).__init__(buffer_size, observation_space, action_space, device, n_envs=n_envs)
+        super(ReplayBuffer, self).__init__(buffer_size, observation_space, action_space, device, n_envs=n_envs)

         assert isinstance(self.obs_shape, dict), "DictReplayBuffer must be used with Dict obs space only"
         assert n_envs == 1, "Replay buffer only support single environment for now"
@@ -623,13 +623,13 @@ def __init__(
         n_envs: int = 1,
     ):

-        super(DictRolloutBuffer, self).__init__(buffer_size, observation_space, action_space, device, n_envs=n_envs)
+        super(RolloutBuffer, self).__init__(buffer_size, observation_space, action_space, device, n_envs=n_envs)

         assert isinstance(self.obs_shape, dict), "DictRolloutBuffer must be used with Dict obs space only"

         self.gae_lambda = gae_lambda
         self.gamma = gamma
-        self.observations, self.actions, self.rewards, self.advantages = (None, None, None, None)
+        self.observations, self.actions, self.rewards, self.advantages = None, None, None, None
         self.returns, self.dones, self.values, self.log_probs = None, None, None, None
         self.generator_ready = False
         self.reset()

From 8b22f967ce3d8e24ae6b0e82004fc7374513a088 Mon Sep 17 00:00:00 2001
From: Jaden Travnik
Date: Wed, 2 Dec 2020 10:21:58 -0500
Subject: [PATCH 19/70] Adding assert to buffers. Updating changelog

---
 docs/misc/changelog.rst             |  4 +--
 stable_baselines3/common/buffers.py | 55 +++++++++++++++++++----------
 2 files changed, 39 insertions(+), 20 deletions(-)

diff --git a/docs/misc/changelog.rst b/docs/misc/changelog.rst
index ff7b6d1c5..15b48d483 100644
--- a/docs/misc/changelog.rst
+++ b/docs/misc/changelog.rst
@@ -19,7 +19,7 @@ New Features:
   automatic check for image spaces.
 - ``VecFrameStack`` now has a ``channels_order`` argument to tell if observations should be stacked
   on the first or last observation dimension (originally always stacked on last).
-- Add ``DictRolloutBuffer`` to support dictionary observations (need to be tested in ReplayBuffer) +- Add ``DictRolloutBuffer`` to support dictionary observations (need to be tested in ReplayBuffer) (@JadenTravnik) - Add ``StackedObservations`` and ``StackedDictObservations`` that are used within ``VecFrameStack`` - Added simple 4x4 and 9room test environments - Added ``common.env_util.is_wrapped`` and ``common.env_util.unwrap_wrapper`` functions for checking/unwrapping @@ -529,4 +529,4 @@ And all the contributors: @flodorner @KuKuXia @NeoExtended @PartiallyTyped @mmcenta @richardwu @kinalmehta @rolandgvc @tkelestemur @mloo3 @tirafesi @blurLake @koulakis @joeljosephjin @shwang @rk37 @andyshih12 @RaphaelWag @xicocaio @diditforlulz273 @liorcohen5 @ManifoldFR @mloo3 @SwamyDev @wmmc88 @megan-klaiber @thisray -@tfederico @hn2 +@tfederico @hn2 @JadenTravnik diff --git a/stable_baselines3/common/buffers.py b/stable_baselines3/common/buffers.py index 4e9e42208..9514d3546 100644 --- a/stable_baselines3/common/buffers.py +++ b/stable_baselines3/common/buffers.py @@ -3,23 +3,20 @@ from typing import Dict, Generator, Optional, Union import numpy as np -import torch as th from gym import spaces +import torch as th +from stable_baselines3.common.preprocessing import get_action_dim, get_obs_shape +from stable_baselines3.common.type_aliases import (DictReplayBufferSamples, DictRolloutBufferSamples, ReplayBufferSamples, + RolloutBufferSamples) +from stable_baselines3.common.vec_env import VecNormalize + try: # Check memory used by replay buffer when possible import psutil except ImportError: psutil = None -from stable_baselines3.common.preprocessing import get_action_dim, get_obs_shape -from stable_baselines3.common.type_aliases import ( - DictReplayBufferSamples, - DictRolloutBufferSamples, - ReplayBufferSamples, - RolloutBufferSamples, -) -from stable_baselines3.common.vec_env import VecNormalize class BaseBuffer(ABC): @@ -185,13 +182,19 @@ def __init__( self.optimize_memory_usage = optimize_memory_usage - self.observations = np.zeros((self.buffer_size, self.n_envs) + self.obs_shape, dtype=observation_space.dtype) + self.observations = np.zeros( + (self.buffer_size, self.n_envs) + self.obs_shape, + dtype=observation_space.dtype, + ) if optimize_memory_usage: # `observations` contains also the next observation self.next_observations = None else: - self.next_observations = np.zeros((self.buffer_size, self.n_envs) + self.obs_shape, dtype=observation_space.dtype) + self.next_observations = np.zeros( + (self.buffer_size, self.n_envs) + self.obs_shape, + dtype=observation_space.dtype, + ) self.actions = np.zeros((self.buffer_size, self.n_envs, self.action_dim), dtype=action_space.dtype) @@ -316,7 +319,12 @@ def __init__( super(RolloutBuffer, self).__init__(buffer_size, observation_space, action_space, device, n_envs=n_envs) self.gae_lambda = gae_lambda self.gamma = gamma - self.observations, self.actions, self.rewards, self.advantages = None, None, None, None + self.observations, self.actions, self.rewards, self.advantages = ( + None, + None, + None, + None, + ) self.returns, self.dones, self.values, self.log_probs = None, None, None, None self.generator_ready = False self.reset() @@ -404,7 +412,14 @@ def get(self, batch_size: Optional[int] = None) -> Generator[RolloutBufferSample # Prepare the data if not self.generator_ready: - _tensor_names = ["observations", "actions", "values", "log_probs", "advantages", "returns"] + _tensor_names = [ + "observations", + "actions", + "values", + "log_probs", + 
"advantages", + "returns", + ] for tensor in _tensor_names: self.__dict__[tensor] = self.swap_and_flatten(self.__dict__[tensor]) @@ -467,10 +482,9 @@ def __init__( if psutil is not None: mem_available = psutil.virtual_memory().available - if optimize_memory_usage: - optimize_memory_usage = False - # disabling as this adds quite a bit of complexity - # https://github.com/DLR-RM/stable-baselines3/pull/243#discussion_r531535702 + assert optimize_memory_usage == False, "DictReplayBuffer does not support optimize_memory_usage" + # disabling as this adds quite a bit of complexity + # https://github.com/DLR-RM/stable-baselines3/pull/243#discussion_r531535702 self.optimize_memory_usage = optimize_memory_usage self.observations = { @@ -629,7 +643,12 @@ def __init__( self.gae_lambda = gae_lambda self.gamma = gamma - self.observations, self.actions, self.rewards, self.advantages = None, None, None, None + self.observations, self.actions, self.rewards, self.advantages = ( + None, + None, + None, + None, + ) self.returns, self.dones, self.values, self.log_probs = None, None, None, None self.generator_ready = False self.reset() From 70dfa83c693e882a94a236592abee4dc6a4aee23 Mon Sep 17 00:00:00 2001 From: Jaden Travnik Date: Wed, 2 Dec 2020 10:38:04 -0500 Subject: [PATCH 20/70] Running format on buffers --- stable_baselines3/common/buffers.py | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/stable_baselines3/common/buffers.py b/stable_baselines3/common/buffers.py index 9514d3546..420bc580f 100644 --- a/stable_baselines3/common/buffers.py +++ b/stable_baselines3/common/buffers.py @@ -3,12 +3,16 @@ from typing import Dict, Generator, Optional, Union import numpy as np +import torch as th from gym import spaces -import torch as th from stable_baselines3.common.preprocessing import get_action_dim, get_obs_shape -from stable_baselines3.common.type_aliases import (DictReplayBufferSamples, DictRolloutBufferSamples, ReplayBufferSamples, - RolloutBufferSamples) +from stable_baselines3.common.type_aliases import ( + DictReplayBufferSamples, + DictRolloutBufferSamples, + ReplayBufferSamples, + RolloutBufferSamples, +) from stable_baselines3.common.vec_env import VecNormalize try: @@ -18,7 +22,6 @@ psutil = None - class BaseBuffer(ABC): """ Base class that represent a buffer (rollout or replay) From a006b5a93c2e9a200cd8544e78105640ef696945 Mon Sep 17 00:00:00 2001 From: Jaden Travnik Date: Tue, 8 Dec 2020 14:18:28 -0500 Subject: [PATCH 21/70] Adding multi-input policies to dqn,td3,a2c. Fixing warnings. 
Fixed bug with DictReplayBuffer as Replay buffers use only 1 env --- stable_baselines3/a2c/policies.py | 9 ++- stable_baselines3/common/buffers.py | 3 +- stable_baselines3/common/multi_input_envs.py | 15 ++++- stable_baselines3/common/utils.py | 2 +- .../common/vec_env/vec_frame_stack.py | 3 +- stable_baselines3/dqn/dqn.py | 10 +++- stable_baselines3/dqn/policies.py | 54 +++++++++++++++++- stable_baselines3/td3/policies.py | 56 +++++++++++++++++++ tests/test_dict_env.py | 31 +++++----- 9 files changed, 154 insertions(+), 29 deletions(-) diff --git a/stable_baselines3/a2c/policies.py b/stable_baselines3/a2c/policies.py index eed0ddea1..79c85f8f7 100644 --- a/stable_baselines3/a2c/policies.py +++ b/stable_baselines3/a2c/policies.py @@ -1,9 +1,16 @@ # This file is here just to define MlpPolicy/CnnPolicy # that work for A2C -from stable_baselines3.common.policies import ActorCriticCnnPolicy, ActorCriticPolicy, register_policy +from stable_baselines3.common.policies import ( + ActorCriticCnnPolicy, + ActorCriticPolicy, + MultiInputActorCriticPolicy, + register_policy, +) MlpPolicy = ActorCriticPolicy CnnPolicy = ActorCriticCnnPolicy +MultiInputPolicy = MultiInputActorCriticPolicy register_policy("MlpPolicy", ActorCriticPolicy) register_policy("CnnPolicy", ActorCriticCnnPolicy) +register_policy("MultiInputPolicy", MultiInputPolicy) diff --git a/stable_baselines3/common/buffers.py b/stable_baselines3/common/buffers.py index 420bc580f..4bd887401 100644 --- a/stable_baselines3/common/buffers.py +++ b/stable_baselines3/common/buffers.py @@ -501,7 +501,8 @@ def __init__( key: np.zeros((self.buffer_size, self.n_envs) + _obs_shape) for key, _obs_shape in self.obs_shape.items() } - self.actions = np.zeros((self.buffer_size, self.n_envs, self.action_dim), dtype=action_space.dtype) + # only 1 env is supported + self.actions = np.zeros((self.buffer_size, self.action_dim), dtype=action_space.dtype) self.rewards = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32) self.dones = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32) diff --git a/stable_baselines3/common/multi_input_envs.py b/stable_baselines3/common/multi_input_envs.py index c78f0e72a..7b1d905df 100644 --- a/stable_baselines3/common/multi_input_envs.py +++ b/stable_baselines3/common/multi_input_envs.py @@ -36,6 +36,7 @@ def __init__( num_row: int = 4, random_start: bool = True, noise: float = 0.0, + discrete_actions: bool = True, ): super(SimpleMultiObsEnv, self).__init__() @@ -43,7 +44,12 @@ def __init__( self.img_size = [1, 20, 20] self.random_start = random_start - self.action_space = gym.spaces.Discrete(3) + self.discrete_actions = discrete_actions + if discrete_actions: + self.action_space = gym.spaces.Discrete(4) + else: + self.action_space = gym.spaces.Box(0, 1, (4,)) + self.observation_space = gym.spaces.Dict( spaces={ "vec": gym.spaces.Box(0, 1, (self.vector_size,)), @@ -138,7 +144,7 @@ def init_possible_transitions(self) -> None: self.right_possible = [0, 1, 2, 12, 13, 14] self.up_possible = [4, 8, 12, 7, 11, 15] - def step(self, action: Union[int, float]) -> GymStepReturn: + def step(self, action: Union[int, float, np.ndarray]) -> GymStepReturn: """ Run one timestep of the environment's dynamics. When end of episode is reached, you are responsible for calling `reset()` @@ -148,7 +154,10 @@ def step(self, action: Union[int, float]) -> GymStepReturn: :param action: :return: tuple (observation, reward, done, info). 
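
        A hypothetical illustration of the continuous-action branch added in
        this commit (a length-4 Box action is reduced with np.argmax to pick
        the discrete move):

            env = SimpleMultiObsEnv(discrete_actions=False)
            obs = env.reset()
            # np.argmax([0.1, 0.7, 0.1, 0.1]) == 1, i.e. "down"
            obs, reward, done, info = env.step(np.array([0.1, 0.7, 0.1, 0.1]))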
""" - action = int(action) + if not self.discrete_actions: + action = np.argmax(action) + else: + action = int(action) self.count += 1 diff --git a/stable_baselines3/common/utils.py b/stable_baselines3/common/utils.py index deb19c712..28b548955 100644 --- a/stable_baselines3/common/utils.py +++ b/stable_baselines3/common/utils.py @@ -3,7 +3,7 @@ import random from collections import deque from itertools import zip_longest -from typing import Callable, Dict, Iterable, Optional, Union +from typing import Dict, Iterable, Optional, Union import gym import numpy as np diff --git a/stable_baselines3/common/vec_env/vec_frame_stack.py b/stable_baselines3/common/vec_env/vec_frame_stack.py index 1653010c3..918b7837f 100644 --- a/stable_baselines3/common/vec_env/vec_frame_stack.py +++ b/stable_baselines3/common/vec_env/vec_frame_stack.py @@ -4,7 +4,6 @@ import numpy as np from gym import spaces -from stable_baselines3.common.preprocessing import has_image_space, is_image_space, is_image_space_channels_first from stable_baselines3.common.stacked_observations import StackedDictObservations, StackedObservations from stable_baselines3.common.vec_env.base_vec_env import VecEnv, VecEnvWrapper @@ -41,7 +40,7 @@ def __init__(self, venv: VecEnv, n_stack: int, channels_order: Optional[str] = N def step_wait( self, - ) -> Tuple[Union[np.ndarray, Dict[str, np.ndarray]], np.ndarray, np.ndarray, List[Dict[str, Any]]]: + ) -> Tuple[Union[np.ndarray, Dict[str, np.ndarray]], np.ndarray, np.ndarray, List[Dict[str, Any]],]: observations, rewards, dones, infos = self.venv.step_wait() diff --git a/stable_baselines3/dqn/dqn.py b/stable_baselines3/dqn/dqn.py index 045c377d3..82c0600ec 100644 --- a/stable_baselines3/dqn/dqn.py +++ b/stable_baselines3/dqn/dqn.py @@ -127,7 +127,9 @@ def _setup_model(self) -> None: super(DQN, self)._setup_model() self._create_aliases() self.exploration_schedule = get_linear_fn( - self.exploration_initial_eps, self.exploration_final_eps, self.exploration_fraction + self.exploration_initial_eps, + self.exploration_final_eps, + self.exploration_fraction, ) def _create_aliases(self) -> None: @@ -166,7 +168,6 @@ def train(self, gradient_steps: int, batch_size: int = 100) -> None: # Get current Q estimates current_q = self.q_net(replay_data.observations) - # Retrieve the q-values for the actions from the replay buffer current_q = th.gather(current_q, dim=1, index=replay_data.actions.long()) @@ -206,7 +207,10 @@ def predict( """ if not deterministic and np.random.rand() < self.exploration_rate: if is_vectorized_observation(observation, self.observation_space): - n_batch = observation.shape[0] + if isinstance(self.observation_space, gym.spaces.Dict): + n_batch = observation[list(observation.keys())[0]].shape[0] + else: + n_batch = observation.shape[0] action = np.array([self.action_space.sample() for _ in range(n_batch)]) else: action = np.array(self.action_space.sample()) diff --git a/stable_baselines3/dqn/policies.py b/stable_baselines3/dqn/policies.py index f72424ec1..06a435d89 100644 --- a/stable_baselines3/dqn/policies.py +++ b/stable_baselines3/dqn/policies.py @@ -5,7 +5,13 @@ from torch import nn from stable_baselines3.common.policies import BasePolicy, register_policy -from stable_baselines3.common.torch_layers import BaseFeaturesExtractor, FlattenExtractor, NatureCNN, create_mlp +from stable_baselines3.common.torch_layers import ( + BaseFeaturesExtractor, + CombinedExtractor, + FlattenExtractor, + NatureCNN, + create_mlp, +) from stable_baselines3.common.type_aliases import Schedule @@ -234,5 
+240,51 @@ def __init__( ) +class MultiInputPolicy(DQNPolicy): + """ + Policy class for DQN when using dict observations as input. + + :param observation_space: Observation space + :param action_space: Action space + :param lr_schedule: Learning rate schedule (could be constant) + :param net_arch: The specification of the policy and value networks. + :param activation_fn: Activation function + :param features_extractor_class: Features extractor to use. + :param normalize_images: Whether to normalize images or not, + dividing by 255.0 (True by default) + :param optimizer_class: The optimizer to use, + ``th.optim.Adam`` by default + :param optimizer_kwargs: Additional keyword arguments, + excluding the learning rate, to pass to the optimizer + """ + + def __init__( + self, + observation_space: gym.spaces.Dict, + action_space: gym.spaces.Space, + lr_schedule: Schedule, + net_arch: Optional[List[int]] = None, + activation_fn: Type[nn.Module] = nn.ReLU, + features_extractor_class: Type[BaseFeaturesExtractor] = CombinedExtractor, + features_extractor_kwargs: Optional[Dict[str, Any]] = None, + normalize_images: bool = True, + optimizer_class: Type[th.optim.Optimizer] = th.optim.Adam, + optimizer_kwargs: Optional[Dict[str, Any]] = None, + ): + super(MultiInputPolicy, self).__init__( + observation_space, + action_space, + lr_schedule, + net_arch, + activation_fn, + features_extractor_class, + features_extractor_kwargs, + normalize_images, + optimizer_class, + optimizer_kwargs, + ) + + register_policy("MlpPolicy", MlpPolicy) register_policy("CnnPolicy", CnnPolicy) +register_policy("MultiInputPolicy", MultiInputPolicy) diff --git a/stable_baselines3/td3/policies.py b/stable_baselines3/td3/policies.py index 225a7b6f5..d0e8b748d 100644 --- a/stable_baselines3/td3/policies.py +++ b/stable_baselines3/td3/policies.py @@ -8,6 +8,7 @@ from stable_baselines3.common.preprocessing import get_action_dim from stable_baselines3.common.torch_layers import ( BaseFeaturesExtractor, + CombinedExtractor, FlattenExtractor, NatureCNN, create_mlp, @@ -280,5 +281,60 @@ def __init__( ) +class MultiInputPolicy(TD3Policy): + """ + Policy class (with both actor and critic) for TD3 to be used with Dict observation spaces. + + :param observation_space: Observation space + :param action_space: Action space + :param lr_schedule: Learning rate schedule (could be constant) + :param net_arch: The specification of the policy and value networks. + :param activation_fn: Activation function + :param features_extractor_class: Features extractor to use. + :param features_extractor_kwargs: Keyword arguments + to pass to the features extractor. + :param normalize_images: Whether to normalize images or not, + dividing by 255.0 (True by default) + :param optimizer_class: The optimizer to use, + ``th.optim.Adam`` by default + :param optimizer_kwargs: Additional keyword arguments, + excluding the learning rate, to pass to the optimizer + :param n_critics: Number of critic networks to create. 
+ :param share_features_extractor: Whether to share or not the features extractor + between the actor and the critic (this saves computation time) + """ + + def __init__( + self, + observation_space: gym.spaces.Dict, + action_space: gym.spaces.Space, + lr_schedule: Schedule, + net_arch: Optional[Union[List[int], Dict[str, List[int]]]] = None, + activation_fn: Type[nn.Module] = nn.ReLU, + features_extractor_class: Type[BaseFeaturesExtractor] = CombinedExtractor, + features_extractor_kwargs: Optional[Dict[str, Any]] = None, + normalize_images: bool = True, + optimizer_class: Type[th.optim.Optimizer] = th.optim.Adam, + optimizer_kwargs: Optional[Dict[str, Any]] = None, + n_critics: int = 2, + share_features_extractor: bool = True, + ): + super(MultiInputPolicy, self).__init__( + observation_space, + action_space, + lr_schedule, + net_arch, + activation_fn, + features_extractor_class, + features_extractor_kwargs, + normalize_images, + optimizer_class, + optimizer_kwargs, + n_critics, + share_features_extractor, + ) + + register_policy("MlpPolicy", MlpPolicy) register_policy("CnnPolicy", CnnPolicy) +register_policy("MultiInputPolicy", MultiInputPolicy) diff --git a/tests/test_dict_env.py b/tests/test_dict_env.py index e1b3707f1..b11ec4759 100644 --- a/tests/test_dict_env.py +++ b/tests/test_dict_env.py @@ -1,31 +1,28 @@ -import gym -import numpy as np import pytest -from stable_baselines3 import DQN, PPO, SAC, TD3 +from stable_baselines3 import A2C, DQN, PPO, SAC, TD3 from stable_baselines3.common.evaluation import evaluate_policy from stable_baselines3.common.multi_input_envs import NineRoomMultiObsEnv, SimpleMultiObsEnv -from stable_baselines3.common.vec_env import DummyVecEnv, VecFrameStack, VecTransposeImage +from stable_baselines3.common.vec_env import DummyVecEnv, VecFrameStack -@pytest.mark.parametrize("model_class", [PPO]) # , SAC, TD3, DQN]) +@pytest.mark.parametrize("model_class", [PPO, A2C, DQN, SAC, TD3]) def test_dict_spaces(model_class): """ - Additional tests for PPO/SAC/TD3/DQN to check observation space support + Additional tests for PPO/A2C/SAC/TD3/DQN to check observation space support for Dictionary spaces using MultiInputPolicy. 
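
    A rough sketch of the setup under test (names as used elsewhere in this
    patch; hyperparameters are illustrative only):

        env = DummyVecEnv([lambda: SimpleMultiObsEnv(random_start=True)])
        env = VecFrameStack(env, n_stack=2)  # stacks each Dict key separately
        model = PPO("MultiInputPolicy", env, gamma=0.5, seed=1)
        model.learn(total_timesteps=250)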
""" - make_env = lambda: SimpleMultiObsEnv(random_start=True) - env = DummyVecEnv([make_env]) + use_discrete_actions = model_class not in [SAC, TD3] + env = DummyVecEnv([lambda: SimpleMultiObsEnv(random_start=True, discrete_actions=use_discrete_actions)]) env = VecFrameStack(env, n_stack=2) + kwargs = {} + n_steps = 250 - model = model_class( - "MultiInputPolicy", - env, - gamma=0.5, - seed=1, - n_steps=250, - policy_kwargs=dict(net_arch=[64]), - ) - model.learn(total_timesteps=500) + if model_class == DQN: + kwargs = dict(learning_starts=0) + + model = model_class("MultiInputPolicy", env, gamma=0.5, seed=1, **kwargs) + + model.learn(total_timesteps=n_steps) evaluate_policy(model, env, n_eval_episodes=5, warn=False) From 12361ae5ed5a6a9cce7d0c663894b8e064ed1ad5 Mon Sep 17 00:00:00 2001 From: Jaden Travnik Date: Tue, 8 Dec 2020 15:24:58 -0500 Subject: [PATCH 22/70] Fixing warnings, splitting is_vectorized_observation into multiple functions based on space type --- stable_baselines3/common/buffers.py | 2 +- stable_baselines3/common/utils.py | 188 ++++++++++++------ .../common/vec_env/vec_frame_stack.py | 1 - tests/test_dict_env.py | 2 +- 4 files changed, 127 insertions(+), 66 deletions(-) diff --git a/stable_baselines3/common/buffers.py b/stable_baselines3/common/buffers.py index 4bd887401..addd9e739 100644 --- a/stable_baselines3/common/buffers.py +++ b/stable_baselines3/common/buffers.py @@ -485,7 +485,7 @@ def __init__( if psutil is not None: mem_available = psutil.virtual_memory().available - assert optimize_memory_usage == False, "DictReplayBuffer does not support optimize_memory_usage" + assert optimize_memory_usage is False, "DictReplayBuffer does not support optimize_memory_usage" # disabling as this adds quite a bit of complexity # https://github.com/DLR-RM/stable-baselines3/pull/243#discussion_r531535702 self.optimize_memory_usage = optimize_memory_usage diff --git a/stable_baselines3/common/utils.py b/stable_baselines3/common/utils.py index 28b548955..0bd115a79 100644 --- a/stable_baselines3/common/utils.py +++ b/stable_baselines3/common/utils.py @@ -211,78 +211,140 @@ def check_for_correct_spaces(env: GymEnv, observation_space: gym.spaces.Space, a raise ValueError(f"Action spaces do not match: {action_space} != {env.action_space}") -def is_vectorized_observation(observation: np.ndarray, observation_space: gym.spaces.Space) -> bool: +def is_vectorized_box_observation(observation: np.ndarray, observation_space: gym.spaces.Box) -> bool: """ - For every observation type, detects and validates the shape, + For box observation type, detects and validates the shape, then returns whether or not the observation is vectorized. 
:param observation: the input observation to validate :param observation_space: the observation space :return: whether the given observation is vectorized or not """ - if isinstance(observation_space, gym.spaces.Box): - if observation.shape == observation_space.shape: - return False - elif observation.shape[1:] == observation_space.shape: - return True - else: - raise ValueError( - f"Error: Unexpected observation shape {observation.shape} for " - + f"Box environment, please use {observation_space.shape} " - + "or (n_env, {}) for the observation shape.".format(", ".join(map(str, observation_space.shape))) - ) - elif isinstance(observation_space, gym.spaces.Discrete): - if observation.shape == (): # A numpy array of a number, has shape empty tuple '()' - return False - elif len(observation.shape) == 1: - return True - else: - raise ValueError( - f"Error: Unexpected observation shape {observation.shape} for " - + "Discrete environment, please use (1,) or (n_env, 1) for the observation shape." - ) - elif isinstance(observation_space, gym.spaces.MultiDiscrete): - if observation.shape == (len(observation_space.nvec),): - return False - elif len(observation.shape) == 2 and observation.shape[1] == len(observation_space.nvec): - return True - else: - raise ValueError( - f"Error: Unexpected observation shape {observation.shape} for MultiDiscrete " - + f"environment, please use ({len(observation_space.nvec)},) or " - + f"(n_env, {len(observation_space.nvec)}) for the observation shape." - ) - elif isinstance(observation_space, gym.spaces.MultiBinary): - if observation.shape == (observation_space.n,): + if observation.shape == observation_space.shape: + return False + elif observation.shape[1:] == observation_space.shape: + return True + else: + raise ValueError( + f"Error: Unexpected observation shape {observation.shape} for " + + f"Box environment, please use {observation_space.shape} " + + "or (n_env, {}) for the observation shape.".format(", ".join(map(str, observation_space.shape))) + ) + + +def is_vectorized_discrete_observation(observation: np.ndarray, observation_space: gym.spaces.Discrete) -> bool: + """ + For discrete observation type, detects and validates the shape, + then returns whether or not the observation is vectorized. + + :param observation: the input observation to validate + :param observation_space: the observation space + :return: whether the given observation is vectorized or not + """ + if observation.shape == (): # A numpy array of a number, has shape empty tuple '()' + return False + elif len(observation.shape) == 1: + return True + else: + raise ValueError( + f"Error: Unexpected observation shape {observation.shape} for " + + "Discrete environment, please use (1,) or (n_env, 1) for the observation shape." + ) + + +def is_vectorized_multidiscrete_observation(observation: np.ndarray, observation_space: gym.spaces.MultiDiscrete) -> bool: + """ + For multidiscrete observation type, detects and validates the shape, + then returns whether or not the observation is vectorized. 
+
+    :param observation: the input observation to validate
+    :param observation_space: the observation space
+    :return: whether the given observation is vectorized or not
+    """
+    if observation.shape == (len(observation_space.nvec),):
+        return False
+    elif len(observation.shape) == 2 and observation.shape[1] == len(observation_space.nvec):
+        return True
+    else:
+        raise ValueError(
+            f"Error: Unexpected observation shape {observation.shape} for MultiDiscrete "
+            + f"environment, please use ({len(observation_space.nvec)},) or "
+            + f"(n_env, {len(observation_space.nvec)}) for the observation shape."
+        )
+
+
+def is_vectorized_multibinary_observation(observation: np.ndarray, observation_space: gym.spaces.MultiBinary) -> bool:
+    """
+    For multibinary observation type, detects and validates the shape,
+    then returns whether or not the observation is vectorized.
+
+    :param observation: the input observation to validate
+    :param observation_space: the observation space
+    :return: whether the given observation is vectorized or not
+    """
+    if observation.shape == (observation_space.n,):
+        return False
+    elif len(observation.shape) == 2 and observation.shape[1] == observation_space.n:
+        return True
+    else:
+        raise ValueError(
+            f"Error: Unexpected observation shape {observation.shape} for MultiBinary "
+            + f"environment, please use ({observation_space.n},) or "
+            + f"(n_env, {observation_space.n}) for the observation shape."
+        )
+
+
+def is_vectorized_dict_observation(observation: np.ndarray, observation_space: gym.spaces.Dict) -> bool:
+    """
+    For dict observation type, detects and validates the shape,
+    then returns whether or not the observation is vectorized.
+
+    :param observation: the input observation to validate
+    :param observation_space: the observation space
+    :return: whether the given observation is vectorized or not
+    """
+    for key, subspace in observation_space.spaces.items():
+        if observation[key].shape == subspace.shape:
+            return False
-        elif len(observation.shape) == 2 and observation.shape[1] == observation_space.n:
-            return True
-        else:
-            raise ValueError(
-                f"Error: Unexpected observation shape {observation.shape} for MultiBinary "
-                + f"environment, please use ({observation_space.n},) or "
-                + f"(n_env, {observation_space.n}) for the observation shape."
-            )
-    elif isinstance(observation_space, gym.spaces.Dict):
-        for key, subspace in observation_space.spaces.items():
-            if observation[key].shape == subspace.shape:
-                return False
-
-        all_good = True
-
-        for key, subspace in observation_space.spaces.items():
-            if observation[key].shape[1:] != subspace.shape:
-                all_good = False
-                break
-
-        if all_good:
-            return True
-        else:
-            raise ValueError(
-                f"Error: Unexpected observation shape {observation.shape} for "
-                + f"Tuple environment, please use {(obs.shape for obs in observation_space.spaces)} "
-            )
+
+    all_good = True
+
+    for key, subspace in observation_space.spaces.items():
+        if observation[key].shape[1:] != subspace.shape:
+            all_good = False
+            break
+
+    if all_good:
+        return True
+    else:
+        # Build a readable mapping of the expected per-key shapes for the error message
+        # (formatting a generator expression would only print its repr)
+        expected_shapes = {key: subspace.shape for key, subspace in observation_space.spaces.items()}
+        raise ValueError(
+            "Error: Unexpected observation shapes for Dict environment, "
+            + f"please use {expected_shapes} or (n_env, ...) batched versions of these shapes."
+        )
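As a quick sanity check of the per-space helpers above, a minimal sketch (assuming ``gym`` and ``numpy`` are available and the helpers are in scope; the toy Dict space is made up for illustration):

.. code-block:: python

    import gym
    import numpy as np

    space = gym.spaces.Dict(
        {
            "vec": gym.spaces.Box(low=-1.0, high=1.0, shape=(5,)),
            "img": gym.spaces.Box(low=0, high=255, shape=(1, 20, 20)),
        }
    )
    # an unbatched observation matches the space shapes exactly
    single = {"vec": np.zeros((5,)), "img": np.zeros((1, 20, 20))}
    # a vectorized observation carries a leading n_env dimension on every key
    batched = {"vec": np.zeros((4, 5)), "img": np.zeros((4, 1, 20, 20))}

    assert not is_vectorized_dict_observation(single, space)
    assert is_vectorized_dict_observation(batched, space)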
+
+
+def is_vectorized_observation(observation: np.ndarray, observation_space: gym.spaces.Space) -> bool:
+    """
+    For every observation type, detects and validates the shape,
+    then returns whether or not the observation is vectorized.
+
+    :param observation: the input observation to validate
+    :param observation_space: the observation space
+    :return: whether the given observation is vectorized or not
+    """
+
+    is_vec_obs_func_dict = {
+        gym.spaces.Box: is_vectorized_box_observation,
+        gym.spaces.Discrete: is_vectorized_discrete_observation,
+        gym.spaces.MultiDiscrete: is_vectorized_multidiscrete_observation,
+        gym.spaces.MultiBinary: is_vectorized_multibinary_observation,
+        gym.spaces.Dict: is_vectorized_dict_observation,
+    }
+
+    try:
+        is_vec_obs_func = is_vec_obs_func_dict[type(observation_space)]
+        return is_vec_obs_func(observation, observation_space)
+    except KeyError:
         raise ValueError(
             "Error: Cannot determine if the observation is vectorized "
             + f" with the space type {observation_space}."
         )
diff --git a/stable_baselines3/common/vec_env/vec_frame_stack.py b/stable_baselines3/common/vec_env/vec_frame_stack.py
index 918b7837f..884c0e421 100644
--- a/stable_baselines3/common/vec_env/vec_frame_stack.py
+++ b/stable_baselines3/common/vec_env/vec_frame_stack.py
@@ -1,4 +1,3 @@
-import warnings
 from typing import Any, Dict, List, Optional, Tuple, Union

 import numpy as np
diff --git a/tests/test_dict_env.py b/tests/test_dict_env.py
index b11ec4759..573c4779e 100644
--- a/tests/test_dict_env.py
+++ b/tests/test_dict_env.py
@@ -2,7 +2,7 @@

 from stable_baselines3 import A2C, DQN, PPO, SAC, TD3
 from stable_baselines3.common.evaluation import evaluate_policy
-from stable_baselines3.common.multi_input_envs import NineRoomMultiObsEnv, SimpleMultiObsEnv
+from stable_baselines3.common.multi_input_envs import SimpleMultiObsEnv
 from stable_baselines3.common.vec_env import DummyVecEnv, VecFrameStack

From ce0f1a4fd2bc3bb016e2e9952747013a05d33b0d Mon Sep 17 00:00:00 2001
From: Jaden Travnik
Date: Mon, 14 Dec 2020 12:06:44 -0500
Subject: [PATCH 23/70] Created envs folder in common. Updated imports. Moved
 stacked_obs to vec_env folder

---
 docs/guide/algos.rst    | 3 ++-
 docs/guide/vec_envs.rst | 4 ++++
 docs/modules/her.rst    | 2 +-
 3 files changed, 7 insertions(+), 2 deletions(-)

diff --git a/docs/guide/algos.rst b/docs/guide/algos.rst
index 2ca362d98..aabf839c8 100644
--- a/docs/guide/algos.rst
+++ b/docs/guide/algos.rst
@@ -19,7 +19,8 @@ TD3 ✔️ ❌ ❌ ❌

 .. note::
-  Non-array spaces such as ``Dict`` or ``Tuple`` are not currently supported by any algorithm.
+  ``Tuple`` spaces are not supported by any algorithm; however, ``Dict`` spaces of ``Box`` spaces are.
+  ``Dict`` spaces containing other kinds of spaces (e.g., ``Discrete``) have not yet been explored.


 Actions ``gym.spaces``:
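To make the note above concrete: a first-level ``Dict`` of ``Box`` spaces is the supported shape of observation space (a sketch; the key names and shapes are illustrative, not part of the patch):

.. code-block:: python

    import numpy as np
    from gym import spaces

    # a first-level Dict of Box spaces, as supported by this PR
    observation_space = spaces.Dict(
        {
            "img": spaces.Box(low=0, high=255, shape=(1, 20, 20), dtype=np.uint8),
            "vec": spaces.Box(low=-np.inf, high=np.inf, shape=(5,), dtype=np.float32),
        }
    )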
diff --git a/docs/guide/vec_envs.rst b/docs/guide/vec_envs.rst
index 7a52a2754..f009359b6 100644
--- a/docs/guide/vec_envs.rst
+++ b/docs/guide/vec_envs.rst
@@ -63,6 +63,10 @@ VecFrameStack
 .. autoclass:: VecFrameStack
   :members:

+StackedObservations
+~~~~~~~~~~~~~
+.. autoclass:: StackedObservations
+   :members:

 VecNormalize
 ~~~~~~~~~~~~
diff --git a/docs/modules/her.rst b/docs/modules/her.rst
index 61a58cde1..457073eb8 100644
--- a/docs/modules/her.rst
+++ b/docs/modules/her.rst
@@ -54,7 +54,7 @@ Example

     from stable_baselines3 import HER, DDPG, DQN, SAC, TD3
     from stable_baselines3.her.goal_selection_strategy import GoalSelectionStrategy
-    from stable_baselines3.common.bit_flipping_env import BitFlippingEnv
+    from stable_baselines3.common.env.bit_flipping_env import BitFlippingEnv
     from stable_baselines3.common.vec_env import DummyVecEnv
     from stable_baselines3.common.vec_env.obs_dict_wrapper import ObsDictWrapper

From 9eee82ac678ce613b5f12f7642eca107c22d20dd Mon Sep 17 00:00:00 2001
From: Jaden Travnik
Date: Mon, 14 Dec 2020 17:36:58 -0500
Subject: [PATCH 24/70] Moved envs to envs directory. Moved stacked obs to
 vec_envs. Started update on documentation

---
 docs/common/envs.rst                          | 13 ++++
 docs/guide/examples.rst                       | 39 +++++++++++
 docs/guide/vec_envs.rst                       |  9 ++-
 docs/index.rst                                |  1 +
 stable_baselines3/common/envs/__init__.py     |  0
 .../common/{ => envs}/bit_flipping_env.py     |  0
 .../common/{ => envs}/multi_input_envs.py     |  0
 stable_baselines3/common/vec_env/__init__.py  |  1 +
 .../{ => vec_env}/stacked_observations.py     | 69 ++++++++++++-------
 .../common/vec_env/vec_frame_stack.py         |  2 +-
 tests/test_callbacks.py                       |  2 +-
 tests/test_dict_env.py                        |  2 +-
 tests/test_envs.py                            | 11 ++-
 tests/test_her.py                             | 22 ++++--
 14 files changed, 137 insertions(+), 34 deletions(-)
 create mode 100644 docs/common/envs.rst
 create mode 100644 stable_baselines3/common/envs/__init__.py
 rename stable_baselines3/common/{ => envs}/bit_flipping_env.py (100%)
 rename stable_baselines3/common/{ => envs}/multi_input_envs.py (100%)
 rename stable_baselines3/common/{ => vec_env}/stacked_observations.py (82%)

diff --git a/docs/common/envs.rst b/docs/common/envs.rst
new file mode 100644
index 000000000..8a03ec021
--- /dev/null
+++ b/docs/common/envs.rst
@@ -0,0 +1,13 @@
+.. _envs:
+
+.. automodule:: stable_baselines3.common.envs
+
+
+Custom Environments
+===================
+
+BitFlippingEnv
+--------------
+
+.. autoclass:: BitFlippingEnv
+    :members:
\ No newline at end of file
diff --git a/docs/guide/examples.rst b/docs/guide/examples.rst
index ed0414842..3b9c7e177 100644
--- a/docs/guide/examples.rst
+++ b/docs/guide/examples.rst
@@ -13,6 +13,7 @@ notebooks:
 - `All Notebooks `_
 - `Getting Started`_
 - `Training, Saving, Loading`_
+- `Dict Observations`_
 - `Multiprocessing`_
 - `Monitor Training and Plotting`_
 - `Atari Games`_
@@ -149,6 +150,44 @@ Multiprocessing: Unleashing the Power of Vectorized Environments
     env.render()


+Dict Observations
+-----------------
+
+You can use environments with dictionary observation spaces. This is useful in the case where one can't directly
+concatenate observations such as an image from a camera combined with a vector of servo sensor data (e.g., rotation angles).
+Stable Baselines provides SimpleMultiObsEnv as an example of this kind of setting.
+The environment is a simple grid world but the observations for each cell come in the form of dictionaries.
+These dictionaries are randomly initialized on the creation of the environment and contain a vector observation and an image observation.
+
+.. 
code-block:: python + + import gym + import numpy as np + + from stable_baselines3 import PPO + from stable_baselines3.common.vec_env import DummyVecEnv + from stable_baselines3.common.envs.multi_input_envs import SimpleMultiObsEnv + + if __name__ == '__main__': + + # Stable Baselines provides SimpleMultiObsEnv as an example environment with Dict observations + env = DummyVecEnv([lambda: SimpleMultiObsEnv(random_start=False) for i in range(10)]) + + model = PPO('MultiInputPolicy', env, verbose=1) + model.learn(total_timesteps=1e5) + env.close() + + env = DummyVecEnv([lambda: SimpleMultiObsEnv(random_start=False)]) + obs = env.reset() + + for _ in range(100): + action, _states = model.predict(obs) + obs, rewards, dones, info = env.step(action) + env.render() + if dones[0]: + print('Got to end') + break + Using Callback: Monitoring Training ----------------------------------- diff --git a/docs/guide/vec_envs.rst b/docs/guide/vec_envs.rst index f009359b6..1cdfcac7a 100644 --- a/docs/guide/vec_envs.rst +++ b/docs/guide/vec_envs.rst @@ -64,10 +64,17 @@ VecFrameStack :members: StackedObservations -~~~~~~~~~~~~~ +~~~~~~~~~~~~~~~~~~~ + .. autoclass:: StackedObservations :members: +StackedDictObservations +~~~~~~~~~~~~~~~~~~~~~~~ + +.. autoclass:: StackedDictObservations + :members: + VecNormalize ~~~~~~~~~~~~ diff --git a/docs/index.rst b/docs/index.rst index c60e5f397..bb1755742 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -72,6 +72,7 @@ Main Features common/atari_wrappers common/env_util + common/envs common/distributions common/evaluation common/env_checker diff --git a/stable_baselines3/common/envs/__init__.py b/stable_baselines3/common/envs/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/stable_baselines3/common/bit_flipping_env.py b/stable_baselines3/common/envs/bit_flipping_env.py similarity index 100% rename from stable_baselines3/common/bit_flipping_env.py rename to stable_baselines3/common/envs/bit_flipping_env.py diff --git a/stable_baselines3/common/multi_input_envs.py b/stable_baselines3/common/envs/multi_input_envs.py similarity index 100% rename from stable_baselines3/common/multi_input_envs.py rename to stable_baselines3/common/envs/multi_input_envs.py diff --git a/stable_baselines3/common/vec_env/__init__.py b/stable_baselines3/common/vec_env/__init__.py index 42f08da6d..8e5c97768 100644 --- a/stable_baselines3/common/vec_env/__init__.py +++ b/stable_baselines3/common/vec_env/__init__.py @@ -5,6 +5,7 @@ from stable_baselines3.common.vec_env.base_vec_env import CloudpickleWrapper, VecEnv, VecEnvWrapper from stable_baselines3.common.vec_env.dummy_vec_env import DummyVecEnv +from stable_baselines3.common.vec_env.stacked_observations import StackedDictObservations, StackedObservations from stable_baselines3.common.vec_env.subproc_vec_env import SubprocVecEnv from stable_baselines3.common.vec_env.vec_check_nan import VecCheckNan from stable_baselines3.common.vec_env.vec_frame_stack import VecFrameStack diff --git a/stable_baselines3/common/stacked_observations.py b/stable_baselines3/common/vec_env/stacked_observations.py similarity index 82% rename from stable_baselines3/common/stacked_observations.py rename to stable_baselines3/common/vec_env/stacked_observations.py index 355be0aa2..c6445f88e 100644 --- a/stable_baselines3/common/stacked_observations.py +++ b/stable_baselines3/common/vec_env/stacked_observations.py @@ -31,9 +31,12 @@ def __init__( ): self.n_stack = n_stack - self.channels_first, self.stack_dimension, self.stackedobs, self.repeat_axis = 
self.compute_stacking(
-            num_envs, n_stack, observation_space, channels_order
-        )
+        (
+            self.channels_first,
+            self.stack_dimension,
+            self.stackedobs,
+            self.repeat_axis,
+        ) = self.compute_stacking(num_envs, n_stack, observation_space, channels_order)
         super().__init__()

     @staticmethod
@@ -45,10 +48,11 @@ def compute_stacking(
     ) -> Tuple[bool, int, np.ndarray, int]:
         """
         Calculates the parameters in order to stack observations
-        :param num_envs:
-        :param n_stack:
-        :param observation_space:
-        :param channels_order:
+
+        :param num_envs: Number of environments in the stack
+        :param n_stack: The number of observations to stack
+        :param observation_space: The observation space
+        :param channels_order: The order of the channels
         :return: tuple of channels_first, stack_dimension, stackedobs, repeat_axis
         """
         channels_first = False
@@ -60,7 +64,10 @@
             # Default behavior for non-image space, stack on the last axis
             channels_first = False
         else:
-            assert channels_order in {"last", "first"}, "`channels_order` must be one of following: 'last', 'first'"
+            assert channels_order in {
+                "last",
+                "first",
+            }, "`channels_order` must be one of following: 'last', 'first'"

             channels_first = channels_order == "first"

@@ -74,7 +81,8 @@
     def stack_observation_space(self, observation_space: spaces.Box) -> spaces.Box:
         """
         Given an observation space, returns a new observation space with stacked observations
-        :return:
+
+        :return: New observation space with stacked dimensions
         """
         low = np.repeat(observation_space.low, self.n_stack, axis=self.repeat_axis)
         high = np.repeat(observation_space.high, self.n_stack, axis=self.repeat_axis)
@@ -84,8 +92,8 @@ def reset(self, observation: np.ndarray) -> np.ndarray:
         """
         Resets the stackedobs, adds the reset observation to the stack, and returns the stack

-        :param observation:
-        :return:
+        :param observation: Reset observation
+        :return: The stacked reset observation
         """
         self.stackedobs[...] = 0
         if self.channels_first:
@@ -103,9 +111,9 @@
         """
         Adds the observations to the stack and uses the dones to update the infos.

-        :param observations:
-        :param dones:
-        :param infos:
+        :param observations: numpy array of observations
+        :param dones: numpy array of done info
+        :param infos: list of info dicts
         :return: tuple of the stacked observations and the updated infos
         """
         stack_ax_size = observations.shape[self.stack_dimension]
@@ -175,7 +183,8 @@
     def stack_observation_space(self, observation_space: spaces.Dict) -> spaces.Dict:
         """
         Returns the stacked version of a Dict observation space
-        :param observation_space:
+
+        :param observation_space: Dict observation space to stack
         :return: stacked observation space
         """
         spaces_dict = {}
@@ -188,8 +197,9 @@ def stack_observation_space(self, observation_space: spaces.Dict) -> spaces.Dict
     def reset(self, observation: Dict[str, np.ndarray]) -> Dict[str, np.ndarray]:
         """
         Resets the stacked observations, adds the reset observation to the stack, and returns the stack
-        :param observation:
-        :return: stacked observations
+
+        :param observation: Reset observation
+        :return: Stacked reset observations
         """
         for key, obs in observation.items():
             self.stackedobs[key][...] = 0
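For context on the ``StackedDictObservations`` hunks above and below: frame stacking for ``Dict`` observations is driven through ``VecFrameStack``, which stacks each key on its own axis. A minimal sketch, assuming the module paths as of this commit:

.. code-block:: python

    from stable_baselines3.common.envs.multi_input_envs import SimpleMultiObsEnv
    from stable_baselines3.common.vec_env import DummyVecEnv, VecFrameStack

    env = DummyVecEnv([lambda: SimpleMultiObsEnv(random_start=True)])
    env = VecFrameStack(env, n_stack=2)  # uses StackedDictObservations internally

    obs = env.reset()
    # every key keeps its own stacking axis, e.g. the "vec" entry doubles in length
    for key, value in obs.items():
        print(key, value.shape)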
@@ -207,24 +217,37 @@
     ) -> Tuple[Dict[str, np.ndarray], List[Dict[str, Any]]]:
         """
         Adds the observations to the stack and uses the dones to update the infos.
-        :param observations:
-        :param dones:
-        :param infos:
+
+        :param observations: Dict of numpy arrays of observations
+        :param dones: numpy array of dones
+        :param infos: list of info dicts
         :return: tuple of the stacked observations and the updated infos
         """
         for key in self.stackedobs.keys():
             stack_ax_size = observations[key].shape[self.stack_dimension[key]]
-            self.stackedobs[key] = np.roll(self.stackedobs[key], shift=-stack_ax_size, axis=self.stack_dimension[key])
+            self.stackedobs[key] = np.roll(
+                self.stackedobs[key],
+                shift=-stack_ax_size,
+                axis=self.stack_dimension[key],
+            )

             for i, done in enumerate(dones):
                 if done:
                     if "terminal_observation" in infos[i]:
                         old_terminal = infos[i]["terminal_observation"][key]
                         if self.channels_first[key]:
-                            new_terminal = np.vstack((self.stackedobs[key][i, :-stack_ax_size, ...], old_terminal))
+                            new_terminal = np.vstack(
+                                (
+                                    self.stackedobs[key][i, :-stack_ax_size, ...],
+                                    old_terminal,
+                                )
+                            )
                         else:
                             new_terminal = np.concatenate(
-                                (self.stackedobs[key][i, ..., :-stack_ax_size], old_terminal),
+                                (
+                                    self.stackedobs[key][i, ..., :-stack_ax_size],
+                                    old_terminal,
+                                ),
                                 axis=self.stack_dimension[key],
                             )
                         infos[i]["terminal_observation"][key] = new_terminal
diff --git a/stable_baselines3/common/vec_env/vec_frame_stack.py b/stable_baselines3/common/vec_env/vec_frame_stack.py
index 884c0e421..1c717bd6d 100644
--- a/stable_baselines3/common/vec_env/vec_frame_stack.py
+++ b/stable_baselines3/common/vec_env/vec_frame_stack.py
@@ -3,8 +3,8 @@
 import numpy as np
 from gym import spaces

-from stable_baselines3.common.stacked_observations import StackedDictObservations, StackedObservations
 from stable_baselines3.common.vec_env.base_vec_env import VecEnv, VecEnvWrapper
+from stable_baselines3.common.vec_env.stacked_observations import StackedDictObservations, StackedObservations


 class VecFrameStack(VecEnvWrapper):
diff --git a/tests/test_callbacks.py b/tests/test_callbacks.py
index d86a4d62b..3bdcfe64b 100644
--- a/tests/test_callbacks.py
+++ b/tests/test_callbacks.py
@@ -6,7 +6,6 @@
 import pytest

 from stable_baselines3 import A2C, DDPG, DQN, HER, PPO, SAC, TD3
-from stable_baselines3.common.bit_flipping_env import BitFlippingEnv
 from stable_baselines3.common.callbacks import (
     CallbackList,
     CheckpointCallback,
@@ -16,6 +15,7 @@
     StopTrainingOnRewardThreshold,
 )
 from stable_baselines3.common.env_util import make_vec_env
+from stable_baselines3.common.envs.bit_flipping_env import BitFlippingEnv
 from stable_baselines3.common.vec_env import DummyVecEnv
 from stable_baselines3.common.vec_env.obs_dict_wrapper import ObsDictWrapper

diff --git a/tests/test_dict_env.py b/tests/test_dict_env.py
index 573c4779e..b0a5e97ac 100644
--- a/tests/test_dict_env.py
+++ b/tests/test_dict_env.py
@@ -1,8 +1,8 @@
 import pytest

 from stable_baselines3 import A2C, DQN, PPO, SAC, TD3
+from stable_baselines3.common.envs.multi_input_envs import SimpleMultiObsEnv
 from stable_baselines3.common.evaluation import evaluate_policy
-from stable_baselines3.common.multi_input_envs import SimpleMultiObsEnv
 from stable_baselines3.common.vec_env import DummyVecEnv, VecFrameStack


diff --git a/tests/test_envs.py b/tests/test_envs.py
index e5f6bf3a9..fc3724c73 100644
--- a/tests/test_envs.py
+++ b/tests/test_envs.py
@@ -3,8 +3,8 @@
 import pytest
 from gym import spaces

-from stable_baselines3.common.bit_flipping_env import BitFlippingEnv
 from stable_baselines3.common.env_checker import check_env
+from stable_baselines3.common.envs.bit_flipping_env import BitFlippingEnv
 from stable_baselines3.common.identity_env 
import ( FakeImageEnv, IdentityEnv, @@ -13,7 +13,14 @@ IdentityEnvMultiDiscrete, ) -ENV_CLASSES = [BitFlippingEnv, IdentityEnv, IdentityEnvBox, IdentityEnvMultiBinary, IdentityEnvMultiDiscrete, FakeImageEnv] +ENV_CLASSES = [ + BitFlippingEnv, + IdentityEnv, + IdentityEnvBox, + IdentityEnvMultiBinary, + IdentityEnvMultiDiscrete, + FakeImageEnv, +] @pytest.mark.parametrize("env_id", ["CartPole-v0", "Pendulum-v0"]) diff --git a/tests/test_her.py b/tests/test_her.py index 814a3500b..469a04951 100644 --- a/tests/test_her.py +++ b/tests/test_her.py @@ -9,7 +9,7 @@ import torch as th from stable_baselines3 import DDPG, DQN, HER, SAC, TD3 -from stable_baselines3.common.bit_flipping_env import BitFlippingEnv +from stable_baselines3.common.envs.bit_flipping_env import BitFlippingEnv from stable_baselines3.common.vec_env import DummyVecEnv from stable_baselines3.common.vec_env.obs_dict_wrapper import ObsDictWrapper from stable_baselines3.her.goal_selection_strategy import GoalSelectionStrategy @@ -44,7 +44,14 @@ def test_her(model_class, online_sampling): @pytest.mark.parametrize( "goal_selection_strategy", - ["final", "episode", "future", GoalSelectionStrategy.FINAL, GoalSelectionStrategy.EPISODE, GoalSelectionStrategy.FUTURE], + [ + "final", + "episode", + "future", + GoalSelectionStrategy.FINAL, + GoalSelectionStrategy.EPISODE, + GoalSelectionStrategy.FUTURE, + ], ) @pytest.mark.parametrize("online_sampling", [True, False]) def test_goal_selection_strategy(goal_selection_strategy, online_sampling): @@ -165,7 +172,10 @@ def test_save_load(tmp_path, model_class, use_sde, online_sampling): os.remove(tmp_path / "test_save.zip") -@pytest.mark.parametrize("online_sampling, truncate_last_trajectory", [(False, None), (True, True), (True, False)]) +@pytest.mark.parametrize( + "online_sampling, truncate_last_trajectory", + [(False, None), (True, True), (True, False)], +) def test_save_load_replay_buffer(tmp_path, recwarn, online_sampling, truncate_last_trajectory): """ Test if 'save_replay_buffer' and 'load_replay_buffer' works correctly @@ -222,10 +232,12 @@ def test_save_load_replay_buffer(tmp_path, recwarn, online_sampling, truncate_la model.replay_buffer.buffer["next_obs"][:n_episodes_stored], ) assert np.allclose( - old_replay_buffer.buffer["action"][:n_episodes_stored], model.replay_buffer.buffer["action"][:n_episodes_stored] + old_replay_buffer.buffer["action"][:n_episodes_stored], + model.replay_buffer.buffer["action"][:n_episodes_stored], ) assert np.allclose( - old_replay_buffer.buffer["reward"][:n_episodes_stored], model.replay_buffer.buffer["reward"][:n_episodes_stored] + old_replay_buffer.buffer["reward"][:n_episodes_stored], + model.replay_buffer.buffer["reward"][:n_episodes_stored], ) # we might change the last done of the last trajectory so we don't compare it assert np.allclose( From c3d2138a4af74ad2bf04447167cbda55d710229c Mon Sep 17 00:00:00 2001 From: Antonin RAFFIN Date: Wed, 16 Dec 2020 16:12:35 +0100 Subject: [PATCH 25/70] Fixes --- docs/common/envs.rst | 20 +++++++++- docs/guide/examples.rst | 38 ++++++++----------- docs/guide/vec_envs.rst | 4 +- docs/misc/changelog.rst | 1 + docs/modules/her.rst | 2 +- setup.cfg | 1 + stable_baselines3/common/envs/__init__.py | 2 + .../common/envs/multi_input_envs.py | 8 +++- tests/test_callbacks.py | 2 +- tests/test_envs.py | 2 +- tests/test_her.py | 2 +- 11 files changed, 51 insertions(+), 31 deletions(-) diff --git a/docs/common/envs.rst b/docs/common/envs.rst index 8a03ec021..1a2835eb8 100644 --- a/docs/common/envs.rst +++ b/docs/common/envs.rst 
@@ -3,11 +3,29 @@

 .. automodule:: stable_baselines3.common.envs

+
 Custom Environments
 ===================

+Those environments were created for testing purposes.
+
+
 BitFlippingEnv
 --------------

 .. autoclass:: BitFlippingEnv
-    :members:
\ No newline at end of file
+    :members:
+
+
+SimpleMultiObsEnv
+-----------------
+
+.. autoclass:: SimpleMultiObsEnv
+    :members:
+
+
+NineRoomMultiObsEnv
+-------------------
+
+.. autoclass:: NineRoomMultiObsEnv
+    :members:
diff --git a/docs/guide/examples.rst b/docs/guide/examples.rst
index 3b9c7e177..a8f7205e5 100644
--- a/docs/guide/examples.rst
+++ b/docs/guide/examples.rst
@@ -13,7 +13,6 @@ notebooks:
 - `All Notebooks `_
 - `Getting Started`_
 - `Training, Saving, Loading`_
-- `Dict Observations`_
 - `Multiprocessing`_
 - `Monitor Training and Plotting`_
 - `Atari Games`_
@@ -153,40 +152,33 @@ Multiprocessing: Unleashing the Power of Vectorized Environments
 Dict Observations
 -----------------

-You can use environments with dictionary observation spaces. This is useful in the case where one can't directly
+You can use environments with dictionary observation spaces. This is useful in the case where one can't directly
 concatenate observations such as an image from a camera combined with a vector of servo sensor data (e.g., rotation angles).
-Stable Baselines provides SimpleMultiObsEnv as an example of this kind of setting.
+Stable Baselines3 provides ``SimpleMultiObsEnv`` as an example of this kind of setting.
 The environment is a simple grid world but the observations for each cell come in the form of dictionaries.
 These dictionaries are randomly initialized on the creation of the environment and contain a vector observation and an image observation.

 .. code-block:: python

-    import gym
-    import numpy as np
-
     from stable_baselines3 import PPO
-    from stable_baselines3.common.vec_env import DummyVecEnv
-    from stable_baselines3.common.envs.multi_input_envs import SimpleMultiObsEnv
+    from stable_baselines3.common.envs import SimpleMultiObsEnv

-    if __name__ == '__main__':

-        # Stable Baselines provides SimpleMultiObsEnv as an example environment with Dict observations
-        env = DummyVecEnv([lambda: SimpleMultiObsEnv(random_start=False) for i in range(10)])
+    # Stable Baselines provides SimpleMultiObsEnv as an example environment with Dict observations
+    env = SimpleMultiObsEnv(random_start=False)

-        model = PPO('MultiInputPolicy', env, verbose=1)
-        model.learn(total_timesteps=1e5)
-        env.close()
+    model = PPO("MultiInputPolicy", env, verbose=1)
+    model.learn(total_timesteps=1e5)

-        env = DummyVecEnv([lambda: SimpleMultiObsEnv(random_start=False)])
-        obs = env.reset()
+    obs = env.reset()

-        for _ in range(100):
-            action, _states = model.predict(obs)
-            obs, rewards, dones, info = env.step(action)
-            env.render()
-            if dones[0]:
-                print('Got to end')
-                break
+    for _ in range(100):
+        action, _states = model.predict(obs)
+        obs, rewards, dones, info = env.step(action)
+        env.render()
+        if dones[0]:
+            print("Got to end")
+            break


 Using Callback: Monitoring Training
 -----------------------------------
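A possible follow-up snippet for the docs example above (not part of this patch; it assumes the ``model`` and ``env`` from the example and the existing ``evaluate_policy`` helper):

.. code-block:: python

    from stable_baselines3.common.evaluation import evaluate_policy

    # quick check of the trained policy
    mean_reward, std_reward = evaluate_policy(model, env, n_eval_episodes=5, warn=False)
    print(f"mean_reward={mean_reward:.2f} +/- {std_reward:.2f}")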
diff --git a/docs/guide/vec_envs.rst b/docs/guide/vec_envs.rst
index 1cdfcac7a..f7be23332 100644
--- a/docs/guide/vec_envs.rst
+++ b/docs/guide/vec_envs.rst
@@ -66,13 +66,13 @@ VecFrameStack
 StackedObservations
 ~~~~~~~~~~~~~~~~~~~

-.. autoclass:: StackedObservations
+.. autoclass:: stable_baselines3.common.vec_env.stacked_observations.StackedObservations
    :members:

 StackedDictObservations
 ~~~~~~~~~~~~~~~~~~~~~~~

-.. autoclass:: StackedDictObservations
+.. autoclass:: stable_baselines3.common.vec_env.stacked_observations.StackedDictObservations
    :members:

 VecNormalize
diff --git a/docs/misc/changelog.rst b/docs/misc/changelog.rst
index 1bbff3e6e..a686cabf1 100644
--- a/docs/misc/changelog.rst
+++ b/docs/misc/changelog.rst
@@ -12,6 +12,7 @@ Breaking Changes:
   this allows to return the unnormalized reward in the case of Atari games for instance.
 - Renamed ``common.vec_env.is_wrapped`` to ``common.vec_env.is_vecenv_wrapped`` to avoid confusion
   with the new ``is_wrapped()`` helper
+- All custom environments (e.g. the ``BitFlippingEnv``) were moved to the ``stable_baselines3.common.envs`` folder

 New Features:
 ^^^^^^^^^^^^^
diff --git a/docs/modules/her.rst b/docs/modules/her.rst
index 457073eb8..2d564d2a8 100644
--- a/docs/modules/her.rst
+++ b/docs/modules/her.rst
@@ -54,7 +54,7 @@ Example

     from stable_baselines3 import HER, DDPG, DQN, SAC, TD3
     from stable_baselines3.her.goal_selection_strategy import GoalSelectionStrategy
-    from stable_baselines3.common.env.bit_flipping_env import BitFlippingEnv
+    from stable_baselines3.common.envs import BitFlippingEnv
     from stable_baselines3.common.vec_env import DummyVecEnv
     from stable_baselines3.common.vec_env.obs_dict_wrapper import ObsDictWrapper

diff --git a/setup.cfg b/setup.cfg
index 4b5d43918..1f412536d 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -26,6 +26,7 @@ ignore = W503,W504,E203,E231 # line breaks before and after binary operators
 per-file-ignores =
     ./stable_baselines3/__init__.py:F401
     ./stable_baselines3/common/__init__.py:F401
+    ./stable_baselines3/common/envs/__init__.py:F401
     ./stable_baselines3/a2c/__init__.py:F401
     ./stable_baselines3/ddpg/__init__.py:F401
     ./stable_baselines3/dqn/__init__.py:F401
diff --git a/stable_baselines3/common/envs/__init__.py b/stable_baselines3/common/envs/__init__.py
index e69de29bb..04c90fbd0 100644
--- a/stable_baselines3/common/envs/__init__.py
+++ b/stable_baselines3/common/envs/__init__.py
@@ -0,0 +1,2 @@
+from stable_baselines3.common.envs.bit_flipping_env import BitFlippingEnv
+from stable_baselines3.common.envs.multi_input_envs import NineRoomMultiObsEnv, SimpleMultiObsEnv
diff --git a/stable_baselines3/common/envs/multi_input_envs.py b/stable_baselines3/common/envs/multi_input_envs.py
index 7b1d905df..e67f1153d 100644
--- a/stable_baselines3/common/envs/multi_input_envs.py
+++ b/stable_baselines3/common/envs/multi_input_envs.py
@@ -8,8 +8,9 @@

 class SimpleMultiObsEnv(gym.Env):
     """
-    Base class for GridWorld-based MultiObs Environments 4x4 grid world
+    Base class for GridWorld-based MultiObs Environments 4x4 grid world.
+
+    .. code-block:: text

         ____________
        | 0  1  2   3|
        | 4|¯5¯ ¯6¯| 7|
        | 8|_9_10_|11|
        |12 13  14 15|
        ¯¯¯¯¯¯¯¯¯¯¯¯¯¯
+
     start is 0
     states 5, 6, 9, and 10 are blocked
     goal is 15
@@ -207,6 +209,9 @@ def reset(self) -> Dict[str, np.ndarray]:

 class NineRoomMultiObsEnv(SimpleMultiObsEnv):
     """
     Extension of the SimpleMultiObsEnv to a 9 room grid world
+
+    .. 
code-block:: text + ____________________________________ | 0 1 2 | 3 4 5 | 6 7 8 | | 9 10 11 12 13 14 15 16 17 | @@ -220,6 +225,7 @@ class NineRoomMultiObsEnv(SimpleMultiObsEnv): | 63 64 65 66 67 68 69 70 71 | | 72 73 74 | 75 76 77 | 78 79 80 | ¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯ + :param random_start: If true, agent starts in random position :param noise: Noise added to the observations """ diff --git a/tests/test_callbacks.py b/tests/test_callbacks.py index 3bdcfe64b..c5d453a12 100644 --- a/tests/test_callbacks.py +++ b/tests/test_callbacks.py @@ -15,7 +15,7 @@ StopTrainingOnRewardThreshold, ) from stable_baselines3.common.env_util import make_vec_env -from stable_baselines3.common.envs.bit_flipping_env import BitFlippingEnv +from stable_baselines3.common.envs import BitFlippingEnv from stable_baselines3.common.vec_env import DummyVecEnv from stable_baselines3.common.vec_env.obs_dict_wrapper import ObsDictWrapper diff --git a/tests/test_envs.py b/tests/test_envs.py index fc3724c73..2dcbeed66 100644 --- a/tests/test_envs.py +++ b/tests/test_envs.py @@ -4,7 +4,7 @@ from gym import spaces from stable_baselines3.common.env_checker import check_env -from stable_baselines3.common.envs.bit_flipping_env import BitFlippingEnv +from stable_baselines3.common.envs import BitFlippingEnv from stable_baselines3.common.identity_env import ( FakeImageEnv, IdentityEnv, diff --git a/tests/test_her.py b/tests/test_her.py index 469a04951..382d84ee9 100644 --- a/tests/test_her.py +++ b/tests/test_her.py @@ -9,7 +9,7 @@ import torch as th from stable_baselines3 import DDPG, DQN, HER, SAC, TD3 -from stable_baselines3.common.envs.bit_flipping_env import BitFlippingEnv +from stable_baselines3.common.envs import BitFlippingEnv from stable_baselines3.common.vec_env import DummyVecEnv from stable_baselines3.common.vec_env.obs_dict_wrapper import ObsDictWrapper from stable_baselines3.her.goal_selection_strategy import GoalSelectionStrategy From 935cef9c175b986cbfabf7c40bbe11b93ba747ee Mon Sep 17 00:00:00 2001 From: Jaden Travnik Date: Mon, 4 Jan 2021 11:48:27 -0500 Subject: [PATCH 26/70] Running code style --- .../common/off_policy_algorithm.py | 61 ++++--------------- stable_baselines3/dqn/dqn.py | 6 +- 2 files changed, 14 insertions(+), 53 deletions(-) diff --git a/stable_baselines3/common/off_policy_algorithm.py b/stable_baselines3/common/off_policy_algorithm.py index 4bfc7e4f9..99e9ec30f 100644 --- a/stable_baselines3/common/off_policy_algorithm.py +++ b/stable_baselines3/common/off_policy_algorithm.py @@ -15,12 +15,7 @@ from stable_baselines3.common.noise import ActionNoise from stable_baselines3.common.policies import BasePolicy from stable_baselines3.common.save_util import load_from_pkl, save_to_pkl -from stable_baselines3.common.type_aliases import ( - GymEnv, - MaybeCallback, - RolloutReturn, - Schedule, -) +from stable_baselines3.common.type_aliases import GymEnv, MaybeCallback, RolloutReturn, Schedule from stable_baselines3.common.utils import safe_mean from stable_baselines3.common.vec_env import VecEnv @@ -163,11 +158,7 @@ def _setup_model(self) -> None: self._setup_lr_schedule() self.set_random_seed(self.seed) - BUFFER_CLS = ( - DictReplayBuffer - if isinstance(self.observation_space, gym.spaces.Dict) - else ReplayBuffer - ) + BUFFER_CLS = DictReplayBuffer if isinstance(self.observation_space, gym.spaces.Dict) else ReplayBuffer self.replay_buffer = BUFFER_CLS( self.buffer_size, @@ -185,9 +176,7 @@ def _setup_model(self) -> None: ) self.policy = self.policy.to(self.device) - def save_replay_buffer( - 
self, path: Union[str, pathlib.Path, io.BufferedIOBase] - ) -> None: + def save_replay_buffer(self, path: Union[str, pathlib.Path, io.BufferedIOBase]) -> None: """ Save the replay buffer as a pickle file. @@ -197,18 +186,14 @@ def save_replay_buffer( assert self.replay_buffer is not None, "The replay buffer is not defined" save_to_pkl(path, self.replay_buffer, self.verbose) - def load_replay_buffer( - self, path: Union[str, pathlib.Path, io.BufferedIOBase] - ) -> None: + def load_replay_buffer(self, path: Union[str, pathlib.Path, io.BufferedIOBase]) -> None: """ Load a replay buffer from a pickle file. :param path: Path to the pickled replay buffer. """ self.replay_buffer = load_from_pkl(path, self.verbose) - assert isinstance( - self.replay_buffer, ReplayBuffer - ), "The replay buffer must inherit from ReplayBuffer class" + assert isinstance(self.replay_buffer, ReplayBuffer), "The replay buffer must inherit from ReplayBuffer class" def _setup_learn( self, @@ -301,11 +286,7 @@ def learn( if self.num_timesteps > 0 and self.num_timesteps > self.learning_starts: # If no `gradient_steps` is specified, # do as many gradients steps as steps performed during the rollout - gradient_steps = ( - self.gradient_steps - if self.gradient_steps > 0 - else rollout.episode_timesteps - ) + gradient_steps = self.gradient_steps if self.gradient_steps > 0 else rollout.episode_timesteps self.train(batch_size=self.batch_size, gradient_steps=gradient_steps) callback.on_training_end() @@ -337,9 +318,7 @@ def _sample_action( The two differs when the action space is not normalized (bounds are not [-1, 1]). """ # Select action randomly or according to policy - if self.num_timesteps < learning_starts and not ( - self.use_sde and self.use_sde_at_warmup - ): + if self.num_timesteps < learning_starts and not (self.use_sde and self.use_sde_at_warmup): # Warmup phase unscaled_action = np.array([self.action_space.sample()]) else: @@ -448,18 +427,12 @@ def collect_rollouts( while not done: - if ( - self.use_sde - and self.sde_sample_freq > 0 - and total_steps % self.sde_sample_freq == 0 - ): + if self.use_sde and self.sde_sample_freq > 0 and total_steps % self.sde_sample_freq == 0: # Sample a new noise matrix self.actor.reset_noise() # Select action randomly or according to policy - action, buffer_action = self._sample_action( - learning_starts, action_noise - ) + action, buffer_action = self._sample_action(learning_starts, action_noise) # Rescale and perform action new_obs, reward, done, infos = env.step(action) @@ -472,9 +445,7 @@ def collect_rollouts( callback.update_locals(locals()) # Only stop training if return value is False, not when it is None. 
if callback.on_step() is False: - return RolloutReturn( - 0.0, total_steps, total_episodes, continue_training=False - ) + return RolloutReturn(0.0, total_steps, total_episodes, continue_training=False) episode_reward += reward @@ -495,18 +466,14 @@ def collect_rollouts( reward, ) - replay_buffer.add( - self._last_original_obs, new_obs_, buffer_action, reward_, done - ) + replay_buffer.add(self._last_original_obs, new_obs_, buffer_action, reward_, done) self._last_obs = new_obs # Save the unnormalized observation if self._vec_normalize_env is not None: self._last_original_obs = new_obs_ - self._update_current_progress_remaining( - self.num_timesteps, self._total_timesteps - ) + self._update_current_progress_remaining(self.num_timesteps, self._total_timesteps) # For DQN, check if the target network should be updated # and update the exploration schedule @@ -534,6 +501,4 @@ def collect_rollouts( callback.on_rollout_end() - return RolloutReturn( - mean_reward, total_steps, total_episodes, continue_training - ) + return RolloutReturn(mean_reward, total_steps, total_episodes, continue_training) diff --git a/stable_baselines3/dqn/dqn.py b/stable_baselines3/dqn/dqn.py index 9f13d4998..032f89558 100644 --- a/stable_baselines3/dqn/dqn.py +++ b/stable_baselines3/dqn/dqn.py @@ -8,11 +8,7 @@ from stable_baselines3.common import logger from stable_baselines3.common.off_policy_algorithm import OffPolicyAlgorithm from stable_baselines3.common.type_aliases import GymEnv, MaybeCallback, Schedule -from stable_baselines3.common.utils import ( - get_linear_fn, - is_vectorized_observation, - polyak_update, -) +from stable_baselines3.common.utils import get_linear_fn, is_vectorized_observation, polyak_update from stable_baselines3.dqn.policies import DQNPolicy From a07497bbeb7fa58616da0c5b20d7156ca63e6db6 Mon Sep 17 00:00:00 2001 From: "Anssi \"Miffyli\" Kanervisto" Date: Wed, 6 Jan 2021 17:59:34 +0200 Subject: [PATCH 27/70] Update docstrings on torch_layers --- stable_baselines3/common/torch_layers.py | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/stable_baselines3/common/torch_layers.py b/stable_baselines3/common/torch_layers.py index 7ddfda136..e38abdc0d 100644 --- a/stable_baselines3/common/torch_layers.py +++ b/stable_baselines3/common/torch_layers.py @@ -231,16 +231,18 @@ def forward(self, features: th.Tensor) -> Tuple[th.Tensor, th.Tensor]: class CombinedExtractor(BaseFeaturesExtractor): """ Combined feature extractor for Dict observation spaces. - Builds a feature extractor for each key of the space + Builds a feature extractor for each key of the space. Input from each space + is fed through a separate submodule (CNN or MLP, depending on input shape), + the output features are concatenated and fed through additional MLP network ("combined"). :param observation_space: :param features_dim: Number of features extracted. This corresponds to the number of unit for the last layer. 
- :param cnn_output_dim: Number of features to output from each cnn submodule - :param mlp_output_dim: Number of features to output from each mlp submodule - :param mlp_net_arch: Architecture of each mlp network module - :param activation_fn: The activation function to use within each mlp - :param comb_net_arch: Architecture of the combined network module which calculates the final feature extracted + :param cnn_output_dim: Number of features to output from each CNN submodule(s) + :param mlp_output_dim: Number of features to output from each MLP submodule(s) + :param mlp_net_arch: Architecture of each MLP network module + :param activation_fn: The activation function used in all MLP submodules and combined network + :param combined_net_arch: Architecture of the combined network module which calculates the final feature extracted """ def __init__( @@ -251,7 +253,7 @@ def __init__( mlp_output_dim: int = 64, mlp_net_arch: List[int] = [64, 64], activation_fn: Type[nn.Module] = nn.ReLU, - comb_net_arch: List[int] = [64, 64], + combined_net_arch: List[int] = [64, 64], ): super(CombinedExtractor, self).__init__(observation_space, features_dim=features_dim) @@ -290,7 +292,7 @@ def __init__( self.extractors = nn.ModuleDict(extractors) self.combined = nn.Sequential( - *create_mlp(total_concat_size, features_dim, comb_net_arch, activation_fn, squash_output=False) + *create_mlp(total_concat_size, features_dim, combined_net_arch, activation_fn, squash_output=False) ) def forward(self, observations: TensorDict) -> th.Tensor: From 96d1e646bb602493c0e3743b5500f1139f1f3b23 Mon Sep 17 00:00:00 2001 From: "Anssi \"Miffyli\" Kanervisto" Date: Wed, 6 Jan 2021 18:02:29 +0200 Subject: [PATCH 28/70] Decapitalize non-constant variables --- stable_baselines3/common/off_policy_algorithm.py | 4 ++-- stable_baselines3/common/on_policy_algorithm.py | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/stable_baselines3/common/off_policy_algorithm.py b/stable_baselines3/common/off_policy_algorithm.py index 99e9ec30f..7abd53c76 100644 --- a/stable_baselines3/common/off_policy_algorithm.py +++ b/stable_baselines3/common/off_policy_algorithm.py @@ -158,9 +158,9 @@ def _setup_model(self) -> None: self._setup_lr_schedule() self.set_random_seed(self.seed) - BUFFER_CLS = DictReplayBuffer if isinstance(self.observation_space, gym.spaces.Dict) else ReplayBuffer + buffer_cls = DictReplayBuffer if isinstance(self.observation_space, gym.spaces.Dict) else ReplayBuffer - self.replay_buffer = BUFFER_CLS( + self.replay_buffer = buffer_cls( self.buffer_size, self.observation_space, self.action_space, diff --git a/stable_baselines3/common/on_policy_algorithm.py b/stable_baselines3/common/on_policy_algorithm.py index cae1de4ad..296de596f 100644 --- a/stable_baselines3/common/on_policy_algorithm.py +++ b/stable_baselines3/common/on_policy_algorithm.py @@ -105,9 +105,9 @@ def _setup_model(self) -> None: self._setup_lr_schedule() self.set_random_seed(self.seed) - BUFFER_CLS = DictRolloutBuffer if isinstance(self.observation_space, gym.spaces.Dict) else RolloutBuffer + buffer_cls = DictRolloutBuffer if isinstance(self.observation_space, gym.spaces.Dict) else RolloutBuffer - self.rollout_buffer = BUFFER_CLS( + self.rollout_buffer = buffer_cls( self.n_steps, self.observation_space, self.action_space, From 245f4abeb130c6fc7abac585acd45cbdbb8b9c36 Mon Sep 17 00:00:00 2001 From: Jaden Travnik Date: Wed, 6 Jan 2021 11:16:29 -0500 Subject: [PATCH 29/70] Using NatureCNN architecture in combined extractor. 
Increasing img size in multi input env. Adding memory reduction in test --- .../common/envs/multi_input_envs.py | 4 ++-- stable_baselines3/common/torch_layers.py | 20 +++++++++++++++++-- tests/test_dict_env.py | 13 ++++++++++-- 3 files changed, 31 insertions(+), 6 deletions(-) diff --git a/stable_baselines3/common/envs/multi_input_envs.py b/stable_baselines3/common/envs/multi_input_envs.py index e67f1153d..bd435adf4 100644 --- a/stable_baselines3/common/envs/multi_input_envs.py +++ b/stable_baselines3/common/envs/multi_input_envs.py @@ -43,7 +43,7 @@ def __init__( super(SimpleMultiObsEnv, self).__init__() self.vector_size = 5 - self.img_size = [1, 20, 20] + self.img_size = [1, 64, 64] self.random_start = random_start self.discrete_actions = discrete_actions @@ -74,7 +74,7 @@ def random_upsample_img( self, value_range: Tuple[int, int] = (0, 255), initial_size: Tuple[int, int] = (4, 4), - up_size: Tuple[int, int] = (20, 20), + up_size: Tuple[int, int] = (64, 64), ) -> np.ndarray: """ Generated a random image and upsample it. diff --git a/stable_baselines3/common/torch_layers.py b/stable_baselines3/common/torch_layers.py index 7ddfda136..14dde8fdc 100644 --- a/stable_baselines3/common/torch_layers.py +++ b/stable_baselines3/common/torch_layers.py @@ -262,11 +262,15 @@ def __init__( if is_image_space(subspace): n_input_channels = subspace.shape[0] + + # Nature CNN cnn = nn.Sequential( nn.Conv2d(n_input_channels, 32, kernel_size=8, stride=4, padding=0), nn.ReLU(), nn.Conv2d(32, 64, kernel_size=4, stride=2, padding=0), nn.ReLU(), + nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=0), + nn.ReLU(), nn.Flatten(), ) @@ -282,7 +286,13 @@ def __init__( else: extractors[key] = nn.Sequential( - *create_mlp(subspace.shape[0], mlp_output_dim, mlp_net_arch, activation_fn, squash_output=False) + *create_mlp( + subspace.shape[0], + mlp_output_dim, + mlp_net_arch, + activation_fn, + squash_output=False, + ) ) total_concat_size += mlp_output_dim @@ -290,7 +300,13 @@ def __init__( self.extractors = nn.ModuleDict(extractors) self.combined = nn.Sequential( - *create_mlp(total_concat_size, features_dim, comb_net_arch, activation_fn, squash_output=False) + *create_mlp( + total_concat_size, + features_dim, + comb_net_arch, + activation_fn, + squash_output=False, + ) ) def forward(self, observations: TensorDict) -> th.Tensor: diff --git a/tests/test_dict_env.py b/tests/test_dict_env.py index b0a5e97ac..9c6cf204f 100644 --- a/tests/test_dict_env.py +++ b/tests/test_dict_env.py @@ -18,8 +18,17 @@ def test_dict_spaces(model_class): kwargs = {} n_steps = 250 - if model_class == DQN: - kwargs = dict(learning_starts=0) + if model_class in {A2C, PPO}: + kwargs = dict(n_steps=100) + else: + # Avoid memory error when using replay buffer + # Reduce the size of the features + kwargs = dict( + buffer_size=250, + policy_kwargs=dict(features_extractor_kwargs=dict(features_dim=32)), + ) + if model_class == DQN: + kwargs["learning_starts"] = 0 model = model_class("MultiInputPolicy", env, gamma=0.5, seed=1, **kwargs) From 4dc16253c9c8235f70fee6ba6a252aa0fdfc4418 Mon Sep 17 00:00:00 2001 From: Antonin RAFFIN Date: Fri, 8 Jan 2021 11:41:48 +0100 Subject: [PATCH 30/70] Update doc --- README.md | 1 + docs/guide/examples.rst | 10 ---------- docs/modules/a2c.rst | 8 ++++++++ docs/modules/ppo.rst | 8 ++++++++ stable_baselines3/a2c/__init__.py | 2 +- stable_baselines3/common/env_checker.py | 2 ++ stable_baselines3/ppo/__init__.py | 2 +- 7 files changed, 21 insertions(+), 12 deletions(-) diff --git a/README.md b/README.md index 
600c5da31..fb57e401a 100644 --- a/README.md +++ b/README.md @@ -30,6 +30,7 @@ you can take a look at the issues [#48](https://github.com/DLR-RM/stable-baselin | Custom environments | :heavy_check_mark: | | Custom policies | :heavy_check_mark: | | Common interface | :heavy_check_mark: | +| `Dict` observation space support | :heavy_check_mark: | | Ipython / Notebook friendly | :heavy_check_mark: | | Tensorboard support | :heavy_check_mark: | | PEP8 code style | :heavy_check_mark: | diff --git a/docs/guide/examples.rst b/docs/guide/examples.rst index a8f7205e5..8fe943a44 100644 --- a/docs/guide/examples.rst +++ b/docs/guide/examples.rst @@ -170,16 +170,6 @@ These dictionaries are randomly initilaized on the creation of the environment a model = PPO("MultiInputPolicy", env, verbose=1) model.learn(total_timesteps=1e5) - obs = env.reset() - - for _ in range(100): - action, _states = model.predict(obs) - obs, rewards, dones, info = env.step(action) - env.render() - if dones[0]: - print("Got to end") - break - Using Callback: Monitoring Training ----------------------------------- diff --git a/docs/modules/a2c.rst b/docs/modules/a2c.rst index 4c2504085..2913f5c69 100644 --- a/docs/modules/a2c.rst +++ b/docs/modules/a2c.rst @@ -40,6 +40,7 @@ Discrete ✔️ ✔️ Box ✔️ ✔️ MultiDiscrete ✔️ ✔️ MultiBinary ✔️ ✔️ +Dict ❌ ✔️ ============= ====== =========== @@ -164,3 +165,10 @@ A2C Policies .. autoclass:: stable_baselines3.common.policies.ActorCriticCnnPolicy :members: :noindex: + +.. autoclass:: MultiInputPolicy + :members: + +.. autoclass:: stable_baselines3.common.policies.MultiInputActorCriticPolicy + :members: + :noindex: diff --git a/docs/modules/ppo.rst b/docs/modules/ppo.rst index 091323a25..38d0abffd 100644 --- a/docs/modules/ppo.rst +++ b/docs/modules/ppo.rst @@ -42,6 +42,7 @@ Discrete ✔️ ✔️ Box ✔️ ✔️ MultiDiscrete ✔️ ✔️ MultiBinary ✔️ ✔️ +Dict ❌ ✔️ ============= ====== =========== Example @@ -165,3 +166,10 @@ PPO Policies .. autoclass:: stable_baselines3.common.policies.ActorCriticCnnPolicy :members: :noindex: + +.. autoclass:: MultiInputPolicy + :members: + +.. 
autoclass:: stable_baselines3.common.policies.MultiInputActorCriticPolicy + :members: + :noindex: diff --git a/stable_baselines3/a2c/__init__.py b/stable_baselines3/a2c/__init__.py index e6aeda5bb..7e9996494 100644 --- a/stable_baselines3/a2c/__init__.py +++ b/stable_baselines3/a2c/__init__.py @@ -1,2 +1,2 @@ from stable_baselines3.a2c.a2c import A2C -from stable_baselines3.a2c.policies import CnnPolicy, MlpPolicy +from stable_baselines3.a2c.policies import CnnPolicy, MlpPolicy, MultiInputPolicy diff --git a/stable_baselines3/common/env_checker.py b/stable_baselines3/common/env_checker.py index 262722fd1..cb2d1a7f6 100644 --- a/stable_baselines3/common/env_checker.py +++ b/stable_baselines3/common/env_checker.py @@ -48,6 +48,8 @@ def _check_image_input(observation_space: spaces.Box) -> None: def _check_unsupported_spaces(env: gym.Env, observation_space: spaces.Space, action_space: spaces.Space) -> None: """Emit warnings when the observation space or action space used is not supported by Stable-Baselines.""" + # TODO(antonin): no need for that warning but check that it is a first level dict + # also tell the user to convert tuple to dict obs space if isinstance(observation_space, spaces.Dict) and not isinstance(env, gym.GoalEnv): warnings.warn( "The observation space is a Dict but the environment is not a gym.GoalEnv " diff --git a/stable_baselines3/ppo/__init__.py b/stable_baselines3/ppo/__init__.py index c5b80937c..e5c23fc9c 100644 --- a/stable_baselines3/ppo/__init__.py +++ b/stable_baselines3/ppo/__init__.py @@ -1,2 +1,2 @@ -from stable_baselines3.ppo.policies import CnnPolicy, MlpPolicy +from stable_baselines3.ppo.policies import CnnPolicy, MlpPolicy, MultiInputPolicy from stable_baselines3.ppo.ppo import PPO From 57c1926deacd3b0a413a162b2bfd34cb23d319ec Mon Sep 17 00:00:00 2001 From: Antonin RAFFIN Date: Fri, 8 Jan 2021 14:32:12 +0100 Subject: [PATCH 31/70] Update doc --- docs/modules/ddpg.rst | 5 +++++ docs/modules/dqn.rst | 12 ++++++------ docs/modules/sac.rst | 4 ++++ docs/modules/td3.rst | 5 +++++ stable_baselines3/common/torch_layers.py | 5 +++-- stable_baselines3/ddpg/__init__.py | 2 +- stable_baselines3/ddpg/policies.py | 2 +- stable_baselines3/dqn/__init__.py | 2 +- stable_baselines3/sac/__init__.py | 2 +- stable_baselines3/sac/policies.py | 6 +++--- stable_baselines3/td3/__init__.py | 2 +- tests/test_dict_env.py | 8 ++++---- 12 files changed, 35 insertions(+), 20 deletions(-) diff --git a/docs/modules/ddpg.rst b/docs/modules/ddpg.rst index c14f5da20..118108ce5 100644 --- a/docs/modules/ddpg.rst +++ b/docs/modules/ddpg.rst @@ -23,6 +23,7 @@ trick for DQN with the deterministic policy gradient, to obtain an algorithm for MlpPolicy CnnPolicy + MultiInputPolicy Notes @@ -49,6 +50,7 @@ Discrete ❌ ✔️ Box ✔️ ✔️ MultiDiscrete ❌ ✔️ MultiBinary ❌ ✔️ +Dict ❌ ✔️ ============= ====== =========== @@ -167,3 +169,6 @@ DDPG Policies .. autoclass:: CnnPolicy :members: + +.. autoclass:: MultiInputPolicy + :members: diff --git a/docs/modules/dqn.rst b/docs/modules/dqn.rst index 8b97afad4..1a2dc2ff6 100644 --- a/docs/modules/dqn.rst +++ b/docs/modules/dqn.rst @@ -17,6 +17,7 @@ and make use of different tricks to stabilize the learning with neural networks: MlpPolicy CnnPolicy + MultiInputPolicy Notes @@ -44,6 +45,7 @@ Discrete ✔ ✔ Box ❌ ✔ MultiDiscrete ❌ ✔ MultiBinary ❌ ✔ +Dict ❌ ✔️ ============= ====== =========== @@ -53,20 +55,18 @@ Example .. 
code-block:: python import gym - import numpy as np from stable_baselines3 import DQN - from stable_baselines3.dqn import MlpPolicy - env = gym.make('CartPole-v0') + env = gym.make('CartPole-v1') - model = DQN(MlpPolicy, env, verbose=1) + model = DQN("MlpPolicy", env, verbose=1) model.learn(total_timesteps=10000, log_interval=4) - model.save("dqn_pendulum") + model.save("dqn_cartpole") del model # remove to demonstrate saving and loading - model = DQN.load("dqn_pendulum") + model = DQN.load("dqn_cartpole") obs = env.reset() while True: diff --git a/docs/modules/sac.rst b/docs/modules/sac.rst index bbe6bfc15..054497f0a 100644 --- a/docs/modules/sac.rst +++ b/docs/modules/sac.rst @@ -56,6 +56,7 @@ Discrete ❌ ✔️ Box ✔️ ✔️ MultiDiscrete ❌ ✔️ MultiBinary ❌ ✔️ +Dict ❌ ✔️ ============= ====== =========== @@ -170,3 +171,6 @@ SAC Policies .. autoclass:: CnnPolicy :members: + +.. autoclass:: MultiInputPolicy + :members: diff --git a/docs/modules/td3.rst b/docs/modules/td3.rst index 2ecf8c9d3..d42faa552 100644 --- a/docs/modules/td3.rst +++ b/docs/modules/td3.rst @@ -19,6 +19,7 @@ We recommend reading `OpenAI Spinning guide on TD3 Date: Fri, 8 Jan 2021 14:40:34 +0100 Subject: [PATCH 32/70] Fix format --- stable_baselines3/ddpg/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stable_baselines3/ddpg/__init__.py b/stable_baselines3/ddpg/__init__.py index 95dfd559c..262e7f1af 100644 --- a/stable_baselines3/ddpg/__init__.py +++ b/stable_baselines3/ddpg/__init__.py @@ -1,2 +1,2 @@ -from stable_baselines3.ddpg.policies import CnnPolicy, MlpPolicy, MultiInputPolicy from stable_baselines3.ddpg.ddpg import DDPG +from stable_baselines3.ddpg.policies import CnnPolicy, MlpPolicy, MultiInputPolicy From 6206b363c13406aedff8675446ae84a4296a58a2 Mon Sep 17 00:00:00 2001 From: Jaden Travnik Date: Tue, 12 Jan 2021 16:23:08 -0500 Subject: [PATCH 33/70] Removing NineRoom env. Using nested preprocess. Removing mutable default args --- docs/common/envs.rst | 9 +- stable_baselines3/common/envs/__init__.py | 2 +- .../common/envs/multi_input_envs.py | 100 +----------------- stable_baselines3/common/preprocessing.py | 2 +- stable_baselines3/common/torch_layers.py | 10 +- 5 files changed, 15 insertions(+), 108 deletions(-) diff --git a/docs/common/envs.rst b/docs/common/envs.rst index 1a2835eb8..4c20ce4c7 100644 --- a/docs/common/envs.rst +++ b/docs/common/envs.rst @@ -21,11 +21,4 @@ SimpleMultiObsEnv ----------------- .. autoclass:: SimpleMultiObsEnv - :members: - - -NineRoomMultiObsEnv -------------------- - -.. 
autoclass:: NineRoomMultiObsEnv - :members: + :members: \ No newline at end of file diff --git a/stable_baselines3/common/envs/__init__.py b/stable_baselines3/common/envs/__init__.py index 04c90fbd0..93e313bec 100644 --- a/stable_baselines3/common/envs/__init__.py +++ b/stable_baselines3/common/envs/__init__.py @@ -1,2 +1,2 @@ from stable_baselines3.common.envs.bit_flipping_env import BitFlippingEnv -from stable_baselines3.common.envs.multi_input_envs import NineRoomMultiObsEnv, SimpleMultiObsEnv +from stable_baselines3.common.envs.multi_input_envs import SimpleMultiObsEnv diff --git a/stable_baselines3/common/envs/multi_input_envs.py b/stable_baselines3/common/envs/multi_input_envs.py index bd435adf4..1f1c712ce 100644 --- a/stable_baselines3/common/envs/multi_input_envs.py +++ b/stable_baselines3/common/envs/multi_input_envs.py @@ -63,39 +63,12 @@ def __init__( self.log = "" self.state = 0 self.action2str = ["left", "down", "right", "up"] - self.noise = noise self.init_possible_transitions() self.init_state_mapping(num_col, num_row) self.max_state = len(self.state_mapping) - 1 - def random_upsample_img( - self, - value_range: Tuple[int, int] = (0, 255), - initial_size: Tuple[int, int] = (4, 4), - up_size: Tuple[int, int] = (64, 64), - ) -> np.ndarray: - """ - Generated a random image and upsample it. - - :param value_range: The range of values for the img - :param initial_size: The initial size of the image to generate - :param up_size: The size of the upsample - :return: upsampled img - """ - im = np.random.randint(value_range[0], value_range[1], initial_size, dtype=np.int32) - return np.array( - [ - [ - [ - im[int(initial_size[0] * row / up_size[0])][int(initial_size[1] * col / up_size[1])] - for col in range(up_size[0]) - ] - for row in range(up_size[1]) - ] - ] - ).astype(np.int32) def init_state_mapping(self, num_col: int, num_row: int) -> None: """ @@ -107,8 +80,9 @@ def init_state_mapping(self, num_col: int, num_row: int) -> None: self.num_col = num_col self.state_mapping = [] - col_vecs = [np.random.random(self.vector_size) for i in range(num_col)] - row_imgs = [self.random_upsample_img() for j in range(num_row)] + col_vecs = np.random.random((num_col, self.vector_size)) + row_imgs = np.random.randint(0, 255, (num_row, 1, 64, 64), dtype=np.int32) + for i in range(num_col): for j in range(num_row): self.state_mapping.append({"vec": col_vecs[i], "img": row_imgs[j]}) @@ -119,13 +93,7 @@ def get_state_mapping(self) -> Dict[str, np.ndarray]: :return: observation dict {'vec': ..., 'img': ...} """ - state_dict = self.state_mapping[self.state] - if self.noise > 0: - state_dict["vec"] += np.random.random(self.vector_size) * self.noise - img_noise = int(255 * self.noise) - state_dict["img"] += np.random.randint(-img_noise, img_noise, tuple(self.img_size), dtype=np.int32) - state_dict["img"] = np.clip(state_dict["img"], 0, 255) - return state_dict + return self.state_mapping[self.state] def init_possible_transitions(self) -> None: """ @@ -204,63 +172,3 @@ def reset(self) -> Dict[str, np.ndarray]: else: self.state = np.random.randint(0, self.max_state) return self.state_mapping[self.state] - - -class NineRoomMultiObsEnv(SimpleMultiObsEnv): - """ - Extension of the SimpleMultiObsEnv to a 9 room grid world - - .. 
code-block:: text - - ____________________________________ - | 0 1 2 | 3 4 5 | 6 7 8 | - | 9 10 11 12 13 14 15 16 17 | - | 18 19 20 | 21 22 23 | 24 25 26 | - |¯¯¯¯ ¯¯¯|¯¯¯¯ ¯¯¯¯|¯¯¯¯ ¯¯¯¯| - | 27 28 29 | 30 31 32 | 33 34 35 | - | 36 37 38 39 40 41 42 43 44 | - | 45 46 47 | 48 49 50 | 51 52 53 | - |¯¯¯ ¯¯¯|¯¯¯¯ ¯¯¯¯|¯¯¯¯ ¯¯¯¯| - | 54 55 56 | 57 58 59 | 60 61 62 | - | 63 64 65 66 67 68 69 70 71 | - | 72 73 74 | 75 76 77 | 78 79 80 | - ¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯ - - :param random_start: If true, agent starts in random position - :param noise: Noise added to the observations - """ - - def __init__(self, random_start: bool = True, noise: float = 0.0): - super(NineRoomMultiObsEnv, self).__init__(9, 9, random_start=random_start, noise=noise) - - def init_possible_transitions(self) -> None: - """ - Initializes the state_mapping array which holds the observation values for each state - """ - self.left_possible = ( - [1, 2, 4, 5, 7, 8] - + list(range(10, 18)) - + [19, 20, 22, 23, 25, 26] - + [28, 29, 31, 32, 34, 35] - + list(range(37, 45)) - + [46, 47, 49, 50, 52, 53] - + [55, 56, 58, 59, 61, 62] - + list(range(64, 72)) - + [73, 74, 76, 77, 79, 80] - ) - - self.down_possible = list(range(18)) + [19, 22, 25] + list(range(27, 45)) + [46, 49, 52] + list(range(54, 72)) - - self.right_possible = ( - [0, 1, 3, 4, 6, 7] - + list(range(9, 17)) - + [18, 19, 21, 22, 24, 25] - + [27, 28, 30, 31, 33, 34] - + list(range(36, 44)) - + [45, 46, 48, 49, 51, 52] - + [54, 55, 57, 58, 60, 61] - + list(range(63, 71)) - + [72, 73, 75, 76, 78, 79] - ) - - self.up_possible = list(range(9, 27)) + [28, 31, 34] + list(range(36, 54)) + [55, 58, 61] + list(range(63, 81)) diff --git a/stable_baselines3/common/preprocessing.py b/stable_baselines3/common/preprocessing.py index 9b5a92787..56ac43475 100644 --- a/stable_baselines3/common/preprocessing.py +++ b/stable_baselines3/common/preprocessing.py @@ -116,7 +116,7 @@ def preprocess_obs( elif isinstance(observation_space, spaces.Dict): for key, _obs in obs.items(): - obs[key] = _obs.float() + obs[key] = preprocess_obs(_obs, observation_space[key], normalize_images=normalize_images) return obs else: diff --git a/stable_baselines3/common/torch_layers.py b/stable_baselines3/common/torch_layers.py index 435f4a638..6cbeec8b9 100644 --- a/stable_baselines3/common/torch_layers.py +++ b/stable_baselines3/common/torch_layers.py @@ -251,12 +251,18 @@ def __init__( features_dim: int = 64, cnn_output_dim: int = 64, mlp_output_dim: int = 64, - mlp_net_arch: List[int] = [64, 64], + mlp_net_arch: List[int] = None, activation_fn: Type[nn.Module] = nn.ReLU, - combined_net_arch: List[int] = [64, 64], + combined_net_arch: List[int] = None, ): super(CombinedExtractor, self).__init__(observation_space, features_dim=features_dim) + if mlp_net_arch is None: + mlp_net_arch = [64, 64] + + if combined_net_arch is None: + combined_net_arch = [64, 64] + extractors = {} total_concat_size = 0 From f06497284e8932813a97de177651dbd41ac7926c Mon Sep 17 00:00:00 2001 From: Jaden Travnik Date: Tue, 12 Jan 2021 16:30:25 -0500 Subject: [PATCH 34/70] running code style --- stable_baselines3/common/envs/multi_input_envs.py | 1 - stable_baselines3/common/torch_layers.py | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/stable_baselines3/common/envs/multi_input_envs.py b/stable_baselines3/common/envs/multi_input_envs.py index 1f1c712ce..3523b1bb3 100644 --- a/stable_baselines3/common/envs/multi_input_envs.py +++ b/stable_baselines3/common/envs/multi_input_envs.py @@ -69,7 +69,6 @@ def 
__init__( self.max_state = len(self.state_mapping) - 1 - def init_state_mapping(self, num_col: int, num_row: int) -> None: """ Initializes the state_mapping array which holds the observation values for each state diff --git a/stable_baselines3/common/torch_layers.py b/stable_baselines3/common/torch_layers.py index 6cbeec8b9..daf779e9c 100644 --- a/stable_baselines3/common/torch_layers.py +++ b/stable_baselines3/common/torch_layers.py @@ -259,7 +259,7 @@ def __init__( if mlp_net_arch is None: mlp_net_arch = [64, 64] - + if combined_net_arch is None: combined_net_arch = [64, 64] From 90d2577a25dae7007f752c205d22a330c13889fe Mon Sep 17 00:00:00 2001 From: Jaden Travnik Date: Thu, 14 Jan 2021 15:19:31 -0500 Subject: [PATCH 35/70] Passing channel check through to stacked dict observations. --- stable_baselines3/common/torch_layers.py | 11 +++++++++-- .../common/vec_env/stacked_observations.py | 10 +++++++--- stable_baselines3/common/vec_env/vec_frame_stack.py | 4 +++- 3 files changed, 19 insertions(+), 6 deletions(-) diff --git a/stable_baselines3/common/torch_layers.py b/stable_baselines3/common/torch_layers.py index daf779e9c..e68f84f88 100644 --- a/stable_baselines3/common/torch_layers.py +++ b/stable_baselines3/common/torch_layers.py @@ -243,6 +243,7 @@ class CombinedExtractor(BaseFeaturesExtractor): :param mlp_net_arch: Architecture of each MLP network module :param activation_fn: The activation function used in all MLP submodules and combined network :param combined_net_arch: Architecture of the combined network module which calculates the final feature extracted + :param check_channels: Whether channels should be checked for is_image_space """ def __init__( @@ -254,6 +255,7 @@ def __init__( mlp_net_arch: List[int] = None, activation_fn: Type[nn.Module] = nn.ReLU, combined_net_arch: List[int] = None, + check_channels: bool=True ): super(CombinedExtractor, self).__init__(observation_space, features_dim=features_dim) @@ -267,7 +269,8 @@ def __init__( total_concat_size = 0 for key, subspace in observation_space.spaces.items(): - if is_image_space(subspace): + + if is_image_space(subspace, check_channels=check_channels): # The observation key is an image: create a CNN for it n_input_channels = subspace.shape[0] @@ -313,7 +316,11 @@ def __init__( ) def forward(self, observations: TensorDict) -> th.Tensor: - encoded_tensor_list = [extractor(observations[key]) for key, extractor in self.extractors.items()] + encoded_tensor_list = [] + + for key, extractor in self.extractors.items(): + encoded_tensor_list.append(extractor(observations[key])) + return self.combined(th.cat(encoded_tensor_list, dim=1)) diff --git a/stable_baselines3/common/vec_env/stacked_observations.py b/stable_baselines3/common/vec_env/stacked_observations.py index c6445f88e..ce4cecb20 100644 --- a/stable_baselines3/common/vec_env/stacked_observations.py +++ b/stable_baselines3/common/vec_env/stacked_observations.py @@ -1,5 +1,5 @@ import warnings -from typing import Any, Dict, List, Optional, Tuple +from typing import Any, Dict, List, Optional, Tuple, Union import numpy as np from gym import spaces @@ -163,7 +163,7 @@ def __init__( num_envs: int, n_stack: int, observation_space: spaces.Dict, - channels_order: Optional[str] = None, + channels_order: Optional[Union[str, Dict[str, str]]] = None, ): self.n_stack = n_stack self.channels_first = {} @@ -173,12 +173,16 @@ def __init__( for key, subspace in observation_space.spaces.items(): assert isinstance(subspace, spaces.Box), "StackedDictObservations only works with nested 
gym.spaces.Box" + if isinstance(channels_order, str) or channels_order is None: + subspace_channel_order = channels_order + else: + subspace_channel_order = channels_order[key] ( self.channels_first[key], self.stack_dimension[key], self.stackedobs[key], self.repeat_axis[key], - ) = self.compute_stacking(num_envs, n_stack, subspace, channels_order) + ) = self.compute_stacking(num_envs, n_stack, subspace, subspace_channel_order) def stack_observation_space(self, observation_space: spaces.Dict) -> spaces.Dict: """ diff --git a/stable_baselines3/common/vec_env/vec_frame_stack.py b/stable_baselines3/common/vec_env/vec_frame_stack.py index 1c717bd6d..74fce8727 100644 --- a/stable_baselines3/common/vec_env/vec_frame_stack.py +++ b/stable_baselines3/common/vec_env/vec_frame_stack.py @@ -17,15 +17,17 @@ class VecFrameStack(VecEnvWrapper): :param n_stack: Number of frames to stack :param channels_order: If "first", stack on first image dimension. If "last", stack on last dimension. If None, automatically detect channel to stack over in case of image observation or default to "last" (default). + Alternatively channels_order can be a dictionary which can be used with environments with Dict observation spaces """ - def __init__(self, venv: VecEnv, n_stack: int, channels_order: Optional[str] = None): + def __init__(self, venv: VecEnv, n_stack: int, channels_order: Optional[Union[str, Dict[str, str]]] = None): self.venv = venv self.n_stack = n_stack wrapped_obs_space = venv.observation_space if isinstance(wrapped_obs_space, spaces.Box): + assert not isinstance(channels_order, dict), f"Expected None or string for channels_order but received {channels_order}" self.stackedobs = StackedObservations(venv.num_envs, n_stack, wrapped_obs_space, channels_order) elif isinstance(wrapped_obs_space, spaces.Dict): From 8f37cb2321b1b308e567e64943079d0bef8553ea Mon Sep 17 00:00:00 2001 From: Jaden Travnik Date: Thu, 14 Jan 2021 15:22:20 -0500 Subject: [PATCH 36/70] Running black --- stable_baselines3/common/torch_layers.py | 6 +++--- stable_baselines3/common/vec_env/vec_frame_stack.py | 4 +++- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/stable_baselines3/common/torch_layers.py b/stable_baselines3/common/torch_layers.py index e68f84f88..d9dc4183c 100644 --- a/stable_baselines3/common/torch_layers.py +++ b/stable_baselines3/common/torch_layers.py @@ -255,7 +255,7 @@ def __init__( mlp_net_arch: List[int] = None, activation_fn: Type[nn.Module] = nn.ReLU, combined_net_arch: List[int] = None, - check_channels: bool=True + check_channels: bool = True, ): super(CombinedExtractor, self).__init__(observation_space, features_dim=features_dim) @@ -269,7 +269,7 @@ def __init__( total_concat_size = 0 for key, subspace in observation_space.spaces.items(): - + if is_image_space(subspace, check_channels=check_channels): # The observation key is an image: create a CNN for it n_input_channels = subspace.shape[0] @@ -319,7 +319,7 @@ def forward(self, observations: TensorDict) -> th.Tensor: encoded_tensor_list = [] for key, extractor in self.extractors.items(): - encoded_tensor_list.append(extractor(observations[key])) + encoded_tensor_list.append(extractor(observations[key])) return self.combined(th.cat(encoded_tensor_list, dim=1)) diff --git a/stable_baselines3/common/vec_env/vec_frame_stack.py b/stable_baselines3/common/vec_env/vec_frame_stack.py index 74fce8727..e06d5125e 100644 --- a/stable_baselines3/common/vec_env/vec_frame_stack.py +++ b/stable_baselines3/common/vec_env/vec_frame_stack.py @@ -27,7 +27,9 @@ def 
__init__(self, venv: VecEnv, n_stack: int, channels_order: Optional[Union[st wrapped_obs_space = venv.observation_space if isinstance(wrapped_obs_space, spaces.Box): - assert not isinstance(channels_order, dict), f"Expected None or string for channels_order but received {channels_order}" + assert not isinstance( + channels_order, dict + ), f"Expected None or string for channels_order but received {channels_order}" self.stackedobs = StackedObservations(venv.num_envs, n_stack, wrapped_obs_space, channels_order) elif isinstance(wrapped_obs_space, spaces.Dict): From 298475610f430d6b118fca0cabf3f555b68c286d Mon Sep 17 00:00:00 2001 From: Jaden Travnik Date: Thu, 14 Jan 2021 20:43:16 -0500 Subject: [PATCH 37/70] Adding channel control to SimpleMultiObsEnv. Passing check_channels to CombinedExtractor --- stable_baselines3/common/envs/multi_input_envs.py | 11 ++++++++--- tests/test_dict_env.py | 14 ++++++++++---- 2 files changed, 18 insertions(+), 7 deletions(-) diff --git a/stable_baselines3/common/envs/multi_input_envs.py b/stable_baselines3/common/envs/multi_input_envs.py index 3523b1bb3..35a76e186 100644 --- a/stable_baselines3/common/envs/multi_input_envs.py +++ b/stable_baselines3/common/envs/multi_input_envs.py @@ -30,6 +30,7 @@ class SimpleMultiObsEnv(gym.Env): :param num_row: Number of rows in the grid :param random_start: If true, agent starts in random position :param noise: Noise added to the observations + :param channel_last: If true, the image will be channel last, else it will be channel first """ def __init__( @@ -39,11 +40,15 @@ def __init__( random_start: bool = True, noise: float = 0.0, discrete_actions: bool = True, + channel_last: bool = True, ): super(SimpleMultiObsEnv, self).__init__() self.vector_size = 5 - self.img_size = [1, 64, 64] + if channel_last: + self.img_size = [64, 64, 1] + else: + self.img_size = [1, 64, 64] self.random_start = random_start self.discrete_actions = discrete_actions @@ -80,11 +85,11 @@ def init_state_mapping(self, num_col: int, num_row: int) -> None: self.state_mapping = [] col_vecs = np.random.random((num_col, self.vector_size)) - row_imgs = np.random.randint(0, 255, (num_row, 1, 64, 64), dtype=np.int32) + row_imgs = np.random.randint(0, 255, (num_row, 64, 64), dtype=np.int32) for i in range(num_col): for j in range(num_row): - self.state_mapping.append({"vec": col_vecs[i], "img": row_imgs[j]}) + self.state_mapping.append({"vec": col_vecs[i], "img": row_imgs[j].reshape(self.img_size)}) def get_state_mapping(self) -> Dict[str, np.ndarray]: """ diff --git a/tests/test_dict_env.py b/tests/test_dict_env.py index 168a3e4f1..c0418917d 100644 --- a/tests/test_dict_env.py +++ b/tests/test_dict_env.py @@ -13,19 +13,25 @@ def test_dict_spaces(model_class): for Dictionary spaces using MultiInputPolicy. 
""" use_discrete_actions = model_class not in [SAC, TD3, DDPG] - env = DummyVecEnv([lambda: SimpleMultiObsEnv(random_start=True, discrete_actions=use_discrete_actions)]) - env = VecFrameStack(env, n_stack=2) + + channels_order = {"vec": None, "img": "first"} + env = DummyVecEnv( + [lambda: SimpleMultiObsEnv(random_start=True, discrete_actions=use_discrete_actions, channel_last=False)] + ) + + env = VecFrameStack(env, n_stack=3, channels_order=channels_order) + kwargs = {} n_steps = 250 if model_class in {A2C, PPO}: - kwargs = dict(n_steps=100) + kwargs = dict(n_steps=100, policy_kwargs=dict(features_extractor_kwargs=dict(features_dim=32, check_channels=False))) else: # Avoid memory error when using replay buffer # Reduce the size of the features kwargs = dict( buffer_size=250, - policy_kwargs=dict(features_extractor_kwargs=dict(features_dim=32)), + policy_kwargs=dict(features_extractor_kwargs=dict(features_dim=32, check_channels=False)), ) if model_class == DQN: kwargs["learning_starts"] = 0 From 324ef4374c8476eab598a5ed69b58b936087b6a4 Mon Sep 17 00:00:00 2001 From: Antonin RAFFIN Date: Mon, 18 Jan 2021 11:03:38 +0100 Subject: [PATCH 38/70] Remove optimize memory for dict buffers --- stable_baselines3/common/buffers.py | 89 ++++++----------------------- 1 file changed, 16 insertions(+), 73 deletions(-) diff --git a/stable_baselines3/common/buffers.py b/stable_baselines3/common/buffers.py index addd9e739..9b6b6746f 100644 --- a/stable_baselines3/common/buffers.py +++ b/stable_baselines3/common/buffers.py @@ -185,19 +185,13 @@ def __init__( self.optimize_memory_usage = optimize_memory_usage - self.observations = np.zeros( - (self.buffer_size, self.n_envs) + self.obs_shape, - dtype=observation_space.dtype, - ) + self.observations = np.zeros((self.buffer_size, self.n_envs) + self.obs_shape, dtype=observation_space.dtype) if optimize_memory_usage: # `observations` contains also the next observation self.next_observations = None else: - self.next_observations = np.zeros( - (self.buffer_size, self.n_envs) + self.obs_shape, - dtype=observation_space.dtype, - ) + self.next_observations = np.zeros((self.buffer_size, self.n_envs) + self.obs_shape, dtype=observation_space.dtype) self.actions = np.zeros((self.buffer_size, self.n_envs, self.action_dim), dtype=action_space.dtype) @@ -220,12 +214,7 @@ def __init__( ) def add( - self, - obs: Union[np.ndarray, dict], - next_obs: np.ndarray, - action: np.ndarray, - reward: np.ndarray, - done: np.ndarray, + self, obs: Union[np.ndarray, dict], next_obs: np.ndarray, action: np.ndarray, reward: np.ndarray, done: np.ndarray ) -> None: # Copy to avoid modification by reference @@ -322,12 +311,7 @@ def __init__( super(RolloutBuffer, self).__init__(buffer_size, observation_space, action_space, device, n_envs=n_envs) self.gae_lambda = gae_lambda self.gamma = gamma - self.observations, self.actions, self.rewards, self.advantages = ( - None, - None, - None, - None, - ) + self.observations, self.actions, self.rewards, self.advantages = None, None, None, None self.returns, self.dones, self.values, self.log_probs = None, None, None, None self.generator_ready = False self.reset() @@ -460,10 +444,6 @@ class DictReplayBuffer(ReplayBuffer): :param device: :param n_envs: Number of parallel environments :param optimize_memory_usage: Enable a memory efficient variant - of the replay buffer which reduces by almost a factor two the memory used, - at a cost of more complexity. 
- See https://github.com/DLR-RM/stable-baselines3/issues/37#issuecomment-637501195 - and https://github.com/DLR-RM/stable-baselines3/pull/28#issuecomment-637559274 Disabled for now (see https://github.com/DLR-RM/stable-baselines3/pull/243#discussion_r531535702) """ @@ -493,13 +473,9 @@ def __init__( self.observations = { key: np.zeros((self.buffer_size, self.n_envs) + _obs_shape) for key, _obs_shape in self.obs_shape.items() } - if optimize_memory_usage: - # `observations` contains also the next observation - self.next_observations = None - else: - self.next_observations = { - key: np.zeros((self.buffer_size, self.n_envs) + _obs_shape) for key, _obs_shape in self.obs_shape.items() - } + self.next_observations = { + key: np.zeros((self.buffer_size, self.n_envs) + _obs_shape) for key, _obs_shape in self.obs_shape.items() + } # only 1 env is supported self.actions = np.zeros((self.buffer_size, self.action_dim), dtype=action_space.dtype) @@ -528,24 +504,15 @@ def __init__( ) def add( - self, - obs: Union[np.ndarray, dict], - next_obs: np.ndarray, - action: np.ndarray, - reward: np.ndarray, - done: np.ndarray, + self, obs: Union[np.ndarray, Dict], next_obs: np.ndarray, action: np.ndarray, reward: np.ndarray, done: np.ndarray ) -> None: # Copy to avoid modification by reference for key in self.observations.keys(): self.observations[key][self.pos] = np.array(obs[key]).copy() - if self.optimize_memory_usage: - for key in self.observations.keys(): - self.observations[key][(self.pos + 1) % self.buffer_size] = np.array(next_obs[key]).copy() - else: - for key in self.next_observations.keys(): - self.next_observations[key][self.pos] = np.array(next_obs[key]).copy() + for key in self.next_observations.keys(): + self.next_observations[key][self.pos] = np.array(next_obs[key]).copy() self.actions[self.pos] = np.array(action).copy() self.rewards[self.pos] = np.array(reward).copy() @@ -559,38 +526,19 @@ def add( def sample(self, batch_size: int, env: Optional[VecNormalize] = None) -> DictReplayBufferSamples: """ Sample elements from the replay buffer. 
- Custom sampling when using memory efficient variant, - as we should not sample the element with index `self.pos` - See https://github.com/DLR-RM/stable-baselines3/pull/28#issuecomment-637559274 :param batch_size: Number of element to sample :param env: associated gym VecEnv to normalize the observations/rewards when sampling :return: """ - if not self.optimize_memory_usage: - return super(ReplayBuffer, self).sample(batch_size=batch_size, env=env) - - # Do not sample the element with index `self.pos` as the transitions is invalid - # (we use only one array to store `obs` and `next_obs`) - if self.full: - batch_inds = (np.random.randint(1, self.buffer_size, size=batch_size) + self.pos) % self.buffer_size - else: - batch_inds = np.random.randint(0, self.pos, size=batch_size) - return self._get_samples(batch_inds, env=env) + return super(ReplayBuffer, self).sample(batch_size=batch_size, env=env) def _get_samples(self, batch_inds: np.ndarray, env: Optional[VecNormalize] = None) -> DictReplayBufferSamples: - if self.optimize_memory_usage: - next_obs = { - key: self.to_torch(self._normalize_obs(obs[(batch_inds + 1) % self.buffer_size, 0, :], env)) - for key, obs in self.observations.items() - } - else: - next_obs = { - key: self.to_torch(self._normalize_obs(obs[batch_inds, 0, :], env)) - for key, obs in self.next_observations.items() - } + next_obs = { + key: self.to_torch(self._normalize_obs(obs[batch_inds, 0, :], env)) for key, obs in self.next_observations.items() + } normalized_obs = { key: self.to_torch(self._normalize_obs(obs[batch_inds, 0, :], env)) for key, obs in self.observations.items() @@ -647,12 +595,7 @@ def __init__( self.gae_lambda = gae_lambda self.gamma = gamma - self.observations, self.actions, self.rewards, self.advantages = ( - None, - None, - None, - None, - ) + self.observations, self.actions, self.rewards, self.advantages = None, None, None, None self.returns, self.dones, self.values, self.log_probs = None, None, None, None self.generator_ready = False self.reset() @@ -674,7 +617,7 @@ def reset(self) -> None: def add( self, - obs: Union[np.ndarray, dict], + obs: Union[np.ndarray, Dict], action: np.ndarray, reward: np.ndarray, done: np.ndarray, From 2fdcfc6a669b90629b9bf6d98de6a57ab108a76d Mon Sep 17 00:00:00 2001 From: Antonin RAFFIN Date: Mon, 18 Jan 2021 11:18:32 +0100 Subject: [PATCH 39/70] Update doc --- stable_baselines3/common/base_class.py | 1 + .../common/envs/multi_input_envs.py | 18 ++++++++++-------- 2 files changed, 11 insertions(+), 8 deletions(-) diff --git a/stable_baselines3/common/base_class.py b/stable_baselines3/common/base_class.py index 4cf1d7f84..396e2f32d 100644 --- a/stable_baselines3/common/base_class.py +++ b/stable_baselines3/common/base_class.py @@ -205,6 +205,7 @@ def _wrap_env(env: GymEnv, verbose: int = 0, monitor_wrapper: bool = True) -> Ve env = VecTransposeImage(env) # check if wrapper for dict support is needed when using HER + # TODO(antonin): remove this with the new version of HER if isinstance(env.observation_space, gym.spaces.dict.Dict) and set(env.observation_space.spaces.keys()) == set( ["observation", "desired_goal", "achieved_goal"] ): diff --git a/stable_baselines3/common/envs/multi_input_envs.py b/stable_baselines3/common/envs/multi_input_envs.py index 35a76e186..dd49f475e 100644 --- a/stable_baselines3/common/envs/multi_input_envs.py +++ b/stable_baselines3/common/envs/multi_input_envs.py @@ -1,4 +1,4 @@ -from typing import Dict, Tuple, Union +from typing import Dict, Union import gym import numpy as np @@ -24,12 +24,13 @@ 
class SimpleMultiObsEnv(gym.Env): goal is 15 actions are = [left, down, right, up] - simple linear state env of 15 states but encoded with a vector and an image observation + simple linear state env of 15 states but encoded with a vector and an image observation: + each column is represented by a random vector and each row is + represented by a random image, both sampled once at creation time. :param num_col: Number of columns in the grid :param num_row: Number of rows in the grid :param random_start: If true, agent starts in random position - :param noise: Noise added to the observations :param channel_last: If true, the image will be channel last, else it will be channel first """ @@ -38,7 +39,6 @@ def __init__( num_col: int = 4, num_row: int = 4, random_start: bool = True, - noise: float = 0.0, discrete_actions: bool = True, channel_last: bool = True, ): @@ -64,12 +64,15 @@ def __init__( } ) self.count = 0 + # Timeout self.max_count = 100 self.log = "" self.state = 0 self.action2str = ["left", "down", "right", "up"] self.init_possible_transitions() + self.num_col = num_col + self.state_mapping = [] self.init_state_mapping(num_col, num_row) self.max_state = len(self.state_mapping) - 1 @@ -81,10 +84,9 @@ def init_state_mapping(self, num_col: int, num_row: int) -> None: :param num_col: Number of columns. :param num_row: Number of rows. """ - self.num_col = num_col - self.state_mapping = [] - + # Each column is represented by a random vector col_vecs = np.random.random((num_col, self.vector_size)) + # Each row is represented by a random image row_imgs = np.random.randint(0, 255, (num_row, 64, 64), dtype=np.int32) for i in range(num_col): @@ -93,7 +95,7 @@ def init_state_mapping(self, num_col: int, num_row: int) -> None: def get_state_mapping(self) -> Dict[str, np.ndarray]: """ - Uses the state to get the observation mapping and applies noise if there is any. + Uses the state to get the observation mapping. :return: observation dict {'vec': ..., 'img': ...} """ From 2bab0a3ec235555d93edd7b85b408cf0dc90b4da Mon Sep 17 00:00:00 2001 From: Antonin RAFFIN Date: Mon, 18 Jan 2021 11:40:05 +0100 Subject: [PATCH 40/70] Move identity env --- docs/guide/custom_env.rst | 4 ++-- docs/misc/changelog.rst | 5 +++-- stable_baselines3/common/base_class.py | 2 ++ stable_baselines3/common/{ => envs}/identity_env.py | 0 tests/test_cnn.py | 2 +- tests/test_dict_env.py | 6 +++--- tests/test_envs.py | 6 ++++-- tests/test_identity.py | 2 +- tests/test_save_load.py | 2 +- 9 files changed, 17 insertions(+), 12 deletions(-) rename stable_baselines3/common/{ => envs}/identity_env.py (100%) diff --git a/docs/guide/custom_env.rst b/docs/guide/custom_env.rst index 6adf55dc2..f598e7055 100644 --- a/docs/guide/custom_env.rst +++ b/docs/guide/custom_env.rst @@ -79,5 +79,5 @@ that will allow you to create the RL agent in one line (and use ``gym.make()`` t In the project, for testing purposes, we use a custom environment named ``IdentityEnv`` -defined `in this file `_. -An example of how to use it can be found `here `_. +defined `in this file `_. +An example of how to use it can be found `here `_. diff --git a/docs/misc/changelog.rst b/docs/misc/changelog.rst index d97272e9a..c8c622c03 100644 --- a/docs/misc/changelog.rst +++ b/docs/misc/changelog.rst @@ -3,7 +3,7 @@ Changelog ========== -Pre-Release 0.11.0a5 (WIP) +Pre-Release 0.11.0a6 (WIP) ------------------------------- Breaking Changes: @@ -12,7 +12,7 @@ Breaking Changes: this allows to return the unnormalized reward in the case of Atari games for instance. 
- Renamed ``common.vec_env.is_wrapped`` to ``common.vec_env.is_vecenv_wrapped`` to avoid confusion with the new ``is_wrapped()`` helper -- All customs environments (e.g. the ``BitFlippingEnv``) were moved to ``stable_baselines3.common.envs`` folder +- All custom environments (e.g. the ``BitFlippingEnv`` or ``IdentityEnv``) were moved to ``stable_baselines3.common.envs`` folder New Features: ^^^^^^^^^^^^^ @@ -57,6 +57,7 @@ Others: - Updated docker base image to Ubuntu 18.04 - Set tensorboard min version to 2.2.0 (earlier version are apparently not working with PyTorch) - Added warning for ``PPO`` when ``n_steps * n_envs`` is not a multiple of ``batch_size`` (last mini-batch truncated) (@decodyng) +- Updated ``env_checker`` because SB3 now supports Dict envs Documentation: ^^^^^^^^^^^^^^ diff --git a/stable_baselines3/common/base_class.py b/stable_baselines3/common/base_class.py index 396e2f32d..ba5bbdb85 100644 --- a/stable_baselines3/common/base_class.py +++ b/stable_baselines3/common/base_class.py @@ -211,6 +211,8 @@ def _wrap_env(env: GymEnv, verbose: int = 0, monitor_wrapper: bool = True) -> Ve ): env = ObsDictWrapper(env) + # TODO(@J-Travnik): check that it is a first-level dict obs space only + return env @abstractmethod diff --git a/stable_baselines3/common/identity_env.py b/stable_baselines3/common/envs/identity_env.py similarity index 100% rename from stable_baselines3/common/identity_env.py rename to stable_baselines3/common/envs/identity_env.py diff --git a/tests/test_cnn.py b/tests/test_cnn.py index b6dfd2411..772da90eb 100644 --- a/tests/test_cnn.py +++ b/tests/test_cnn.py @@ -7,7 +7,7 @@ from gym import spaces from stable_baselines3 import A2C, DQN, PPO, SAC, TD3 -from stable_baselines3.common.identity_env import FakeImageEnv +from stable_baselines3.common.envs import FakeImageEnv from stable_baselines3.common.preprocessing import is_image_space, is_image_space_channels_first from stable_baselines3.common.utils import zip_strict from stable_baselines3.common.vec_env import VecTransposeImage, is_vecenv_wrapped diff --git a/tests/test_dict_env.py b/tests/test_dict_env.py index c0418917d..dc3bf71c2 100644 --- a/tests/test_dict_env.py +++ b/tests/test_dict_env.py @@ -1,7 +1,7 @@ import pytest from stable_baselines3 import A2C, DDPG, DQN, PPO, SAC, TD3 -from stable_baselines3.common.envs.multi_input_envs import SimpleMultiObsEnv +from stable_baselines3.common.envs import SimpleMultiObsEnv from stable_baselines3.common.evaluation import evaluate_policy from stable_baselines3.common.vec_env import DummyVecEnv, VecFrameStack @@ -22,10 +22,10 @@ def test_dict_spaces(model_class): env = VecFrameStack(env, n_stack=3, channels_order=channels_order) kwargs = {} - n_steps = 250 + n_steps = 256 if model_class in {A2C, PPO}: - kwargs = dict(n_steps=100, policy_kwargs=dict(features_extractor_kwargs=dict(features_dim=32, check_channels=False))) + kwargs = dict(n_steps=128, policy_kwargs=dict(features_extractor_kwargs=dict(features_dim=32, check_channels=False))) else: # Avoid memory error when using replay buffer # Reduce the size of the features diff --git a/tests/test_envs.py b/tests/test_envs.py index 2dcbeed66..be39349a6 100644 --- a/tests/test_envs.py +++ b/tests/test_envs.py @@ -4,13 +4,14 @@ from gym import spaces from stable_baselines3.common.env_checker import check_env -from stable_baselines3.common.envs import BitFlippingEnv -from stable_baselines3.common.identity_env import ( +from stable_baselines3.common.envs import ( + BitFlippingEnv, FakeImageEnv, IdentityEnv, IdentityEnvBox, 
IdentityEnvMultiBinary, IdentityEnvMultiDiscrete, + SimpleMultiObsEnv, ) ENV_CLASSES = [ @@ -20,6 +21,7 @@ IdentityEnvMultiBinary, IdentityEnvMultiDiscrete, FakeImageEnv, + SimpleMultiObsEnv, ] diff --git a/tests/test_identity.py b/tests/test_identity.py index fdde0d2d4..6226580ac 100644 --- a/tests/test_identity.py +++ b/tests/test_identity.py @@ -2,8 +2,8 @@ import pytest from stable_baselines3 import A2C, DDPG, DQN, PPO, SAC, TD3 +from stable_baselines3.common.envs import IdentityEnv, IdentityEnvBox, IdentityEnvMultiBinary, IdentityEnvMultiDiscrete from stable_baselines3.common.evaluation import evaluate_policy -from stable_baselines3.common.identity_env import IdentityEnv, IdentityEnvBox, IdentityEnvMultiBinary, IdentityEnvMultiDiscrete from stable_baselines3.common.noise import NormalActionNoise from stable_baselines3.common.vec_env import DummyVecEnv diff --git a/tests/test_save_load.py b/tests/test_save_load.py index b5b733f47..d4ac20385 100644 --- a/tests/test_save_load.py +++ b/tests/test_save_load.py @@ -12,7 +12,7 @@ from stable_baselines3 import A2C, DDPG, DQN, PPO, SAC, TD3 from stable_baselines3.common.base_class import BaseAlgorithm -from stable_baselines3.common.identity_env import FakeImageEnv, IdentityEnv, IdentityEnvBox +from stable_baselines3.common.envs import FakeImageEnv, IdentityEnv, IdentityEnvBox from stable_baselines3.common.save_util import load_from_pkl, open_path, save_to_pkl from stable_baselines3.common.utils import get_device from stable_baselines3.common.vec_env import DummyVecEnv From b1ec40de47a479bf4e4c1f38d05caa3591afc2be Mon Sep 17 00:00:00 2001 From: Antonin RAFFIN Date: Mon, 18 Jan 2021 11:40:24 +0100 Subject: [PATCH 41/70] Minor edits + bump version --- stable_baselines3/common/env_checker.py | 10 ++++------ stable_baselines3/common/envs/__init__.py | 7 +++++++ .../common/off_policy_algorithm.py | 20 +++++-------------- stable_baselines3/common/preprocessing.py | 4 +++- stable_baselines3/version.txt | 2 +- 5 files changed, 20 insertions(+), 23 deletions(-) diff --git a/stable_baselines3/common/env_checker.py b/stable_baselines3/common/env_checker.py index cb2d1a7f6..4630ab080 100644 --- a/stable_baselines3/common/env_checker.py +++ b/stable_baselines3/common/env_checker.py @@ -91,19 +91,17 @@ def _check_obs(obs: Union[tuple, dict, np.ndarray, int], observation_space: spac if not isinstance(observation_space, spaces.Tuple): assert not isinstance( obs, tuple - ), "The observation returned by the `{}()` method should be a single value, not a tuple".format(method_name) + ), f"The observation returned by the `{method_name}()` method should be a single value, not a tuple" # The check for a GoalEnv is done by the base class if isinstance(observation_space, spaces.Discrete): - assert isinstance(obs, int), "The observation returned by `{}()` method must be an int".format(method_name) + assert isinstance(obs, int), f"The observation returned by `{method_name}()` method must be an int" elif _is_numpy_array_space(observation_space): - assert isinstance(obs, np.ndarray), "The observation returned by `{}()` method must be a numpy array".format( - method_name - ) + assert isinstance(obs, np.ndarray), f"The observation returned by `{method_name}()` method must be a numpy array" assert observation_space.contains( obs - ), "The observation returned by the `{}()` method does not match the given observation space".format(method_name) + ), f"The observation returned by the `{method_name}()` method does not match the given observation space" def 
_check_returned_values(env: gym.Env, observation_space: spaces.Space, action_space: spaces.Space) -> None: diff --git a/stable_baselines3/common/envs/__init__.py b/stable_baselines3/common/envs/__init__.py index 93e313bec..23bd5750f 100644 --- a/stable_baselines3/common/envs/__init__.py +++ b/stable_baselines3/common/envs/__init__.py @@ -1,2 +1,9 @@ from stable_baselines3.common.envs.bit_flipping_env import BitFlippingEnv +from stable_baselines3.common.envs.identity_env import ( + FakeImageEnv, + IdentityEnv, + IdentityEnvBox, + IdentityEnvMultiBinary, + IdentityEnvMultiDiscrete, +) from stable_baselines3.common.envs.multi_input_envs import SimpleMultiObsEnv diff --git a/stable_baselines3/common/off_policy_algorithm.py b/stable_baselines3/common/off_policy_algorithm.py index 7abd53c76..001976817 100644 --- a/stable_baselines3/common/off_policy_algorithm.py +++ b/stable_baselines3/common/off_policy_algorithm.py @@ -158,6 +158,7 @@ def _setup_model(self) -> None: self._setup_lr_schedule() self.set_random_seed(self.seed) + # Use DictReplayBuffer if needed buffer_cls = DictReplayBuffer if isinstance(self.observation_space, gym.spaces.Dict) else ReplayBuffer self.replay_buffer = buffer_cls( @@ -349,18 +350,11 @@ def _dump_logs(self) -> None: Write log. """ time_elapsed = time.time() - self.start_time - time_elapsed += 1e-10 - fps = int(self.num_timesteps / time_elapsed) + fps = int(self.num_timesteps / (time_elapsed + 1e-8)) logger.record("time/episodes", self._episode_num, exclude="tensorboard") if len(self.ep_info_buffer) > 0 and len(self.ep_info_buffer[0]) > 0: - logger.record( - "rollout/ep_rew_mean", - safe_mean([ep_info["r"] for ep_info in self.ep_info_buffer]), - ) - logger.record( - "rollout/ep_len_mean", - safe_mean([ep_info["l"] for ep_info in self.ep_info_buffer]), - ) + logger.record("rollout/ep_rew_mean", safe_mean([ep_info["r"] for ep_info in self.ep_info_buffer])) + logger.record("rollout/ep_len_mean", safe_mean([ep_info["l"] for ep_info in self.ep_info_buffer])) logger.record("time/fps", fps) logger.record("time/time_elapsed", int(time_elapsed), exclude="tensorboard") logger.record("time/total timesteps", self.num_timesteps, exclude="tensorboard") @@ -460,11 +454,7 @@ def collect_rollouts( reward_ = self._vec_normalize_env.get_original_reward() else: # Avoid changing the original ones - self._last_original_obs, new_obs_, reward_ = ( - self._last_obs, - new_obs, - reward, - ) + self._last_original_obs, new_obs_, reward_ = self._last_obs, new_obs, reward replay_buffer.add(self._last_original_obs, new_obs_, buffer_action, reward_, done) diff --git a/stable_baselines3/common/preprocessing.py b/stable_baselines3/common/preprocessing.py index 56ac43475..241c8dc63 100644 --- a/stable_baselines3/common/preprocessing.py +++ b/stable_baselines3/common/preprocessing.py @@ -144,7 +144,7 @@ def get_obs_shape( # Number of binary features return (int(observation_space.n),) elif isinstance(observation_space, spaces.Dict): - return {key: subspace.shape for (key, subspace) in observation_space.spaces.items()} + return {key: get_obs_shape(subspace) for (key, subspace) in observation_space.spaces.items()} else: raise NotImplementedError(f"{observation_space} observation space is not supported") @@ -155,6 +155,8 @@ def get_flattened_obs_dim(observation_space: spaces.Space) -> int: Get the dimension of the observation space when flattened. It does not apply to image observation space. + Used by the ``FlattenExtractor`` to compute the input shape. 
+ :param observation_space: :return: diff --git a/stable_baselines3/version.txt b/stable_baselines3/version.txt index f23fba9af..cebd34692 100644 --- a/stable_baselines3/version.txt +++ b/stable_baselines3/version.txt @@ -1 +1 @@ -0.11.0a5 +0.11.0a6 From 12d42e93f1aff9499b8288ff034b860ffb252b23 Mon Sep 17 00:00:00 2001 From: Antonin RAFFIN Date: Mon, 18 Jan 2021 11:42:19 +0100 Subject: [PATCH 42/70] Update doc --- docs/guide/algos.rst | 4 ++-- docs/misc/changelog.rst | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/guide/algos.rst b/docs/guide/algos.rst index 612430e7c..7988a6700 100644 --- a/docs/guide/algos.rst +++ b/docs/guide/algos.rst @@ -19,7 +19,7 @@ TD3 ✔️ ❌ ❌ ❌ .. note:: - ``Tuple`` spaces are not supported by any environment however ``Dict`` spaces of ``Box`` spaces are. + ``Tuple`` spaces are not supported by any environment, however ``Dict`` spaces of ``Box`` spaces are. ``Dict`` spaces containing other kinds of spaces (e.g., ``Discrete``) have not yet been explored. Actions ``gym.spaces``: @@ -45,7 +45,7 @@ Actions ``gym.spaces``: Reproducibility --------------- -Completely reproducible results are not guaranteed across Tensorflow releases or different platforms. +Completely reproducible results are not guaranteed across PyTorch releases or different platforms. Furthermore, results need not be reproducible between CPU and GPU executions, even when using identical seeds. In order to make computations deterministics, on your specific problem on one specific platform, diff --git a/docs/misc/changelog.rst b/docs/misc/changelog.rst index c8c622c03..f5a3de3da 100644 --- a/docs/misc/changelog.rst +++ b/docs/misc/changelog.rst @@ -57,7 +57,7 @@ Others: - Updated docker base image to Ubuntu 18.04 - Set tensorboard min version to 2.2.0 (earlier version are apparently not working with PyTorch) - Added warning for ``PPO`` when ``n_steps * n_envs`` is not a multiple of ``batch_size`` (last mini-batch truncated) (@decodyng) -- Updated ``env_checker`` because SB3 now supports Dict envs +.. 
- Updated ``env_checker`` because SB3 now supports Dict envs Documentation: ^^^^^^^^^^^^^^ From 5f45044ffdf404399daeaa9506375140b8a4a3cd Mon Sep 17 00:00:00 2001 From: Antonin RAFFIN Date: Mon, 18 Jan 2021 11:46:06 +0100 Subject: [PATCH 43/70] Fix doc build --- docs/misc/changelog.rst | 1 - 1 file changed, 1 deletion(-) diff --git a/docs/misc/changelog.rst b/docs/misc/changelog.rst index f5a3de3da..f4d742669 100644 --- a/docs/misc/changelog.rst +++ b/docs/misc/changelog.rst @@ -57,7 +57,6 @@ Others: - Updated docker base image to Ubuntu 18.04 - Set tensorboard min version to 2.2.0 (earlier version are apparently not working with PyTorch) - Added warning for ``PPO`` when ``n_steps * n_envs`` is not a multiple of ``batch_size`` (last mini-batch truncated) (@decodyng) -.. - Updated ``env_checker`` because SB3 now supports Dict envs Documentation: ^^^^^^^^^^^^^^ From 510821ba819a7b38c482c37710dea5a8357b2c8e Mon Sep 17 00:00:00 2001 From: Antonin RAFFIN Date: Mon, 18 Jan 2021 15:10:39 +0100 Subject: [PATCH 44/70] Bug fixes + add support for more types of dict env --- stable_baselines3/common/policies.py | 7 +- stable_baselines3/common/preprocessing.py | 6 +- stable_baselines3/common/torch_layers.py | 18 +---- tests/test_dict_env.py | 89 ++++++++++++++++++++++- 4 files changed, 99 insertions(+), 21 deletions(-) diff --git a/stable_baselines3/common/policies.py b/stable_baselines3/common/policies.py index 18f0572d7..637faafc4 100644 --- a/stable_baselines3/common/policies.py +++ b/stable_baselines3/common/policies.py @@ -298,9 +298,11 @@ def predict( for key, obs in observation.items(): obs_space = self.observation_space.spaces[key] if is_image_space(obs_space): - obs = BasePolicy.try_transpose_img_observation(obs, obs_space) + obs_ = BasePolicy.try_transpose_img_observation(obs, obs_space) else: - observation[key] = obs.reshape((-1,) + self.observation_space[key].shape) + obs_ = np.array(obs) + # Add batch dimension if needed + observation[key] = obs_.reshape((-1,) + self.observation_space[key].shape) elif is_image_space(self.observation_space): # Handle the different cases for images @@ -312,6 +314,7 @@ def predict( vectorized_env = is_vectorized_observation(observation, self.observation_space) if not isinstance(observation, dict): + # Add batch dimension if needed observation = observation.reshape((-1,) + self.observation_space.shape) observation = obs_as_tensor(observation, self.device) diff --git a/stable_baselines3/common/preprocessing.py b/stable_baselines3/common/preprocessing.py index 241c8dc63..5a217421a 100644 --- a/stable_baselines3/common/preprocessing.py +++ b/stable_baselines3/common/preprocessing.py @@ -115,9 +115,11 @@ def preprocess_obs( return obs.float() elif isinstance(observation_space, spaces.Dict): + # Do not modify the original observation by reference + preprocessed_obs = {} for key, _obs in obs.items(): - obs[key] = preprocess_obs(_obs, observation_space[key], normalize_images=normalize_images) - return obs + preprocessed_obs[key] = preprocess_obs(_obs, observation_space[key], normalize_images=normalize_images) + return preprocessed_obs else: raise NotImplementedError(f"Preprocessing not implemented for {observation_space}") diff --git a/stable_baselines3/common/torch_layers.py b/stable_baselines3/common/torch_layers.py index d9dc4183c..b85fb0af7 100644 --- a/stable_baselines3/common/torch_layers.py +++ b/stable_baselines3/common/torch_layers.py @@ -255,7 +255,7 @@ def __init__( mlp_net_arch: List[int] = None, activation_fn: Type[nn.Module] = nn.ReLU, combined_net_arch: List[int] = None, - check_channels: bool = True, + check_channels: bool = False, ): super(CombinedExtractor, self).__init__(observation_space, features_dim=features_dim) @@ -296,18 +296,9 @@ def __init__( total_concat_size += cnn_output_dim else: - # The observation key is a vector, create a MLP for it - extractors[key] = nn.Sequential( - *create_mlp( - subspace.shape[0], - mlp_output_dim, - mlp_net_arch, - activation_fn, - squash_output=False, - ) - ) - - total_concat_size += mlp_output_dim + # The observation key is a vector, flatten it if needed + extractors[key] = nn.Flatten() + total_concat_size += get_flattened_obs_dim(subspace) self.extractors = nn.ModuleDict(extractors) @@ -320,7 +311,6 @@ def forward(self, observations: TensorDict) -> th.Tensor: encoded_tensor_list = [] for key, 
extractor in self.extractors.items(): encoded_tensor_list.append(extractor(observations[key])) - return self.combined(th.cat(encoded_tensor_list, dim=1)) diff --git a/tests/test_dict_env.py b/tests/test_dict_env.py index dc3bf71c2..aaf1f2b3d 100644 --- a/tests/test_dict_env.py +++ b/tests/test_dict_env.py @@ -1,4 +1,7 @@ +import gym +import numpy as np import pytest +from gym import spaces from stable_baselines3 import A2C, DDPG, DQN, PPO, SAC, TD3 from stable_baselines3.common.envs import SimpleMultiObsEnv @@ -6,14 +9,94 @@ from stable_baselines3.common.vec_env import DummyVecEnv, VecFrameStack +class DummyDictEnv(gym.Env): + """Custom Environment for testing purposes only""" + + metadata = {"render.modes": ["human"]} + + def __init__(self, use_discrete_actions=False, channel_last=False): + super().__init__() + if use_discrete_actions: + self.action_space = spaces.Discrete(3) + else: + self.action_space = spaces.Box(low=-1, high=1, shape=(2,), dtype=np.float32) + N_CHANNELS = 1 + HEIGHT = 64 + WIDTH = 64 + + if channel_last: + obs_shape = (HEIGHT, WIDTH, N_CHANNELS) + else: + obs_shape = (N_CHANNELS, HEIGHT, WIDTH) + + self.observation_space = spaces.Dict( + { + # Image obs + "img": spaces.Box(low=0, high=255, shape=obs_shape, dtype=np.uint8), + # Vector obs + "vec": spaces.Box(low=-1, high=1, shape=(2,), dtype=np.float32), + # Discrete obs + "discrete": spaces.Discrete(4), + } + ) + + def step(self, action): + reward = 0.0 + done = False + return self.observation_space.sample(), reward, done, {} + + def compute_reward(self, achieved_goal, desired_goal, info): + return np.zeros((len(achieved_goal),)) + + def reset(self): + return self.observation_space.sample() + + def render(self, mode="human"): + pass + + @pytest.mark.parametrize("model_class", [PPO, A2C, DQN, DDPG, SAC, TD3]) def test_dict_spaces(model_class): """ Additional tests for PPO/A2C/SAC/DDPG/TD3/DQN to check observation space support - for Dictionary spaces using MultiInputPolicy. + with mixed observation. """ use_discrete_actions = model_class not in [SAC, TD3, DDPG] + # TODO(@J-Travnik): add test for channel last env + env = DummyDictEnv(use_discrete_actions=use_discrete_actions, channel_last=False) + env = gym.wrappers.TimeLimit(env, 100) + + kwargs = {} + n_steps = 256 + + if model_class in {A2C, PPO}: + kwargs = dict(n_steps=128, policy_kwargs=dict(features_extractor_kwargs=dict(features_dim=32))) + else: + # Avoid memory error when using replay buffer + # Reduce the size of the features + kwargs = dict( + buffer_size=250, + policy_kwargs=dict(features_extractor_kwargs=dict(features_dim=32)), + ) + if model_class == DQN: + kwargs["learning_starts"] = 0 + + model = model_class("MultiInputPolicy", env, gamma=0.5, seed=1, **kwargs) + + model.learn(total_timesteps=n_steps) + + evaluate_policy(model, env, n_eval_episodes=5, warn=False) + +@pytest.mark.parametrize("model_class", [PPO, A2C, DQN, DDPG, SAC, TD3]) +def test_dict_vec_framestack(model_class): + """ + Additional tests for PPO/A2C/SAC/DDPG/TD3/DQN to check observation space support + for Dictionary spaces and VecEnvWrapper using MultiInputPolicy. 
+ """ + use_discrete_actions = model_class not in [SAC, TD3, DDPG] + # TODO(@J-Travnik): add test for channel last env + # TODO(@J-Travnik): add test for more types of dict env (ex: discrete + Box) channels_order = {"vec": None, "img": "first"} env = DummyVecEnv( [lambda: SimpleMultiObsEnv(random_start=True, discrete_actions=use_discrete_actions, channel_last=False)] @@ -25,13 +108,13 @@ def test_dict_spaces(model_class): n_steps = 256 if model_class in {A2C, PPO}: - kwargs = dict(n_steps=128, policy_kwargs=dict(features_extractor_kwargs=dict(features_dim=32, check_channels=False))) + kwargs = dict(n_steps=128, policy_kwargs=dict(features_extractor_kwargs=dict(features_dim=32))) else: # Avoid memory error when using replay buffer # Reduce the size of the features kwargs = dict( buffer_size=250, - policy_kwargs=dict(features_extractor_kwargs=dict(features_dim=32, check_channels=False)), + policy_kwargs=dict(features_extractor_kwargs=dict(features_dim=32)), ) if model_class == DQN: kwargs["learning_starts"] = 0 From 8d9183f91f1ed4b045e93c34858b9f46df6a6b8a Mon Sep 17 00:00:00 2001 From: Antonin RAFFIN Date: Mon, 25 Jan 2021 10:38:08 +0100 Subject: [PATCH 45/70] Fixes + add multi env test --- stable_baselines3/common/buffers.py | 25 ++++++++++++++++--------- tests/test_dict_env.py | 25 ++++++++++++++++++++++++- 2 files changed, 40 insertions(+), 10 deletions(-) diff --git a/stable_baselines3/common/buffers.py b/stable_baselines3/common/buffers.py index b62981ea2..4a25c2992 100644 --- a/stable_baselines3/common/buffers.py +++ b/stable_baselines3/common/buffers.py @@ -213,15 +213,11 @@ def __init__( f"replay buffer {total_memory_usage:.2f}GB > {mem_available:.2f}GB" ) - def add( - self, obs: Union[np.ndarray, dict], next_obs: np.ndarray, action: np.ndarray, reward: np.ndarray, done: np.ndarray - ) -> None: + def add(self, obs: np.ndarray, next_obs: np.ndarray, action: np.ndarray, reward: np.ndarray, done: np.ndarray) -> None: # Copy to avoid modification by reference - self.observations[self.pos] = np.array(obs).copy() if self.optimize_memory_usage: - self.observations[(self.pos + 1) % self.buffer_size] = np.array(next_obs).copy() else: self.next_observations[self.pos] = np.array(next_obs).copy() @@ -362,7 +358,7 @@ def compute_returns_and_advantage(self, last_values: th.Tensor, dones: np.ndarra def add( self, - obs: Union[np.ndarray, dict], + obs: np.ndarray, action: np.ndarray, reward: np.ndarray, done: np.ndarray, @@ -509,7 +505,12 @@ def __init__( ) def add( - self, obs: Union[np.ndarray, Dict], next_obs: np.ndarray, action: np.ndarray, reward: np.ndarray, done: np.ndarray + self, + obs: Dict[str, np.ndarray], + next_obs: Dict[str, np.ndarray], + action: np.ndarray, + reward: np.ndarray, + done: np.ndarray, ) -> None: # Copy to avoid modification by reference @@ -622,7 +623,7 @@ def reset(self) -> None: def add( self, - obs: Union[np.ndarray, Dict], + obs: Dict[str, np.ndarray], action: np.ndarray, reward: np.ndarray, done: np.ndarray, @@ -644,7 +645,13 @@ def add( log_prob = log_prob.reshape(-1, 1) for key in self.observations.keys(): - self.observations[key][self.pos] = np.array(obs[key]).copy() + obs_ = np.array(obs[key]).copy() + # Reshape needed when using multiple envs with discrete observations + # as numpy cannot broadcast (n_discrete,) to (n_discrete, 1) + if isinstance(self.observation_space.spaces[key], spaces.Discrete): + obs_ = obs_.reshape((self.n_envs,) + self.obs_shape[key]) + self.observations[key][self.pos] = obs_ + self.actions[self.pos] = np.array(action).copy() 
self.rewards[self.pos] = np.array(reward).copy() self.dones[self.pos] = np.array(done).copy() diff --git a/tests/test_dict_env.py b/tests/test_dict_env.py index aaf1f2b3d..1c95bfc4a 100644 --- a/tests/test_dict_env.py +++ b/tests/test_dict_env.py @@ -4,6 +4,7 @@ from gym import spaces from stable_baselines3 import A2C, DDPG, DQN, PPO, SAC, TD3 +from stable_baselines3.common.env_util import make_vec_env from stable_baselines3.common.envs import SimpleMultiObsEnv from stable_baselines3.common.evaluation import evaluate_policy from stable_baselines3.common.vec_env import DummyVecEnv, VecFrameStack @@ -88,6 +89,29 @@ def test_dict_spaces(model_class): evaluate_policy(model, env, n_eval_episodes=5, warn=False) +@pytest.mark.parametrize("model_class", [PPO, A2C]) +def test_multiprocessing(model_class): + use_discrete_actions = model_class not in [SAC, TD3, DDPG] + + def make_env(): + env = DummyDictEnv(use_discrete_actions=use_discrete_actions, channel_last=False) + env = gym.wrappers.TimeLimit(env, 100) + return env + + env = make_vec_env(make_env, n_envs=2) + + kwargs = {} + n_steps = 256 + + if model_class in {A2C, PPO}: + kwargs = dict(n_steps=128, policy_kwargs=dict(features_extractor_kwargs=dict(features_dim=32))) + + model = model_class("MultiInputPolicy", env, gamma=0.5, seed=1, **kwargs) + + model.learn(total_timesteps=n_steps) + + @pytest.mark.parametrize("model_class", [PPO, A2C, DQN, DDPG, SAC, TD3]) def test_dict_vec_framestack(model_class): """ @@ -96,7 +120,6 @@ def test_dict_vec_framestack(model_class): """ use_discrete_actions = model_class not in [SAC, TD3, DDPG] # TODO(@J-Travnik): add test for channel last env - # TODO(@J-Travnik): add test for more types of dict env (ex: discrete + Box) channels_order = {"vec": None, "img": "first"} env = DummyVecEnv( [lambda: SimpleMultiObsEnv(random_start=True, discrete_actions=use_discrete_actions, channel_last=False)] From 3bb747a3e06ac5fc68cae9db325a1b848fdcf9ac Mon Sep 17 00:00:00 2001 From: "Anssi \"Miffyli\" Kanervisto" Date: Fri, 19 Feb 2021 01:57:25 +0200 Subject: [PATCH 46/70] Add support for vectranspose --- stable_baselines3/common/base_class.py | 25 ++++++++---- stable_baselines3/common/preprocessing.py | 13 ------- stable_baselines3/common/torch_layers.py | 1 - .../common/vec_env/vec_transpose.py | 39 +++++++++++++++++-- 4 files changed, 52 insertions(+), 26 deletions(-) diff --git a/stable_baselines3/common/base_class.py b/stable_baselines3/common/base_class.py index 5b7cbc0c2..e7184d775 100644 --- a/stable_baselines3/common/base_class.py +++ b/stable_baselines3/common/base_class.py @@ -195,14 +195,23 @@ def _wrap_env(env: GymEnv, verbose: int = 0, monitor_wrapper: bool = True) -> Ve print("Wrapping the env in a DummyVecEnv.") env = DummyVecEnv([lambda: env]) - if ( - is_image_space(env.observation_space) - and not is_vecenv_wrapped(env, VecTransposeImage) - and not is_image_space_channels_first(env.observation_space) - ): - if verbose >= 1: - print("Wrapping the env in a VecTransposeImage.") - env = VecTransposeImage(env) + if not is_vecenv_wrapped(env, VecTransposeImage): + wrap_with_vectranspose = False + if isinstance(env.observation_space, gym.spaces.dict.Dict): + # If even one of the keys is an image space in need of transpose, apply transpose + for space in env.observation_space.spaces.values(): + wrap_with_vectranspose = wrap_with_vectranspose or ( + is_image_space(space) and not is_image_space_channels_first(space) + ) + else: + wrap_with_vectranspose = 
is_image_space(env.observation_space) and not is_image_space_channels_first( + env.observation_space + ) + + if wrap_with_vectranspose: + if verbose >= 1: + print("Wrapping the env in a VecTransposeImage.") + env = VecTransposeImage(env) # check if wrapper for dict support is needed when using HER # TODO(antonin): remove this with the new version of HER diff --git a/stable_baselines3/common/preprocessing.py b/stable_baselines3/common/preprocessing.py index 5a217421a..35642f006 100644 --- a/stable_baselines3/common/preprocessing.py +++ b/stable_baselines3/common/preprocessing.py @@ -65,19 +65,6 @@ def is_image_space( return False -def has_image_space(observation_space: spaces.Dict) -> bool: - """ - Check if a Dict observation space has an image space within its subspaces - - :param observation_space: - :return: - """ - for key, subspace in observation_space.spaces.items(): - if is_image_space(subspace): - return True - return False - - def preprocess_obs( obs: th.Tensor, observation_space: spaces.Space, normalize_images: bool = True ) -> Union[th.Tensor, Dict[str, th.Tensor]]: diff --git a/stable_baselines3/common/torch_layers.py b/stable_baselines3/common/torch_layers.py index b85fb0af7..f66c5898f 100644 --- a/stable_baselines3/common/torch_layers.py +++ b/stable_baselines3/common/torch_layers.py @@ -269,7 +269,6 @@ def __init__( total_concat_size = 0 for key, subspace in observation_space.spaces.items(): - if is_image_space(subspace, check_channels=check_channels): # The observation key is an image: create a CNN for it n_input_channels = subspace.shape[0] diff --git a/stable_baselines3/common/vec_env/vec_transpose.py b/stable_baselines3/common/vec_env/vec_transpose.py index 343a994c8..e951f2935 100644 --- a/stable_baselines3/common/vec_env/vec_transpose.py +++ b/stable_baselines3/common/vec_env/vec_transpose.py @@ -1,3 +1,6 @@ +from copy import deepcopy +from typing import Any, Dict, Union + import numpy as np from gym import spaces @@ -14,9 +17,20 @@ class VecTransposeImage(VecEnvWrapper): """ def __init__(self, venv: VecEnv): - assert is_image_space(venv.observation_space), "The observation space must be an image" + assert is_image_space(venv.observation_space) or isinstance( + venv.observation_space, spaces.dict.Dict + ), "The observation space must be an image or dictionary observation space" - observation_space = self.transpose_space(venv.observation_space) + if isinstance(venv.observation_space, spaces.dict.Dict): + self.image_space_keys = [] + observation_space = deepcopy(venv.observation_space) + for key, space in observation_space.spaces.items(): + if is_image_space(space): + # Keep track of which keys should be transposed later + self.image_space_keys.append(key) + observation_space.spaces[key] = self.transpose_space(space) + else: + observation_space = self.transpose_space(venv.observation_space) super(VecTransposeImage, self).__init__(venv, observation_space=observation_space) @staticmethod @@ -44,15 +58,32 @@ def transpose_image(image: np.ndarray) -> np.ndarray: return np.transpose(image, (2, 0, 1)) return np.transpose(image, (0, 3, 1, 2)) + def transpose_observations(self, observations: Union[np.ndarray, Dict]) -> Union[np.ndarray, Dict]: + """ + Transpose (if needed) and return new observations. 
+ + :param observations: + :return: Transposed observations + """ + if isinstance(observations, dict): + # Avoid modifying the original object in place + observations = deepcopy(observations) + for k in self.image_space_keys: + observations[k] = self.transpose_image(observations[k]) + else: + observations = self.transpose_image(observations) + return observations + def step_wait(self) -> VecEnvStepReturn: observations, rewards, dones, infos = self.venv.step_wait() - return self.transpose_image(observations), rewards, dones, infos + observations = self.transpose_observations(observations) + return observations, rewards, dones, infos def reset(self) -> np.ndarray: """ Reset all environments """ - return self.transpose_image(self.venv.reset()) + return self.transpose_observations(self.venv.reset()) def close(self) -> None: self.venv.close() From cda8c21eab5f84030d73be0686e1ed0258ebea4f Mon Sep 17 00:00:00 2001 From: "Anssi \"Miffyli\" Kanervisto" Date: Fri, 19 Feb 2021 02:12:52 +0200 Subject: [PATCH 47/70] Fix stacked obs for dict and add tests --- .../common/vec_env/stacked_observations.py | 2 +- tests/test_dict_env.py | 15 +++++++-------- 2 files changed, 8 insertions(+), 9 deletions(-) diff --git a/stable_baselines3/common/vec_env/stacked_observations.py b/stable_baselines3/common/vec_env/stacked_observations.py index ce4cecb20..513d84a22 100644 --- a/stable_baselines3/common/vec_env/stacked_observations.py +++ b/stable_baselines3/common/vec_env/stacked_observations.py @@ -258,7 +258,7 @@ def update( else: warnings.warn("VecFrameStack wrapping a VecEnv without terminal_observation info") self.stackedobs[key][i] = 0 - if self.channels_first: + if self.channels_first[key]: self.stackedobs[key][:, -stack_ax_size:, ...] = observations[key] else: self.stackedobs[key][..., -stack_ax_size:] = observations[key] diff --git a/tests/test_dict_env.py b/tests/test_dict_env.py index 1c95bfc4a..7b9c41079 100644 --- a/tests/test_dict_env.py +++ b/tests/test_dict_env.py @@ -57,14 +57,14 @@ def render(self, mode="human"): @pytest.mark.parametrize("model_class", [PPO, A2C, DQN, DDPG, SAC, TD3]) -def test_dict_spaces(model_class): +@pytest.mark.parametrize("channel_last", [False, True]) +def test_dict_spaces(model_class, channel_last): """ Additional tests for PPO/A2C/SAC/DDPG/TD3/DQN to check observation space support with mixed observation. """ use_discrete_actions = model_class not in [SAC, TD3, DDPG] - # TODO(@J-Travnik): add test for channel last env - env = DummyDictEnv(use_discrete_actions=use_discrete_actions, channel_last=False) + env = DummyDictEnv(use_discrete_actions=use_discrete_actions, channel_last=channel_last) env = gym.wrappers.TimeLimit(env, 100) kwargs = {} @@ -94,7 +94,6 @@ def test_multiprocessing(model_class): use_discrete_actions = model_class not in [SAC, TD3, DDPG] def make_env(): - # TODO(@J-Travnik): add test for channel last env env = DummyDictEnv(use_discrete_actions=use_discrete_actions, channel_last=False) env = gym.wrappers.TimeLimit(env, 100) return env @@ -113,16 +112,16 @@ def make_env(): @pytest.mark.parametrize("model_class", [PPO, A2C, DQN, DDPG, SAC, TD3]) -def test_dict_vec_framestack(model_class): +@pytest.mark.parametrize("channel_last", [False, True]) +def test_dict_vec_framestack(model_class, channel_last): """ Additional tests for PPO/A2C/SAC/DDPG/TD3/DQN to check observation space support for Dictionary spaces and VecEnvWrapper using MultiInputPolicy. 
""" use_discrete_actions = model_class not in [SAC, TD3, DDPG] - # TODO(@J-Travnik): add test for channel last env - channels_order = {"vec": None, "img": "first"} + channels_order = {"vec": None, "img": "last" if channel_last else "first"} env = DummyVecEnv( - [lambda: SimpleMultiObsEnv(random_start=True, discrete_actions=use_discrete_actions, channel_last=False)] + [lambda: SimpleMultiObsEnv(random_start=True, discrete_actions=use_discrete_actions, channel_last=channel_last)] ) env = VecFrameStack(env, n_stack=3, channels_order=channels_order) From f770217a44365805a2b97023eeae1a8c1e30d024 Mon Sep 17 00:00:00 2001 From: "Anssi \"Miffyli\" Kanervisto" Date: Fri, 19 Feb 2021 02:39:46 +0200 Subject: [PATCH 48/70] Add check for nested spaces. Fix dict-subprocvecenv test --- stable_baselines3/common/base_class.py | 11 ++++++++-- stable_baselines3/common/preprocessing.py | 16 ++++++++++++++ stable_baselines3/common/vec_env/util.py | 14 ++++++------ tests/test_dict_env.py | 26 ++++++++++++++++++++--- 4 files changed, 56 insertions(+), 11 deletions(-) diff --git a/stable_baselines3/common/base_class.py b/stable_baselines3/common/base_class.py index e7184d775..ea924c017 100644 --- a/stable_baselines3/common/base_class.py +++ b/stable_baselines3/common/base_class.py @@ -36,6 +36,7 @@ unwrap_vec_normalize, ) from stable_baselines3.common.vec_env.obs_dict_wrapper import ObsDictWrapper +from stable_baselines3.common.vec_env.util import check_for_nested_spaces def maybe_make_env(env: Union[GymEnv, str, None], verbose: int) -> Optional[GymEnv]: @@ -195,6 +196,14 @@ def _wrap_env(env: GymEnv, verbose: int = 0, monitor_wrapper: bool = True) -> Ve print("Wrapping the env in a DummyVecEnv.") env = DummyVecEnv([lambda: env]) + # Make sure that dict-spaces are not nested (not supported) + check_for_nested_spaces(env.observation_space) + + if isinstance(env.observation_space, gym.spaces.dict.Dict): + for space in env.observation_space.spaces.values(): + if isinstance(space, gym.spaces.dict.Dict): + raise ValueError("Nested observation spaces are not supported (Dict spaces inside Dict space).") + if not is_vecenv_wrapped(env, VecTransposeImage): wrap_with_vectranspose = False if isinstance(env.observation_space, gym.spaces.dict.Dict): @@ -220,8 +229,6 @@ def _wrap_env(env: GymEnv, verbose: int = 0, monitor_wrapper: bool = True) -> Ve ): env = ObsDictWrapper(env) - # TODO(@J-Travnik): check that it is a first-level dict obs space only - return env @abstractmethod diff --git a/stable_baselines3/common/preprocessing.py b/stable_baselines3/common/preprocessing.py index 35642f006..f72ff68ff 100644 --- a/stable_baselines3/common/preprocessing.py +++ b/stable_baselines3/common/preprocessing.py @@ -178,3 +178,19 @@ def get_action_dim(action_space: spaces.Space) -> int: return int(action_space.n) else: raise NotImplementedError(f"{action_space} action space is not supported") + + +def check_for_nested_spaces(obs_space: spaces.Space): + """ + Make sure the observation space does not have nested spaces (Dicts/Tuples inside Dicts/Tuples). + If so, raise an Exception informing that there is no support for this. + + :param obs_space: an observation space + :return: + """ + if isinstance(obs_space, (spaces.dict.Dict, spaces.tuple.Tuple)): + for sub_space in obs_space.spaces.values(): + if isinstance(sub_space, (spaces.dict.Dict, spaces.tuple.Tuple)): + raise NotImplementedError( + "Nested observation spaces are not supported (Tuple/Dict space inside Tuple/Dict space)." 
+ ) diff --git a/stable_baselines3/common/vec_env/util.py b/stable_baselines3/common/vec_env/util.py index 8bc7ee44a..859f1ec95 100644 --- a/stable_baselines3/common/vec_env/util.py +++ b/stable_baselines3/common/vec_env/util.py @@ -7,6 +7,7 @@ import gym import numpy as np +from stable_baselines3.common.preprocessing import check_for_nested_spaces from stable_baselines3.common.vec_env.base_vec_env import VecEnvObs @@ -21,22 +22,22 @@ def copy_obs_dict(obs: Dict[str, np.ndarray]) -> Dict[str, np.ndarray]: return OrderedDict([(k, np.copy(v)) for k, v in obs.items()]) -def dict_to_obs(space: gym.spaces.Space, obs_dict: Dict[Any, np.ndarray]) -> VecEnvObs: +def dict_to_obs(obs_space: gym.spaces.Space, obs_dict: Dict[Any, np.ndarray]) -> VecEnvObs: """ Convert an internal representation raw_obs into the appropriate type specified by space. - :param space: an observation space. + :param obs_space: an observation space. :param obs_dict: a dict of numpy arrays. :return: returns an observation of the same type as space. If space is Dict, function is identity; if space is Tuple, converts dict to Tuple; otherwise, space is unstructured and returns the value raw_obs[None]. """ - if isinstance(space, gym.spaces.Dict): + if isinstance(obs_space, gym.spaces.Dict): return obs_dict - elif isinstance(space, gym.spaces.Tuple): - assert len(obs_dict) == len(space.spaces), "size of observation does not match size of observation space" - return tuple((obs_dict[i] for i in range(len(space.spaces)))) + elif isinstance(obs_space, gym.spaces.Tuple): + assert len(obs_dict) == len(obs_space.spaces), "size of observation does not match size of observation space" + return tuple((obs_dict[i] for i in range(len(obs_space.spaces)))) else: assert set(obs_dict.keys()) == {None}, "multiple observation keys for unstructured observation space" return obs_dict[None] @@ -56,6 +57,7 @@ def obs_space_info(obs_space: gym.spaces.Space) -> Tuple[List[str], Dict[Any, Tu shapes: a dict mapping keys to shapes. dtypes: a dict mapping keys to dtypes. 
""" + check_for_nested_spaces(obs_space) if isinstance(obs_space, gym.spaces.Dict): assert isinstance(obs_space.spaces, OrderedDict), "Dict space must have ordered subspaces" subspaces = obs_space.spaces diff --git a/tests/test_dict_env.py b/tests/test_dict_env.py index 7b9c41079..4166cbff1 100644 --- a/tests/test_dict_env.py +++ b/tests/test_dict_env.py @@ -7,7 +7,7 @@ from stable_baselines3.common.env_util import make_vec_env from stable_baselines3.common.envs import SimpleMultiObsEnv from stable_baselines3.common.evaluation import evaluate_policy -from stable_baselines3.common.vec_env import DummyVecEnv, VecFrameStack +from stable_baselines3.common.vec_env import DummyVecEnv, SubprocVecEnv, VecFrameStack class DummyDictEnv(gym.Env): @@ -15,7 +15,7 @@ class DummyDictEnv(gym.Env): metadata = {"render.modes": ["human"]} - def __init__(self, use_discrete_actions=False, channel_last=False): + def __init__(self, use_discrete_actions=False, channel_last=False, nested_dict_obs=False): super().__init__() if use_discrete_actions: self.action_space = spaces.Discrete(3) @@ -41,6 +41,10 @@ def __init__(self, use_discrete_actions=False, channel_last=False): } ) + if nested_dict_obs: + # Add dictionary observation inside observation space + self.observation_space.spaces["nested-dict"] = spaces.Dict({"nested-dict-discrete": spaces.Discrete(4)}) + def step(self, action): reward = 0.0 done = False @@ -98,7 +102,7 @@ def make_env(): env = gym.wrappers.TimeLimit(env, 100) return env - env = make_vec_env(make_env, n_envs=2) + env = make_vec_env(make_env, n_envs=2, vec_env_cls=SubprocVecEnv) kwargs = {} n_steps = 256 @@ -146,3 +150,19 @@ def test_dict_vec_framestack(model_class, channel_last): model.learn(total_timesteps=n_steps) evaluate_policy(model, env, n_eval_episodes=5, warn=False) + + +def test_dict_nested(): + """ + Make sure we throw an appropiate error with nested Dict observation spaces + """ + # Test without manual wrapping to vec-env + env = DummyDictEnv(nested_dict_obs=True) + + with pytest.raises(NotImplementedError): + _ = PPO("MultiInputPolicy", env, seed=1) + + # Test with manual vec-env wrapping + + with pytest.raises(NotImplementedError): + env = DummyVecEnv([lambda: DummyDictEnv(nested_dict_obs=True)]) From 5cbde19e8cfc5fc90209fc8ae43fabc804a75d35 Mon Sep 17 00:00:00 2001 From: "Anssi \"Miffyli\" Kanervisto" Date: Fri, 19 Feb 2021 03:05:08 +0200 Subject: [PATCH 49/70] Fix (single) pytype error --- stable_baselines3/common/vec_env/vec_transpose.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stable_baselines3/common/vec_env/vec_transpose.py b/stable_baselines3/common/vec_env/vec_transpose.py index e951f2935..907d57aa4 100644 --- a/stable_baselines3/common/vec_env/vec_transpose.py +++ b/stable_baselines3/common/vec_env/vec_transpose.py @@ -79,7 +79,7 @@ def step_wait(self) -> VecEnvStepReturn: observations = self.transpose_observations(observations) return observations, rewards, dones, infos - def reset(self) -> np.ndarray: + def reset(self) -> Union[np.ndarray, Dict]: """ Reset all environments """ From 4464744c7334888d5f1af379b046ccad5bfc3432 Mon Sep 17 00:00:00 2001 From: "Anssi \"Miffyli\" Kanervisto" Date: Fri, 19 Feb 2021 03:07:12 +0200 Subject: [PATCH 50/70] Simplify CombinedExtractor --- stable_baselines3/common/torch_layers.py | 61 ++++-------------------- 1 file changed, 8 insertions(+), 53 deletions(-) diff --git a/stable_baselines3/common/torch_layers.py b/stable_baselines3/common/torch_layers.py index f66c5898f..8d97e487a 100644 --- 
a/stable_baselines3/common/torch_layers.py +++ b/stable_baselines3/common/torch_layers.py @@ -236,64 +236,20 @@ class CombinedExtractor(BaseFeaturesExtractor): the output features are concatenated and fed through additional MLP network ("combined"). :param observation_space: - :param features_dim: Number of features extracted. - This corresponds to the number of unit for the last layer. :param cnn_output_dim: Number of features to output from each CNN submodule(s) - :param mlp_output_dim: Number of features to output from each MLP submodule(s) - :param mlp_net_arch: Architecture of each MLP network module - :param activation_fn: The activation function used in all MLP submodules and combined network - :param combined_net_arch: Architecture of the combined network module which calculates the final feature extracted - :param check_channels: Whether channels should be checked for is_image_space """ - def __init__( - self, - observation_space: gym.spaces.Dict, - features_dim: int = 64, - cnn_output_dim: int = 64, - mlp_output_dim: int = 64, - mlp_net_arch: List[int] = None, - activation_fn: Type[nn.Module] = nn.ReLU, - combined_net_arch: List[int] = None, - check_channels: bool = False, - ): - super(CombinedExtractor, self).__init__(observation_space, features_dim=features_dim) - - if mlp_net_arch is None: - mlp_net_arch = [64, 64] - - if combined_net_arch is None: - combined_net_arch = [64, 64] + def __init__(self, observation_space: gym.spaces.Dict, cnn_output_dim: int = 64): + # TODO we do not know features-dim here before going over all the items, so put something there. This is dirty! + super(CombinedExtractor, self).__init__(observation_space, features_dim=1) extractors = {} total_concat_size = 0 for key, subspace in observation_space.spaces.items(): - if is_image_space(subspace, check_channels=check_channels): - # The observation key is an image: create a CNN for it - n_input_channels = subspace.shape[0] - - # Nature CNN - cnn = nn.Sequential( - nn.Conv2d(n_input_channels, 32, kernel_size=8, stride=4, padding=0), - nn.ReLU(), - nn.Conv2d(32, 64, kernel_size=4, stride=2, padding=0), - nn.ReLU(), - nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=0), - nn.ReLU(), - nn.Flatten(), - ) - - # TODO is this the best practice for finding out the size? 
- with th.no_grad(): - n_flatten = cnn(th.as_tensor(subspace.sample()[None]).float()).shape[1] - - cnn_linear = nn.Sequential(nn.Linear(n_flatten, cnn_output_dim), nn.ReLU()) - - extractors[key] = nn.Sequential(*(list(cnn) + list(cnn_linear))) - + if is_image_space(subspace): + extractors[key] = NatureCNN(subspace, features_dim=cnn_output_dim) total_concat_size += cnn_output_dim - else: # The observation key is a vector, flatten it if needed extractors[key] = nn.Flatten() @@ -301,16 +257,15 @@ def __init__( self.extractors = nn.ModuleDict(extractors) - self.combined = nn.Sequential( - *create_mlp(total_concat_size, features_dim, combined_net_arch, activation_fn, squash_output=False) - ) + # Update the features dim manually + self._features_dim = total_concat_size def forward(self, observations: TensorDict) -> th.Tensor: encoded_tensor_list = [] for key, extractor in self.extractors.items(): encoded_tensor_list.append(extractor(observations[key])) - return self.combined(th.cat(encoded_tensor_list, dim=1)) + return th.cat(encoded_tensor_list, dim=1) def get_actor_critic_arch(net_arch: Union[List[int], Dict[str, List[int]]]) -> Tuple[List[int], List[int]]: From 1f5553a288e1bf8c61c2fadd29814ffeff0a1dfd Mon Sep 17 00:00:00 2001 From: Antonin RAFFIN Date: Fri, 19 Feb 2021 11:01:30 +0100 Subject: [PATCH 51/70] Fix tests --- stable_baselines3/common/base_class.py | 3 +-- .../common/vec_env/vec_transpose.py | 2 +- tests/test_dict_env.py | 20 ++++++++++++------- 3 files changed, 15 insertions(+), 10 deletions(-) diff --git a/stable_baselines3/common/base_class.py b/stable_baselines3/common/base_class.py index ea924c017..e9308863c 100644 --- a/stable_baselines3/common/base_class.py +++ b/stable_baselines3/common/base_class.py @@ -17,7 +17,7 @@ from stable_baselines3.common.monitor import Monitor from stable_baselines3.common.noise import ActionNoise from stable_baselines3.common.policies import BasePolicy, get_policy_from_name -from stable_baselines3.common.preprocessing import is_image_space, is_image_space_channels_first +from stable_baselines3.common.preprocessing import check_for_nested_spaces, is_image_space, is_image_space_channels_first from stable_baselines3.common.save_util import load_from_zip_file, recursive_getattr, recursive_setattr, save_to_zip_file from stable_baselines3.common.type_aliases import GymEnv, MaybeCallback, Schedule from stable_baselines3.common.utils import ( @@ -36,7 +36,6 @@ unwrap_vec_normalize, ) from stable_baselines3.common.vec_env.obs_dict_wrapper import ObsDictWrapper -from stable_baselines3.common.vec_env.util import check_for_nested_spaces def maybe_make_env(env: Union[GymEnv, str, None], verbose: int) -> Optional[GymEnv]: diff --git a/stable_baselines3/common/vec_env/vec_transpose.py b/stable_baselines3/common/vec_env/vec_transpose.py index 907d57aa4..257506e0e 100644 --- a/stable_baselines3/common/vec_env/vec_transpose.py +++ b/stable_baselines3/common/vec_env/vec_transpose.py @@ -1,5 +1,5 @@ from copy import deepcopy -from typing import Any, Dict, Union +from typing import Dict, Union import numpy as np from gym import spaces diff --git a/tests/test_dict_env.py b/tests/test_dict_env.py index 4166cbff1..5155eb218 100644 --- a/tests/test_dict_env.py +++ b/tests/test_dict_env.py @@ -75,13 +75,16 @@ def test_dict_spaces(model_class, channel_last): n_steps = 256 if model_class in {A2C, PPO}: - kwargs = dict(n_steps=128, policy_kwargs=dict(features_extractor_kwargs=dict(features_dim=32))) + kwargs = dict(n_steps=128, 
policy_kwargs=dict(features_extractor_kwargs=dict(cnn_output_dim=32))) else: # Avoid memory error when using replay buffer - # Reduce the size of the features + # Reduce the size of the features and make learning faster kwargs = dict( buffer_size=250, - policy_kwargs=dict(features_extractor_kwargs=dict(features_dim=32)), + policy_kwargs=dict(features_extractor_kwargs=dict(cnn_output_dim=32)), + train_freq=8, + gradient_steps=1, + n_episodes_rollout=-1, ) if model_class == DQN: kwargs["learning_starts"] = 0 @@ -108,7 +111,7 @@ def make_env(): n_steps = 256 if model_class in {A2C, PPO}: - kwargs = dict(n_steps=128, policy_kwargs=dict(features_extractor_kwargs=dict(features_dim=32))) + kwargs = dict(n_steps=128, policy_kwargs=dict(features_extractor_kwargs=dict(cnn_output_dim=32))) model = model_class("MultiInputPolicy", env, gamma=0.5, seed=1, **kwargs) @@ -134,13 +137,16 @@ def test_dict_vec_framestack(model_class, channel_last): n_steps = 256 if model_class in {A2C, PPO}: - kwargs = dict(n_steps=128, policy_kwargs=dict(features_extractor_kwargs=dict(features_dim=32))) + kwargs = dict(n_steps=128, policy_kwargs=dict(features_extractor_kwargs=dict(cnn_output_dim=32))) else: # Avoid memory error when using replay buffer - # Reduce the size of the features + # Reduce the size of the features and make learning faster kwargs = dict( buffer_size=250, - policy_kwargs=dict(features_extractor_kwargs=dict(features_dim=32)), + policy_kwargs=dict(features_extractor_kwargs=dict(cnn_output_dim=32)), + train_freq=8, + gradient_steps=1, + n_episodes_rollout=-1, ) if model_class == DQN: kwargs["learning_starts"] = 0 From 671656759fe441f6c5c99fd7bf4e3589f162e4e4 Mon Sep 17 00:00:00 2001 From: Antonin RAFFIN Date: Fri, 19 Feb 2021 21:18:57 +0100 Subject: [PATCH 52/70] Fix check --- stable_baselines3/common/preprocessing.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/stable_baselines3/common/preprocessing.py b/stable_baselines3/common/preprocessing.py index f72ff68ff..e297e3afa 100644 --- a/stable_baselines3/common/preprocessing.py +++ b/stable_baselines3/common/preprocessing.py @@ -188,9 +188,10 @@ def check_for_nested_spaces(obs_space: spaces.Space): :param obs_space: an observation space :return: """ - if isinstance(obs_space, (spaces.dict.Dict, spaces.tuple.Tuple)): - for sub_space in obs_space.spaces.values(): - if isinstance(sub_space, (spaces.dict.Dict, spaces.tuple.Tuple)): + if isinstance(obs_space, (spaces.Dict, spaces.Tuple)): + sub_spaces = obs_space.spaces.values() if isinstance(obs_space, spaces.Dict) else obs_space.spaces + for sub_space in sub_spaces: + if isinstance(sub_space, (spaces.Dict, spaces.Tuple)): raise NotImplementedError( "Nested observation spaces are not supported (Tuple/Dict space inside Tuple/Dict space)." 
) From e7567935d5b29a74af0eadabbfa7ba8570179fdf Mon Sep 17 00:00:00 2001 From: Antonin Raffin Date: Tue, 2 Mar 2021 16:49:32 +0100 Subject: [PATCH 53/70] Merge branch 'master' into feat/dict_observations --- .gitlab-ci.yml | 2 +- README.md | 4 +- docs/guide/custom_policy.rst | 2 +- docs/guide/examples.rst | 13 +- docs/misc/changelog.rst | 49 +++++- docs/misc/projects.rst | 9 + docs/modules/her.rst | 11 +- stable_baselines3/common/base_class.py | 9 +- stable_baselines3/common/cmd_util.py | 7 - .../common/off_policy_algorithm.py | 160 +++++++++++------- stable_baselines3/common/policies.py | 15 +- stable_baselines3/common/type_aliases.py | 11 ++ stable_baselines3/common/utils.py | 30 +++- .../common/vec_env/vec_normalize.py | 22 ++- .../common/vec_env/vec_transpose.py | 10 +- stable_baselines3/ddpg/ddpg.py | 14 +- stable_baselines3/dqn/dqn.py | 12 +- stable_baselines3/dqn/policies.py | 8 +- stable_baselines3/her/her.py | 54 +++--- stable_baselines3/sac/policies.py | 8 +- stable_baselines3/sac/sac.py | 12 +- stable_baselines3/td3/policies.py | 20 ++- stable_baselines3/td3/td3.py | 19 +-- stable_baselines3/version.txt | 2 +- tests/test_cnn.py | 4 + tests/test_her.py | 23 ++- tests/test_run.py | 36 ++++ tests/test_save_load.py | 6 +- tests/test_utils.py | 6 - 29 files changed, 377 insertions(+), 201 deletions(-) delete mode 100644 stable_baselines3/common/cmd_util.py diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 6a31f4f19..9e0c7ec8d 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -1,4 +1,4 @@ -image: stablebaselines/stable-baselines3-cpu:0.11.0a4 +image: stablebaselines/stable-baselines3-cpu:0.11.1 type-check: script: diff --git a/README.md b/README.md index 215405597..5584ca730 100644 --- a/README.md +++ b/README.md @@ -3,14 +3,12 @@ [![pipeline status](https://gitlab.com/araffin/stable-baselines3/badges/master/pipeline.svg)](https://gitlab.com/araffin/stable-baselines3/-/commits/master) [![Documentation Status](https://readthedocs.org/projects/stable-baselines/badge/?version=master)](https://stable-baselines3.readthedocs.io/en/master/?badge=master) [![coverage report](https://gitlab.com/araffin/stable-baselines3/badges/master/coverage.svg)](https://gitlab.com/araffin/stable-baselines3/-/commits/master) [![codestyle](https://img.shields.io/badge/code%20style-black-000000.svg)](https://github.com/psf/black) -**WARNING: Stable Baselines3 is currently in a beta version, breaking changes may occur before 1.0 is released** - # Stable Baselines3 Stable Baselines3 (SB3) is a set of reliable implementations of reinforcement learning algorithms in PyTorch. It is the next major version of [Stable Baselines](https://github.com/hill-a/stable-baselines). -You can read a detailed presentation of Stable Baselines in the [Medium article](https://medium.com/@araffin/stable-baselines-a-fork-of-openai-baselines-reinforcement-learning-made-easy-df87c4b2fc82). +You can read a detailed presentation of Stable Baselines3 in the [v1.0 blog post](https://araffin.github.io/post/sb3/). These algorithms will make it easier for the research community and industry to replicate, refine, and identify new ideas, and will create good baselines to build projects on top of. We expect these tools will be used as a base around which new ideas can be added, and as a tool for comparing a new approach against existing ones. We also hope that the simplicity of these tools will allow beginners to experiment with a more advanced toolset, without being buried in implementation details. 
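A minimal sketch of the contract enforced by the ``check_for_nested_spaces`` helper fixed just above (illustrative only, not part of the patch series; the helper inspects one level of sub-spaces, which is all ``_wrap_env`` needs):

.. code-block:: python

    from gym import spaces

    from stable_baselines3.common.preprocessing import check_for_nested_spaces

    # A flat Dict space passes the check silently
    check_for_nested_spaces(
        spaces.Dict({"img": spaces.Box(low=0, high=255, shape=(1, 36, 36)), "d": spaces.Discrete(4)})
    )

    # One level of nesting (Dict inside Dict) is rejected
    try:
        check_for_nested_spaces(spaces.Dict({"inner": spaces.Dict({"d": spaces.Discrete(4)})}))
    except NotImplementedError as error:
        print(error)  # Nested observation spaces are not supported ...
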
diff --git a/docs/guide/custom_policy.rst b/docs/guide/custom_policy.rst index 168625854..f8aecfeaf 100644 --- a/docs/guide/custom_policy.rst +++ b/docs/guide/custom_policy.rst @@ -42,7 +42,7 @@ using ``policy_kwargs`` parameter: del model # the policy_kwargs are automatically loaded - model = PPO.load("ppo_cartpole") + model = PPO.load("ppo_cartpole", env=env) Custom Feature Extractor diff --git a/docs/guide/examples.rst b/docs/guide/examples.rst index 8fe943a44..1f9124501 100644 --- a/docs/guide/examples.rst +++ b/docs/guide/examples.rst @@ -76,7 +76,7 @@ In the following example, we will train, save and load a DQN model on the Lunar del model # delete trained model to demonstrate loading # Load the trained agent - model = DQN.load("dqn_lunar") + model = DQN.load("dqn_lunar", env=env) # Evaluate the agent # NOTE: If you use wrappers with your environment that modify rewards, @@ -333,6 +333,7 @@ will compute a running average and standard deviation of input features (it can .. code-block:: python + import os import gym import pybullet_envs @@ -356,9 +357,6 @@ will compute a running average and standard deviation of input features (it can # To demonstrate loading del model, env - # Load the agent - model = PPO.load(log_dir + "ppo_halfcheetah") - # Load the saved statistics env = DummyVecEnv([lambda: gym.make("HalfCheetahBulletEnv-v0")]) env = VecNormalize.load(stats_path, env) @@ -367,6 +365,9 @@ will compute a running average and standard deviation of input features (it can # reward normalization is not needed at test time env.norm_reward = False + # Load the agent + model = PPO.load(log_dir + "ppo_halfcheetah", env=env) + Hindsight Experience Replay (HER) --------------------------------- @@ -426,6 +427,8 @@ The parking env is a goal-conditioned continuous control task, in which the vehi model.save("her_sac_highway") # Load saved model + # Because it needs access to `env.compute_reward()` + # HER must be loaded with the env model = HER.load("her_sac_highway", env=env) obs = env.reset() @@ -540,7 +543,7 @@ Behind the scene, SB3 uses an :ref:`EvalCallback `. # Note: if you don't save the complete model with `model.save()` # you cannot continue training afterward policy = model.policy - policy.save("sac_policy_pendulum.pkl") + policy.save("sac_policy_pendulum") # Retrieve the environment env = model.get_env() diff --git a/docs/misc/changelog.rst b/docs/misc/changelog.rst index ed97e48e6..e497dcbaa 100644 --- a/docs/misc/changelog.rst +++ b/docs/misc/changelog.rst @@ -3,7 +3,31 @@ Changelog ========== -Pre-Release 0.11.0a7 (WIP) +Release 1.0rc0 (2021-02-28) +------------------------------- + +Breaking Changes: +^^^^^^^^^^^^^^^^^ +- Removed ``stable_baselines3.common.cmd_util`` (already deprecated), please use ``env_util`` instead +- All customs environments (e.g. the ``BitFlippingEnv`` or ``IdentityEnv``) were moved to ``stable_baselines3.common.envs`` folder + + +Documentation: +^^^^^^^^^^^^^^ +- Fixed examples +- Added new project using SB3: rl_reach (@PierreExeter) + + +Pre-Release 0.11.1 (2021-02-27) +------------------------------- + +Bug Fixes: +^^^^^^^^^^ +- Fixed a bug where ``train_freq`` was not properly converted when loading a saved model + + + +Pre-Release 0.11.0 (2021-02-27) ------------------------------- Breaking Changes: @@ -12,7 +36,18 @@ Breaking Changes: this allows to return the unnormalized reward in the case of Atari games for instance. 
- Renamed ``common.vec_env.is_wrapped`` to ``common.vec_env.is_vecenv_wrapped`` to avoid confusion with the new ``is_wrapped()`` helper -- All customs environments (e.g. the ``BitFlippingEnv`` or ``IdentityEnv``) were moved to ``stable_baselines3.common.envs`` folder +- Renamed ``_get_data()`` to ``_get_constructor_parameters()`` for policies (this affects independent saving/loading of policies) +- Removed ``n_episodes_rollout`` and merged it with ``train_freq``, which now accepts a tuple ``(frequency, unit)``: +- ``replay_buffer`` in ``collect_rollout`` is no more optional + +.. code-block:: python + + # SB3 < 0.11.0 + # model = SAC("MlpPolicy", env, n_episodes_rollout=1, train_freq=-1) + # SB3 >= 0.11.0: + model = SAC("MlpPolicy", env, train_freq=(1, "episode")) + + New Features: ^^^^^^^^^^^^^ @@ -44,7 +79,12 @@ Bug Fixes: - Added informative ``PPO`` construction error in edge-case scenario where ``n_steps * n_envs = 1`` (size of rollout buffer), which otherwise causes downstream breaking errors in training (@decodyng) - Fixed discrete observation space support when using multiple envs with A2C/PPO (thanks @ardabbour) +- Fixed a bug for TD3 delayed update (the update was off-by-one and not delayed when ``train_freq=1``) - Fixed numpy warning (replaced ``np.bool`` with ``bool``) +- Fixed a bug where ``VecNormalize`` was not normalizing the terminal observation +- Fixed a bug where ``VecTranspose`` was not transposing the terminal observation +- Fixed a bug where the terminal observation stored in the replay buffer was not the right one for off-policy algorithms +- Fixed a bug where ``action_noise`` was not used when using ``HER`` (thanks @ShangqunYu) Deprecations: ^^^^^^^^^^^^^ @@ -78,6 +118,7 @@ Documentation: - Updated custom policy doc (separate policy architecture recommended) - Added a note about OpenCV headless version - Corrected typo on documentation (@mschweizer) +- Provide the environment when loading the model in the examples (@lorepieri8) Pre-Release 0.10.0 (2020-10-28) @@ -553,5 +594,5 @@ And all the contributors: @flodorner @KuKuXia @NeoExtended @PartiallyTyped @mmcenta @richardwu @kinalmehta @rolandgvc @tkelestemur @mloo3 @tirafesi @blurLake @koulakis @joeljosephjin @shwang @rk37 @andyshih12 @RaphaelWag @xicocaio @diditforlulz273 @liorcohen5 @ManifoldFR @mloo3 @SwamyDev @wmmc88 @megan-klaiber @thisray -@tfederico @hn2 @LucasAlegre @AptX395 @zampanteymedio @decodyng @ardabbour @lorenz-h @mschweizer -@JadenTravnik +@tfederico @hn2 @LucasAlegre @AptX395 @zampanteymedio @decodyng @ardabbour @lorenz-h @mschweizer @lorepieri8 +@ShangqunYu @PierreExeter @JadenTravnik diff --git a/docs/misc/projects.rst b/docs/misc/projects.rst index 8424ce21a..f0620f0da 100644 --- a/docs/misc/projects.rst +++ b/docs/misc/projects.rst @@ -15,6 +15,15 @@ Please tell us, if you want your project to appear on this page ;) .. | Author: Antonin Raffin (@araffin) .. | Github repo: https://github.com/araffin/RL-Racing-Robot +rl_reach +-------- + +A platform for running reproducible reinforcement learning experiments for customisable robotic reaching tasks. This self-contained and straightforward toolbox allows its users to quickly investigate and identify optimal training configurations. 
+ +| Authors: Pierre Aumjaud, David McAuliffe, Francisco Javier Rodríguez Lera, Philip Cardiff +| Github: https://github.com/PierreExeter/rl_reach +| Paper: https://arxiv.org/abs/2102.04916 + Generalized State Dependent Exploration for Deep Reinforcement Learning in Robotics ----------------------------------------------------------------------------------- diff --git a/docs/modules/her.rst b/docs/modules/her.rst index 2d564d2a8..110aa0494 100644 --- a/docs/modules/her.rst +++ b/docs/modules/her.rst @@ -32,6 +32,13 @@ It creates "virtual" transitions by relabeling transitions (changing the desired ``HER`` supports ``VecNormalize`` wrapper but only when ``online_sampling=True`` +.. warning:: + + Because it needs access to ``env.compute_reward()`` + ``HER`` must be loaded with the env. If you just want to use the trained policy + without instantiating the environment, we recommend saving the policy only. + + Notes ----- @@ -78,11 +85,13 @@ Example model.learn(1000) model.save("./her_bit_env") + # Because it needs access to `env.compute_reward()` + # HER must be loaded with the env model = HER.load('./her_bit_env', env=env) obs = env.reset() for _ in range(100): - action, _ = model.model.predict(obs, deterministic=True) + action, _ = model.predict(obs, deterministic=True) obs, reward, done, _ = env.step(action) if done: diff --git a/stable_baselines3/common/base_class.py b/stable_baselines3/common/base_class.py index e9308863c..caad120ab 100644 --- a/stable_baselines3/common/base_class.py +++ b/stable_baselines3/common/base_class.py @@ -143,7 +143,7 @@ def __init__( # Buffers for logging self.ep_info_buffer = None # type: Optional[deque] self.ep_success_buffer = None # type: Optional[deque] - # For logging + # For logging (and TD3 delayed updates) self._n_updates = 0 # type: int # Create and wrap the env if needed @@ -409,10 +409,11 @@ def _setup_learn( def _update_info_buffer(self, infos: List[Dict[str, Any]], dones: Optional[np.ndarray] = None) -> None: """ - Retrieve reward and episode length and update the buffer - if using Monitor wrapper. + Retrieve reward, episode length, episode success and update the buffer + if using Monitor wrapper or a GoalEnv. - :param infos: + :param infos: List of additional information about the transition. 
+ :param dones: Termination signals """ if dones is None: dones = np.array([False] * len(infos)) diff --git a/stable_baselines3/common/cmd_util.py b/stable_baselines3/common/cmd_util.py deleted file mode 100644 index dea9e7979..000000000 --- a/stable_baselines3/common/cmd_util.py +++ /dev/null @@ -1,7 +0,0 @@ -import warnings - -from stable_baselines3.common.env_util import * # noqa: F403,F401 - -warnings.warn( - "Module ``common.cmd_util`` has been renamed to ``common.env_util`` and will be removed in the future.", FutureWarning -) diff --git a/stable_baselines3/common/off_policy_algorithm.py b/stable_baselines3/common/off_policy_algorithm.py index 001976817..7c8f30a3e 100644 --- a/stable_baselines3/common/off_policy_algorithm.py +++ b/stable_baselines3/common/off_policy_algorithm.py @@ -2,7 +2,7 @@ import pathlib import time import warnings -from typing import Any, Dict, Optional, Tuple, Type, Union +from typing import Any, Dict, List, Optional, Tuple, Type, Union import gym import numpy as np @@ -15,8 +15,8 @@ from stable_baselines3.common.noise import ActionNoise from stable_baselines3.common.policies import BasePolicy from stable_baselines3.common.save_util import load_from_pkl, save_to_pkl -from stable_baselines3.common.type_aliases import GymEnv, MaybeCallback, RolloutReturn, Schedule -from stable_baselines3.common.utils import safe_mean +from stable_baselines3.common.type_aliases import GymEnv, MaybeCallback, RolloutReturn, Schedule, TrainFreq, TrainFrequencyUnit +from stable_baselines3.common.utils import safe_mean, should_collect_more_steps from stable_baselines3.common.vec_env import VecEnv @@ -35,13 +35,11 @@ class OffPolicyAlgorithm(BaseAlgorithm): :param batch_size: Minibatch size for each gradient update :param tau: the soft update coefficient ("Polyak update", between 0 and 1) :param gamma: the discount factor - :param train_freq: Update the model every ``train_freq`` steps. Set to `-1` to disable. - :param gradient_steps: How many gradient steps to do after each rollout - (see ``train_freq`` and ``n_episodes_rollout``) + :param train_freq: Update the model every ``train_freq`` steps. Alternatively pass a tuple of frequency and unit + like ``(5, "step")`` or ``(2, "episode")``. + :param gradient_steps: How many gradient steps to do after each rollout (see ``train_freq``) Set to ``-1`` means to do as many gradient steps as steps done in the environment during the rollout. - :param n_episodes_rollout: Update the model every ``n_episodes_rollout`` episodes. - Note that this cannot be used at the same time as ``train_freq``. Set to `-1` to disable. :param action_noise: the action noise type (None by default), this can help for hard exploration problem. Cf common.noise for the different action noise type. 
:param optimize_memory_usage: Enable a memory efficient variant of the replay buffer @@ -83,9 +81,8 @@ def __init__( batch_size: int = 256, tau: float = 0.005, gamma: float = 0.99, - train_freq: int = 1, + train_freq: Union[int, Tuple[int, str]] = (1, "step"), gradient_steps: int = 1, - n_episodes_rollout: int = -1, action_noise: Optional[ActionNoise] = None, optimize_memory_usage: bool = False, policy_kwargs: Dict[str, Any] = None, @@ -126,9 +123,7 @@ def __init__( self.learning_starts = learning_starts self.tau = tau self.gamma = gamma - self.train_freq = train_freq self.gradient_steps = gradient_steps - self.n_episodes_rollout = n_episodes_rollout self.action_noise = action_noise self.optimize_memory_usage = optimize_memory_usage @@ -136,15 +131,8 @@ def __init__( # see https://github.com/hill-a/stable-baselines/issues/863 self.remove_time_limit_termination = remove_time_limit_termination - if train_freq > 0 and n_episodes_rollout > 0: - warnings.warn( - "You passed a positive value for `train_freq` and `n_episodes_rollout`." - "Please make sure this is intended. " - "The agent will collect data by stepping in the environment " - "until both conditions are true: " - "`number of steps in the env` >= `train_freq` and " - "`number of episodes` > `n_episodes_rollout`" - ) + # Save train freq parameter, will be converted later to TrainFreq object + self.train_freq = train_freq self.actor = None # type: Optional[th.nn.Module] self.replay_buffer = None # type: Optional[ReplayBuffer] @@ -154,6 +142,28 @@ def __init__( # For gSDE only self.use_sde_at_warmup = use_sde_at_warmup + def _convert_train_freq(self) -> None: + """ + Convert `train_freq` parameter (int or tuple) + to a TrainFreq object. + """ + if not isinstance(self.train_freq, TrainFreq): + train_freq = self.train_freq + + # The value of the train frequency will be checked later + if not isinstance(train_freq, tuple): + train_freq = (train_freq, "step") + + try: + train_freq = (train_freq[0], TrainFrequencyUnit(train_freq[1])) + except ValueError: + raise ValueError(f"The unit of the `train_freq` must be either 'step' or 'episode' not '{train_freq[1]}'!") + + if not isinstance(train_freq[0], int): + raise ValueError(f"The frequency of `train_freq` must be an integer and not {train_freq[0]}") + + self.train_freq = TrainFreq(*train_freq) + def _setup_model(self) -> None: self._setup_lr_schedule() self.set_random_seed(self.seed) @@ -173,10 +183,13 @@ def _setup_model(self) -> None: self.observation_space, self.action_space, self.lr_schedule, - **self.policy_kwargs # pytype:disable=not-instantiable + **self.policy_kwargs, # pytype:disable=not-instantiable ) self.policy = self.policy.to(self.device) + # Convert train freq parameter to TrainFreq object + self._convert_train_freq() + def save_replay_buffer(self, path: Union[str, pathlib.Path, io.BufferedIOBase]) -> None: """ Save the replay buffer as a pickle file. 
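A minimal sketch of the conversion rules implemented by ``_convert_train_freq`` above (the ``convert`` function here is a hypothetical stand-in mirroring the method body, shown for illustration only):

.. code-block:: python

    from stable_baselines3.common.type_aliases import TrainFreq, TrainFrequencyUnit


    def convert(train_freq):
        # A plain int is interpreted as a number of steps
        if not isinstance(train_freq, tuple):
            train_freq = (train_freq, "step")
        # TrainFrequencyUnit(...) raises ValueError for any unit
        # other than "step" or "episode"
        return TrainFreq(train_freq[0], TrainFrequencyUnit(train_freq[1]))


    assert convert(4) == TrainFreq(4, TrainFrequencyUnit.STEP)
    assert convert((2, "episode")) == TrainFreq(2, TrainFrequencyUnit.EPISODE)
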
@@ -269,11 +282,9 @@ def learn( callback.on_training_start(locals(), globals()) while self.num_timesteps < total_timesteps: - rollout = self.collect_rollouts( self.env, - n_episodes=self.n_episodes_rollout, - n_steps=self.train_freq, + train_freq=self.train_freq, action_noise=self.action_noise, callback=callback, learning_starts=self.learning_starts, @@ -374,15 +385,62 @@ def _on_step(self) -> None: """ pass + def _store_transition( + self, + replay_buffer: ReplayBuffer, + buffer_action: np.ndarray, + new_obs: np.ndarray, + reward: np.ndarray, + done: np.ndarray, + infos: List[Dict[str, Any]], + ) -> None: + """ + Store transition in the replay buffer. + We store the normalized action and the unnormalized observation. + It also handles terminal observations (because VecEnv resets automatically). + + :param replay_buffer: Replay buffer object where to store the transition. + :param buffer_action: normalized action + :param new_obs: next observation in the current episode + or first observation of the episode (when done is True) + :param reward: reward for the current transition + :param done: Termination signal + :param infos: List of additional information about the transition. + It contains the terminal observations. + """ + # Store only the unnormalized version + if self._vec_normalize_env is not None: + new_obs_ = self._vec_normalize_env.get_original_obs() + reward_ = self._vec_normalize_env.get_original_reward() + else: + # Avoid changing the original ones + self._last_original_obs, new_obs_, reward_ = self._last_obs, new_obs, reward + + # As the VecEnv resets automatically, new_obs is already the + # first observation of the next episode + if done and infos[0].get("terminal_observation") is not None: + next_obs = infos[0]["terminal_observation"] + # VecNormalize normalizes the terminal observation + if self._vec_normalize_env is not None: + next_obs = self._vec_normalize_env.unnormalize_obs(next_obs) + else: + next_obs = new_obs_ + + replay_buffer.add(self._last_original_obs, next_obs, buffer_action, reward_, done) + + self._last_obs = new_obs + # Save the unnormalized observation + if self._vec_normalize_env is not None: + self._last_original_obs = new_obs_ + def collect_rollouts( self, env: VecEnv, callback: BaseCallback, - n_episodes: int = 1, - n_steps: int = -1, + train_freq: TrainFreq, + replay_buffer: ReplayBuffer, action_noise: Optional[ActionNoise] = None, learning_starts: int = 0, - replay_buffer: Optional[ReplayBuffer] = None, log_interval: Optional[int] = None, ) -> RolloutReturn: """ @@ -391,10 +449,11 @@ def collect_rollouts( :param env: The training environment :param callback: Callback that will be called at each step (and at the beginning and end of the rollout) - :param n_episodes: Number of episodes to use to collect rollout data - You can also specify a ``n_steps`` instead - :param n_steps: Number of steps to use to collect rollout data - You can also specify a ``n_episodes`` instead. + :param train_freq: How much experience to collect + by doing rollouts of current policy. + Either ``TrainFreq(, TrainFrequencyUnit.STEP)`` + or ``TrainFreq(, TrainFrequencyUnit.EPISODE)`` + with ```` being an integer greater than 0. :param action_noise: Action noise that will be used for exploration Required for deterministic policy (e.g. TD3). This can also be used in addition to the stochastic policy for SAC. 
@@ -404,10 +463,11 @@ def collect_rollouts( :return: """ episode_rewards, total_timesteps = [], [] - total_steps, total_episodes = 0, 0 + num_collected_steps, num_collected_episodes = 0, 0 assert isinstance(env, VecEnv), "You must pass a VecEnv" assert env.num_envs == 1, "OffPolicyAlgorithm only support single environment" + assert train_freq.frequency > 0, "Should at least collect one step or episode." if self.use_sde: self.actor.reset_noise() @@ -415,13 +475,13 @@ def collect_rollouts( callback.on_rollout_start() continue_training = True - while total_steps < n_steps or total_episodes < n_episodes: + while should_collect_more_steps(train_freq, num_collected_steps, num_collected_episodes): done = False episode_reward, episode_timesteps = 0.0, 0 while not done: - if self.use_sde and self.sde_sample_freq > 0 and total_steps % self.sde_sample_freq == 0: + if self.use_sde and self.sde_sample_freq > 0 and num_collected_steps % self.sde_sample_freq == 0: # Sample a new noise matrix self.actor.reset_noise() @@ -433,35 +493,21 @@ def collect_rollouts( self.num_timesteps += 1 episode_timesteps += 1 - total_steps += 1 + num_collected_steps += 1 # Give access to local variables callback.update_locals(locals()) # Only stop training if return value is False, not when it is None. if callback.on_step() is False: - return RolloutReturn(0.0, total_steps, total_episodes, continue_training=False) + return RolloutReturn(0.0, num_collected_steps, num_collected_episodes, continue_training=False) episode_reward += reward # Retrieve reward and episode length if using Monitor wrapper self._update_info_buffer(infos, done) - # Store data in replay buffer - if replay_buffer is not None: - # Store only the unnormalized version - if self._vec_normalize_env is not None: - new_obs_ = self._vec_normalize_env.get_original_obs() - reward_ = self._vec_normalize_env.get_original_reward() - else: - # Avoid changing the original ones - self._last_original_obs, new_obs_, reward_ = self._last_obs, new_obs, reward - - replay_buffer.add(self._last_original_obs, new_obs_, buffer_action, reward_, done) - - self._last_obs = new_obs - # Save the unnormalized observation - if self._vec_normalize_env is not None: - self._last_original_obs = new_obs_ + # Store data in replay buffer (normalized action and unnormalized observation) + self._store_transition(replay_buffer, buffer_action, new_obs, reward, done, infos) self._update_current_progress_remaining(self.num_timesteps, self._total_timesteps) @@ -471,11 +517,11 @@ def collect_rollouts( # see https://github.com/hill-a/stable-baselines/issues/900 self._on_step() - if 0 < n_steps <= total_steps: + if not should_collect_more_steps(train_freq, num_collected_steps, num_collected_episodes): break if done: - total_episodes += 1 + num_collected_episodes += 1 self._episode_num += 1 episode_rewards.append(episode_reward) total_timesteps.append(episode_timesteps) @@ -487,8 +533,8 @@ def collect_rollouts( if log_interval is not None and self._episode_num % log_interval == 0: self._dump_logs() - mean_reward = np.mean(episode_rewards) if total_episodes > 0 else 0.0 + mean_reward = np.mean(episode_rewards) if num_collected_episodes > 0 else 0.0 callback.on_rollout_end() - return RolloutReturn(mean_reward, total_steps, total_episodes, continue_training) + return RolloutReturn(mean_reward, num_collected_steps, num_collected_episodes, continue_training) diff --git a/stable_baselines3/common/policies.py b/stable_baselines3/common/policies.py index 637faafc4..f7b7a399b 100644 --- 
a/stable_baselines3/common/policies.py +++ b/stable_baselines3/common/policies.py @@ -90,7 +90,7 @@ def __init__( @abstractmethod def forward(self, *args, **kwargs): - del args, kwargs + pass def _update_features_extractor( self, @@ -129,12 +129,11 @@ def extract_features(self, obs: th.Tensor) -> th.Tensor: preprocessed_obs = preprocess_obs(obs, self.observation_space, normalize_images=self.normalize_images) return self.features_extractor(preprocessed_obs) - def _get_data(self) -> Dict[str, Any]: + def _get_constructor_parameters(self) -> Dict[str, Any]: """ - Get data that need to be saved in order to re-create the model. - This corresponds to the arguments of the constructor. + Get data that need to be saved in order to re-create the model when loading it from disk. - :return: + :return: The dictionary to pass to the as kwargs constructor when reconstruction this model. """ return dict( observation_space=self.observation_space, @@ -161,7 +160,7 @@ def save(self, path: str) -> None: :param path: """ - th.save({"state_dict": self.state_dict(), "data": self._get_data()}, path) + th.save({"state_dict": self.state_dict(), "data": self._get_constructor_parameters()}, path) @classmethod def load(cls, path: str, device: Union[th.device, str] = "auto") -> "BaseModel": @@ -469,8 +468,8 @@ def __init__( self._build(lr_schedule) - def _get_data(self) -> Dict[str, Any]: - data = super()._get_data() + def _get_constructor_parameters(self) -> Dict[str, Any]: + data = super()._get_constructor_parameters() default_none_kwargs = self.dist_kwargs or collections.defaultdict(lambda: None) diff --git a/stable_baselines3/common/type_aliases.py b/stable_baselines3/common/type_aliases.py index 87dd01a90..45db9eb43 100644 --- a/stable_baselines3/common/type_aliases.py +++ b/stable_baselines3/common/type_aliases.py @@ -1,5 +1,6 @@ """Common aliases for type hints""" +from enum import Enum from typing import Any, Callable, Dict, List, NamedTuple, Tuple, Union import gym @@ -59,3 +60,13 @@ class RolloutReturn(NamedTuple): episode_timesteps: int n_episodes: int continue_training: bool + + +class TrainFrequencyUnit(Enum): + STEP = "step" + EPISODE = "episode" + + +class TrainFreq(NamedTuple): + frequency: int + unit: TrainFrequencyUnit # either "step" or "episode" diff --git a/stable_baselines3/common/utils.py b/stable_baselines3/common/utils.py index 0bd115a79..5b09a029a 100644 --- a/stable_baselines3/common/utils.py +++ b/stable_baselines3/common/utils.py @@ -16,7 +16,7 @@ SummaryWriter = None from stable_baselines3.common import logger -from stable_baselines3.common.type_aliases import GymEnv, Schedule, TensorDict +from stable_baselines3.common.type_aliases import GymEnv, Schedule, TensorDict, TrainFreq, TrainFrequencyUnit def set_random_seed(seed: int, using_cuda: bool = False) -> None: @@ -421,3 +421,31 @@ def obs_as_tensor( return {key: th.as_tensor(_obs).to(device) for (key, _obs) in obs.items()} else: raise Exception(f"Unrecognized type of observation {type(obs)}") + + +def should_collect_more_steps( + train_freq: TrainFreq, + num_collected_steps: int, + num_collected_episodes: int, +) -> bool: + """ + Helper used in ``collect_rollouts()`` of off-policy algorithms + to determine the termination condition. + + :param train_freq: How much experience should be collected before updating the policy. + :param num_collected_steps: The number of already collected steps. + :param num_collected_episodes: The number of already collected episodes. 
+ :return: Whether to continue or not collecting experience + by doing rollouts of the current policy. + """ + if train_freq.unit == TrainFrequencyUnit.STEP: + return num_collected_steps < train_freq.frequency + + elif train_freq.unit == TrainFrequencyUnit.EPISODE: + return num_collected_episodes < train_freq.frequency + + else: + raise ValueError( + "The unit of the `train_freq` must be either TrainFrequencyUnit.STEP " + f"or TrainFrequencyUnit.EPISODE not '{train_freq.unit}'!" + ) diff --git a/stable_baselines3/common/vec_env/vec_normalize.py b/stable_baselines3/common/vec_env/vec_normalize.py index fcdefd8ed..55ed2c54e 100644 --- a/stable_baselines3/common/vec_env/vec_normalize.py +++ b/stable_baselines3/common/vec_env/vec_normalize.py @@ -106,13 +106,13 @@ def set_venv(self, venv: VecEnv) -> None: def step_wait(self) -> VecEnvStepReturn: """ Apply sequence of actions to sequence of environments - actions -> (observations, rewards, news) + actions -> (observations, rewards, dones) - where 'news' is a boolean vector indicating whether each element is new. + where ``dones`` is a boolean vector indicating whether each element is new. """ - obs, rews, news, infos = self.venv.step_wait() + obs, rewards, dones, infos = self.venv.step_wait() self.old_obs = obs - self.old_reward = rews + self.old_reward = rewards if self.training: if isinstance(obs, dict) and isinstance(self.obs_rms, dict): @@ -124,11 +124,17 @@ def step_wait(self) -> VecEnvStepReturn: obs = self.normalize_obs(obs) if self.training: - self._update_reward(rews) - rews = self.normalize_reward(rews) + self._update_reward(rewards) + rewards = self.normalize_reward(rewards) - self.ret[news] = 0 - return obs, rews, news, infos + # Normalize the terminal observations + for idx, done in enumerate(dones): + if not done: + continue + infos[idx]["terminal_observation"] = self.normalize_obs(infos[idx]["terminal_observation"]) + + self.ret[dones] = 0 + return obs, rewards, dones, infos def _update_reward(self, reward: np.ndarray) -> None: """Update reward normalization statistics.""" diff --git a/stable_baselines3/common/vec_env/vec_transpose.py b/stable_baselines3/common/vec_env/vec_transpose.py index 257506e0e..2045e555f 100644 --- a/stable_baselines3/common/vec_env/vec_transpose.py +++ b/stable_baselines3/common/vec_env/vec_transpose.py @@ -76,8 +76,14 @@ def transpose_observations(self, observations: Union[np.ndarray, Dict]) -> Union def step_wait(self) -> VecEnvStepReturn: observations, rewards, dones, infos = self.venv.step_wait() - observations = self.transpose_observations(observations) - return observations, rewards, dones, infos + + # Transpose the terminal observations + for idx, done in enumerate(dones): + if not done: + continue + infos[idx]["terminal_observation"] = self.transpose_image(infos[idx]["terminal_observation"]) + + return self.transpose_image(observations), rewards, dones, infos def reset(self) -> Union[np.ndarray, Dict]: """ diff --git a/stable_baselines3/ddpg/ddpg.py b/stable_baselines3/ddpg/ddpg.py index e696aeb2b..e7a1f75dc 100644 --- a/stable_baselines3/ddpg/ddpg.py +++ b/stable_baselines3/ddpg/ddpg.py @@ -1,4 +1,4 @@ -from typing import Any, Dict, Optional, Type, Union +from typing import Any, Dict, Optional, Tuple, Type, Union import torch as th @@ -29,13 +29,11 @@ class DDPG(TD3): :param batch_size: Minibatch size for each gradient update :param tau: the soft update coefficient ("Polyak update", between 0 and 1) :param gamma: the discount factor - :param train_freq: Update the model every ``train_freq`` 
steps. Set to `-1` to disable. - :param gradient_steps: How many gradient steps to do after each rollout - (see ``train_freq`` and ``n_episodes_rollout``) + :param train_freq: Update the model every ``train_freq`` steps. Alternatively pass a tuple of frequency and unit + like ``(5, "step")`` or ``(2, "episode")``. + :param gradient_steps: How many gradient steps to do after each rollout (see ``train_freq``) Set to ``-1`` means to do as many gradient steps as steps done in the environment during the rollout. - :param n_episodes_rollout: Update the model every ``n_episodes_rollout`` episodes. - Note that this cannot be used at the same time as ``train_freq``. Set to `-1` to disable. :param action_noise: the action noise type (None by default), this can help for hard exploration problem. Cf common.noise for the different action noise type. :param optimize_memory_usage: Enable a memory efficient variant of the replay buffer @@ -61,9 +59,8 @@ def __init__( batch_size: int = 100, tau: float = 0.005, gamma: float = 0.99, - train_freq: int = -1, + train_freq: Union[int, Tuple[int, str]] = (1, "episode"), gradient_steps: int = -1, - n_episodes_rollout: int = 1, action_noise: Optional[ActionNoise] = None, optimize_memory_usage: bool = False, tensorboard_log: Optional[str] = None, @@ -86,7 +83,6 @@ def __init__( gamma=gamma, train_freq=train_freq, gradient_steps=gradient_steps, - n_episodes_rollout=n_episodes_rollout, action_noise=action_noise, policy_kwargs=policy_kwargs, tensorboard_log=tensorboard_log, diff --git a/stable_baselines3/dqn/dqn.py b/stable_baselines3/dqn/dqn.py index 032f89558..902b8cf7d 100644 --- a/stable_baselines3/dqn/dqn.py +++ b/stable_baselines3/dqn/dqn.py @@ -29,13 +29,11 @@ class DQN(OffPolicyAlgorithm): :param batch_size: Minibatch size for each gradient update :param tau: the soft update coefficient ("Polyak update", between 0 and 1) default 1 for hard update :param gamma: the discount factor - :param train_freq: Update the model every ``train_freq`` steps. Set to `-1` to disable. - :param gradient_steps: How many gradient steps to do after each rollout - (see ``train_freq`` and ``n_episodes_rollout``) + :param train_freq: Update the model every ``train_freq`` steps. Alternatively pass a tuple of frequency and unit + like ``(5, "step")`` or ``(2, "episode")``. + :param gradient_steps: How many gradient steps to do after each rollout (see ``train_freq``) Set to ``-1`` means to do as many gradient steps as steps done in the environment during the rollout. - :param n_episodes_rollout: Update the model every ``n_episodes_rollout`` episodes. - Note that this cannot be used at the same time as ``train_freq``. Set to `-1` to disable. :param optimize_memory_usage: Enable a memory efficient variant of the replay buffer at a cost of more complexity. 
See https://github.com/DLR-RM/stable-baselines3/issues/37#issuecomment-637501195
@@ -66,9 +64,8 @@ def __init__(
 batch_size: Optional[int] = 32,
 tau: float = 1.0,
 gamma: float = 0.99,
- train_freq: int = 4,
+ train_freq: Union[int, Tuple[int, str]] = 4,
 gradient_steps: int = 1,
- n_episodes_rollout: int = -1,
 optimize_memory_usage: bool = False,
 target_update_interval: int = 10000,
 exploration_fraction: float = 0.1,
@@ -96,7 +93,6 @@ def __init__(
 gamma,
 train_freq,
 gradient_steps,
- n_episodes_rollout,
 action_noise=None, # No action noise
 policy_kwargs=policy_kwargs,
 tensorboard_log=tensorboard_log,
diff --git a/stable_baselines3/dqn/policies.py b/stable_baselines3/dqn/policies.py
index 04fe5d268..29f27603a 100644
--- a/stable_baselines3/dqn/policies.py
+++ b/stable_baselines3/dqn/policies.py
@@ -71,8 +71,8 @@ def _predict(self, observation: th.Tensor, deterministic: bool = True) -> th.Ten
 action = q_values.argmax(dim=1).reshape(-1)
 return action

- def _get_data(self) -> Dict[str, Any]:
- data = super()._get_data()
+ def _get_constructor_parameters(self) -> Dict[str, Any]:
+ data = super()._get_constructor_parameters()

 data.update(
 dict(
@@ -174,8 +174,8 @@ def forward(self, obs: th.Tensor, deterministic: bool = True) -> th.Tensor:
 def _predict(self, obs: th.Tensor, deterministic: bool = True) -> th.Tensor:
 return self.q_net._predict(obs, deterministic=deterministic)

- def _get_data(self) -> Dict[str, Any]:
- data = super()._get_data()
+ def _get_constructor_parameters(self) -> Dict[str, Any]:
+ data = super()._get_constructor_parameters()

 data.update(
 dict(
diff --git a/stable_baselines3/her/her.py b/stable_baselines3/her/her.py
index 658abc6fe..642986ed8 100644
--- a/stable_baselines3/her/her.py
+++ b/stable_baselines3/her/her.py
@@ -12,8 +12,8 @@
 from stable_baselines3.common.off_policy_algorithm import OffPolicyAlgorithm
 from stable_baselines3.common.policies import BasePolicy
 from stable_baselines3.common.save_util import load_from_zip_file, recursive_setattr
-from stable_baselines3.common.type_aliases import GymEnv, MaybeCallback, RolloutReturn
-from stable_baselines3.common.utils import check_for_correct_spaces
+from stable_baselines3.common.type_aliases import GymEnv, MaybeCallback, RolloutReturn, TrainFreq
+from stable_baselines3.common.utils import check_for_correct_spaces, should_collect_more_steps
 from stable_baselines3.common.vec_env import VecEnv
 from stable_baselines3.common.vec_env.obs_dict_wrapper import ObsDictWrapper
 from stable_baselines3.her.goal_selection_strategy import KEY_TO_GOAL_STRATEGY, GoalSelectionStrategy
@@ -108,6 +108,8 @@ def __init__(
 **kwargs, # pytype: disable=wrong-keyword-args
 )

+ # Make HER use self.model.action_noise
+ del self.action_noise
 self.verbose = self.model.verbose
 self.tensorboard_log = self.model.tensorboard_log

@@ -132,6 +134,9 @@ def __init__(
 # storage for transitions of current episode for offline sampling
 # for online sampling, it replaces the "classic" replay buffer completely
 her_buffer_size = self.buffer_size if online_sampling else self.max_episode_length
+
+ assert self.env is not None, "You must provide the env, as HER needs access to `env.compute_reward()`."
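+
+ # HER relabels stored transitions with new goals after the episode ends, so the
+ # reward must be recomputable from (achieved_goal, desired_goal) alone. As an
+ # illustrative sketch only (each env defines its own version), a sparse reaching
+ # reward computed on batches of relabeled goals could look like:
+ #
+ # def compute_reward(self, achieved_goal, desired_goal, info):
+ # distance = np.linalg.norm(achieved_goal - desired_goal, axis=-1)
+ # return -(distance > 0.05).astype(np.float32)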
+ self._episode_storage = HerReplayBuffer(
 self.env,
 her_buffer_size,
@@ -193,11 +198,9 @@ def learn(
 callback.on_training_start(locals(), globals())

 while self.num_timesteps < total_timesteps:
-
 rollout = self.collect_rollouts(
 self.env,
- n_episodes=self.n_episodes_rollout,
- n_steps=self.train_freq,
+ train_freq=self.train_freq,
 action_noise=self.action_noise,
 callback=callback,
 learning_starts=self.learning_starts,
@@ -221,8 +224,7 @@ def collect_rollouts(
 self,
 env: VecEnv,
 callback: BaseCallback,
- n_episodes: int = 1,
- n_steps: int = -1,
+ train_freq: TrainFreq,
 action_noise: Optional[ActionNoise] = None,
 learning_starts: int = 0,
 log_interval: Optional[int] = None,
@@ -233,10 +235,11 @@ def collect_rollouts(

 :param env: The training environment
 :param callback: Callback that will be called at each step
 (and at the beginning and end of the rollout)
- :param n_episodes: Number of episodes to use to collect rollout data
- You can also specify a ``n_steps`` instead
- :param n_steps: Number of steps to use to collect rollout data
- You can also specify a ``n_episodes`` instead.
+ :param train_freq: How much experience to collect
+ by doing rollouts of current policy.
+ Either ``TrainFreq(<n>, TrainFrequencyUnit.STEP)``
+ or ``TrainFreq(<n>, TrainFrequencyUnit.EPISODE)``
+ with ``<n>`` being an integer greater than 0.
 :param action_noise: Action noise that will be used for exploration
 Required for deterministic policy (e.g. TD3). This can also be used
 in addition to the stochastic policy for SAC.
@@ -246,10 +249,11 @@ def collect_rollouts(
 """

 episode_rewards, total_timesteps = [], []
- total_steps, total_episodes = 0, 0
+ num_collected_steps, num_collected_episodes = 0, 0

 assert isinstance(env, VecEnv), "You must pass a VecEnv"
 assert env.num_envs == 1, "OffPolicyAlgorithm only support single environment"
+ assert train_freq.frequency > 0, "Should at least collect one step or episode."

 if self.model.use_sde:
 self.actor.reset_noise()
@@ -257,7 +261,7 @@ def collect_rollouts(
 callback.on_rollout_start()
 continue_training = True

- while total_steps < n_steps or total_episodes < n_episodes:
+ while should_collect_more_steps(train_freq, num_collected_steps, num_collected_episodes):
 done = False
 episode_reward, episode_timesteps = 0.0, 0

@@ -266,7 +270,11 @@ def collect_rollouts(
 observation = self._last_obs
 self._last_obs = ObsDictWrapper.convert_dict(observation)

- if self.model.use_sde and self.model.sde_sample_freq > 0 and total_steps % self.model.sde_sample_freq == 0:
+ if (
+ self.model.use_sde
+ and self.model.sde_sample_freq > 0
+ and num_collected_steps % self.model.sde_sample_freq == 0
+ ):
 # Sample a new noise matrix
 self.actor.reset_noise()

@@ -280,11 +288,11 @@ def collect_rollouts(
 self.num_timesteps += 1
 self.model.num_timesteps = self.num_timesteps
 episode_timesteps += 1
- total_steps += 1
+ num_collected_steps += 1

 # Only stop training if return value is False, not when it is None.
if callback.on_step() is False: - return RolloutReturn(0.0, total_steps, total_episodes, continue_training=False) + return RolloutReturn(0.0, num_collected_steps, num_collected_episodes, continue_training=False) episode_reward += reward @@ -307,10 +315,10 @@ def collect_rollouts( # As the VecEnv resets automatically, new_obs is already the # first observation of the next episode if done and infos[0].get("terminal_observation") is not None: - # The saved terminal_observation is not passed through other - # VecEnvWrapper, so no need to unnormalize - # NOTE: this may be an issue when using other wrappers next_obs = infos[0]["terminal_observation"] + # VecNormalize normalizes the terminal observation + if self._vec_normalize_env is not None: + next_obs = self._vec_normalize_env.unnormalize_obs(next_obs) else: next_obs = new_obs_ @@ -343,7 +351,7 @@ def collect_rollouts( self.episode_steps += 1 - if 0 < n_steps <= total_steps: + if not should_collect_more_steps(train_freq, num_collected_steps, num_collected_episodes): break if done or self.episode_steps >= self.max_episode_length: @@ -356,7 +364,7 @@ def collect_rollouts( # clear storage for current episode self._episode_storage.reset() - total_episodes += 1 + num_collected_episodes += 1 self._episode_num += 1 self.model._episode_num = self._episode_num episode_rewards.append(episode_reward) @@ -371,11 +379,11 @@ def collect_rollouts( self.episode_steps = 0 - mean_reward = np.mean(episode_rewards) if total_episodes > 0 else 0.0 + mean_reward = np.mean(episode_rewards) if num_collected_episodes > 0 else 0.0 callback.on_rollout_end() - return RolloutReturn(mean_reward, total_steps, total_episodes, continue_training) + return RolloutReturn(mean_reward, num_collected_steps, num_collected_episodes, continue_training) def _sample_her_transitions(self) -> None: """ diff --git a/stable_baselines3/sac/policies.py b/stable_baselines3/sac/policies.py index 294801600..e945fde21 100644 --- a/stable_baselines3/sac/policies.py +++ b/stable_baselines3/sac/policies.py @@ -114,8 +114,8 @@ def __init__( self.mu = nn.Linear(last_layer_dim, action_dim) self.log_std = nn.Linear(last_layer_dim, action_dim) - def _get_data(self) -> Dict[str, Any]: - data = super()._get_data() + def _get_constructor_parameters(self) -> Dict[str, Any]: + data = super()._get_constructor_parameters() data.update( dict( @@ -314,8 +314,8 @@ def _build(self, lr_schedule: Schedule) -> None: self.critic.optimizer = self.optimizer_class(critic_parameters, lr=lr_schedule(1), **self.optimizer_kwargs) - def _get_data(self) -> Dict[str, Any]: - data = super()._get_data() + def _get_constructor_parameters(self) -> Dict[str, Any]: + data = super()._get_constructor_parameters() data.update( dict( diff --git a/stable_baselines3/sac/sac.py b/stable_baselines3/sac/sac.py index cd7a41356..63ed10f86 100644 --- a/stable_baselines3/sac/sac.py +++ b/stable_baselines3/sac/sac.py @@ -37,13 +37,11 @@ class SAC(OffPolicyAlgorithm): :param batch_size: Minibatch size for each gradient update :param tau: the soft update coefficient ("Polyak update", between 0 and 1) :param gamma: the discount factor - :param train_freq: Update the model every ``train_freq`` steps. Set to `-1` to disable. - :param gradient_steps: How many gradient steps to do after each rollout - (see ``train_freq`` and ``n_episodes_rollout``) + :param train_freq: Update the model every ``train_freq`` steps. Alternatively pass a tuple of frequency and unit + like ``(5, "step")`` or ``(2, "episode")``. 
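+ For example, ``train_freq=4`` is equivalent to ``train_freq=(4, "step")``,
+ while ``train_freq=(2, "episode")`` collects two full episodes between each round of gradient updates.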
+ :param gradient_steps: How many gradient steps to do after each rollout (see ``train_freq``)
 Set to ``-1`` means to do as many gradient steps as steps done in the environment
 during the rollout.
- :param n_episodes_rollout: Update the model every ``n_episodes_rollout`` episodes.
- Note that this cannot be used at the same time as ``train_freq``. Set to `-1` to disable.
 :param action_noise: the action noise type (None by default), this can help
 for hard exploration problem. Cf common.noise for the different action noise type.
 :param optimize_memory_usage: Enable a memory efficient variant of the replay buffer
@@ -81,9 +79,8 @@ def __init__(
 batch_size: int = 256,
 tau: float = 0.005,
 gamma: float = 0.99,
- train_freq: int = 1,
+ train_freq: Union[int, Tuple[int, str]] = 1,
 gradient_steps: int = 1,
- n_episodes_rollout: int = -1,
 action_noise: Optional[ActionNoise] = None,
 optimize_memory_usage: bool = False,
 ent_coef: Union[str, float] = "auto",
@@ -113,7 +110,6 @@ def __init__(
 gamma,
 train_freq,
 gradient_steps,
- n_episodes_rollout,
 action_noise,
 policy_kwargs=policy_kwargs,
 tensorboard_log=tensorboard_log,
diff --git a/stable_baselines3/td3/policies.py b/stable_baselines3/td3/policies.py
index d0e8b748d..e7e7e64b0 100644
--- a/stable_baselines3/td3/policies.py
+++ b/stable_baselines3/td3/policies.py
@@ -50,8 +50,6 @@ def __init__(
 squash_output=True,
 )

- self.features_extractor = features_extractor
- self.normalize_images = normalize_images
 self.net_arch = net_arch
 self.features_dim = features_dim
 self.activation_fn = activation_fn
@@ -61,8 +59,8 @@ def __init__(
 # Deterministic action
 self.mu = nn.Sequential(*actor_net)

- def _get_data(self) -> Dict[str, Any]:
- data = super()._get_data()
+ def _get_constructor_parameters(self) -> Dict[str, Any]:
+ data = super()._get_constructor_parameters()

 data.update(
 dict(
@@ -74,13 +72,15 @@ def __init__(
 )
 return data

- def forward(self, obs: th.Tensor, deterministic: bool = True) -> th.Tensor:
+ def forward(self, obs: th.Tensor) -> th.Tensor:
 # assert deterministic, 'The TD3 actor only outputs deterministic actions'
 features = self.extract_features(obs)
 return self.mu(features)

 def _predict(self, observation: th.Tensor, deterministic: bool = False) -> th.Tensor:
- return self.forward(observation, deterministic=deterministic)
+ # Note: the deterministic parameter is ignored in the case of TD3.
+ # Predictions are always deterministic.
+ return self.forward(observation)


 class TD3Policy(BasePolicy):
@@ -191,8 +191,8 @@ def _build(self, lr_schedule: Schedule) -> None:
 self.critic_target.load_state_dict(self.critic.state_dict())
 self.critic.optimizer = self.optimizer_class(self.critic.parameters(), lr=lr_schedule(1), **self.optimizer_kwargs)

- def _get_data(self) -> Dict[str, Any]:
- data = super()._get_data()
+ def _get_constructor_parameters(self) -> Dict[str, Any]:
+ data = super()._get_constructor_parameters()

 data.update(
 dict(
@@ -221,7 +221,9 @@ def forward(self, observation: th.Tensor, deterministic: bool = False) -> th.Ten
 return self._predict(observation, deterministic=deterministic)

 def _predict(self, observation: th.Tensor, deterministic: bool = False) -> th.Tensor:
- return self.actor(observation, deterministic=deterministic)
+ # Note: the deterministic parameter is ignored in the case of TD3.
+ # Predictions are always deterministic.
+ return self.actor(observation) MlpPolicy = TD3Policy diff --git a/stable_baselines3/td3/td3.py b/stable_baselines3/td3/td3.py index 1a3d0597c..b552e60f0 100644 --- a/stable_baselines3/td3/td3.py +++ b/stable_baselines3/td3/td3.py @@ -32,13 +32,11 @@ class TD3(OffPolicyAlgorithm): :param batch_size: Minibatch size for each gradient update :param tau: the soft update coefficient ("Polyak update", between 0 and 1) :param gamma: the discount factor - :param train_freq: Update the model every ``train_freq`` steps. Set to `-1` to disable. - :param gradient_steps: How many gradient steps to do after each rollout - (see ``train_freq`` and ``n_episodes_rollout``) + :param train_freq: Update the model every ``train_freq`` steps. Alternatively pass a tuple of frequency and unit + like ``(5, "step")`` or ``(2, "episode")``. + :param gradient_steps: How many gradient steps to do after each rollout (see ``train_freq``) Set to ``-1`` means to do as many gradient steps as steps done in the environment during the rollout. - :param n_episodes_rollout: Update the model every ``n_episodes_rollout`` episodes. - Note that this cannot be used at the same time as ``train_freq``. Set to `-1` to disable. :param action_noise: the action noise type (None by default), this can help for hard exploration problem. Cf common.noise for the different action noise type. :param optimize_memory_usage: Enable a memory efficient variant of the replay buffer @@ -69,9 +67,8 @@ def __init__( batch_size: int = 100, tau: float = 0.005, gamma: float = 0.99, - train_freq: int = -1, + train_freq: Union[int, Tuple[int, str]] = (1, "episode"), gradient_steps: int = -1, - n_episodes_rollout: int = 1, action_noise: Optional[ActionNoise] = None, optimize_memory_usage: bool = False, policy_delay: int = 2, @@ -98,7 +95,6 @@ def __init__( gamma, train_freq, gradient_steps, - n_episodes_rollout, action_noise=action_noise, policy_kwargs=policy_kwargs, tensorboard_log=tensorboard_log, @@ -137,6 +133,7 @@ def train(self, gradient_steps: int, batch_size: int = 100) -> None: for gradient_step in range(gradient_steps): + self._n_updates += 1 # Sample replay buffer replay_data = self.replay_buffer.sample(batch_size, env=self._vec_normalize_env) @@ -164,7 +161,7 @@ def train(self, gradient_steps: int, batch_size: int = 100) -> None: self.critic.optimizer.step() # Delayed policy updates - if gradient_step % self.policy_delay == 0: + if self._n_updates % self.policy_delay == 0: # Compute actor loss actor_loss = -self.critic.q1_forward(replay_data.observations, self.actor(replay_data.observations)).mean() actor_losses.append(actor_loss.item()) @@ -177,9 +174,9 @@ def train(self, gradient_steps: int, batch_size: int = 100) -> None: polyak_update(self.critic.parameters(), self.critic_target.parameters(), self.tau) polyak_update(self.actor.parameters(), self.actor_target.parameters(), self.tau) - self._n_updates += gradient_steps logger.record("train/n_updates", self._n_updates, exclude="tensorboard") - logger.record("train/actor_loss", np.mean(actor_losses)) + if len(actor_losses) > 0: + logger.record("train/actor_loss", np.mean(actor_losses)) logger.record("train/critic_loss", np.mean(critic_losses)) def learn( diff --git a/stable_baselines3/version.txt b/stable_baselines3/version.txt index 9522e5b3c..f4fb83cea 100644 --- a/stable_baselines3/version.txt +++ b/stable_baselines3/version.txt @@ -1 +1 @@ -0.11.0a7 +1.0rc0 diff --git a/tests/test_cnn.py b/tests/test_cnn.py index b8d9df10b..9e8daaba6 100644 --- a/tests/test_cnn.py +++ b/tests/test_cnn.py @@ 
-88,6 +88,10 @@ def test_features_extractor_target_net(model_class, share_features_extractor): if model_class != DQN: kwargs["policy_kwargs"]["share_features_extractor"] = share_features_extractor + # No delay for TD3 (changes when the actor and polyak update take place) + if model_class == TD3: + kwargs["policy_delay"] = 1 + model = model_class("CnnPolicy", env, seed=0, **kwargs) patch_dqn_names_(model) diff --git a/tests/test_her.py b/tests/test_her.py index 382d84ee9..dacb7008f 100644 --- a/tests/test_her.py +++ b/tests/test_her.py @@ -10,6 +10,7 @@ from stable_baselines3 import DDPG, DQN, HER, SAC, TD3 from stable_baselines3.common.envs import BitFlippingEnv +from stable_baselines3.common.noise import NormalActionNoise from stable_baselines3.common.vec_env import DummyVecEnv from stable_baselines3.common.vec_env.obs_dict_wrapper import ObsDictWrapper from stable_baselines3.her.goal_selection_strategy import GoalSelectionStrategy @@ -32,8 +33,7 @@ def test_her(model_class, online_sampling): goal_selection_strategy="future", online_sampling=online_sampling, gradient_steps=1, - train_freq=1, - n_episodes_rollout=-1, + train_freq=4, max_episode_length=n_bits, policy_kwargs=dict(net_arch=[64]), learning_starts=100, @@ -60,6 +60,8 @@ def test_goal_selection_strategy(goal_selection_strategy, online_sampling): """ env = BitFlippingEnv(continuous=True) + normal_action_noise = NormalActionNoise(np.zeros(1), 0.1 * np.ones(1)) + model = HER( "MlpPolicy", env, @@ -67,12 +69,13 @@ def test_goal_selection_strategy(goal_selection_strategy, online_sampling): goal_selection_strategy=goal_selection_strategy, online_sampling=online_sampling, gradient_steps=1, - train_freq=1, - n_episodes_rollout=-1, + train_freq=4, max_episode_length=10, policy_kwargs=dict(net_arch=[64]), learning_starts=100, + action_noise=normal_action_noise, ) + assert model.action_noise is not None model.learn(total_timesteps=300) @@ -109,7 +112,6 @@ def test_save_load(tmp_path, model_class, use_sde, online_sampling): gradient_steps=1, train_freq=4, learning_starts=100, - n_episodes_rollout=-1, max_episode_length=n_bits, **kwargs ) @@ -172,10 +174,7 @@ def test_save_load(tmp_path, model_class, use_sde, online_sampling): os.remove(tmp_path / "test_save.zip") -@pytest.mark.parametrize( - "online_sampling, truncate_last_trajectory", - [(False, None), (True, True), (True, False)], -) +@pytest.mark.parametrize("online_sampling, truncate_last_trajectory", [(False, False), (True, True), (True, False)]) def test_save_load_replay_buffer(tmp_path, recwarn, online_sampling, truncate_last_trajectory): """ Test if 'save_replay_buffer' and 'load_replay_buffer' works correctly @@ -194,8 +193,7 @@ def test_save_load_replay_buffer(tmp_path, recwarn, online_sampling, truncate_la goal_selection_strategy="future", online_sampling=online_sampling, gradient_steps=1, - train_freq=1, - n_episodes_rollout=-1, + train_freq=4, max_episode_length=4, buffer_size=int(2e4), policy_kwargs=dict(net_arch=[64]), @@ -271,8 +269,7 @@ def test_full_replay_buffer(): goal_selection_strategy="future", online_sampling=True, gradient_steps=1, - train_freq=1, - n_episodes_rollout=-1, + train_freq=4, max_episode_length=n_bits, policy_kwargs=dict(net_arch=[64]), learning_starts=1, diff --git a/tests/test_run.py b/tests/test_run.py index fae6782d6..c588a0257 100644 --- a/tests/test_run.py +++ b/tests/test_run.py @@ -103,3 +103,39 @@ def test_dqn(): create_eval_env=True, ) model.learn(total_timesteps=500, eval_freq=250) + + +@pytest.mark.parametrize("train_freq", [4, (4, "step"), 
(1, "episode")]) +def test_train_freq(tmp_path, train_freq): + + model = SAC( + "MlpPolicy", + "Pendulum-v0", + policy_kwargs=dict(net_arch=[64, 64], n_critics=1), + learning_starts=100, + buffer_size=10000, + verbose=1, + train_freq=train_freq, + ) + model.learn(total_timesteps=150) + model.save(tmp_path / "test_save.zip") + env = model.get_env() + model = SAC.load(tmp_path / "test_save.zip", env=env) + model.learn(total_timesteps=150) + model = SAC.load(tmp_path / "test_save.zip", train_freq=train_freq, env=env) + model.learn(total_timesteps=150) + + +@pytest.mark.parametrize("train_freq", ["4", ("1", "episode"), "non_sense", (1, "close")]) +def test_train_freq_fail(train_freq): + with pytest.raises(ValueError): + model = SAC( + "MlpPolicy", + "Pendulum-v0", + policy_kwargs=dict(net_arch=[64, 64], n_critics=1), + learning_starts=100, + buffer_size=10000, + verbose=1, + train_freq=train_freq, + ) + model.learn(total_timesteps=250) diff --git a/tests/test_save_load.py b/tests/test_save_load.py index 68bdd2070..9d2bdea30 100644 --- a/tests/test_save_load.py +++ b/tests/test_save_load.py @@ -176,7 +176,7 @@ def test_set_env(model_class): kwargs = {} if model_class in {DQN, DDPG, SAC, TD3}: - kwargs = dict(learning_starts=100) + kwargs = dict(learning_starts=100, train_freq=4) elif model_class in {A2C, PPO}: kwargs = dict(n_steps=64) @@ -238,12 +238,12 @@ def test_save_load_env_cnn(tmp_path, model_class): env = FakeImageEnv(screen_height=40, screen_width=40, n_channels=2, discrete=False) kwargs = dict(policy_kwargs=dict(net_arch=[32])) if model_class == TD3: - kwargs.update(dict(buffer_size=100, learning_starts=50)) + kwargs.update(dict(buffer_size=100, learning_starts=50, train_freq=4)) model = model_class("CnnPolicy", env, **kwargs).learn(100) model.save(tmp_path / "test_save") # Test loading with env and continuing training - model = model_class.load(str(tmp_path / "test_save.zip"), env=env).learn(100) + model = model_class.load(str(tmp_path / "test_save.zip"), env=env, **kwargs).learn(100) # clear file from os os.remove(tmp_path / "test_save.zip") diff --git a/tests/test_utils.py b/tests/test_utils.py index 1e030893b..d9473f559 100644 --- a/tests/test_utils.py +++ b/tests/test_utils.py @@ -321,12 +321,6 @@ def test_zip_strict(): pass -def test_cmd_util_rename(): - """Test that importing cmd_util still works but raises warning""" - with pytest.warns(FutureWarning): - from stable_baselines3.common.cmd_util import make_vec_env # noqa: F401 - - def test_is_wrapped(): """Test that is_wrapped correctly detects wraps""" env = gym.make("Pendulum-v0") From 32b899f2f86422208c66a88463b97c3fff77b6af Mon Sep 17 00:00:00 2001 From: Antonin Raffin Date: Tue, 2 Mar 2021 16:51:46 +0100 Subject: [PATCH 54/70] Fix for net_arch with dict and vector obs --- stable_baselines3/common/policies.py | 6 +++--- stable_baselines3/dqn/policies.py | 6 +++--- stable_baselines3/sac/policies.py | 6 +++--- stable_baselines3/td3/policies.py | 6 +++--- 4 files changed, 12 insertions(+), 12 deletions(-) diff --git a/stable_baselines3/common/policies.py b/stable_baselines3/common/policies.py index f7b7a399b..a53e99fe9 100644 --- a/stable_baselines3/common/policies.py +++ b/stable_baselines3/common/policies.py @@ -434,10 +434,10 @@ def __init__( # Default network architecture, from stable-baselines if net_arch is None: - if features_extractor_class == FlattenExtractor: - net_arch = [dict(pi=[64, 64], vf=[64, 64])] - else: + if features_extractor_class == NatureCNN: net_arch = [] + else: + net_arch = [dict(pi=[64, 64], vf=[64, 
64])] self.net_arch = net_arch self.activation_fn = activation_fn diff --git a/stable_baselines3/dqn/policies.py b/stable_baselines3/dqn/policies.py index 29f27603a..d39d9f2b9 100644 --- a/stable_baselines3/dqn/policies.py +++ b/stable_baselines3/dqn/policies.py @@ -128,10 +128,10 @@ def __init__( ) if net_arch is None: - if features_extractor_class == FlattenExtractor: - net_arch = [64, 64] - else: + if features_extractor_class == NatureCNN: net_arch = [] + else: + net_arch = [64, 64] self.net_arch = net_arch self.activation_fn = activation_fn diff --git a/stable_baselines3/sac/policies.py b/stable_baselines3/sac/policies.py index e945fde21..af12478cd 100644 --- a/stable_baselines3/sac/policies.py +++ b/stable_baselines3/sac/policies.py @@ -257,10 +257,10 @@ def __init__( ) if net_arch is None: - if features_extractor_class == FlattenExtractor: - net_arch = [256, 256] - else: + if features_extractor_class == NatureCNN: net_arch = [] + else: + net_arch = [256, 256] actor_arch, critic_arch = get_actor_critic_arch(net_arch) diff --git a/stable_baselines3/td3/policies.py b/stable_baselines3/td3/policies.py index e7e7e64b0..1288d7899 100644 --- a/stable_baselines3/td3/policies.py +++ b/stable_baselines3/td3/policies.py @@ -133,10 +133,10 @@ def __init__( # Default network architecture, from the original paper if net_arch is None: - if features_extractor_class == FlattenExtractor: - net_arch = [400, 300] - else: + if features_extractor_class == NatureCNN: net_arch = [] + else: + net_arch = [400, 300] actor_arch, critic_arch = get_actor_critic_arch(net_arch) From ec3356efdb16d11edfcc955e097a74b19d4058c3 Mon Sep 17 00:00:00 2001 From: Antonin Raffin Date: Tue, 2 Mar 2021 20:05:47 +0100 Subject: [PATCH 55/70] Fixes --- .../common/vec_env/vec_transpose.py | 4 +-- tests/test_dict_env.py | 36 +++++++++++++++---- 2 files changed, 31 insertions(+), 9 deletions(-) diff --git a/stable_baselines3/common/vec_env/vec_transpose.py b/stable_baselines3/common/vec_env/vec_transpose.py index 2045e555f..501c4e422 100644 --- a/stable_baselines3/common/vec_env/vec_transpose.py +++ b/stable_baselines3/common/vec_env/vec_transpose.py @@ -81,9 +81,9 @@ def step_wait(self) -> VecEnvStepReturn: for idx, done in enumerate(dones): if not done: continue - infos[idx]["terminal_observation"] = self.transpose_image(infos[idx]["terminal_observation"]) + infos[idx]["terminal_observation"] = self.transpose_observations(infos[idx]["terminal_observation"]) - return self.transpose_image(observations), rewards, dones, infos + return self.transpose_observations(observations), rewards, dones, infos def reset(self) -> Union[np.ndarray, Dict]: """ diff --git a/tests/test_dict_env.py b/tests/test_dict_env.py index 5155eb218..74ad62aa4 100644 --- a/tests/test_dict_env.py +++ b/tests/test_dict_env.py @@ -75,16 +75,24 @@ def test_dict_spaces(model_class, channel_last): n_steps = 256 if model_class in {A2C, PPO}: - kwargs = dict(n_steps=128, policy_kwargs=dict(features_extractor_kwargs=dict(cnn_output_dim=32))) + kwargs = dict( + n_steps=128, + policy_kwargs=dict( + net_arch=[32], + features_extractor_kwargs=dict(cnn_output_dim=32), + ), + ) else: # Avoid memory error when using replay buffer # Reduce the size of the features and make learning faster kwargs = dict( buffer_size=250, - policy_kwargs=dict(features_extractor_kwargs=dict(cnn_output_dim=32)), + policy_kwargs=dict( + net_arch=[32], + features_extractor_kwargs=dict(cnn_output_dim=32), + ), train_freq=8, gradient_steps=1, - n_episodes_rollout=-1, ) if model_class == DQN: 
kwargs["learning_starts"] = 0 @@ -111,7 +119,13 @@ def make_env(): n_steps = 256 if model_class in {A2C, PPO}: - kwargs = dict(n_steps=128, policy_kwargs=dict(features_extractor_kwargs=dict(cnn_output_dim=32))) + kwargs = dict( + n_steps=128, + policy_kwargs=dict( + net_arch=[32], + features_extractor_kwargs=dict(cnn_output_dim=32), + ), + ) model = model_class("MultiInputPolicy", env, gamma=0.5, seed=1, **kwargs) @@ -137,16 +151,24 @@ def test_dict_vec_framestack(model_class, channel_last): n_steps = 256 if model_class in {A2C, PPO}: - kwargs = dict(n_steps=128, policy_kwargs=dict(features_extractor_kwargs=dict(cnn_output_dim=32))) + kwargs = dict( + n_steps=128, + policy_kwargs=dict( + net_arch=[32], + features_extractor_kwargs=dict(cnn_output_dim=32), + ), + ) else: # Avoid memory error when using replay buffer # Reduce the size of the features and make learning faster kwargs = dict( buffer_size=250, - policy_kwargs=dict(features_extractor_kwargs=dict(cnn_output_dim=32)), + policy_kwargs=dict( + net_arch=[32], + features_extractor_kwargs=dict(cnn_output_dim=32), + ), train_freq=8, gradient_steps=1, - n_episodes_rollout=-1, ) if model_class == DQN: kwargs["learning_starts"] = 0 From 4f787fa54cb5e7c9ca0137f695db7b5bade1262b Mon Sep 17 00:00:00 2001 From: Antonin Raffin Date: Tue, 9 Mar 2021 11:32:53 +0100 Subject: [PATCH 56/70] Add consistency test --- tests/test_dict_env.py | 67 +++++++++++++++++++++++++++++++++++++++++- 1 file changed, 66 insertions(+), 1 deletion(-) diff --git a/tests/test_dict_env.py b/tests/test_dict_env.py index 74ad62aa4..4e946d137 100644 --- a/tests/test_dict_env.py +++ b/tests/test_dict_env.py @@ -15,7 +15,13 @@ class DummyDictEnv(gym.Env): metadata = {"render.modes": ["human"]} - def __init__(self, use_discrete_actions=False, channel_last=False, nested_dict_obs=False): + def __init__( + self, + use_discrete_actions=False, + channel_last=False, + nested_dict_obs=False, + vec_only=False, + ): super().__init__() if use_discrete_actions: self.action_space = spaces.Discrete(3) @@ -41,10 +47,23 @@ def __init__(self, use_discrete_actions=False, channel_last=False, nested_dict_o } ) + # For checking consistency with normal MlpPolicy + if vec_only: + self.observation_space = spaces.Dict( + { + # Vector obs + "vec": spaces.Box(low=-1, high=1, shape=(2,), dtype=np.float32), + } + ) + if nested_dict_obs: # Add dictionary observation inside observation space self.observation_space.spaces["nested-dict"] = spaces.Dict({"nested-dict-discrete": spaces.Discrete(4)}) + def seed(self, seed=None): + if seed is not None: + self.observation_space.seed(seed) + def step(self, action): reward = 0.0 done = False @@ -60,6 +79,52 @@ def render(self, mode="human"): pass +@pytest.mark.parametrize("model_class", [PPO, A2C, DQN, DDPG, SAC, TD3]) +def test_consistency(model_class): + """ + Make sure that dict obs with vector only vs using flatten obs is equivalent. + This ensures notable that the network architectures are the same. 
+ """ + use_discrete_actions = model_class == DQN + dict_env = DummyDictEnv(use_discrete_actions=use_discrete_actions, vec_only=True) + dict_env = gym.wrappers.TimeLimit(dict_env, 100) + env = gym.wrappers.FlattenObservation(dict_env) + dict_env.seed(10) + obs = dict_env.reset() + + kwargs = {} + n_steps = 256 + + if model_class in {A2C, PPO}: + kwargs = dict( + n_steps=128, + ) + else: + # Avoid memory error when using replay buffer + # Reduce the size of the features and make learning faster + kwargs = dict( + buffer_size=250, + train_freq=8, + gradient_steps=1, + ) + if model_class == DQN: + kwargs["learning_starts"] = 0 + + dict_model = model_class("MultiInputPolicy", dict_env, gamma=0.5, seed=1, **kwargs) + action_before_learning_1, _ = dict_model.predict(obs, deterministic=True) + dict_model.learn(total_timesteps=n_steps) + + normal_model = model_class("MlpPolicy", env, gamma=0.5, seed=1, **kwargs) + action_before_learning_2, _ = normal_model.predict(obs["vec"], deterministic=True) + normal_model.learn(total_timesteps=n_steps) + + action_1, _ = dict_model.predict(obs, deterministic=True) + action_2, _ = normal_model.predict(obs["vec"], deterministic=True) + + assert np.allclose(action_before_learning_1, action_before_learning_2) + assert np.allclose(action_1, action_2) + + @pytest.mark.parametrize("model_class", [PPO, A2C, DQN, DDPG, SAC, TD3]) @pytest.mark.parametrize("channel_last", [False, True]) def test_dict_spaces(model_class, channel_last): From e945ec1b8f90d73d270f8dab43e0ae3ea2ae69e0 Mon Sep 17 00:00:00 2001 From: Antonin Raffin Date: Tue, 9 Mar 2021 12:38:33 +0100 Subject: [PATCH 57/70] Update env checker --- docs/misc/changelog.rst | 10 ++- stable_baselines3/common/base_class.py | 8 +- stable_baselines3/common/env_checker.py | 103 ++++++++++++++++-------- tests/test_envs.py | 39 ++++++++- 4 files changed, 116 insertions(+), 44 deletions(-) diff --git a/docs/misc/changelog.rst b/docs/misc/changelog.rst index 8cb0a035b..6669b78b5 100644 --- a/docs/misc/changelog.rst +++ b/docs/misc/changelog.rst @@ -15,6 +15,10 @@ Breaking Changes: New Features: ^^^^^^^^^^^^^ - Added support for ``custom_objects`` when loading models +- Added support for single-level ``Dict`` observation space (@JadenTravnik) +- Added ``DictRolloutBuffer`` ``DictReplayBuffer`` to support dictionary observations (@JadenTravnik) +- Added ``StackedObservations`` and ``StackedDictObservations`` that are used within ``VecFrameStack`` +- Added simple 4x4 room Dict test environments Bug Fixes: ^^^^^^^^^^ @@ -27,6 +31,9 @@ Documentation: - Added note about slow-down when switching to PyTorch - Add a note on continual learning and resetting environment +Others: +^^^^^^^ +- Updated ``env_checker`` to reflect support of dict observation spaces Pre-Release 0.11.1 (2021-02-27) ------------------------------- @@ -65,9 +72,6 @@ New Features: automatic check for image spaces. - ``VecFrameStack`` now has a ``channels_order`` argument to tell if observations should be stacked on the first or last observation dimension (originally always stacked on last). -- Add ``DictRolloutBuffer`` to support dictionary observations (need to be tested in ReplayBuffer) (@JadenTravnik) -- Add ``StackedObservations`` and ``StackedDictObservations`` that are used within ``VecFrameStack`` -- Added simple 4x4 and 9room test environments - Added ``common.env_util.is_wrapped`` and ``common.env_util.unwrap_wrapper`` functions for checking/unwrapping an environment for specific wrapper. 
- Added ``env_is_wrapped()`` method for ``VecEnv`` to check if its environments are wrapped diff --git a/stable_baselines3/common/base_class.py b/stable_baselines3/common/base_class.py index 74914da89..b272c5a38 100644 --- a/stable_baselines3/common/base_class.py +++ b/stable_baselines3/common/base_class.py @@ -198,14 +198,14 @@ def _wrap_env(env: GymEnv, verbose: int = 0, monitor_wrapper: bool = True) -> Ve # Make sure that dict-spaces are not nested (not supported) check_for_nested_spaces(env.observation_space) - if isinstance(env.observation_space, gym.spaces.dict.Dict): + if isinstance(env.observation_space, gym.spaces.Dict): for space in env.observation_space.spaces.values(): - if isinstance(space, gym.spaces.dict.Dict): + if isinstance(space, gym.spaces.Dict): raise ValueError("Nested observation spaces are not supported (Dict spaces inside Dict space).") if not is_vecenv_wrapped(env, VecTransposeImage): wrap_with_vectranspose = False - if isinstance(env.observation_space, gym.spaces.dict.Dict): + if isinstance(env.observation_space, gym.spaces.Dict): # If even one of the keys is a image-space in need of transpose, apply transpose for space in env.observation_space.spaces.values(): wrap_with_vectranspose = wrap_with_vectranspose or ( @@ -223,7 +223,7 @@ def _wrap_env(env: GymEnv, verbose: int = 0, monitor_wrapper: bool = True) -> Ve # check if wrapper for dict support is needed when using HER # TODO(antonin): remove this with the new version of HER - if isinstance(env.observation_space, gym.spaces.dict.Dict) and set(env.observation_space.spaces.keys()) == set( + if isinstance(env.observation_space, gym.spaces.Dict) and set(env.observation_space.spaces.keys()) == set( ["observation", "desired_goal", "achieved_goal"] ): env = ObsDictWrapper(env) diff --git a/stable_baselines3/common/env_checker.py b/stable_baselines3/common/env_checker.py index 4630ab080..27326e45c 100644 --- a/stable_baselines3/common/env_checker.py +++ b/stable_baselines3/common/env_checker.py @@ -16,14 +16,14 @@ def _is_numpy_array_space(space: spaces.Space) -> bool: return not isinstance(space, (spaces.Dict, spaces.Tuple)) -def _check_image_input(observation_space: spaces.Box) -> None: +def _check_image_input(observation_space: spaces.Box, key: str = "") -> None: """ Check that the input will be compatible with Stable-Baselines when the observation is apparently an image. """ if observation_space.dtype != np.uint8: warnings.warn( - "It seems that your observation is an image but the `dtype` " + f"It seems that your observation {key} is an image but the `dtype` " "of your observation_space is not `np.uint8`. " "If your observation is not an image, we recommend you to flatten the observation " "to have only a 1D vector" @@ -31,7 +31,7 @@ def _check_image_input(observation_space: spaces.Box) -> None: if np.any(observation_space.low != 0) or np.any(observation_space.high != 255): warnings.warn( - "It seems that your observation space is an image but the " + f"It seems that your observation space {key} is an image but the " "upper and lower bounds are not in [0, 255]. " "Because the CNN policy normalize automatically the observation " "you may encounter issue if the values are not in that range." @@ -39,32 +39,36 @@ def _check_image_input(observation_space: spaces.Box) -> None: if observation_space.shape[0] < 36 or observation_space.shape[1] < 36: warnings.warn( - "The minimal resolution for an image is 36x36 for the default CnnPolicy. 
" - "You might need to use a custom `cnn_extractor` " - "cf https://stable-baselines3.readthedocs.io/en/master/guide/custom_policy.html" + "The minimal resolution for an image is 36x36 for the default `CnnPolicy`. " + "You might need to use a custom feature extractor " + "cf. https://stable-baselines3.readthedocs.io/en/master/guide/custom_policy.html" ) def _check_unsupported_spaces(env: gym.Env, observation_space: spaces.Space, action_space: spaces.Space) -> None: """Emit warnings when the observation space or action space used is not supported by Stable-Baselines.""" - # TODO(antonin): no need for that warning but check that it is a first level dict - # also tell the user to convert tuple to dict obs space - if isinstance(observation_space, spaces.Dict) and not isinstance(env, gym.GoalEnv): - warnings.warn( - "The observation space is a Dict but the environment is not a gym.GoalEnv " - "(cf https://github.com/openai/gym/blob/master/gym/core.py), " - "this is currently not supported by Stable Baselines " - "(cf https://github.com/hill-a/stable-baselines/issues/133), " - "you will need to use a custom policy. " - ) + if isinstance(observation_space, spaces.Dict): + nested_dict = False + for space in observation_space.spaces.values(): + if isinstance(space, spaces.Dict): + nested_dict = True + if nested_dict: + warnings.warn( + "Nested observation spaces are not supported by Stable Baselines3 " + "(Dict spaces inside Dict space). " + "You should flatten it to have only one level of keys." + "For example, `dict(space1=dict(space2=Box(), space3=Box()), spaces4=Discrete())` " + "is not supported but `dict(space2=Box(), spaces3=Box(), spaces4=Discrete())` is." + ) if isinstance(observation_space, spaces.Tuple): warnings.warn( "The observation space is a Tuple," - "this is currently not supported by Stable Baselines " - "(cf https://github.com/hill-a/stable-baselines/issues/133), " - "you will need to flatten the observation and maybe use a custom policy. " + "this is currently not supported by Stable Baselines3. " + "However, you can convert it to a Dict observation space " + "(cf. https://github.com/openai/gym/blob/master/gym/spaces/dict.py). " + "which is supported by SB3." ) if not _is_numpy_array_space(action_space): @@ -104,6 +108,26 @@ def _check_obs(obs: Union[tuple, dict, np.ndarray, int], observation_space: spac ), f"The observation returned by the `{method_name}()` method does not match the given observation space" +def _check_box_obs(observation_space: spaces.Box, key: str = "") -> None: + """ + Check that the observation space is correctly formatted + when dealing with a ``Box()`` space. In particular, it checks: + - that the dimensions are big enough when it is an image, and that the type matches + - that the observation has an expected shape (warn the use if not) + """ + # If image, check the low and high values, the type and the number of channels + # and the shape (minimal value) + if len(observation_space.shape) == 3: + _check_image_input(observation_space) + + if len(observation_space.shape) not in [1, 3]: + warnings.warn( + f"Your observation {key} has an unconventional shape (neither an image, nor a 1D vector). " + "We recommend you to flatten the observation " + "to have only a 1D vector or use a custom policy to properly process the data." + ) + + def _check_returned_values(env: gym.Env, observation_space: spaces.Space, action_space: spaces.Space) -> None: """ Check the returned values by the env when calling `.reset()` or `.step()` methods. 
@@ -111,7 +135,15 @@ def _check_returned_values(env: gym.Env, observation_space: spaces.Space, action # because env inherits from gym.Env, we assume that `reset()` and `step()` methods exists obs = env.reset() - _check_obs(obs, observation_space, "reset") + if isinstance(observation_space, spaces.Dict): + assert isinstance(obs, dict), "The observation returned by `reset()` must be a dictionary" + for key in observation_space.spaces.keys(): + try: + _check_obs(obs[key], observation_space.spaces[key], "reset") + except AssertionError as e: + raise AssertionError(f"Error while checking key={key}: " + str(e)) + else: + _check_obs(obs, observation_space, "reset") # Sample a random action action = action_space.sample() @@ -122,7 +154,16 @@ def _check_returned_values(env: gym.Env, observation_space: spaces.Space, action # Unpack obs, reward, done, info = data - _check_obs(obs, observation_space, "step") + if isinstance(observation_space, spaces.Dict): + assert isinstance(obs, dict), "The observation returned by `step()` must be a dictionary" + for key in observation_space.spaces.keys(): + try: + _check_obs(obs[key], observation_space.spaces[key], "step") + except AssertionError as e: + raise AssertionError(f"Error while checking key={key}: " + str(e)) + + else: + _check_obs(obs, observation_space, "step") # We also allow int because the reward will be cast to float assert isinstance(reward, (float, int)), "The reward returned by `step()` must be a float" @@ -149,7 +190,8 @@ def _check_spaces(env: gym.Env) -> None: assert isinstance(env.action_space, spaces.Space), "The action space must inherit from gym.spaces" + gym_spaces -def _check_render(env: gym.Env, warn: bool = True, headless: bool = False) -> None: +# Check render cannot be covered by CI +def _check_render(env: gym.Env, warn: bool = True, headless: bool = False) -> None: # pragma: no cover """ Check the declared render modes and the `render()`/`close()` method of the environment. @@ -210,17 +252,10 @@ def check_env(env: gym.Env, warn: bool = True, skip_render_check: bool = True) - if warn: _check_unsupported_spaces(env, observation_space, action_space) - # If image, check the low and high values, the type and the number of channels - # and the shape (minimal value) - if isinstance(observation_space, spaces.Box) and len(observation_space.shape) == 3: - _check_image_input(observation_space) - - if isinstance(observation_space, spaces.Box) and len(observation_space.shape) not in [1, 3]: - warnings.warn( - "Your observation has an unconventional shape (neither an image, nor a 1D vector). 
" - "We recommend you to flatten the observation " - "to have only a 1D vector" - ) + obs_spaces = observation_space.spaces if isinstance(observation_space, spaces.Dict) else {"": observation_space} + for key, space in obs_spaces.items(): + if isinstance(space, spaces.Box): + _check_box_obs(space, key) # Check for the action space, it may lead to hard-to-debug issues if isinstance(action_space, spaces.Box) and ( @@ -238,7 +273,7 @@ def check_env(env: gym.Env, warn: bool = True, skip_render_check: bool = True) - # ==== Check the render method and the declared render modes ==== if not skip_render_check: - _check_render(env, warn=warn) + _check_render(env, warn=warn) # pragma: no cover # The check only works with numpy arrays if _is_numpy_array_space(observation_space) and _is_numpy_array_space(action_space): diff --git a/tests/test_envs.py b/tests/test_envs.py index be39349a6..d8de91ac4 100644 --- a/tests/test_envs.py +++ b/tests/test_envs.py @@ -1,3 +1,5 @@ +import types + import gym import numpy as np import pytest @@ -48,7 +50,10 @@ def test_env(env_id): @pytest.mark.parametrize("env_class", ENV_CLASSES) def test_custom_envs(env_class): env = env_class() - check_env(env) + with pytest.warns(None) as record: + check_env(env) + # No warnings for custom envs + assert len(record) == 0 def test_high_dimension_action_space(): @@ -81,8 +86,10 @@ def patched_step(_action): spaces.Box(low=-1, high=1, shape=(64, 3), dtype=np.float32), # Tuple space is not supported by SB spaces.Tuple([spaces.Discrete(5), spaces.Discrete(10)]), - # Dict space is not supported by SB when env is not a GoalEnv - spaces.Dict({"position": spaces.Discrete(5)}), + # Nested dict space is not supported by SB3 + spaces.Dict({"position": spaces.Dict({"abs": spaces.Discrete(5), "rel": spaces.Discrete(2)})}), + # Small image inside a dict + spaces.Dict({"img": spaces.Box(low=0, high=255, shape=(32, 32, 3), dtype=np.uint8)}), ], ) def test_non_default_spaces(new_obs_space): @@ -128,6 +135,19 @@ def test_common_failures_reset(): # Return not only the observation check_reset_assert_error(env, (env.observation_space.sample(), False)) + env = SimpleMultiObsEnv() + obs = env.reset() + + def wrong_reset(self): + return {"img": obs["img"], "vec": obs["img"]} + + env.reset = types.MethodType(wrong_reset, env) + with pytest.raises(AssertionError) as excinfo: + check_env(env) + + # Check that the key is explicitly mentioned + assert "vec" in str(excinfo.value) + def check_step_assert_error(env, new_step_return=()): """ @@ -165,3 +185,16 @@ def test_common_failures_step(): # Done is not a boolean check_step_assert_error(env, (env.observation_space.sample(), 0.0, 3.0, {})) check_step_assert_error(env, (env.observation_space.sample(), 0.0, 1, {})) + + env = SimpleMultiObsEnv() + obs = env.reset() + + def wrong_step(self, action): + return {"img": obs["vec"], "vec": obs["vec"]}, 0.0, False, {} + + env.step = types.MethodType(wrong_step, env) + with pytest.raises(AssertionError) as excinfo: + check_env(env) + + # Check that the key is explicitly mentioned + assert "img" in str(excinfo.value) From 4138f9610b0fb6bec2db5dbed8937623750857ab Mon Sep 17 00:00:00 2001 From: "Anssi \"Miffyli\" Kanervisto" Date: Tue, 6 Apr 2021 02:56:35 +0300 Subject: [PATCH 58/70] Add some docs on dict obs --- docs/guide/custom_policy.rst | 68 ++++++++++++++++++++++++++++++++++-- 1 file changed, 66 insertions(+), 2 deletions(-) diff --git a/docs/guide/custom_policy.rst b/docs/guide/custom_policy.rst index 03829ffd7..f17c10ab3 100644 --- a/docs/guide/custom_policy.rst 
+++ b/docs/guide/custom_policy.rst
@@ -3,8 +3,8 @@
 Custom Policy Network
 =====================

-Stable Baselines3 provides policy networks for images (CnnPolicies)
-and other type of input features (MlpPolicies).
+Stable Baselines3 provides policy networks for images (CnnPolicies),
+other types of input features (MlpPolicies) and multiple different inputs (MultiInputPolicies).


 .. warning::
@@ -149,6 +149,70 @@ that derives from ``BaseFeaturesExtractor`` and then pass it to the model when t
 model.learn(1000)


+Multiple Inputs and Dictionary Observations
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Stable Baselines3 supports handling of multiple inputs by using a ``Dict`` Gym space. This can be done using
+``MultiInputPolicy``, which by default uses the ``CombinedExtractor`` feature extractor to turn multiple
+inputs into a single vector, handled by the ``net_arch`` network.
+
+By default, ``CombinedExtractor`` processes multiple inputs as follows:
+
+1. If input is an image (automatically detected, see ``common.preprocessing.is_image_space``), process image with Nature Atari CNN network and
+ output a latent vector of size ``64``.
+2. If input is not an image, flatten it (no layers).
+3. Concatenate all previous vectors into one long vector and pass it to policy.
+
+Much like above, you can define custom combined feature extractors. The following example assumes the environment has two keys in the
+observation space dictionary: "image" is a (1,H,W) image, and "vector" is a (D,) dimensional vector. We process "image" with a simple
+downsampling and "vector" with a single linear layer.
+
+.. code-block:: python
+
+ import gym
+ import torch as th
+ from torch import nn
+
+ from stable_baselines3.common.torch_layers import BaseFeaturesExtractor
+
+ class CustomCombinedExtractor(BaseFeaturesExtractor):
+ def __init__(self, observation_space: gym.spaces.Dict):
+ # We do not know features-dim here before going over all the items,
+ # so put something dummy for now. PyTorch requires calling
+ # nn.Module.__init__ before adding modules
+ super(CustomCombinedExtractor, self).__init__(observation_space, features_dim=1)
+
+ extractors = {}
+
+ total_concat_size = 0
+ # We need to know size of the output of this extractor,
+ # so go over all the spaces and compute output feature sizes
+ for key, subspace in observation_space.spaces.items():
+ if key == "image":
+ # We will just downsample one channel of the image by 4x4 and flatten.
+ # Assume the image is single-channel (subspace.shape[0] == 1)
+ extractors[key] = nn.Sequential(nn.MaxPool2d(4), nn.Flatten())
+ total_concat_size += subspace.shape[1] // 4 * subspace.shape[2] // 4
+ elif key == "vector":
+ # Run through a simple MLP
+ extractors[key] = nn.Linear(subspace.shape[0], 16)
+ total_concat_size += 16
+
+ self.extractors = nn.ModuleDict(extractors)
+
+ # Update the features dim manually
+ self._features_dim = total_concat_size
+
+ def forward(self, observations) -> th.Tensor:
+ encoded_tensor_list = []
+
+ # self.extractors contains nn.Modules that do all the processing.
+ for key, extractor in self.extractors.items():
+ encoded_tensor_list.append(extractor(observations[key]))
+ # Return a (B, self._features_dim) PyTorch tensor, where B is batch dimension.
+ return th.cat(encoded_tensor_list, dim=1) + + On-Policy Algorithms ^^^^^^^^^^^^^^^^^^^^ From 0bcfa11b9bb97f204e40cd23536300d14a26b9ab Mon Sep 17 00:00:00 2001 From: "Anssi \"Miffyli\" Kanervisto" Date: Fri, 23 Apr 2021 02:13:34 +0300 Subject: [PATCH 59/70] Update default CNN feature vector size --- stable_baselines3/common/torch_layers.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/stable_baselines3/common/torch_layers.py b/stable_baselines3/common/torch_layers.py index 0a308a0e3..6b73ae92d 100644 --- a/stable_baselines3/common/torch_layers.py +++ b/stable_baselines3/common/torch_layers.py @@ -236,10 +236,11 @@ class CombinedExtractor(BaseFeaturesExtractor): the output features are concatenated and fed through additional MLP network ("combined"). :param observation_space: - :param cnn_output_dim: Number of features to output from each CNN submodule(s) + :param cnn_output_dim: Number of features to output from each CNN submodule(s). Defaults to + 256 to avoid exploding network sizes. """ - def __init__(self, observation_space: gym.spaces.Dict, cnn_output_dim: int = 64): + def __init__(self, observation_space: gym.spaces.Dict, cnn_output_dim: int = 256): # TODO we do not know features-dim here before going over all the items, so put something there. This is dirty! super(CombinedExtractor, self).__init__(observation_space, features_dim=1) From 652a6d020037ee638af9a597f131d8058095b0b4 Mon Sep 17 00:00:00 2001 From: Antonin RAFFIN Date: Mon, 3 May 2021 14:52:54 +0200 Subject: [PATCH 60/70] Refactor HER (#351) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Start refactoring HER * Fixes * Additional fixes * Faster tests * WIP: HER as a custom replay buffer * New replay only version (working with DQN) * Add support for all off-policy algorithms * Fix saving/loading * Remove ObsDictWrapper and add VecNormalize tests with dict * Stable-Baselines3 v1.0 (#354) * Bump version and update doc * Fix name * Apply suggestions from code review Co-authored-by: Adam Gleave * Update docs/index.rst Co-authored-by: Adam Gleave * Update wording for RL zoo Co-authored-by: Adam Gleave * Add gym-pybullet-drones project (#358) * Update projects.rst Added gym-pybullet-drones * Update projects.rst Longer title underline * Update changelog Co-authored-by: Antonin Raffin * Include SuperSuit in projects (#359) * include supersuit * longer title underline * Update changelog.rst * Fix default arguments + add bugbear (#363) * Fix potential bug + add bug bear * Remove unused variables * Minor: version bump * Add code of conduct + update doc (#373) * Add code of conduct * Fix DQN doc example * Update doc (channel-last/first) * Apply suggestions from code review Co-authored-by: Anssi * Apply suggestions from code review Co-authored-by: Adam Gleave Co-authored-by: Anssi Co-authored-by: Adam Gleave * Make installation command compatible with ZSH (#376) * Add quotes * Add Zsh bracket info * Add clarify pip installation line * Make note bold * Add Zsh pip installation note * Add handle timeouts param * Fixes * Fixes (buffer size, extend test) * Fix `max_episode_length` redefinition * Fix potential issue * Add some docs on dict obs * Fix performance bug * Fix slowdown * Add package to install (#378) * Add package to install * Update docs packages installation command Co-authored-by: Antonin RAFFIN * Fix backward compat + add test * Fix VecEnv detection * Update doc * Fix vec env check * Support for `VecMonitor` for gym3-style environments (#311) * add 
vectorized monitor * auto format of the code * add documentation and VecExtractDictObs * refactor and add test cases * add test cases and format * avoid circular import and fix doc * fix type * fix type * oops * Update stable_baselines3/common/monitor.py Co-authored-by: Antonin RAFFIN * Update stable_baselines3/common/monitor.py Co-authored-by: Antonin RAFFIN * add test cases * update changelog * fix mutable argument * quick fix * Apply suggestions from code review * fix terminal observation for gym3 envs * delete comment * Update doc and bump version * Add warning when already using `Monitor` wrapper * Update vecmonitor tests * Fixes Co-authored-by: Antonin RAFFIN * Reformat * Fixed loading of ``ent_coef`` for ``SAC`` and ``TQC``, it was not optimized anymore (#392) * Fix ent coef loading bug * Add test * Add comment * Reuse save path * Add test for GAE + rename `RolloutBuffer.dones` for clarification (#375) * Fix return computation + add test for GAE * Rename `last_dones` to `episode_starts` for clarification * Revert advantage * Cleanup test * Rename variable * Clarify return computation * Clarify docs * Add multi-episode rollout test * Reformat Co-authored-by: Anssi "Miffyli" Kanervisto * Fixed saving of `A2C` and `PPO` policy when using gSDE (#401) * Improve doc and replay buffer loading * Add support for images * Fix doc * Update Procgen doc * Update changelog * Update docstrings Co-authored-by: Adam Gleave Co-authored-by: Jacopo Panerati Co-authored-by: Justin Terry Co-authored-by: Anssi Co-authored-by: Tom Dörr Co-authored-by: Tom Dörr Co-authored-by: Costa Huang --- CODE_OF_CONDUCT.md | 128 ++++ README.md | 15 +- docs/README.md | 4 +- docs/_static/img/net_arch.png | Bin 0 -> 138546 bytes docs/_static/img/sb3_loop.png | Bin 0 -> 168611 bytes docs/_static/img/sb3_policy.png | Bin 0 -> 180282 bytes docs/guide/callbacks.rst | 5 + docs/guide/custom_env.rst | 10 +- docs/guide/custom_policy.rst | 112 +++- docs/guide/developer.rst | 3 + docs/guide/examples.rst | 50 +- docs/guide/install.rst | 4 + docs/guide/migration.rst | 2 +- docs/guide/rl_zoo.rst | 8 +- docs/guide/vec_envs.rst | 28 +- docs/index.rst | 4 +- docs/misc/changelog.rst | 87 ++- docs/misc/projects.rst | 37 +- docs/modules/her.rst | 60 +- setup.py | 6 +- stable_baselines3/__init__.py | 9 +- stable_baselines3/common/base_class.py | 27 +- stable_baselines3/common/buffers.py | 106 +++- stable_baselines3/common/env_checker.py | 8 +- .../common/envs/bit_flipping_env.py | 87 ++- stable_baselines3/common/evaluation.py | 4 +- stable_baselines3/common/monitor.py | 88 ++- .../common/off_policy_algorithm.py | 108 +++- .../common/on_policy_algorithm.py | 4 +- stable_baselines3/common/policies.py | 20 +- stable_baselines3/common/results_plotter.py | 2 +- stable_baselines3/common/torch_layers.py | 4 +- stable_baselines3/common/vec_env/__init__.py | 2 + .../common/vec_env/obs_dict_wrapper.py | 68 -- .../common/vec_env/vec_extract_dict_obs.py | 24 + .../common/vec_env/vec_monitor.py | 98 +++ .../common/vec_env/vec_normalize.py | 3 +- .../common/vec_env/vec_transpose.py | 3 +- stable_baselines3/ddpg/ddpg.py | 10 +- stable_baselines3/dqn/dqn.py | 12 +- stable_baselines3/her/__init__.py | 2 - stable_baselines3/her/her.py | 582 ------------------ stable_baselines3/her/her_replay_buffer.py | 312 +++++++--- stable_baselines3/sac/sac.py | 10 +- stable_baselines3/td3/td3.py | 12 +- stable_baselines3/version.txt | 2 +- tests/test_callbacks.py | 14 +- tests/test_dict_env.py | 52 +- tests/test_envs.py | 19 + tests/test_gae.py | 114 ++++ 
diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md
new file mode 100644
index 000000000..137c95744
--- /dev/null
+++ b/CODE_OF_CONDUCT.md
@@ -0,0 +1,128 @@
+# Contributor Covenant Code of Conduct
+
+## Our Pledge
+
+We as members, contributors, and leaders pledge to make participation in our
+community a harassment-free experience for everyone, regardless of age, body
+size, visible or invisible disability, ethnicity, sex characteristics, gender
+identity and expression, level of experience, education, socio-economic status,
+nationality, personal appearance, race, religion, or sexual identity
+and orientation.
+
+We pledge to act and interact in ways that contribute to an open, welcoming,
+diverse, inclusive, and healthy community.
+
+## Our Standards
+
+Examples of behavior that contributes to a positive environment for our
+community include:
+
+* Demonstrating empathy and kindness toward other people
+* Being respectful of differing opinions, viewpoints, and experiences
+* Giving and gracefully accepting constructive feedback
+* Accepting responsibility and apologizing to those affected by our mistakes,
+  and learning from the experience
+* Focusing on what is best not just for us as individuals, but for the
+  overall community
+
+Examples of unacceptable behavior include:
+
+* The use of sexualized language or imagery, and sexual attention or
+  advances of any kind
+* Trolling, insulting or derogatory comments, and personal or political attacks
+* Public or private harassment
+* Publishing others' private information, such as a physical or email
+  address, without their explicit permission
+* Other conduct which could reasonably be considered inappropriate in a
+  professional setting
+
+## Enforcement Responsibilities
+
+Community leaders are responsible for clarifying and enforcing our standards of
+acceptable behavior and will take appropriate and fair corrective action in
+response to any behavior that they deem inappropriate, threatening, offensive,
+or harmful.
+
+Community leaders have the right and responsibility to remove, edit, or reject
+comments, commits, code, wiki edits, issues, and other contributions that are
+not aligned to this Code of Conduct, and will communicate reasons for moderation
+decisions when appropriate.
+
+## Scope
+
+This Code of Conduct applies within all community spaces, and also applies when
+an individual is officially representing the community in public spaces.
+Examples of representing our community include using an official e-mail address,
+posting via an official social media account, or acting as an appointed
+representative at an online or offline event.
+
+## Enforcement
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be
+reported to the community leaders responsible for enforcement at
+antonin [dot] raffin [at] dlr [dot] de.
+All complaints will be reviewed and investigated promptly and fairly.
+
+All community leaders are obligated to respect the privacy and security of the
+reporter of any incident.
+
+## Enforcement Guidelines
+
+Community leaders will follow these Community Impact Guidelines in determining
+the consequences for any action they deem in violation of this Code of Conduct:
+
+### 1. Correction
+
+**Community Impact**: Use of inappropriate language or other behavior deemed
+unprofessional or unwelcome in the community.
+
+**Consequence**: A private, written warning from community leaders, providing
+clarity around the nature of the violation and an explanation of why the
+behavior was inappropriate. A public apology may be requested.
+
+### 2. Warning
+
+**Community Impact**: A violation through a single incident or series
+of actions.
+
+**Consequence**: A warning with consequences for continued behavior. No
+interaction with the people involved, including unsolicited interaction with
+those enforcing the Code of Conduct, for a specified period of time. This
+includes avoiding interactions in community spaces as well as external channels
+like social media. Violating these terms may lead to a temporary or
+permanent ban.
+
+### 3. Temporary Ban
+
+**Community Impact**: A serious violation of community standards, including
+sustained inappropriate behavior.
+
+**Consequence**: A temporary ban from any sort of interaction or public
+communication with the community for a specified period of time. No public or
+private interaction with the people involved, including unsolicited interaction
+with those enforcing the Code of Conduct, is allowed during this period.
+Violating these terms may lead to a permanent ban.
+
+### 4. Permanent Ban
+
+**Community Impact**: Demonstrating a pattern of violation of community
+standards, including sustained inappropriate behavior, harassment of an
+individual, or aggression toward or disparagement of classes of individuals.
+
+**Consequence**: A permanent ban from any sort of public interaction within
+the community.
+
+## Attribution
+
+This Code of Conduct is adapted from the [Contributor Covenant][homepage],
+version 2.0, available at
+https://www.contributor-covenant.org/version/2/0/code_of_conduct.html.
+
+Community Impact Guidelines were inspired by [Mozilla's code of conduct
+enforcement ladder](https://github.com/mozilla/diversity).
+
+[homepage]: https://www.contributor-covenant.org
+
+For answers to common questions about this code of conduct, see the FAQ at
+https://www.contributor-covenant.org/faq. Translations are available at
+https://www.contributor-covenant.org/translations.
diff --git a/README.md b/README.md
index 5584ca730..836e10e37 100644
--- a/README.md
+++ b/README.md
@@ -37,7 +37,7 @@ you can take a look at the issues [#48](https://github.com/DLR-RM/stable-baselin
 | Type hints | :heavy_check_mark: |
 
-### Planned features (v1.1+)
+### Planned features
 
 Please take a look at the [Roadmap](https://github.com/DLR-RM/stable-baselines3/issues/1) and [Milestones](https://github.com/DLR-RM/stable-baselines3/milestones).
@@ -49,11 +49,13 @@ A migration guide from SB2 to SB3 can be found in the [documentation](https://st
 
 Documentation is available online: [https://stable-baselines3.readthedocs.io/](https://stable-baselines3.readthedocs.io/)
 
-## RL Baselines3 Zoo: A Collection of Trained RL Agents
+## RL Baselines3 Zoo: A Training Framework for Stable Baselines3 Reinforcement Learning Agents
 
-[RL Baselines3 Zoo](https://github.com/DLR-RM/rl-baselines3-zoo). is a collection of pre-trained Reinforcement Learning agents using Stable-Baselines3.
+[RL Baselines3 Zoo](https://github.com/DLR-RM/rl-baselines3-zoo) is a training framework for Reinforcement Learning (RL).
 
-It also provides basic scripts for training, evaluating agents, tuning hyperparameters, plotting results and recording videos.
+It provides scripts for training, evaluating agents, tuning hyperparameters, plotting results and recording videos.
+
+In addition, it includes a collection of tuned hyperparameters for common environments and RL algorithms, and agents trained with those settings.
 
 Goals of this repository:
 
@@ -92,6 +94,7 @@ Install the Stable Baselines3 package:
 ```
 pip install stable-baselines3[extra]
 ```
+**Note:** Some shells such as Zsh require quotation marks around brackets, i.e. `pip install 'stable-baselines3[extra]'` ([More Info](https://stackoverflow.com/a/30539963)).
 
 This includes optional dependencies like Tensorboard, OpenCV or `atari-py` to train on atari games. If you do not need those, you can use:
 ```
@@ -111,9 +114,9 @@ import gym
 
 from stable_baselines3 import PPO
 
-env = gym.make('CartPole-v1')
+env = gym.make("CartPole-v1")
 
-model = PPO('MlpPolicy', env, verbose=1)
+model = PPO("MlpPolicy", env, verbose=1)
 model.learn(total_timesteps=10000)
 
 obs = env.reset()
diff --git a/docs/README.md b/docs/README.md
index 1427a7987..169a5e3db 100644
--- a/docs/README.md
+++ b/docs/README.md
@@ -6,9 +6,9 @@ This folder contains documentation for the RL baselines.
 ### Build the Documentation
 
 #### Install Sphinx and Theme
-
+Execute this command in the project root:
 ```
-pip install sphinx sphinx-autobuild sphinx-rtd-theme
+pip install -e .[docs]
 ```
 
 #### Building the Docs
diff --git a/docs/_static/img/net_arch.png b/docs/_static/img/net_arch.png
new file mode 100644
index 0000000000000000000000000000000000000000..20143ac2d614fe8d0ba2e1c4be9c4cef460ccd46
GIT binary patch
literal 138546
[base85-encoded binary image data for docs/_static/img/net_arch.png omitted]
zea=)p^r1q2%YRq9x)|T0(QV_44}C8>SN1ETdTxHh?1W5U!4l37;Z7W*t9OH#_m+Lb zN5eFMZ&%;ZMK|4c>03Q%#IaYtGK$x+KG!F^Abz}eTUC8+Ivjjgp~q)@l5R*HU8aZ; zEs|YRr>QdA<0ZYq2S8Zt*1xsVC5k3cMDRM5gF5}Aah;rPUsZ0Y-TWngDqhl}=7tsp zg{`5__@jHfRDhY_l-@&UingIjkg;_Y z2x-7Oo_H-$t0U%PgSzX;DDi0au{RA>q$y3ascNTkcclmUe%Vh?RW)?US|%j4eg350 zZ>!Zf!4yCCSj7HW8*wKWwT~Ki!oYZ~AC=-b0{q0Z5MH{;)hTn#7y{gNZ`tvR9&X5& zxSW25OoOF<;CK;tog~`ECz>NDiw52V4C+1_RG<{-5@#Qfn->UyL-O=6*>j`FhEt`F zNUqX)>1}~APHm>1Dk0Gc4GZy3j&m*@7{^r3E)?rF;<+NxJ5Yt$a1n0fe$nVS>sK_= zHQSl4*fyTKS>!nCy+IehwYnfX&GivOK~*FnukipqH})J~Hou}|s>xgnO)zS@z~Q)| zUz~&S>KqE_JC!T3>YS1FmbpAdrAh?hS_8`f2pMfaYN4djADCeCep+yI_fXfmiUj1}v}Jb>K5G2hW1#HJ$H^#ITm9JQUh0im`89(w~Mu?w;FAYP}^2!SF zf@s!MR+d``)fom%^Cukv1qYUF-XVY!ilSyav}?F^2~g#4Op2BXR}mNn_KsbU^HGxq z>jLtRgq7`<&Zf)tZ#%4vb}?}4iiz()TC% z|DJX*$sPcsIiZg-kL1n)Y>V|VTOYo18adc=+D1h*^tEW(;dnbb&%cBtJpK2@^?xV_2CPuNDS zJxt`P6I}<;2s);W$DjHZ;9-1l%&I(hM;)?#5VIm+v;#Jm9ih?Y<#OwqL=yJ5vMM|> zk11R69Y&cyJF0A8+Zn3(O}*z5w_}EAyKIQd?WNjAK8aGbUK*qI=0W+ibooXF{3AEU z35WbBIB$|rGcw!#%0>v_UKLbB_7kakX{#Cpb*%MCq^Sm~M%@{ii#P@O^Mna#`yD?E z*-1HJ`Qsjz^mSXS{Bf~T^TBaZY9F=L6@;*Qqh>a}e7Q+!R;H{eXqpVoBEEP(patpJ z!?@hPZ4)<&)V0H(HIJvcl)LRNOyq?-ELd2zC?OtGRI{oxOA!!I{?UaZu=k$ezf`jaT= zfr*H2U;(%W{RR7yu^_Ra!o9khfM}^Qo7;7wYq?7^H*m|#k&zo4Ur!bVn^bkOpQ^Zg zt#&+5l0Uu6HkjApu1P~aabNfWb{Y4)*Fs1WE8oz0!e`gZq$jEg()OA^r9HL@3|%lP zd{bCpGFzB`k+#knY9*j7*=;sy6c-b4jx+YXBsz(CL^r?&vJ7F#YccPqdH%Dr#tyn% z2{Sv#0Y~|mpj#je6mKR))kJqscxt+CC06rWp5C^3-$pj`AnP&g1^6HAWxBmg@Sq)yh0;y# z#XBpZ5p;4m(5r%jTJqB{-|EBAGazln28}NN3<^jq7%UpHXNdRP!L7x${*r|RN$1GN zw>G7XkE=ogt${$c`mdkb)$=u`0XwG$Kx`Yv9?MpAIFkoi1>Z!|;ajKyr<>*U>kNpJ z#Bp+gBr!enz~Atcxy_ zSu|>zAmjhIb76Q*tH`IsYT|lKl5u~R3EjPvqrF!`1-gQW!oC(*wG6I%LTw^%;zz+I zs_)e6(>T(n!QrN*4`j>>yPdP?Qw*n|;|GQ@t+?Tl0mVv}{?pXC&y8=}HF@IuY6B># zyN3X}jaDNjic)&Hv+e*cC}wNmzuaWyWH#x~&s=8nLGzUld})9RDR|XM_I%auvXPqV zG-R|fXT&RR45*WL4}n2$swJC56?Rg59Z=0@H{FDCJ9&J2x;0*~TJNt+Ox@N(Kc>yn zm!>`OF300Ztei54A}w!d(%H7$M-#Uk4XZcsE#}WJFlV$89*;2pPuz#a#lR=LgZFl5bZ6%oGOzUyu+rWwDAfy(Gs3*YQq@fwtf zavP>qYbO0ZLA2K{oYi49?cf;pn)&$1|IKQ2QwVw{G%nLOT^q8HL9fJ4`Ic7G468+d zB#0O0J2u^P(TaeDDYm$D#cw|BlDtcD4Th5An@Vg?vqV+x&^C66b%q$N=B7TirYmNL zxFvx+`uIdFh!pk@Op;dqFp;|CHQ6;gOk+!x?;rN*1f?92TC0qP(vXmWWwVmhBp*4Fb|txxi#4mMLdc0B#(zAfXeiYpuoKh`PaA}!j&4P<8^xY^B2Q9@ zXboC8>0`=>A_qYBwO|tjkm|&64T}2BNRg_}8dJ)!MNWCF`qNJ7wdQS5Exv740?+xr zqxz-L!;ruCE|gM5$|}RlOpygAK*zTNYa!<~T($*j>x0>L zmE>)!!Eytu0b4%%y_dv>EHT~2=Uq;NMxADjcYL}uR&-(_%GAHV8S!C^Mg=W@rEcdV zNVe8$k`+r^l=Kyq8JYih7HGz2PE&JTw0ltolDBzNXZKg8Ze*_|17c#LHDi<4!~1P* zJThceVPqE3@dvo+(v`D!L~g#!D0g%i zE>txY(>DWsp<4apLfRsILb;<`)gDc+Teh7{g;YMBD@6gKK@G4ye-d|A6dsllkvGzX zAbu!XSjx7v04az8S$u%wG8BD(gZH9pXRE(>?W7%vjil4W@C8^#;i z!Eh;pDxiyITT_y$+e;7JZwJ#=>mFf#yW`}#vh6uNoW?zF+FJ$|*>z=66Oz0lkH!B6 zdZR8{%r;@;aq-xQSP*kRSS`nJJPqmTJa37lQcZqpB%lupq*uQK)D-9Zg}-DU8-}CF zYU7md+@a@=Q3O(NZ_;HH@6x}<%C#_E{+v6OhF~{r;TH{6w`_gBQ9pBh*8H*l8W4LM z)ssO*EZB8z=$kpR`_6wh_0pi@)r+%mAh+zA2U>@HPlsk{{^V}?yE!2*cFApS$@A@e zTTm6|#9j;C9=>Mg&drPTd_jvCkf&9KsRnglW*~enfOkuYoZVKf7&C;c?!nc1gX(s6 z$PKgOX^?;%ixZ|&aoBxz$?h-Az^A*$8>WIvD`?hX{ zg`MpZcAbl7OW>+Q#JvfCMc%he${6;GXRz<~-AQHyW!d4RSsoXo@@O6j?y z4cHgFfb>Xe*k`No8Td5L;t03LlQpa3kN8xt8nic>V1{n5UpK-0M;d40-@CJRIzdvw zIcSljnZJK%W9Pg1__&+oUMC`(LMb~E60@*?Y)~s=cV|lxcbDq6^TW2S01yznvQ?=k zTB4|v#A8U4B%?0D2FZ{>%(1BXZVo_uvL?%TFOx61meT^0#WfVxq*~o?R!N2Uc#=c} zg+>%s#eOo=t{Uk=RDN(74vR8N&6(ex*c|TGboDc`0x=W65j)$>GED!08&jOufNRYW zsF7MU5@>U|(W7uiAye%u6VM+no}I5U6UmNjEL#6L8KM5M7?57T}jk z`R3Bac0VeHlPJwjYA;+MM%KM{ex3j+*xSF0YyIHWKDTBBtO+S9M3UQ#z*~DjEJ&#R 
zXl|#4$ZCIa>zV$=YNcTB@@00o!VXP<6gb;kjoHJGm)@CwsbWrP+lBT4ua@H%@IXSqrX@Knp}=g?x6*{-5i6iCHoE=C1GE?199ORK!6V-=sA8-kA)QB!&iZm z>oM)6N#`0;TYa8rcf}RFlA=fx-nTz zEvtOq0Sp5w*Xi+H960aI0iP`gYz-X)U#h~67@v}isgoZitAzc{^zs+JAmefQQnObM z=pxEpU3$2^ouxd2$gEpxm@3S!Cmmnf2=wNdi<;CnG8ekDHG7tD{)XLqJs$()VSV(k z{NrSDGE!xezB83&>uYjA=S6x3LPW!7G2_Hj`AoW2lwZ=1d)#%@5_^n}y4 zU1~Q=2*rP90WwV0OTCba?Ga8^`MyIrRQvMj^HAY1>^& z6)3+5aAhS28FUw_BR!_hzV90rqeeS_C8ram%+IFk#X2Ub;FbcY+4jy%?~7*8cNJQ?)X}E_!^G+S}>Z2|3@Tex@b3Ft(58?W;Ruz`_xH9P|TdC;xbPexFGVc zRx^_7(iJmLBXtl|y{Cno-c%Dsxt-W=HRBPcS*i|}S`%ny?{RHI4^!m0>Kf@h;W1T$ z{Wd&VtlMQZC|BfNi^Mf5$`|XZsPQq(uIxyW3(EZ&Xzdb0Z{>8Y|0MzdSwch>rGnIf z+>X)J$-@5W_K|qu^-J%lwNp%>BG|RS$}1vGeJ`ZJ?_qKb|Hv7njfOOc7NNANj-YDE z$jJ`2~8OBX66-qNp9m8#N zTJnkGl?Ft^~(9q#uyDzn%{6}El3!wWPps9+1D$-fE8?w+-g67i3N&tV& zyAj$?(+#^3cWz~CXpr;hn?E{TzWottVzzx1(L%&`q?Pu8SIFXpCPeaLt(luA(oV^$ zoSCug7wI;`B8A05*?MINGFWhvI=sC<<`U9(My!Ce8TLhUx)@-|o`@`BqnXLD&KrO< z1Wph^;#Z$8Y~fuWKsISwu^%FFMjyTXszIJ4K>)ZCw^o)j+oFyLI?$NBip&Ebvj9jZ z7s(9ghQwu}Cbhx6YZ6`uSBJ;BT|glIBB4z(wFa&SWo49g%sO9`gs9&q=od+u; zenl|Ba>7KVC{-p3+S)LWVJUs|cxrj<&6aCqn&&j4cb_Tkd_z}J;nwGgfN@o8I_({d z4pgDZk*+a0tEF6ZDf^(kY{S;jnW2v2sYq5fRy*MBQORy3KI%vF6$}2s@S5 zk~S_KS$=-)?7Q^@(bT9qYsC9>T`x-=k zzAkZaQ_Hu=XR|A}4UJJIK;*7uA4kmu0p48iBH{-D3;7W0z9~eOvKes7Plxh<+WGmz z6H>&Gu_3q!TA+Vk)I7%t7fIXsBlk}c)3D$5QsUokJwha;K?JKU5khwCU^V#-ul12X z`32a5RQQvaJFIZ0Te~mnW?`_8rszbjdq&aIU5_4AAG9J5)`WD#wuYgQ6aM2yLIh-R zBkw^r7(he4XLgwW7XEB!ac6yydfzra5Uk2j<2N%KE30#!IE&S*1kd`BxyDdy+ z9IPDK?bh%++DDK6Sb0F0GYRE=O2{m5==argVYjfRXB)5nGy!BgFeaq)17jjm`K*u6 z!ERBgIwr)cT!3YQo#YiY&hHzu9NI}r{LZsXMSngHNp-OBkz9s@HQ5b0(sdD`|K9E2 zCyW6h_buUj!a4q12j;X}D*NQR;T_NZ@Q;E-S}ZE&Go z&l8y+LVjSV%6H%J2)x6T$HAUq%u^M?+53J>ICEuq8|J_M**gm5PJy%Utp)#2W61vP zsxX823E2ukZdUQ!n}1(E*WU6>pzHJQ5nAwJl>5LHnAt-a8pBZ0Cz^E4D%anK4b68p zXvrgGHuBTD5*_>JKZ2Y7e?M4yzpHHH!&|{U*B#`CHhUjq-8+!L%7!WlDM}GFA!J(? 
zIUsnscR-R-HZvO@QVI-3U*U%nL0*pu5toMY>+Lb+rYP2b2Rqc^uZ}5;N8Ljl9eEu9 z3}kRH0Tdqly9ARPA^~OxlW9tU^3cfH_dv@fAu=B*Q;{EG4P+9xL>P%xw+9(elhqj= zaxYDSg=SFmUE2%!$MWtO3YY}PPhax1g#(cD?_hOq3(NM zq>B*lnTy5B^QPZNm8$}i4B9XCpewfT>_#Bt({z!so3!2A-}%0?r^gJej`jTN-F3ks zpPY$Ac2-&ZpPe1xR3>!p;9>BLgNJ?e34jGbLG|auT;FcPePDtRS*1-A|Bg8z_E-Vz zI?(B;@_hv(2hm2j0HnTycmXQ@7Fz?X_5ivq0{}J+k(MvOCbZV}?;`SYixAKNsN=!b z7m&lv1c4i37@23;k0}Gt*};Y|Hn$JIUiC2jFi3J_nL#fsW&eGVDtk`Jn)Xb)MNlL7$E)|RS@4OfI1MM zfX%G$;yx5046=)8Z>gX9PXhqkhh_Vr%??1IugHXY*2k@XSM?7A%=YH#fdO!4Rft2U zh&6NU_$u+Mfn4#=MF>(@&-L6U)!6~z;P(R{nA=KZCzS%Rz}x@EYFHc!)$uqyuk5Qw z22)hg-|+SK+t5J-pqLPk`r9GEZ~znu$U13^M%O9-{kC39ETGWeD?_0W>mZSZa`(N8 zd=6eTv7!Q8C6ZC&@S*{A`vOclAPRNX&(0&Y}P^s78T>EnRy}cj|5mj&q zYMuS~gD2Y{zDnteTT}`xFyF2I_uHJ$bQtR*+3hs;Z3v#tg}9L)NMZ2yN?-K9uXgF4 zQVL{s>YDra+fcl~+tB|yaPj}Kz!I3s*C{{-j!1PXct*$J)9$4@Jtj2}2{7P#_7A`N z?ngyNdEd!m>+Sx%>KFTcz=1)>{?n!KN9n%ewI+#2knX#b@5S49x`UV$?d~&siIosb(klXjQmhL-VYpml)>i+J=p!fkWK*YyC7X`CHkcWwb zV_)qr3fb+fk0#VRe?AWxD?Xwhz`tgH@H`k7gtQNB(|xPctRuj@2BTsSR+n7HejYpz zp@7&LS&hHW{b9}P{Op`;?rRvIU8;Rc@cz@62AAlzgZFiDQ#FaOvRp%M{n zHY(urw@e@Wa~24kAfgGWjvvgb|Lg7YKhTJ*Yn|GO^k zr4Is+NgdcJr?b7UCzc&&9%xg}FyTgCI$)`%-Ynfr8}Uf463n#Gn##?Vq0lFEoNPB=;3e`RrD}PzO%6pC3iS}FjS2|S zCcxp4vGMtSBfH_xmNm(d8_CIiD2L^Bf9h76*_xMu%71dO= zK6e<+X^V8HqfhN!b#wH-tS zGZ`k`NJ@un75DutjsRtY^m|@oYv7oF^Pia<|4DT!(})s!Uk%F}YS@jSy4lp%FkS>{ z=d_;nW84}SVP#46t<*_%;KE~tU>5JoV%Fi5ePCdl?W9j;D1!)|zPa}w{qX-~d|YzFlL3@Q8rol?g;;FH!DZtx8>2F@2GiM9vzc}ZoD!S6MV=ufSoq`ckX4KQ0MyXuIHBd?R6v;K%K-cW#v^okGM}U5jfj zP$AXAetF<=v2Ce5y@u&7dnmWxuV2Xuo}?z+!Ww!v=b_MGbMKlPTlKM;^momvJ~Q=0 zfW2$$b_)8vG>6M)`Qe~R4MIT~a)`{Ik>+1UTYT8|N^f*Dy0=ic|Hy-;8knzQaQe6T zPV4_Hv?AZ`cw>1aL-o1oNvf7Iu4S%vWC9V+=(=B2f{Rndf>Xi>&1}Qvcb5W!`}2u- z9}Q4usjONcZvAYyt<|Jws&%!4?VZ~MO2;3mpSp;OpqKp*p42@fq+};>!PqkoYz|c> z9O?oK`V0wDpK!oZ%J+2*QeX5{WoOA27{e)^hp!O<4WeMv|7&d?duh+!>xHCGBc71! z6J(UOyc{py1T7|r7HO{u4CvYqUt{&+sp;?sI5B1-2W-1r`Jp;v_jxVV|z8cN< zE3Ga-MA=W$;C$)8ojYo`F)u^-5^b8I7^*oxe)pP~=YCWffO@EnIJ9dqOey}hYZzyN zY@GQ{2kW>5tv&p2xI@AvZE)dLx7%BzWQ#mmT6A~XhTdXEp+|-r@Aea~VF}z@mbcq~ zg(MG&IP526c-xx1T*n5GP%OEbkczpA?VkPs$l^!euaV5t1J4Av{^NnLng1RKkp*tJ za?Q~`?h;*G-FnF#XouF~wLm+gqk)aZetYxek(fdkoRF(p+6S$?=kY6ns@0zrhn_%? 
z>n}C5&h82eHs(1!gpDSkVWb_Zc%eMf-_K1-N8ebzv~{0+vugp4HU=|~ob-Zc^5Jru zwXkAa)%*pLol3P6R@Kon#j$Q&1QEuuan--~AVd~BJ6}5b%cuX1*ZBj5O=e^mJv86Q2+A^|)QAna4DK($Y zcc3TC$8%fM-ROQP=_TtFt2j!Bcc|_OB5!~~L&5$#*W8hMTNqOV;TS^sbnuRq?3Ddb zC;i|AVSCqilaJRiy=er%kULKbM#(x&e77)ieU#j~vVcDvS;Qv-$hw1E0dGe0@8C%ma)+D)?-ovY6*(b>OS47E!s_z@s2^$HbU&{(c5isLB zI1qk7OG4!g(^8CeZ%PsFY8edQ;k{oGy!;^iP)$wFvHk{!lmAHsKeAf ze)2Dm2EE=A&$jQYkZo@8r@vtI!C;mx9=CXVxS=~dX_Y>cK~s~Wl!%%*ve1-xlkcz; z@1Cd9etF#nJn1a=zXPG*P7_)_faP_+sSlr`kgM^XpYGx4lv^PN788K#f7v<^K0uc2 zLMK7k?V5kxM$&-k4^rK-Du=p|?W(b(^hzFv)(;>?+=KDe<95=_8uw3DHB;BVi5$s* zDBq8EP=h3R75SD;2iCaPsLpNPZg_Y^(;=_=R?_B$m@CV{{8SA*5rx58Ea$3|;WvTud_LCJpd}kh27H}K5+e|v)9-z5n z2>{{JJ4>0JeqoEbnoGwfrUK;gN}bn6P9FK|`8?M{4Q)Azc?XSsANxY>3~v4}itkK) z=+!!QD_ihOmxokw*{x+2cw8OqD@6za#ehu!qxA;F~~xmb{=nPb*N*C2a50x0|T#MDLxOz$It|g@~X7sAwyDS8_H-* zwM-BbPvlv?<##0;`$4*W#Bg?VucvIKDy8|}-kzHW2}zyIjT5hO=}$IXf14ZPKIcIZ zLi7GIg~jXZhI02JUr}h^!?Sz+=o?edYr)#@jJy#N{k+q{c2N>6XbzG7_Pfp2e4CV6 z7^1tJ!L;Tivft!w>~|KOpXfagTB*dO1(#8G_gke+aAKcSrYt{+)BSSp;@Q;LpJd0p zJVhTE6p$?N&is6kUbr6iqc>mny6%L3AEy4`uNR=mc}bMr$&UJ3Jx(iAj+7XnnxM}( zKC&*o7Hq#oW>Tk%mr*ttXR@b85*|vhWs4(Ob1SyY%{w(KnS;E+h4m{&Jh2 z#De;IZeAw-(9wMYF%t^)`RY4%2dNnzCH+qI5~gGQ>tbrIpExcGY{8!j*ncg4 zMed7l^X*mHN7X(j>Ql;3S!0SBQWu`bx$G!+eGpG7cM#|9yu10KuklFOwKp|qQ(}Ko zdp)Xc_>p8w&+pLrQPMy@+EuYlML|r-xO|J^Z$tQA6e3;JK87czu!CnXU&5~QqSJP` za;#{MRPS-Is{%ouQCFm8i%EYvSb{f=<7e|E=J4T*=eYH<#!Os<46~KOy=mo(Ctp3W zP<;F}aM!Hu@B7;GkSKo+2>eTy-p>bf3Pd zhExe|{P>wfqGR*BziwB3%l8gqHA~Z5W)-*`-*v3s%N6Tud!Cg+*0ReiJn}8IeSXJH zhxvr7@@$V=H*R4MN^}&8-1iCr`61XU>^Q8DVBLo?N5yGD1dL&EbPJu{m_)`J*(FoD z8{(KGY~fXQm775|>ANIVObb8b$UDg`ea!1Y&YCM%Q_}=QPsc9L$*Pdyt~m{|y%ueh z)7%dn;D399?~d_!yIY(Vv0)c$e_d9?oZgu}m#HDNO zbc!%uz`Y6M*JGmrd}~|S2WrGjje!k!yF1r)7$X9C1n(%A1%Y*(U_-o?5=ouxBIXYh z{&Sx0pZu>7*n3Wm85>Mdkgu_N5UL+J7ZaTsyHYD!mvt{Q{n_j_?2~|?xF|Cc<)Y)J zPCJgHbP~CY_erD6KUC6bOFvz-7rBuc^g%JeF~2pff`_IgYW?vOh03G8?V5B4=WvS@ z&Y|YSe{seBQ^litW#Y5x({r*dt2e9&CMbluTKG01)~kqb2Q$45Sk|u8-0f9Y8juq) zQ>P8p%Yt2~m~X%Np;RdI;N$jss@TCJK2yUZhzL%FWo!|)^;wzK9ZsLGcBwb(WoiyI z-^sW{!fq975MxueC&JtQk%W{K9+SDNs!^q}F?O^u_v2jC%8Zl8-uQ`-daFU1R)_mD zSwm||@$z=qLK5psf$-*t`O%qd;)93faK2&ub^M&w&BUd}7zkOV_FlTIViIih(3zNM z-l@>neG!)u6N(`Kx6IbSU}t56nZ*1zeVWrS_;>_&2HsVY6Z`)pEBuFogagwRIa`FGTPaVkNWuS z{6f%NhH(UiWIWR6MJ-->!{}=OhC=LRLbgDp{Ipi77v=rGn)`I=hHY!xM-d z_oDHMF^b8>T56Y!SfFqy)Kf_kW%A&|-#M>#4l;_H=tvz1g~GQ%?qy7^QY)J zhRmcHYP;LcFSBCY?4IkXl9G9qp{3BoJSFd!w1VV%X|83*tm?%;{qu2;UAH965#G-I7)=NrZU|Czq0)&W@)a&aAP# zg&mn7Q3)g2>|aaA;PWutx}lU*%#dK-ayEcer9ctys@W_9g4jnlK64%VB@#jsPCjYA z*W`-jL5>dP^r6lp^V)=*=;$zaP35DUq&!JA3;G#Omv1$=k1>*Z zSqd5HmJ2HZEEZ#PdhA7vl$p*aqp|v_@~+EiKa35-Tc?!3MLs_cDHZdx+JL{Bp!nO2 zJwXi2LWW#8Z>AgyBJ{hj(W2Ps%IDuUv&C^FBo~un>l=c;iTNJtD~Yv&V1}%yd-=9r zy9Ac^%MJ6RB9IbEjL|GMil$B(t!ZUz?~t51LeTj2>{JJU6VC$&gAy?0R9XBVhCKh@ z!5uGM4=co!yrQIG`{{i(hYz}9Ez5Dg@9WMht@DNSe-Q}jEULEKc1bP- zF=vuv>qXmhuY-SXjTvNXvc%{2qI&Ed~P@{015lZyq7-5o3YCph=|3V|yNL;yu8p0|tNQ^9Wchje=BldH%5gxyT~ z%3g_7?%b!n+S>pJQ+2{&G9~@>;s<%lx$g%G}R}W#*d8szJ{H$Q=NwajFbw)jHZ`^#-MO&!+CX!(jiB?cvNCq=rndXq_gORh9miD30u@sH zyHS0e>qlL$*3mD08^41Y7nGU&@*sV!!F{Z%Pms`N4tw*vFN~5kljdoQi z+lt%StKyAYSet+LeAL?yVzEGa_u#u+;9ZU+63 zXHg0}ED}mMb3x0jdC^=XOa011+0yk|+zGrk4Kg zGby4!QbWGi*dY)jt`}E4BGX~zf8I*bI%18#!m+^dx@J)4r*FBZ9^XipZ+Z1edt}wR zUM){Ktp&ZRpUS$|sw|RfM50$L2870Oqss8|Lso^(b?;@KkwFolcF;{OV z%kWMr1$S@fPFvNf6>IPYv(#%;4d$Z$;)=sCKGKwWoN_g!Q&d{92nvS2A=NvODk4H6 z8X43Noib>$GV;AL`|HKa8Hat0TzmM=OM3hiK9We=Y=?IPk0}_n@FF?Z|MOF9hW5|s z?5*69BX$e^-M2Hvhd6d~XO} zCx7&=zzoCp2Xb@^{ZIEcubV5iNou)Xr5k{p%AQR4cpyJpwiGYzVh-fzh~5rn0tt;J 
z2wTYAq4{B*=SV5DA#AzWOndZ6l(Q?B)BXq`qX+7@k=l4|=(r%M60kebw6X}a#?s~K z(D|em{h_w7zR&8b~UJwV#{iG2Xt@)?2-xGRwEZ~2lG;vqZNs1?Ns=eRyV5K5I zD@_}}?0oI$1uM{vpL!dZF!OE1b;nl^r1c(_-q}JCISL_`eUC&vf?AFL&E2bNkjljR zy>MV$;Cojg%W)ju93*kOWh0R|y^`KzDUGZas@L()t`C+^VSU}{9wz+=@$HOC*cxy` z=yE`GumM(lt&wg325MzUqu4somya=H|1Vw2s!- z55akf@kUR3x=*lK6F#=KEOeCcRWt76Y1dOOo8^TNN?5JqoV~)pC(TRyi%H8uR#!pQ zM=o}HML)IR*T(sXPsWL!kCu)fbg@yl$c0ERss0yAv64Ky(~}T#v0YE4YCQ~mS*FY7 zizN*;xNGd$MK0m@Ut5?$J({}{7G_KW&a2ocnu!Ta9*W@UW;!kWewXWSS0lcWyQp-B zl1+5Bf8E>VBtw;Mk*DXQw8^gl#4#NGt0l~i^Tf!Kx4-H(3OMUqXE;83dJoY~e03Rg zzbr9aauH2m{$YdEtgsV=wC}&vNtAO!{#H0tpLSZWy=L%7AIp7PSKlrkhmrMhjgPGl zmy34ow%p+3yCcd=`~ET%f}eViAx7=cxU>kQd38oQf~!jO@XOlt|J0)&1FjGVTK*3@ zKpcCXkN(RlCkyvh31msSfVdFoYDyq-fq`pj?`(JaOo=sE2h5YG0E=&Ih1?d!ea^Y=YrNm@>$=amCLOcqxiSRnoA1x9K-Sc2!AEN?$X>Uc z+e7U-Ba95PM#tnJc%)*dy_g&km(*W`lwrPes=6xw;*p0SmskkT5ifT|#d>mA0G3X6 z@K;6?p3&CO1NAPWjM7O23kN%daUPqo-{Lde`P?{0UEj9n=cX`b4B@Q1Kkt2)7p@?y zodsrp(;00G!4F(r%Aw(jncJ%wgX&~8vwgC*4XSS>snJHfK=U{}(_Jl3mD*&88Z6Ri znI)D|I+bI4`Gi9DmNVPd02n<^k4Dr!EFWLRKg6eK9Pv)#z<;W6ucF6=9Ix2ZzpS#` z;L-qWPb{u<$I=I$;;L|?xwfYLywDn$DVpZi_hqjH?Hz}dV)H&-SEYeGKfU=T7km-4 z0=e^DND16#%bT}H$)7kBss0JpznoN7_iTT*ep*eNi=vF?HDB;FUo09(LfZ?PtF13$ z^gccx55FRsmYTjiofkR6+8}1F*nAo;I7_Q{EhwHlUlNKe5_Hp#V;b+bP=V?SxqWv4 z@kR?P@WwxDy1vDK3LV%(-&Q*(6BllMjz6|0T{TraXn^zWd=T5>bl$Z~H&k(Jh$71} zVY4nA_qM2wZ&VMFPk4q^Mf0o_sl%FLY z&!%YNK=6N=^;3c1+7~wIpG_WmekL>U_afkBD^ zY17vYQ_`bXUwr6G>%#W&DIk9pT52h4FXHq;@4+++0`58{O>J`9!yOmB0d3MXWu1hp zmCZ-3ED%pFzTX!A%hhQ9OYZpeqGO>{e|zw=bVMBZn;JGB zZz7kh`p|FuU1K0jyP?!J20x`-3GeT_} z^sJ@#gxa1ySsMa5!gZx2qAi$r&R@pNZ~>FOQ(XE^NDp(D6!;NDg8l%J-uCx#d~!Zn zdmksZ!dBaAUU%bE<3AyDL-W5V9wQ=5FVwm0-4peKQo)`T85p`wN^i7=JB-B>8*|B@ z5mEBS!r;}u@*DZ@UJd$#4es$xE)d?q7zitQU@`f+>TG=ZUD2LA<}PrU)NIK=V`4Yx zpVG&o2w?MQ)eyk0C9-LjeBbvdJg4^9dxOc^$Vq{OiB?R6ZLMGUAKQ|s zPy@x6Z1O4i_g`$A1wA5Qt_h_Zl|Uju&_^h(li9Ya4y7wI-rF*RDaB^Qeb^kAq;#D^ zqC@68jH|~9RdF*`G<(y)f1)Ef1=R!KbrD;b%~n4Zmu&w0+RyE(5FCPYN(Yxm$yp?%UcU);Ql))E)Ap$vo96z&uke?eWQ zqN+^ml4L>WkxLjne~ef&?&3GbC0Bip7n5QsE85Ul7|$Ow?PD|B691{WSmDI%@5+yo zH#IgY{7DPx{sM>oDt{2_Hl$B?BrbgKC)`0o1C*hu@*XzGt@?7Sv}jG{LUaG|GyJ;2 z3uQ0QI=y)H&!%1|Is~*>$mUAIJFA>=wc$d>`G0yWqsKqnkyp6CvZ(273!g^|g91WC z+r|2UT(H^UVW|zUz~4H2I9BtF8OD^iN9K9-Jd3X#ak1hcE1L{0-%OCzOGrvc`=2P- z$$a&KID6P*Hj}e`m9sgu*a=KE@v8zh)5l#<@6+T z&0c`cVDlFCrkl_JKOas@|5+zUPdirzD3*&so9;9;HZ(r6ZCx&|Cc)2)A=K5Z?-x&8 zF!)KuDp#u;F1t5^g4E7=Wbv|W4hJ2e$CScNXOQH+AEl>Fc`p`;@H>wWb1PbopmxAH zzQ1VIY*PPJJB|!VHD++z-(=?2QfMAK^^nCRhn%GunpZ+JsG%{0?WG=~)f^ckjUS=2 zmVLC39=dIlUg6$b=P8Hw&RD$OpHH%k|4)?U?0%;XY^9u*uO$x`XpMI0G=C>(Uk5q_ z%5Lb@7)ihfn?Qva+achnr!-~F|Cj}s8lecwf11Kur7>;%x!Y{x#S0&5W-2PNKb>B% zkp!O=OqDSAX%BJ;AWp=J0(SE1HnCFzYEWf?1zpS-2jZ5tZv>azz4?1TaITcD zkgbH}imLVQ#HEt?-yQWYPxj_N^bc&6*HO_1+w+~}+s;7d*low}H;kA`YQ%xp?K84G zNsTYa^oL$2LMbR0f=Q(MXs>!6%>SfuyF}t4nE#^XRg8K_*0b9?5>am60HR!L^*nC8 zf)`q{uqhxK{<8--w!nKa@YIAC^vKJ9f^ny_?`pTt{+rC9yi3J0W)USifV=UMOv9)4JwN*E#Y-G(iJF2Es)z;UNG ze)TUxM@1lZ#2JI_yF4+Q*T5uib9CItMXe-nyMmq9&y$~JY@3ci2s&hEU_hZD3wj)e z+)qPONcgY4g%*%pTMy4;=MJk)Ur@qRCyzO#L#rfl;rUF@0lXrDt=4`7=ByLG*QbFEY@t1iMM`-IR7<(#-rGn)Arg5ryVgg?BqaZ9HbDDvTrH_Y1+P1ZtqD_~ z2Xvx8*<1t_7Um_A4UP-WjljF5DYWio*_V%h<9Xhsj@&S}^7pHC;42VK%@9q{=j15e zH^TDu_@i@AY|D`C1})&r&_hT1Vt=s9BZJw?vGiw-x?F4LG>Rx*c{l#nrsB`!GJN;chR+zt4S&&bH? 
z|H;}_vs?)>FbighYz{*K#^&F9Z{M>U)z`hDH2l$b?j{pga=K3bpUIN=c!SJd!2m~)|{ z9`Z5@eS5b=&g>Zf8uX!&UyltMiBy5^wOVa9f!v-xHA?_F&gQQ6)yuXsJc&CO1^LB< z+Nv(yxXbRDj$W{zTbm)Iu1dJqgm=II1i7>K%I-lr}BI zlpbsqG7t2~*3=sO6of8>>HQ_94!9>gS^g6~AJ1h0v8c`dY!t5=Jjnyy2R_s@&5pY{ zU1**IABu)plh5t6AdonL`1j&AWVS|;XPqDctQBt1jusMGJt90bl;DRe@_N8K|84w8 zNmT86SF`j+4d`CTx<9*U^VgH;3Lk#p zt)l)zv)rwD%aHvq*tkNOrQg2oncwGxuP~>P1;^T9fP$#};L}VOv&f*ahTa{(+VrU5 z+fC3Y&wY0nS`aHpqkQUU7-W(g%@gIeTg896=Jd~ZHmvJ|-uGYKkBU%*xsMNvln<$u zjmouaFJ<)x_69W2TBemLX(28>O9bzsnT0B$d+;c`QL|7p)#eE@vtv8+KA!ba$Z#zv z0H14b6j4J@8Lf4o_^Ys$&8q7C=kQVmrs9)1t^rO&e+Z)pi41y-^UYx=V#vl(rfF3b zSJwp^@W>@u|Jgt*%~k#%ofB+*Rg3k7Z+EQ}-SMm6+VEUl2}X~uQj~GR_<=G1&S%2V z_t6B4`r{Ec(i?Zo#Y$}w-S5P34-mhz-=AqNbtnZ1P7PNlD)!tc>PyJa&e>Wmz0%D` zm37Pk>#I3G`(`+7{7k*P?&aGJ%74%s{fOV)eRE8n&WX(&E%2nsF_Dlb)xf5^u{{i$ zNdW!F*Jc#8W(&wmPMq*l9wI>98~I<$>=7Er>Q~bo+6#&2f?w0rrj*}(`}xaf803gK z*fjW6zIT>)s2lz}OFPQ0_Up9SUyaKZ2lsDnYlX%cLUa&mtx8qX^*9O_`kV}HL~NnZ z_Cz!s=z+q1DnS#aqs!yqEG3a?WR=$bh}8eC*GJ9#A3F7s{lvwvF6j+<8~tK1=+|hj zJ}@Y}eMqx>FwQN#cLxJihG^V>r*M{153DnexxPw8Td+j%>!+IGIxg+7{Xza4*u2vJ zIUq%_sC+yF{dPX3C(pQpQ;3C=vkk7by&_X;TPm6~elHI?@xX1I(B^LLQs!UfISz(N z4$G&h?0s})!LMNg0sQ)ZPnY)p#6-+E_Z<6xT9_Aiih+&F-RHhnM0bF% zfL8SG&9|Gl@_8FaQM;9&r=|i5ALjgxxGg?iXtHu5|xN*jf z$cbK0B(M6J-&Z3$n479vo){xJnu}-Z_@l#^uc`|AtZjL-$#Q(_j^7(lce z8`HW-9|3(i30B9fx(0Y<+LOqtn1gML|2^UsH8Zs8V4#Bp2F_qM@4KgzY%^9jGDX*B z9|G+=*gL{8BG4I})sAYGDnb9Iq0(r4Y6kc>YM+T(SoUFt3bUfY4m65_Ew%onZ*ztK z{b3fTBT6gf7tIK5KCk-EQK}=)gEUKfpOt^rXi0!#wldMgdBU*3VR~RF8QaJ619?bD zOi3y*@8OLjQTSRamn>dT1o7jYhkn5mseLI8{7_|D+?#Lq`Aw*WE)Vd!J8pStrT*`N zgsT57F?K8Vz6?&>$^6lC9a4;vf`T5T7@L(OcuK=YJ&^ml=|Jv734kUW9Cl|;4rXMa ztlfXQT@=sti?eMP!#nk0oC8jVCjMglTEhQb94hAJ24o`UMK*d1+Bt<9HD`#OqK8D@2;ksjPeP-mWB{L>9Gj)eGYWvB94;aIQ0{u;5ae;o3EA8S3Duz>(_}BWN$hE$NiSLK{4IX)D zyr+$sFP^+s9J(PQ&>*ska&Z6NAK~%LGyUInM&+T4+AQJWr4`R)luU}#!Ha?kgCVVM zcg$#(c0vq>xHP}FhtasBs4vt8Dz9i2+0Jjw7zo0lK39i27~FaP@L@8#MHzAQ$wg39 z{s<4!wSNhZA!2Qo2*I>kJFWSWSh!Y4M+aQ%(SjndeWuH8MYT3-$&hU-REWYjD^z$bgn*&i)5*H$91R|190iIS^xsUQL5U zqhS(tMRcaLeFk*R=B5wi=1sO+Z{TXE{eRLu3+}mq+hNuv>--f|;^N|vY}z73{+J_S zLlgaHYz*O`4rrJd+n&X+_KM^??oeN1ZZ`j@`fV#5fO3U4^wH=_EqxTC3+G`XZg*23 zMyvvXO|5Y~G9xMmOf|8?^S^3qEUxr#*xqLV12vp>Njl7^NGr5tKgH(Koeza8ESTY2 zY?dho$51LykJXPAK4JVJco7v8+_gt8;W9?Uu130#q4!3--@)0AyKiLw%mmA%5D+asw8ldOlJ)n23Z zestiWBIE0JeEgI*b=rbW%>S;CTl=pr7 zJ1GlkFW)kZ-+qZ9c>mVsjkdi$^9h;hu&c8ik%pTZb z;;Yt24PyG8y&2Oa7^vv8!6A%2GX=NTs*!mwI!u|{=Ht}B>JrN&WRlY|U=IeRO83ujZ_i$fA{h-uHf<#E}AMu-p5iB%V}Pmxs-di@M7T1%`};SBG4$>zQj#13$6y z`+YtWh;f)^x%5jo=f!=9)~l9avEBC2#E}=bta6ciQg7(fl9fC_2NflRxYVjL^CLG^ z!uC({w?gsyE9)B+rAktHV3#H)s)9SWSrLV@bFT49Px9Lvyt<1UL5wcDg`(-QTgID^ z<9{$4(wgEGei`%4RD&{dcix%5jamNLJqN1SqGg0fj)7XFowER5p*6fjZ9Elm@QBo* z_(4Di5tjB?Na-3`TPZmOmpD9h`xm8#*iq;Zf9nEiE^Nj80XWs!in}_nn z<5^eoJ}83bBC7TK#N>FGa9Bqt)nVuS2VO3wFA8N9uwICba$-X^$#BS|ZBYQ4xQCn6 zQW+B=HI<@U5Oi0othcE4etUZKS=6w=@62s48Q*v$5AR^Z(Si09+JP1%=J@4ROQfB4 zj*^#^GbBSn3(Y-;0rwwwd#C?6O4J4H?TX%Qzq;#t=c}?7t&_j;57Ku*`&v1@{4DO? zZRc4PAqW=l{5cCL+{1i=a>L#0EhiY+%1+{LB>~Vvy&Y7QFs9=(u0wNIf$h7t?KC?jRA6O60}Ka?Rd{ z^zREZ<*$dk4Po^VPJ10=H}4P_;u>fOG}uyNjAG{!lk}8|kNntQUY9|CXQ@V{T@e95M-(Pq?Jtt76-{BiZH@Gs<@dkscIlf&cWE^+=ni9|vIv^E z;7h41R{o0F6Up!Z{0ByB;zx-2Hrxn=H6$0h^Q#!L^GkETrgeqqm?c^ID=H)c9764! 
z^m&cUCuR?~cib$ypEVy_emBKfp;@GtcoPFP`^w@p%Bd23oG_pA>gjR=R+W@Pvy@ES zv0$8gqSU;4y_o+6+%dck81A_LGFk=qEdAt8=Yl&1>hY(uv?R@?i{0W#r{B!YZDx`} zA7>YZdj6^K1k^fkmx;`715$_POzXRdmuLQnmE+HNuOZZiH)n0_C#zTbaY`I2KQ!AK zs2)6?7ep{)x4h(l+>G)m%~F%yNxq1Zoe2LjPiIP0#a-SmIJnX{Dz2ot!V002<`8$7 zlEnJb5^b%O=1AA1MOLSo(ZK1yoF3fUkQSRX4!QpMZTin;+g7*r{TQvXwA^zuA&Im+thV4J|54fBUCropW{>+_Jd;cd~- zkU@5f?}m4DR2*V!TS<-f$JQqZXKgelz}M^jv?w~%)e0WNuw<0LvzTMQ?_+SDVIb^d z*HvyUvm=|ukxBV)rKY}QOourb9+d5UL75EnS^}JIiRJ+(ku?fKd@w5srZUx{#X#Ku ze0KQF5&^x>qf8Sv6V9fQ3w_U%=g%G$K{-V|&BQ>lzlFf>;WCE2RcK;+m=j?TC|qhP z$v!Pe*lPIQ6;ZX7$QN3+D!B`9$IqC-%;KT(+q@G@LJYn&UWyvZ>i=NkZiZ`WYZNg} zab@u+svBz(UdZcWT~aHVX2^58+^ZKal?}h5_G_w!Zc*r|UQI=C7t@k`H&`2{cF~$? zW5bNFv3lmI2+Eo@n#eX_X?_RCMpDOmI|>_k-3uOf+6VLge$yl?YSL8;$f2ggwbip) zpRo~bt1#Nbys@rRXIX2pBdI;K@j|ZOP`;LZxG*Dhg)cY1gFGLsZmT({8(GEn#76e- zTY87>5xBTiSBxBz2Bo47R<9?l$|6qF8DR>*p3sCDT)dTL60*xkAOV7SwPm0ffxUs& zE7z?dzBRe{BZygGRS3T-ryN@MF2cK|Nab-`=P zV4?y%Db@D(6vgb2-*#ri z@y^1HKh~Yk6?BJT6GihVA^T2V_fT$D#lOb}3^2pGa6s;`5Q20)rTN5qhZz=r7PkON zh?#9|@C+O8gO}r|4=_=+tqD1ZdBZgn6d&sZrb5JJnnvinLVwSjlV0g4ckr^ootVRz zbH+`Y64o?ZGDR};(4c**V~!S#M@T6>4C%_`bhwr~jxqx&_viMNWS~`|uS1g!-1vVp za=&NKgxdRN`^If3K?YuFU=I@WVNwS=tO*yyro$xEy5s9$)0Rg|5%#bij0`aIoeR~9 z5147KmV$}9tS7O5U+od!9z9e;-p4PuuK_^s>pOT@6dFY}f-DCI4jV8Y^t7xTE~kj} zf%wiv2*}WNjZ204P^aZ5D*fiMC31R~1@;ACxo6iRk>&cA<+Z}+$KNa(u1UvTe()#& z9(JEh=;w0ZtSgFY>sjyBS;ASfxZ2drRHfgx2z^c!Ifp~)a8p`RL5so3-*&ru0m^9i zXDehi4x{jKREzzOTm&%Mf9X0$? zUtO^M@os7{&|xgHeWulJbDl` zHZdk?X_?2OT5_Wk5^26^A1fR;2^y{OZD6+5^gphDiuRdZvW*hT3f~n?k;j@1ERXZi zUqOVVzhdzWJ=6#2V;}C;CuN66MWMR#D;=CZ`M1Lg!IugJ@KO!PnU7p><}jTX?8&zF zQZKjB*xz&)Aw}oe1exy7k)>p2Drp@>c`G18+!lr~g0<%TS9l0sq4=cuF{y)0UAw)h zuoJy}&WhWVrmqZ}EZF^l#l(HwGb;rP%FTub1oYBDZmy@zp;m7-#OyD3S9jdoUs*U{ zWUOPHHa_Cy&mjEt6KpfP$!zKRhR$EWq9$WDav0`xIl;=Kb|P%;F@zAzkH4D5Mtp^} zyw=?5ZGk1)Mxyv6G0{cN#O62pQlWJ#c(}EH%mTpo1jYjWMEp$;cKRq4HHBsqGT2gJ zxLD#nzz}HBSi9LsnEZal2~$CEPvkUA**z3Z!Ep79#)M}MQON#_6tu$?cRn>tlk5dE z_tSMKcC;3j%0F2hcw<_&|0OP=;~rG)q_QU{;@2?G1W}(EZ!}Gk_gA2lP;36;S%lkA z+rXVe4@)4s3ryU|RR*ROP2lVjY!6@ai&TSE`X#faY@3cpFsn(Aw?A6x$C|jm-c?>_ zaIhSvUiM5?v+p2$*4`2gUliBUbeyx~+x?=$XEE))yJzaivwu+Vc<_B0CW;+@1urU3 z(489JN}uU3pb9+wlcGD+A>=d+Vp_>ga$r{}D=|pi;7g{lV0yrWeu}{=5nI{f``w!I zgBrhWpR}04-IQlf7J^{V4WE;7;zOo@eeW|q+wJt(>3%wAU(q#tkY^Z#%$t_Uv10;~ zqh-l{zi1(I5nOb2y5sQSUFR^rCak`MmXw5=+}1V7OB3N=!MK%hB1TBodM(0n&!;4N zl8}*!Dd5fUnKesPg&{Mv!7Tl`_{4Tnseu$(YDCua=4&EHz7D;m=f4 z4FW>}mjGBeZ99V3YLM74bg;fbC6#U*imroR+%!o|QFPkPUYT<)~W8SA=aBbL; zg?Gz?nmL@>>-XnCPQr}5aA=@da|Ilh?k+i1k{C4|R<_ZhRmS1@=0~z2TEk#F0CB=x z_$c(yDaWN*&SqSu4`tAW`;KFtYCtxVnwB;v(A>qi6vCkSz>Y)Kbcb_NfI~?C$xm zRS&SWTU1iPYu~QevmR`s9dFr@mlq%1dUym&wiD zirfF5Rk<=gwp`tPjuyy7(gKfeSz!>X>Pn(b;=;heMA?<6ekc7Fdo5elx>Sf%*BaV4 zK55<4+scgfOpm+in>zV&QaVgf+}y&P?xCEw&3SL>wZfa{RCNXgj_w^lDsm72ddB^b zf?Mj3Bb5&*P>aexRfAw^=9G7pS9e+atdtN~@WjR+uBw4F9ya#(3(CokZhTvc)huoT z7X$>xXz2q^!sf=5y}vph({7>jgL($Wr#mc0>Kdh^7=cQJ&KDHv4;vg^{@OM4R=96x zs6Vt6Js<33hB2+WWY|Hp4KQd)=P+Zfu$Ax5hJGpc%FfA#w2X2p|9;3nAkN^UlTacF z)mT#z$E6!*yE@P?&Q_8En+l1_`AX2~=O+LblB98~;+_d)CQ4awEhkQ`-JX~+{drSR z%Tp(pgKjCuvly#inj8{lJ6LW$pmy9|mRTMap{R{1+YQ`Zp+8{h%2W>w%`c-poJQM@ z=iuOox3EpRGHXxXoNgx}#6yt*ld2G2?}(a3nH~2>V|*6m9!sYqaA<{6CsrrX5l>^P z^m}kQCTf@DCs9qP4G)k7RZw~EVOu?F_x|cz(($kmhbD}T!pfzS$r1~5kvtT zHvDDMvb_b<4hws>Z)X|2ritqM42D9q_P>WNLnnNEXyKsPZh48`e7D=odOggbbbB=> z5w;%siOJCo1n^ZQo*%Dinc)LylX4mwb022tFI}pKPs@&jus@EcwJUa54z+goPZh#p zfZ3kXo95XT(VHga>Ei7>aJyyC&bK=g{4;~r>$_9PPfRnBfa z=)h8k2*7dEyG(1Pi$vE6RR{Nda%#^0I2mMV+*Jqj5a0b>4v@i%T=e76cNG`?CW$FI 
zG`~L>E!loG@)U8oxim+z+@?CSKzKjIjcC;^?~;-geLnl;T=kNtvV(A(I8x-rV3vK~ z^Vi-U3I|PvUIWVo<@;~$if&sB7I%kRMQwv zL&M&0em7s+TO_I?RFxl1^8FD*Ktx8LFfXTC@H`5sxr4$x`F4vkl(nY&Ydp*2lx0Et z5qsvFK2`XXL)- zUIJu)$D&zvqVX-K`_%r(RzqW{`iA|y#k@u4Oj%~yk}^$eS+eS}A*M31mImtDruq{b zVe{#lWE|H!-&vih7Y-JCuEbqpzoJBiP83_Z*#FVt=%%3X1 zMOPIr&zOX-Ty@nCNytpZJ+gcB+_bV6`uJ^ufOo|?YO!TDMKyLv23EXPtKw4H^r6VzykDF11HSq5K@QWUQd_|} zI+H@rK)k$$%(kk%gp!u|v4_M^07jE@&b6q*ElVFDyGZHuL>|nXo9f0z3IqyaCx`R zF+Tbvot0J86U<0D_U?7Yt>F0m3>YQgIc+7J^%$0) zqW>{BuUL3is-ND?0xy#B#EcMldN{<;&@ z_oAYv#(ibyLL6ysuBFk&yXuXPUN#pm^ajlAI;)KE*nFc*^AjxK?-Ni>0bgTJy*Tse z{POP9!W^dVb=}UL>K)P($j@)6T+t&5@1v?;NcI}~>1X7+%(i2in$en-h0=BulsK!W zX((f@)6VBMo#Jg0w_e}0w&h^}PTX>Xg>J-eZ#4*igr^;)` zu!XYZvShl_E88;Eho@CvUg0ytbCHhkxKgGHmOOcq(|IkgR`#)((X>^58^uLzqZu&W z*Z~A!EIP?AcL?5n1DCzqlkXpU_HlpK*=ggM+cP2`H=N8(92vGm`EGUucVtmH9F>j{ zzHscz5hOUa{FWKfr^2YFB_{}bXTEi{VZtZ4#&?BllFyq6+4l-D{Fs+&(wmu@_D>gK zHQ%}I0&a=vEb1g!{I;ZqQT{NDF?x0`1aFiuRsO}KQ)Froy1~k>k$C4^Dj9rZ!%ja6 zY9c1#!}31uLr009@m@)3C){=sCj-C5j=#9j>navEj_T39ql+2e$@-fLPeVxQh__=u z4VySpaz#*%q-4TIc7io^Q%;Oz;-2zNA^;{>mPG=^VqR#v7qb=*o5gVCvQK!FU|PQk zh&*Dfgm&IqnQ+;owSE0y^Uf04wWzE&?7jQ&$-(OIrAG{U1Mup7o&1`|#A!Ss3U2W6 zIRDNC!x>^F2MmtUw-rCoNeat@daVu(!XdKtOkN*iuf$VG(xnFnyr7F8PZ4ZBv5l#% zwJiLtts_p|uhrVkt-I8iC-$+Oy5OAU)cov2p-@tZy&pfwTFJ7_v3UdUY?D-t@zq{Z z8F7E0^8`cFqVyuQ;E@OKbuStj;FF6yC+A=@xc0>rN2TETh3=jnGwO&0SGwg{@DA++ zTyWFmBSs6ORBmL{5mu#CZ!lWQ3Y?bXYlN8RHYy>=ot^1RlmWs0#DP4xbt1Fyw2PLp$j zm?0T)3t9FPp;2k>sl1vs!Mz!55lmamTm1niH_R8Vtnbb(%(-V^sdccXMP*!|zB%qE zsq`~}eu&YN+1~w3p-PMcHhp>&a_40ep&MHlBC0aqleko7xy-BRf(b3+ULz(HejEN) zQkXW}kte4&wCjV{s#`i^8()lH%gb!HbjYbz5P@<5@)do3YEx&FCYvH@OhhIh;wIF1 zUsd4q{9#_zEt5^p_26~5_?cnKiv==FRbQb1E8WPrn8Dd+kuT}3;__Xrbyr>*AQ7zz zC;P>FQ{47;cDE+YYug^B52~Emc#WkGNl{g`?Roz5Z!Mi-KhSvz!Lpm5%^r@vE6NVP zC$jdG$;etrL+E3St=7DyF(i{89W6o8uiv(~WcoWj$Gabt5goshz4&F3cE;qLqtlA@ zcGo$FeD3@+BxkYUj$N|-^d&_`F~OS?`=s;gg?r36sE;aYR*ZB)YJ5AA&G7W>RE~UD zm8|D7dd~RJZtTv4*U!h)+zc9;S+KMazK3%n?%4gJT6=PfBp=%_D?7+QtXppkcROky zX$dD~eq%a&d|m#~Y}9V)ew4!3rbiSO*Ov6jaeZ+MNS?%>@bzRPWUn^EHfeoGbDf6Q zZjs$+5B^>2y4vRZH}{eXiit`}N{H+3tYHW->=S(VYh-SCUXpRmvvIxix#6S7)O*cF zo`J0vWd<7*atSHfZnp-BDbAI%UdpwG3UfOwB`^tpe_>|osDRq@Ylh>4U+JO>zD962 zthGnXT_Gz|vZKp@uk`vf?~I1!S+_krljkv}+1H#9Lj@T*ZG+2AV^zA*aN}J?`pwAN zgp35AeqHtEz)Q@#xp}#jCT?VpJ%&&{sy!Fe@tI6MQqwrtm)x)6wBLRffAIbN1Epz0 z{v!S&_{P1MggX>xZ*xe?{;P$B6)!Mm?2VUyVjb`7tcSIpU(ZruKfQcmnqY<)=iwL! 
zzE7)Ch*ALF>+N0}sNO=M;-JXV?9~1oMW(jcY2mh+iFZuV|sUDb_A+!VaI@JEb4hZl@`Wv)Oejb%s*CETcPEMx=r>QKcwXJg@nLt|s!`Lmo zgNO9YctIvcQ*Hsd+vYx{HKEcKnY+_sn*El3e}W?Qw3_dvov_3y1veIm{q%LI^ZOYI zDXy}H+)VeyGuXdclj_G{>|?y*r(2LbhaG6eIEqiB`h1}`Bfz@IVh1;NZe$E~@zmwb zjPDV0CQ*ErZr_w%81Pi;KCa2X@X8zv$D}2nt&{1NysgT`LerVxEhX7-j`+;U7YqHa zWzoW0GX)8Ra(D?ZQ(iNsZBRVi{GnERMNi*cx0ZwZszkIq64?y9z;$k2@0M$6>p1GS zLUA36INkH5 z!%x2gJ5{IoP4LnMSqHMPoWPhPP-3JM9@0Gu;{NbmJ@GZcB`7aUjtbemU7s3lp-dmO zFm9UE%@x(Jeb0IFbSM_Vr3zpqvCZ$^UCheKaRKmwO-5EP98$-%`1nq@Wb)+HJUAH} z8=G8V9r&4Z@c80F!>K8vlO}ld>aq`xT*-ZKQwAgdH8f!Rjo!_~HVW7jF#xoqL<({q;I}B}GvdWbditM70I^7y5?jDr?^& zRV;;*^z}(Ur%Yc~*CDAB)h<9WgDkeL5a0RKQx~@Ea0(myL8;@N?k=D;!1cJ&NwTS% z93_H=E{)P#2&+Jq;q@mfuP06@#7i&`snaw`y0@1@(ZY0gO}dd6it-U6xAeC^h_5-q zm#weWzjAl2N@|rc557@4Q2KOzRaHV$G3jnJb$das_C(lY0|U%YOjNP^VBKfAmT&4q z%&2s`ErM+rg1FqNrynGb%L0szP$vQGa9!^PEi;FFp#Ykiz9` z#yUlMbBy6IY}B&=V^JJpTrx>9!v{*5gYerF+Z zx)Uxsev3Jj{8z#CN4z{5e;KuCD1{5)yYz9L1PUb7G}Mekp^t0&vIomA{gDevAs15B zctRy{tzv1y^s3Z8N59&xHm1oP!5d`}7MaL zJ|}Rw5c?+eNW&-pC6l3|s9^rM<2NyH%CV*KRGTv3*{i%&tLXAXzv72v{K&eda#z5r zaapue1>}z=eK{gy-L8N#`qw+3-(@6xwK)jSM(ZE}A1A58sdt>x&W)iwBeDl80*Z1s z-Da&+JHyX9jeVyh?Ut=Wml(@3ueAN7 zF_I)@H}9B5zF%@<>8V03&5XN8C?+-;I)QsF43^QTuYlMzR+e_`_;G*X^2w&KhOLk6 z;VwzV^dnEdm46Qww|pwC;m&ha|EK&T5e&c)yMN=1tV5r)!NO-DB;M&TWA@u5rs(iI zjetduO4T*-uZfTmhr1Vp8(cghR6NbkfjNcrld7HoljM^cw(WSRBS{xg!N0) z!j#2ilx3)_>u^+ooOI{dHfK#xS(`3rP3qihWSJd+e|>xA#3p9)Y*B!#ww{1;49j)N z0U(1`8|xRWY;Bo4?n*zyY42$FCuL8taM1Z!^;w5$j$+JW8k^p{r>FOJr&pF-?UtaC z=cffNcD?E^pQ%UHK-XfnlsJlDM)d_P%W~oi7h767ot4hEc-cZ-kMuhg$5%_54K;(^ zV4b{aTn@7jzrept{yY(3T7u)9o?j({4ob$6=L6zFLj?UUyp$}6VpuV@lPue7Nj1rl zLco9g!L31org5@6_eio^o02|s(uZ}`O|~{aB0;C6F&hapCz1=;cyoV_a4y4`x;;-N zIT-8&Xh_#O1xJ-oi^)xhNH6w0;Yfd&h4*%y?VF_Bv)+u^RGDr7cvC|xt0$LYSM3=q z7lUjV6jR>F9nr;5uuHkykJ|{7;dT*FukDsO!D5-RHXP2;`;?%p0d%v+z zeOh}Vx-@>l_Ay08poIKSB+KBpuVSx4o=Xe?5ZS;Xr~UQKc!3ZZxJNVAs9hE@j)d9y z`Fb&*R*Hi!HK$@~Fw63BGme-ol~T{^lZzWJ<-JRb^J>?wQi@ULLTY9iCKn(K zfQ_fjhTje6#R9eDr2X#Z{?TI+(^v&7qdR!5X8Z70014pvH6JfK`#ZgndU7i^)%Sz; zk1H1D2?OpWz8b&^c5biN7LD5cGE(c+g<7HFjf~#vzR610iXC#o;>LILS3%t#F8@ZkJF!ymfsS zA`qH%P=jHAzB_-8_XS_9Esnv2!Ve0l>~x{2a1%LxK5Yd|8jW)l?BO7GN^Ral&{wE)~dEW z4#hn=aeD(>`d6;WHEP{$0|XTPwxZ!VX=V8otXf4l^7Jx_MfL>FSdqfkq_TQAo(E&S zZ|C?T%miA};{3M_R5$NU_>4H0$A}=}D6IO8>5(gV41oE$Q^vv_5K&WPMWNh#ltRZudL(rLZx61HJ+hO;Cs+ zlg@gmV3Ey%i4427II8t-m&f6fpyYj&w!i?orv2DZTJ<@IrknSd2OY!PToKF zxtF-Dt<`^tuUjBF9=9jfzrsoMi#Z7i>Di!3K;P^jh6Fbw(2%LPsapKU$eH{>V~D=% z?aK@dK!Cd)Aw9f<7Sl528(3O;f^X(&NN={`tYeQ@U$km@M}u8>8d0UwFk|xE*f~b#Hh(Zw9+SSzV#g zN*x*?u0+S#82W4?H{LGZF{fL>j*YBg!3!SJRWPjo$?V7d9Wd|SzB{5G?T<2cV`=_T z<*{sQ!=6w6fIZHi??a7j+_N_}=BIp)t$t-OB=p!P<}DLo_ob{{knTtE-_Bp73ThLQ-+XtH$}^q7}N-uRA0E>^SGI0OjAN6ENA(Z~sx zM@-o}qoJvU3pl8fr9c&`wp6bh5~xK**Q``{<7tybn=;6gL9i$F^zZ~BD3~#v)RBuR zp`m*3)3OS(wNn?)yg^odZU$_69YHVCU)f0QMn*8&uwR-Gm=(O zx;Z8s{RTxY$*$WP@`J*{M&D_B*DWod`JcJ&`6fD~$q?sZ;=^ICu2bJTt1BVugZTJ@ zo$J4)dY?7+$XzkY9Ubu*5o>Lt1?_x1UvVkn$dY;~RIz18TjSz-i-?y0h4M`sKSAs7 zw7=rG@a7!o*_5WiZDh~1I@T~`nCjd~6IFj} z9b-XqrP0jH%o)g)1mzc?yl`AX6f#q;=lx(M>Dqv|iwpfC2T1RV_r(PH=nT>Ufxa`q zB$v(*r62RN6zk#mTwWFRt=P|>4Beixu6@NVO9ox01V!El?S36BY`ylioW~u%En0`i zEz)r6r34I}F0XuEa=F!@)tB~Kp^|Kx+;YQ&>Z;Zc8HSbA;oz|Y=#ogx)u-v|8Qvg1 zsyd_q3@}jG4|+TJ^~+xt-GT`uwP`OB%e)LdW>L1TH#rK??;Ug?eMag3sCw(TD*A4H z7!wo`l$Mf4kOt|NPU(`61_9{?rKP(CK|oR^r4f*nPU!{_kOt}B8lUH!_q>1JAGh9n z&wOXCYhAGhy~!d3+d>}diHBy(G9+BjK(IGOPVfX&w4av&PJX<~+465yKDvwo$v@k<@& zLI}pcfIHvji5mV5xyz*KWA z4C1G|UTK&KcUSX!JATxh%DMEKG#f}q>;Z_&Gk& z1aE6sJI#v~PcBV*vwDqw1L5E;U%En&ns@f9~C1 
z8@eUq2y#hVsiefynBL6DNX+ejmWV)q##>4UWim0Lr6BeB$BVjZZQqA~{o9^d-#n=E zP!z5JTe6+|7lXgk=PH;ZuqY#-x`_(y^4kr+1>KQ+jEd03X7&xjI+lOAu7a(y4Zjua zy4&aLE`z(_jSr;v!Z)YO@E^Gx51P#FT@aiJ#(5f)kIK#_#zX}4ROTe}y3uo7SW*q2 z!^Nnpua{nzb4ZtPwFSj2r2Mh%ok#;!!mP}OE0wm>cYz3!)&hr$lAgK(9iPvPr*twf zn5rj&%7-O)*qDsJ)l{q-1g_taAbkg7Bega2o;?l0FQTa4Yspi_5^REzm_%GJv*WZh z8U25ESvFt!de-jK+(rcGL-gqjF+7CcAMWw563#7Kt8fHRHz$oC|v>hFCE*g((jVF~G7S z2K@;9Co7)a1kNR;<-RaDt2z6=3xm5!$3!0pkR^<+l{QeINMLo~j~J;(gdNkOlrV7d zEu6+`8tN^uzZJ1B*qIBDZ(G3-v7J)r8o{sVqA1FTjKOiScMCN%RNC{b-JC=d11Zkc z&x@3$oY#gviShS(-0_?t+sX#ICD7@)rIjUh;7y=_n=gT_N&8$+o*>Eekwg|0@*+@m zD#M{{gHS7xH#s%AnN_$WBcy_GoN=7X4Wd=l1@xOgbEXnU$QuX?z1l zM^Pmutosp>kI0+Ivp2iYo8<9s$3Hfn2XmKn3)9%>@crh(iSY>$AP9)3e3W7eUxLEp zU-NGBou$D3hyTG?w3j>jwi_*|l9kWJpCbbmUip=0-hFQcn&4i+Q;6sGj9m6+Xwz9} z)pgnqf}H#oJ@W^?(ykV#))BRUW;R)!&qID@g- zNj}%07`$RXUHQ^{^APCn<5l~r6gS+;moryEn*TP~tXy18$U^D2qw@)g>qIRm9L&6*3f3o8;}9Khl;| zAJ9LmNh+DTd2-XA#TKB*_qqE-*nBT@E=f*pgDm#b6VJYO~*wX;DtT|ND7I6;?mK9$q{J7XSVt9qu4G50vpQ+~%xME`s$)1d1xo zYPJu)j3&Xfwv$K*aS$Vb5r zbM8xe>9v1m?aVlx9CM-arqy*A;v>nmnZ5$hnTpZcxL zUuySsd+Kg?dc=~09qZ@#K*nI}SuI-F4Xk?J2VkIR4vN8iXn9Wr1fe_on8LO$%)#1E zw+sK^$0f$MKn0i^?o>g*RhfX9AN<(f<@T^UFYE9J|A>`2_h;x`Y5*3>;1?SOQ2>XZ z`8CwhIsOqXYOjle(}4YFdg`BE5rL9+T?;5dMQwFn<^iCN!X^?mR=1m4q-Fv75d2WU z^L|Npodk%LkB=1M+=7x2Lk*~HiREVRZH2c_qE`d56aex|weM6cFytIzk}zbcV})sD z%8F56sswb$=E69b%dH?X!FU-K*h`82wZ(K2EJ$+la?*P*;7Pm+Qgc)l^*xx3v0A>2 z=`vHvMRDw`zr9%oU)SG$vv?Dx>XfN!%x%oqfCdO~E?HwR*0^AxdFCD&diouz>|GE@ ze8bQQqujZ1!5qt~KAA#j8rZw}=-*$p-Dbp@r^Ci8*8Sc{X!GiDI zo$=~#emSXnk+CC+(DBii9gO!}#@3U4)nSxZ;AxB|ECo)s8T!Kll($M zpizKgCsLRfU)l}lC>zzHQ`9Fg=$_cxvrHlN3=WxZA$hp{BLg>}IRID;L_H!E1}#6o zgqDwiXzdbeTF#g+*pfb^Wr1m`e`ext7T&$4D4Ot>Ny37Wzo@|m&?5IWIc&)xbzXwS zS6W3!rLfZD8SsQ^_KzP%v0&a{wIcUSL0>IJ?h%nE)A{3ZzZYZQ?V0+dUUP~ioTsro zJ>LB4WxN{0&0|1b-Dm74|?iiXq=w6_<7B5#i{{32voiMYfzxN|E7(h6OMEXF4pp z^+zOlm(O7Ac`vGL&_252#iD2La#NVi=;!B=b=xtm{A7%BX(YS_VQ}hS1fl{Vd-%qO zwwvM~c8Q$!eF&2PBY85AN>@{|kWs)IOe7+BNPw%h$lJ3SCPp3rgCTK>%c1s#!z3qnwNqBYsr1NHR)qu`9fyQQ3|&DtW%<&!}kYCm6J?nE=H zZz<$I+D0&+c{9rp&PX#=y6taA8{0{>i?16Nir{ROO z)m$<`K>?Ix=PwV8IDdrBM8(_3>{WjQ>ulHx+kjA#GnxH#UF!e2u;7WY%s_U19Jc%jgGg@}2R<$!ekS+uN5+$V`RtE{k`{S%KvQXZGEP*kTmFu!pk+Dn zSV-@wJmwcf?AR82UY#DDM_J!AQEFW_b2-g1fT)DRY+z#1su>|tvOpSJU;qMG@Ez-{ zCpi=39u1%fJU*WN@dF(=G?m`*_kR@?%D@R$ehv`dcUTLrM()!~TE#S~eMA@h_4^ls z)OJ0>zeq6deL|Xe6;Xw63Attq>9^I|n<<|7AKD%$UJ2 z=lRN2S|sR|lT$#o!-^=#6-deaFMlPXN$n5Z6`i}*b;QG2x%x7I)R|01PztZT=^T}( zk%iz7I4k@-s*enyD#D1Ql=laq%tK4^^sB_-tn${m%);S=r&U}sFxkkgs18_tKpRcp z0~Q5f^eaJpq!|q8(%cM$#y!OKXX$^+3Qj$YV_KU-W9^BmhQtkFyqJ9p?_00;g@DO@ zLw1JtYa`I5Pm>jw>vN>uffM&uDace!fC+9{yVZEaG<KC^tM+YPBJ= z;Z4~#>xt0;b#-iDhflU{`F+6IzO1XOK0+b}XYF(xz)lSAe0@9d&6%I|YtrKzedtl{ z7wDJnPfEt0WJvBm)&^xO6GQBpm?mj8G0ZZ;@Z?v4IpXl+MiAig!6^%4z$VyjUmR-P zGfu%qe2YzLHNZ#)5b~e|_!gDDdSK8q1fAy<-|7>7uU#CFt(xH5f#)zax_tr6=KW@b z@(Qes4S2-T0;2Ai+B#a-GLG8)cEQ!Tbm=CzrscJJd7<^+kUF3nB>YS{?j zCEi+=EEGwKJ~*{}$!`BmgH6-%)IBZNXf6 z7d+SyHjvFyjamLDzggN^Oa2BuF+V$^XxAOi5r_pW4N8sF=xAY2yc6`|Wf)rkt4H?Y&x9&@>vi&PLa+OM?mK~zPlaBk|L(5lr@R7o0#oddlwDgUA7>aw?%LkLJp3~*8*KOq4d!q| z@R?5tW|P@~oDX8F1RpTp21eE`)x<3#&Wmz0g}S66JL-$%f=r}${|My`%L*yeFRC)pul7eQpFWAwvdtDpBH*XYjSp}O`3JYxP#7|Rg)I% z++UFg=4b|Rjz;<%K{`N-Leew)u2yJu!aHarrSk+n7e}$UZp^v* zR2|?S#e(VCw=9ZLu933WN4*Z?&8@XtP2`~7B6u1mEewE^SlA@jK$441FCqkLA0T`7 zc}Lq-vt5y#$8CXaSocV;NJbI|)SbEkgL8XAXA&W7+GxK^upw1wtOy7ui_o3YeiKC3 zz^0L#d-=GOXcf2w?hJh<0)|Gsq=b|!-ArSBHEiO z@P)M4Z?m=n2RT*mpkX-P>h;1Lw?Y)=Gjj{eE6>W%K>igOe(bw4+tWY%^LitZHznXK zaBZ##nTQu7AXPqGUaBKysq`~nl3f8^?tv(QpC1N1rfbpDuxDD!f*>NP6Eu?g`u2GEp5=;~!qVIW2nSil34W}4 
zradB+QQBRV)TAwheN7a)=_jIC?hJHjrcLHd5#$W;%&&5$awpnRy$3xP4E4=lo7S7h zEZ1j8+wgAdISpj4V}joWG_HCR=P+| z!j~qIA}c7bEWK$+&5D^qfsEsT2)oi$fe!y(^-Nc_XxsBS0P8h}gMyKwuRUO$0B1Xk z-jh9=O__-R*Xz%~0CEMA0|B9H_=aLs?uLyJ@f9`XH*?%%ysex^!se&jw0AXN zH-)`Pcoqf3CWdl#aYl+HAzMM~+K9UmV3;S*SJBt$n>bk6<1kYxZ9D!SFMz%gx`zro zHy#$+=A%*!8Jp#sbAtg zMgOvLx6|=oL-YsQPXFJ+dxDAHfry-ggY*5Tn6_4~LOQHcgs0&B$HI?FL=u4#@S$hB z3hXUTf!eDB9sOv8eiW$fKfh0OJxB;(@`FgzLpNJC{{a8|ky`;UeZ6^$3&9R2$U=Pr z!`g3HG%$SitAmZ${%>;$W`1O9cj831`0%kX2unxpN#{=X+t@$>Z%}=axxF_-fcYpc zg+otIPv&5CeGwhfRMC~=AUt?=o__n;uW7>vRgB?$sT7MWc^B0=myQ{4@>o<%>Smws9-q)vuVyCA;LLpEn( z=vE+`#9Cfx2w~2+f%S%Vr&D#3cXWj}4P!!V>h&PUW~6R~%H7owuf^mMx1D?X3EUZ- zf@u!yLE!2G)}}JyNqLCUjbkA0hnd>J5e<&b2dtnyB5^i|Ipyq&u(K%=y16mqM|4g3ov_pk`T^gRbT$lF1GZK1+JV)8@w5*%J6 zoF>$8iGq*circ=j*qxbQvbI%m2VFyUvOR*7GORYg7l0wdA z(Dfh)CUP!&9cC_F12-PsFwJOtKB^9p4%oRcyEE$}sK3ZPIBQMfAc=WSom7Lt9eRe*WrOGU{I;OddHarMKzhhq5G2h3}= z^%d_*r@#j~x3!8HOF~;#qfR5Ab{&{1f8sp91)+Oap&sV9zHdz-mmT|d{h9aYP8(z0 zBj6D_{DI{A269e3ul#rLflw6{pJ)ZdX*D_v>^2r-9OtvO+h zR1%=n?7XTC()Zso*iZ zrn!HLEB+O@hE<6km@s6uyj0v5(+84$muwk(0z>b<+(be{$qjqv&vji>bUk>3={^># zYL`3g*YB<&xQc)&nYbiyCaQuStQKG#r9Rz;XdmJSRQ`+5x?5(t%ML*DHeH>)@rA;EENUSX=x3>}}~A zMzHMP0MHDG9i4Q>11&>VNDLG;lNqYq{i8!9X)B84BIKm5zhONf1`OW}QHY0&FK)*9 zFiAE~-SIl|ygVU+Ght}g{~`c`xWxouqLRK2B1x-yYbO_MLik7uvv72DxEY9jEuzA2 zQYc8CU?OG0@cAXQOx)iZ+WLb7qnVD^Q(I6mW4mI(54Y^V3D!OBcXF+Xw}Zpqs#Q%_ zIoWO3&VOxo$fHXs|D2Dm;^h^*w~aC%@s9pz@TAIuy~yQg&ND3J1rTtE8SO!I(BYMu zs1&)M4NiO?R(s)y;&ko!9oTKWpkR5!{;`*Zw`dSbu`~5%OyT4R7-m5MZ}S)cHaCsm zE++mca6G;2);Z($^Y{H{Qd@gaxwEV9)z{U{fyAA?_Y?dMV4!Lm8Bxn;l?o#MXn)@h zqK7bGOIO3MU>FX9(5}gaiUqvqfN*)J_;WETIN=!Qj2M0hrF5#h&H~=3BWh`K~ zkXO8N&#p^c1>5q&O__`ZS-!-;Gp?z`J`nI#^oHTyMU}JP;-3}>A_;vCp=#g;m4D?N z>~{`jTQlC)&F=4PB?m?E+gSV2mM;4Bj?M~^k{6^l%(wL(I<<&4M_nE`9uTxgYQWYI zmurrDVPWdgo6FHPKyHaA;z?Y6m4TW^Tv=h1fX?b? 
zd_wYuaYT9pN_C=!8CTw!TP7XR0+-a1Ac)mg=N&?~8l(T0C^2!nvnj~o)atIV6cTdg zK`+3^kP3B|Dj?&S+rjl>g9o0R#eILKRIm@c89d5CtVy6w82}n68YZU%6uNui0fiW{ z4IEjmV-UZdHVi5z>M7DKiBWz_d5ZA^DLn|A@b_Tn6+}L88tD81{?ECz*#3f4AaJGJ zpw%P11vEfc{aw9$+rJ=xHeDzbZB?U z`0s!bu?bp@h$iFpXQ61|HUbZZFX&as(KmRrbv0>$TYQ-uLoJ+U(@NLC*77Exf3-h3 z2=brr-Nj|GI&Ge7(QZ z+dA{CKE;1!*2SHu4uX}()`86l2uqWT4nNH4m}gnD;-(|@FQe+4hSnK&ICs{5EJJeB zA!99~TR@9Y>$C-&T49q%;BkZ6DPe8#*+J`x^;CBM!I|pG+mVAGDdeGji!cx#9Mvo* zd@ZZ+>pfKHoC{EEHuR9hASYVXp7^W0Ux0SZ3^t{WnF=B{Cev@zd@l1QO|o>SsyD&N zIw#(QUS)TFvq+~lV(==n$DnO6te7fv^zI~meH^(5-GD3!P3|Gf!zcrcO9&rr59Su< zU_iVHtrDR4w{141=kY} zntg$%P$iL?lD;JL23Tw=%Cy;=eLmq{z2jpUxZAA;L*MxqK_XdNTv9WU)-F}qFuj_ zdkr&QjX`4|f)X%QCL=^VbHWB+mO{4gZMSNhmKqCB%mo#{#_gvF_B|t#3)hx}X-KO# z{A)z{)gy4ir1fE||20h7-D}L~sNEWz%Srp;Pgt)j*C=)T`~aFN0Pky{?;u`TPrr5Z z^>6;Su;Ed~hud^Lk&USdbf~ZfNrnd1hiq^i4LTR7UxCC5lwU{3hdIbu_pWq<=S(|A zTWT42j3Dr>e0kL*%EHM??fNqr{sOoyuo@l{-zDL`?uNNb;PDY-Z2jrHFnR+i&9xex}YOG1{4SUd$3OT3sJIkt|13z>X zhAL5rqkw)*dHFR&oBFm_QKV6BqzZNAqV+tiVvIZIy0G+z6uHz)>K)j|u?yWC`U*P)HaDgkup87o#kxkvM*WgQT3mY_Q{V2}Q5~ScTdV>l zq&~HRW93%hvr?=U_0utkzZIRrEA`0f*=~bR$`E`2NF{J|awInagNTteNttDbHe8@*udOVG zYSHxo#oh6Q3r-&^eqvgD_Uvt)R6-rYv)_i}5U7Xif`;yX^l3VK*><`rXa5nUR&N@w zUpeC+Y?btzQJf2%fmfq??v27nAy1yU=>shgtd45Rv^LMvmW046Oy$j`(wFDEdLxDx zoQ+aNmdFSMqBw;asOI1sy22pKu;dQ&iD*~!BTaA<%jh2=mVp0oq2#X8ESfr6DiPAW zlBq?7Uw~Ba63ly1%?E@P62;=aH@;18yi>3&~YAI$mCN-H@mK8#9nK^Y_IYAEaR=a4TJQLs zF~YF=D~Lf877TFEqN)ILn&Z@{)?LzlbHq7f1jQ0aEWg7zFM2LZ-0wvJ+ZG|K3r2q6 z%>2$h1OYn84BZ6O;-9B9$Xf!6WE+n&OgxlRX0{=-QNP==on8R#gN1folkDSrK?OB1 z21MC+L+UQ}K@5^a;>H2gBBpALqih3l`)s-JFM*>hIbdvgd+Tj*Q?PU}pLl0&2Cc;< zR5uAH?4ii56YS68romakz{Ey5D!s; z;AdjD(dRw;^d%)p)Ku#iTl0f@=j8|aE?VG5RkrzIdN}yy3mmg|)350o5BBqoY^`=G zqN*ZbRf-x-5YRwWdO0r@NKDAyi-F-E1?C@V54d{%v8ZCfDdJ$H&NXb^N{0cqGzqp* zJW|At7$K5%{nbIE54Ne0GezgD*ETliE2sAs`efRV`h4P;zmklL`v9BS>0?Ze#+0j{ zeYQQ7P!+x%74P+sSKdz9vSmMY>!XsC)LOU$T$ZIqaNG`hrwe&7jbTiHRS>?0@Nem$ zFF&gcKP(+VR9zFND%gNr$a{nPXT(?PH2;U8W)D&+!MMyjmCic>x+Wp2&N=tz8R5^u zcXV3U`2X=lWp*K?$)`#iDUDm!g!uU_-}BImwp&<44!>m9@#_%WQ5h}LYw5tY_B#485qm>4LJUKTNZIk2sx^IlcM+zv1N}aA_NA8(PhAu!4Z{NZ89- zz?xfWT-OIi?!?I;nBRzqPRxMnF8<;1l1MnhX$Glpi72u3Gl(gQBk#&q^6sXt+C%slr!1e^J5>K=hGS3TovYu0{?)l?-6VP;| zYsbrfi)gxo3X`xDU*eO;eVOFY-RCw_zrKQxeQ$655uU)7a_iPz@~PA=ilY0?pnW0D z9{7je zOTccwh!E5MsB=-R%V?E;E0-oWI4BE}LMB?Ew;-+t<;updWTw?UF=ve&u8-JUfEjqt z=}xmHPxD6wFpV>cMT;X$AZP>si@?Eq5VAd7E`~!1LeD(1>f^Gr8B>P#2~I^G z&1E#~iP?Sqq4X0_nh&>{W@9yh4wo8--0?#P4L$V;f@8*M7zir78jzQ1S(vW^7!6Kk(#YE z8qx^6XEHNzH3}e*s&9}VmN8QyI2f6jyI&>a)dpM$ zw+=3+b>IRZEXDjG!8>;qYEXv}gb ztbRZ^cW}I=1zo6c!90(wB>-Eah<&aKvRw!6)$24J9;7`$11NkP&>|%=)9~c}9$UEc zhJ4SQvUy&Y80G~?`&GaWK6ZR%pysWm7@cH(!hr^}u+$c_8*00W05e$o)nxRFB2TYj z!x0Kbixf6?J34&VY`rwq9X)&r*3iFoLTwal&ReT9W=_QnGIj&{wk2Q?R7!{yWD%mH z$?c4OqFed3(hpsp`xy-TC;8JTYAZK9PM9(_I)8tWB%*N-94$-`y5B}NL1R@3CNRz(I zhGPR{!w>6Jh#NxpigqtlR}OMP+?vP^nobhxo+u|3gQr_-ySabEvwLB5wioN71jRnz zO!c|^yX>$$EzZElf|v(kHDIc?Ak^|RfMp&_m*B}?5}E%cTp#$C|Aion1#}_>I$MZ3 ztZTdfoimg^2M2n35IwK$DAy8SgHDJr;})wrk@vi)nsIQ5Sy&RJrf z(2xQO*uDfty`%o6ac_DpJ9f5MOH^keeXi|K5s(lG2)h2aM`_Wn3b($ni5Y`P=fm-! 
zzIQzB$NYw3coL)l#mTq61J${#OB1$YWNLY{L9hYQSQs>l{BfT#7(lCs_{#M^?9;dA zk6C@1<#_mb#H3!KJr$Q|LRb+tiu+)dZUIUL2ruE346z}l3YRF3kx7^4cKOtJ3g~1J zH?Px+|K-5nE^|@3?bPSvABtY5S*yP6F#8{TUUJ`ENs~Hm!)}-O8YFB6+#}sRJXA*O zqr9&?;!BHqrAc;#>UP2%Uu;DvmcVAgi-dyu$rPQ_!pc*@b5g-d=T|;cU74G({{R+~ zk#%|-?yEdhDNp)y z|DT9QAv8`}AnZY5D9lO^iaNkfXW{T6|EfBMmf^=)7$)to^NQo4qjb!{qn@-p9L7bCj!1T&|hPDka1Xq0cbVCxaa}ID!$d3)$mgJJoy?|V_WDdLg zF?e|uIk+@(Nw4Fy?p28HHu7%?L*5!TqR4qfN(Y%_ztCBDB7F=jg8yYjMR}&$a7dAe zR<#L_&mlhr>iC=Q!1i`t!WfN&6CmJ6ev^d_b}BW2v%E-O5e`6RyFP60a5zmaUp5J6 zRTC=wHvU#{w15M44!yeAa08~8h9Jn(%k$BC=ZZm${E9>yr~@6XBsiF&XYMb43i2ZB zGRR)Rg!jp-bVjliZ~9S$3YHrv3r#?A&Ow^#FBpSfF&~j4w*C#;ag{E}eutNjPYf26 zuG^Yf1yxbv@iRwJnWnAqV;sMKiCq!xt|Ps=Rq&}OcY^`;WR;{tl%Q&9MW{>C`xO4I z+W{n+W$qX!qH|e4Ljw_;m;S`2_Cu_Pf&AiiSa1>Mm)^!oO%5`34KjTeyUut!>!l*E zg$EcanlBAM%0zjLJNnv7n5va9yJb9`f&C^$2kh*u0qSqe`Glf|-2T~vOxcwM{IFr_ zI-HNCRj?OS{23LHDaPB}($95k?4PfZM3=~ig({7A4 zO3b^vdpfoS5HM*jGrlTds)KwpUWo1@mZ|j(^=F=!3(1M#v*>%#{!g+Sy<5qu}S^a;t2-A^6%<^=;>PHG1$fE^$PLWWt)Iil428i(Bz+I7k)rTVwxrlKC1WA z%t7ynVp|{kB)MV-18+xF)D_Sygu%rFvs*JPM}RT?(O5(ly(XtIqkt~VK*eb*g9$<& zCy(b%VXu-OxKNa}4Kn#teY@uMu*$(mn4;)Bwan6(v% z5+s*e7_VP8!Z5bER3j(`Z3`Gla6c~j-(g;;qk+9DhT3W2N~efoZGuRp2M??qN> zF^C*Mc*jTiJw8w>SOQ!bSKzJAdUyzCyX>jINe|aw@GXgReFp}uTJahdhCR!BL-r4fp@iXNS<@M0`4We^~$EasbjwT(#&EKta_buQeuf$Dezt6QoMM195H!x4yn#!Ib;xVL73O4jm`pqGJ$MPcM& z#RoxQs-?2*uJc+Tld^TeY6K3vxHBYl3(_^(Q5yt6sPB?!uzh}b=>*bKp^sReJg2s(AWj`o5uCsS8{&uRI2sUW0jFWPEhFuB2cNmimlx91t<)SGFX^;&v|~%6Spj4W^?%8Xt9TsxHABZr zmO*X0kb+i{Ry?(9sEeq9%h6R4W9g>qX|?)JvYk=2kX-EOc@Lw^slV)@gBZys7eSA9 z3iY^~Z^CK_4c2J@yy^trINh)2hgXC{5|s7f03pQKJ9x0`T6fx--{vUpcXW>lvA#YaaK?PD?w^1L6ro;!-^}Ui)8YAI~f0 zmtZ9`_odf(jLGu`NiX-K`2)$3jlA9z0jtU`E0>cci(X7&^371Z7v3CaO2;EhbsjxBFY=0d7vts5ge8(_!vCAPy5gN(y9qNZ zBZjK|Ox_e9jv??Mx?0oY3u)_Qr>c$O`dz(gXUbH0v=!@v`varkJ|Bhprd5TV1Ch;0Y@h8A*_J?zMPhd4?Cp2;^g3UXayQ^wFKumoE+tuB=I%=qY&mOq6k8s} z`C?5(VRw>KwO&SVR?sW|r9PppyRM`HsgSgw-n)#$mDTl+1J|av>)*g_QLUsczo*?g za9bu=@%_d5Ni4Jx!VV0nQCSJNfv{I2vwWCC?~Zlqx7M~-WGOX$PD=v)wX#PYP`Zvx z#w6azjYH{i9eZ&(b8c*I@N?b_q@%UL0NJdgg~Ogn96{B(JS=XyD0qKHPjAdSdZ7<> z8^+VCysbAQeos^wB`)k)TXoPMDY`L#j!>O0yg#iv)HL%3LM%@|Tk1s@hZk$xxx2g4 zn{v-IdSvX`+Wm;M3mp`>rF|J;P9e0_h#|o_{azV6)9Q9By4D*K{Et^qP~-z8PDypM_R`hV9>(L0^1P zpszX<-6Cl|&@67bSs37%#sO&0O}6K)g@t7%`^UbhjL+MoCem-E>-rqaFR>Y7*=hFd z=Ui;2$xj;&Exq*IY&lG9_3AS%R6f3~Gz5> z;qK9Q%S%abjqW5tv4NH3Av5mF!`prIR5hO-%>G`pn%2;M zSFin{z6$D;#bMF7BVZ{@n5^Lc>aKL~I)a#2bg?Z=rM6h)iXZn6sZrH2$10i)SX+(L zt|;~Hy~sFi z0(IVY+UaxKhT3?DovTx&^P$VH+fd^>I;x_lu21~1d}sX>1En9AB-~2hLA{K5;jH(1 zs0Mw+WR_*F`aKfa)VzbO2q;bQn+zY0Y-fl1Rxhlue$MAI06pKW^1fa)RklBWX;KHrucVwR+p_*BY!={Ho~5fu|v?nN%`7?ibY6} z!Xxtm`INR>e1X3D&{)mqVa-{oGPB#Z=-GFnSI@IozKWR@q%>o@jQL_f)P7tY4sy1ytMU0FrRGW9@B%Z3OFij#rvPvzND31tyDfivPhd64_1pCtvD=lI0`?cya#%?>E) ze?-L7Y1{a8EdgcyQD7iO4s~dPlA9LKJ07n<1d;iq{q>VMic|L16J*pW4GY8|qN#eM z>mf?SLXQ->;+~nf*$^~z3?uJ%?>%m1=yfp4v|p0iYCgr=#Fu|KQFYgt6y6Z~>obBc z+R20F4CNNOo@W&Poo2PNz=KQTPezx@fJ?^q-~ja>F+*|QWa zc=4P1(4RP*2MfPC>OP~O6v(A;zXdSV?>?P5?{TdsS>WT4owRnx?UbsN*t5F?WpT&h z{j`hi;U8UWXPn%}o1XwZw7)cf4VYtX7)QC^hdh?+Rl3P#w%iKqn&|N8wX9oD8Spo4 zdT&D&)H7Fj<4SHN#T{FPcq9G_*mq`8cYnsd@BO|gyUPEFOA(~Iy($!x;Wu{jUzO_M zwY@|*EWv+f8YY)(!~lrZe{z%0@sJtOE>I1F%lmDIfxB~yJijn$_PUpmg`qT1gxSCG ze>3X*gwqP{oUG@3&I9le&{5I(!{Cv0bgky=`*O}SGq?VqvmfY4p4)43yfI(<)y&^E zo19um6cFgIeAeL_0-K#bUMMuSo{XSupQ1fiq2s+fj#{eKKqh&0lPd!VV2mc+vvA( zg1_`JbjgP6i=cyW7a;;6h z2d)S^Tu3LUneiC!-h|WA99&NeDA}}rGr2vl4@w{M(;bf&Oe`%d2V8&K16IV@y7FF< zsKz@Lp8iQ&_{xr3l7<0YR*6+PMiBUZv%0w6Wj4IZ_W zZ&jn5HkHnCxc8))6CcMGxjGr1IHuqd%T=11cAui7Y$M2NnaA@!d>hXurhnhYtyON3 
z|HnOtkmUAk^c~c`w=`JcROxJmw$P*=c`5Uf2-wFRCj7}M%F!w}9=Jb|R2$uO7(XyQ z>9S_Fr&#=wYJ8>n7`^)xz=YO(s&fc_Y27$ZX$=t-yvkQN1L`3$av4q(2SpPm`%BW6 zT!t6UyZP>*r0fODEeifXKV*rN#=jqthRA}7UsgcWTe|SyqFYGX@-zA*vACg6y9L;7tJ3}-^WD^H=JEZK(S}oxs6hY;%zeUc;=t zPG7+M&`cXl0=Qhe57bTp8M=S}kXAymZopmiJ z?|Bm6Ux$b2>lwjblB39Dw_rXyg9-J%C2qBG3c_iG zlB=ROC4~d!wuTjY>A7c+UCiu<(R8C3LzI5q%hv=p&v;ik_L8V*xp3r3Y0mB9y}|^7 zD{?BdZxlh?a}Wwz4Zm3HKvOgvx|X`xcdS5DY}JGj@kO}l!tbxaS927ui{kwD&Mj>E z%6)hZ?R!O*Pv$&ezFSz3diakcCS~$ z*m(~>O+Fz3VJHozBuV3U{C)Jd|F988fn%RL_h9etwbv^f@gakqsr!w=M#iS( zDg1KWMVGDo)`i^KXXYX! zlNhRT$>o9#m|1G^{4D7R6`p$JxXb%6E9`nqjq?3F?qx)CeQpWucr(VNvBP1E)=y)D zc2G-H3vxxpMBO@11Gg}`7k}IMhY)YgHkgs(&yo85Y2Dzb(1X*~&ENJ1<(3dMGg^YN z3-<2=_gWc%lc<(k5KS;4!&lhlb6TbR_b`uhyW6KP?@Vs7MoL@KJaNwIY^f?`?>5?a z3-m{a^h~4ML7HR{hmziEU zr{<1>=4`A_485)Q13qopxy)^B$N{JG9*BsXf>X8qwH8{-zca!JChua1SG6nmHI>T{ zasFwLN{O_El8z!@Ovaf2TSIJ%`eq-AjQsi(W&^bZfBk(|u`>U8EwWETh_TgzXTppkzI=hGE3RhV< zShB^IgH-?Mq_zf&MXjC3FC}$}tPO?Ne;@;nrB@#;0r>^i+J6;wYFgyw8d;W6v>O>t=@Q$&g4SrDyVw;< zG;X+Kss4&(JY{WlYY+uxJ2X4sF*dpm-0s~gb#9pb*vSmIIYUS4?BT1x4uoWzxw=+S zt+JH4z_Zo1@yWnj-<@JvbQG@fnxjseL>oef?NBZvvt;?pPBtVATKv2|!G3I2`LN(6 zGTdTB63iIB5r+OuytZ_W@qEbLarCUR^E-Hk*-4H*)cta)sa>K z_15m;utDXaU$Btc@pQ%79sV|_r!Bm4O|L%(Ju$DYC=EE64nfgrR^O5D|knjo&#^-f$di);b1VfFA7r19>1r+V; zDPyXzeF(o-|2@~|8FN*fy!=n|X-04_-qACafBo!8rlqB6;raaR{5(JJOR&vMl{7rh ztGy>Ds~sR>%j?h;HIk35N(djv4?ZTF1=foht#3KuRjZ2y}=RUG*2``p8ab(?w__Z z=Mi3^0DhIaQ4+8p zYX5j~m5KWMY=gHfye`Cd^`51N&Ol|amrGGGf*zs0%5$WHXoT+VJrQT|T;T$JZFSq~ zR%!Q*WNoWG;C^thu-=b4mB1>R1?yHzX4bKZK^dV`A7xeJxY5R-bkcNs^swKak)Cy4 zHICl-TXr5d?XRT>I8Bv zx+oLsGxkjvKfn+BstgIAmBL&gur>Nyxvk&95Z+aYG8YS-YxAdu!qq8Zo9}f)DDSKP zsKzxwl=c77_0@4*X3g8S0s_*~5>k@VEeHrw(kUPv0@4kNpoD}VT>=84gn*=9KtZ}e zDQW30>3HYHU7u%v&%XY(pI!Gp-}{`IIoDis&D;~%BH5-7H9xrp(qYq-E*g0&x|5tO zKb>E;akS(?!s|?C0;~+nc^z~8IT5BG8d8vj%{FJ;_}2X`?YqSBTR$b=g74n%2!y=F z=g$@In>{p^+n8y~f`$a=s}0C?W~WLGgN?gUNbc3g~Gs~cU^a94=75BglJ=zhbq#G9Z;S}5fQkN-zwS-XL1i! 
zWqp$OB9ZujtW;!7zjR~x4(wFsG-Vr;Xep=@M_IqbJeswIu--Wo@z+JzsZz{l3}0_k zmXJ8N5&Ky9g%vOqo)JWabd-CaW#DrL=4{00PgQVMPGFQWSew@{M?9bo_}sV(sq!%$(8ygHG+1P8%IRdRk2uuJO5nRvGTq@>Y}?#f9b0(#?6D#8S>=n zBOT#G^BpP+%J)D=X5(d<8!b!4b9i0A#3X$e-~cBvUQ;up9Yl_EL!0(0E*}!!sCRau zGGGA||CbI-EHb%#E4}yRKA?53eCH6{S;yY@!Oi;&*N8V?gH>KpP-51JJEXAdD?9D+`COS!e;jAo|>UQs8qG?gXBB zU;BN><908^p_=>YGUZj>@}RcR*e&kb`!qL%1sDdGtM5yqHC47?hXmXXXmd5Ie4({P zjWvlOrm5-MSJkP9Igq&&ul)S*p!S^E58AKvtD%?}m_PB~3|avzig0y_i)8T;9R45# zYi#>2U%;!ie_i6o3mJ-@xs{ZJ_X*8$E(mKg|CLcstg&Xu7ZT6&VYZaAw{D%X9grj3 z)?|&0jLVK2Y@q`#_1>T#mp$~yd5$#K1l7}qrP4eP4QLMqHb9BEcK0}m=%6(;g4}X6 z|1|5_Sr}=G;#udJ;yBjIltm*Tsxvc=GkEGyei==4)?4m{{9E7v^uD9gpbLNlLM7YO z)H4hW^tgs|i+>_RRLKHR&DIb4Qrxm2n;_mVm$Xo65ot9n_t~zR2or|lqfC1j{WWPW zkha~&+3@J@$!XGp!}SL0f3Vs`6k%?haUZVggzLXvAA+{+DRN3uy!ZBW8ak&J#-2Rg zgyyVV0`7My8Y%--H{*GnNXpAAEWTC0w4*uyL+C@b!+z!U>vUaEQ9#V@9ZFdy@2oEW zao)QCX!aCG@?Vk4I-h4Kufyo=0f-C{Plj&&)C>uEF*ma0;d^Kc+&pkVZx3>EV&pzN z>Z{txdS`uq)CdT>RHdYip-*06P}i6E0f|o*j35L;Gtz(*p-{c*2fMW&^^Qm0QEVJ) z)%|&N4jV0V=vTSp_`!LErBjy5GCv$xX_q3erWGJ0J~*z)&%0moNXE{#w$|UQVjdUQ zyK@*BOt!(v4%~s8T&FMuJNv2|W(p}?d;BP&N(RWOtQq^ok$979PKx|vdwYB9K6VW| ze1!AVs*GMEiT?WI$b*uPE>WAFr>+kkwvjoAf}GPYz&WA&IT5 zr6r8;C!zmkCEC{w#fw+r;jd$}X)>OI^yWndzz^uD277%~g8wiTsS{FoOzfboZ~USu zm&+?2&=i+EEB{cl^huv7@bCPB7ROr74o-N?h7iCr%KFQ&L_vU(wJsnhm+;n32)-eH zZ(w9m>z&zraa_2-lkM^*;z)wyk^9R9x^!8M0gYH4DyT}$^tA3%F{e+cQ)h&Q0oty; zpc$>4C;O+$bsz6vN)t_Yy9%U70&DVkbjuwj?QEl}&oTqeGdK<5+@8nz&d~6X+xf$Z zrMlL=iJwciXWEm~As@4^XPu?+M0xU!Mj?EA#(lVs<9S_@XOp9_-%$jxZeuJO{JGgO zX9>Z67ii_XiBG+W(r#tBt0U;j9y|pVwII>YLc&7V`JdljWnUBTeb?5yBBMy7PHUMD zpV|pkBTI*xUej02QjY?J7l+D-YW)^4Z|}16ly+Cxk}F@wzP+lD0Psa)Nn8rHtrEgt z*%);pSDFA;6y-#5d697zZC}CR!w?=(67RRd4m94zsAj9p%{7$}Gn4GW>KsSK>_M2u zyYKnkJ8x?7<#%KQx2$}a)nuiQfb0(@WSv+21Oh0;==2hzG?@4s+TuBaU0R`m*KzCe zLIUkeJBHm&r-)yMcpCc|qPKm;p|VMPa7!nLIcIjgPTzkYE)oIm4Ik*HN2NRk_-3Vwq#y@thu^gVy-AHwY_7 zRBmRz9ag_}>#+eTjLqqsWib}j7u8NKPP|EEydFI<$GTl6|EtCFi-;L(>!R`}AVPd4 zzii1UO-+4-#43Bj+`qTltQw=>WNS_BijUoJx%dKhoy2RfKbqLi>9u#{_i^VpHFip; z^SoQuSo6Mf>Cz>GUiv8_NBh2?F8#|A<>B84y9dX+tHr@gXYcJ&brGB$)VrmXSRPAF zP2Je}Io$17>}owXTyY{z7+QhlGr(H!KS^yB#5@%*JK*WR!Mtrp+R7Zg75EJTIW7Nl zqpz2`CdbF>tYVQJU0(W>DEbER^w*EYxaiz2LuuzRklFP@^*r!8mMb3L;CSaEyCr{+ zLOBtFYtN59TMIzy5Eib*3pMIRwF!2-nB%PYMxT0!;$mdYuI+D(Me=09unNwU$wGbE zaZdna?ri`wklRO_%=xSB-&%krq>}Uj(|QG?i+tIRG*!I!>AQ*(3DBJ^;zfBEFTOh#v^AGzSlM8X zA$ZlY@oWvX~|SZUZ`N@+<+rnTgcxA4`QsTKLrD$n$Y3EcG!qit$x^9w&o zU<*?w{d4?^&48Kqu6lX0g`|~FbY3~HQZzG1&(9oRx5(%py#u$XgColP3bY9yH|^$s81+Vb&x4et zyemDZ&)zobvNQh{*wSifb!_NcT`js{oa0w=T!#>*11n7Zer$tWHIRERi;oH|a8< zB=kIf)`vz;4&f=&ueKK(cSR_a0(fV>T3r%dXjNoBPCdfUH6ThaB!sHHZZ`;KlGfWT zDDF|jxU%row**oY_@&k?G;WZQ>iv)b3;QV`QHTzZ6f71)39ykQ|0aE7d30)N|A z`TDtSEsEAWhQsOaq;-^*Nm{q{$n$6#0*DGt*^opjg6hd_S07_r#{L>1Ry6<%Z;NHN zV7}*dexJnvP4Dr13|YKibHjJoy`AtL&NDGHOeWnPmj4PL-N@cMUwvw$XqO(z9odhd zpyJfO444kp;q;s~`SbSbC*Yi&%j@+>~O}%r&SD)qqwX0rU{ko#s3V4WD8|6?v^z|?+nE^n&&O4(5sBOfJ zbb)jZh#)QogL^T1nYxYd4prYrS(zF>q@ELA+t~Qo#-m*N$j0y?f>)6I^?Nk|hymt+ z`mn5g)B|!8s1gj0yWENtk1o#OCYU6;wA?ZG0d?Xw*6klkKy=dhmth$YI~X>`qO<lE+#>iHFBMVKhh&U0{; z;4}US8cjQ`y&5GD3b3j4Cy1~z<0;$9fC(24INyz z%R*3!@0xtkQg-t zADN3=l&`-k1tu}oz!@P-pyjc5|1{{*@QG91JeOd?H>w+kBUm6ua&Q&cPym+s85{EyVFrzITwV7Owhmx4nfCFQi6GM0j7v*0 zsqiKO!Gw}lBgrgB`0{Yo9fU;%qNE4L3}ibhGD5N#$TMw&II5~sevk-i|60Y%VxYd~ zer7qY@5qou3Q6Vi`EMO7-7yQYH$)rcNY`9K-+8Dp z#q@z@qjNE?==`XJEo>z`H>flSJ{@(#RjzjX0%X4t&9S~0E@q=_Nqemgw&cwBDN18X z8N>Rvp1V*ARAw0{8{$IgJh0F7*M?$DsaPa%aX`qe#%*6R5Fk0P5kxwdn`N`iNtFHyjE#AV7!?Z3y%jAs63doxFzE5C=z zx*c=M9{^wke230T#Hii~7N7*b*NiZyFn*roE}6fY?Iz|QyisdP2*GT`Zg4p4>?ud1 
z{vpHmA|ZBn@Hk7+?@ULok(L{VVFkocNnY9HM?i6*fJd=`c>OJ#Yq2@u;!^It2iXQriruO=fHrj^_|YsF4q$o5h2+Ydr@6oZQua|n|kNHuz>M!P2P7| zQQ2HIhwY;RDQ8efUJZjy+|>wv{YX*r3@5|uz5C)Yp@T)P4)V$B_DWVxM# zUPf+VKQ=a3@rSGy$Dcm8Bq{=wYd+L(=eTwt`Gv3A=TIS)DizgHe7rk^uonW=RePVm z4JtU2k!Wqv7xfQzph(AVKHe<`AXh={;+5CTF#*<1?mg076TwR=`#nb=thAlNA)Z7O z#F}`}oq!l;L}Uabz2I|&B)*I;W(N+n?f2(=>fbwra1}t-wn7HiGwu-O4Jf-pDn0L* zBI0u(S7g?{%$^Cpl(zKEEAK=@1V*IF`g{nY;ItEH_4utZ2nqoUn#-+U?eaAPG6u>2 z)CtvFQ>Z#c{#-laSvF9h_QK-`KK+^=J*5DE;rk=h04_;hnEcq1+risfAu9OdV@* z%LscNyh2zn=m1Nq(t8&~=5zeLJmc$yi&%sYS1hO0az_{7ZJc-Yk_7DTPAMzUhoz|Q zmP2vmTnq-3?eXO&xqEq)K}Ctw>UyKQ@f$L#8k#}BbgASxV+#W+5}BZc`PPw285bWL z5#>+d=H{*hCIE4S5FUvz_5pN9Ak`ocT8SBC^KIf`j85)$$t>{#jL`4p!ycULg;P<) zdH4~^&9ga=AD`-)Ib3KAKdIU2@%vuWc;NcF{WpkIkK`Y~*CP}ugvY%QS8Pk>Pbd}egJLPN zPW@1buMskJl5gJ>ji69qeP`SV9do)`?#$lVKQL`f#tO=Rk6&FsK@=ECg)iZ3ud^Be zOyecbxEt!O7=ImNC7Gx{aY8Z6^m6rDXp|2kUEm`D2ymc|@c+{;~J6@6|?M}FtzK2XQn)RrW-oBm) z2}{wNyqFICi0xapQ8Q}4==MN27IEm=r7`Wb2bGMw0|bSjYs23z@u@y)%_jghgcuMR z6Nc=3=oILtPy?D6M9h=qW)RXKuXS7EJvPq-R1*PPI9dY7S?2xJ!4u?Vkb0{C6K2-Y zpSQO;j(6xbuZYNP{dUgTe{dPA>*+{MY3?XxYjuo<->0{h2dVpp_)Fcb_x-;P7q+n) z)=~0q_&7LXu;wI`9N##JUF9A%Ci;K&^Mf(ak ziSye^AR|gPFUZKOcqTL?EV2J8@fGt<1(lC#!oexsyb( z2bD74b17>OH&sX(c<@Cnav!do3arQ|;MUKJvc2j*Z)fTSl3SWBGBd&-PptwSm?{;6d)f^-@UUBiabXsJuQtCCqy%Ai-XJ;kw^EOr-r`*O5b2QbiFC*P;3U(L|c78%u1xTN*Hx1rHqQXU*$k_ z&or!Zx4k`{MbQ+BijFBSZ^YzX@@@x6yZy|=ObFEerz0d;4=;PwnuE`67vE7Ri8BXgydJP{&j?EQw=x?XQO>68(#Sp6G74XME0jJmE0 z&GfDN18S&TO%qahAgZRljN_+k!|v|hnqG4DzHZ-m_amsvx^ae8dF>+f3n0XS3=YsJ z1t{F-O^uPBZY3Py?FB~h%$YOiwo8;tK*F0p3bm)tsUS_p11DP$*qD9m7%c` zRc07q%88>>4V|pD%h#=}tOhkI!YoCPOHc1@_@wmA zzUjIjgv>) zRH%N<-l@e__yH%H(Rt-Nnm8zaGcq!}Aoy44c3R4^J8?Z#B-+-LgPa7>mjfX+cAw1z zsd<(VSf`KU)Jn#<;oqcvbSeKZf!F5lo^X#wbSNuOCXP)HObfg`kCp6v{7oz85yk`! zG&x9FV`fGb5^5yrf$S~uxC(ei15&_Bdou^3iAJ`%)l{xIopR}*80>!4qS=7A2k zwK^z2JbGva5ux^?kMQ<8JrZaG&wzql_mmH7PwM*ZS1B=yGsiB;E^<4as|xo&0dP)q zhCYT!sD;eLKA|`}2PPIoJqqzlGNy(!;bt_^=;5brwJP&S0THziqflD65PAj_g)1it z)C7E-`9F?PR{P}Y38M|bV+kAxrx#Qy6=sB{der=#HMHD71Y+bOMFA%j1`#v|&@1(G zS>TNs4jwprATfh+?fOlIjwlX4RRhEjn2o%C%3AJIp*;0UatP9A8ZHdjDRpAFl3uf15nyI*dqrEKI?;yhMo$8T7rT@(&5@c{t{GMa~EAeMNzNS z%?_ll!d`|gbCRwk456`}YBhX+xT!y`H#{SMS9<3O9gMH<*xkLqpRHbChOiU-$Id{# zf8mKXW0?s#3%Q(IL-r>>dx!vg+S^fJyYGfN)<6^stQ1Z8VqY*>46E)0>hTAgHi0iD zXM}}?+^n6PofW@(*-$Njjy}<+wB%zZ=*mZSdna>n9(I5cyBvV|OUCq*M+!abI|AkT z`4~sl=g?GTHd+mk2ikFXnoR5q3qMRG$-336YY>xMeWx2;Dx7LUDCYC0_A^Dy9SDN0 zMEV1D(o>GMBX=d|9u$>u1lHcN`MjI1|8C|%xZ)7x$>2f%*IuB|k4CP58ktQd!YxXucJxMRD3 z&g)1y1>y; z|Fcy~+xHNN+|y)z-}=9eD}NOC&w;G?(`IEE)IGB*4^W;FEq-RQ79MB@5b1&SpItqw z_Svt}LGhicc=9J|dF;4?0A?qx@&wIokhQbi#n%52>?Ws7Qs^3Dex|_3m~7)}>IQT< z;Rike?FYc+8X>w_@5H1;Mk=Z&U<_H1lKPU?_JLZbAD@{Ueb8)ao>2fxlF-7%?S{N3 zut95=V-w;|lFBC)uePOC^{_sCcpbj60&2O$>*~NAxfzMcJB1iL`7+#=#3@)@Jk?j| zcvknqYBUf22B0|>?XtJBhc?HpfPIwbciNYJ?E!V5AP5QXy@Ldhhp-;kj_w!8?`x?a z?*AfU9%)Sw%&)!bhpuH5(cri@sHvl~5fp4)lymmt`Stei>XY4cQyALlu|zkSpk6 zB%j;fJo|BGZd?YC+C6d4Gf@0<3Z@mAr6+Y^gTI~@KHTzP$*lNpaee|_`081wuXXin zrpK4D$XfYLVUQkb;t3$qh!*e4q1U>PknmQfIUbV$;HHUx8Jr4{F9N$QnSuy}#lD~I zqV5HkWOS8&6tk~)h>GvAnXYM7DBge$E&ioR;~9n{dABAh6Kjv>TO|uD{kBsB_upM2 zD1@9)(90TxzUzZl3B@8phGmof%t#Ynx9@xPJZh+v>rtsD5nH{c`~w>8v0nV7t^TQw zp3jMOYB)i5l>w4&!(E(d4jD#QlVsQosy7a@g}`UOt?lhmgrqgP`nlFaNJN6Zz60g_ z303U)ru4q;JMSiz>5ptRABmH=;x)<`KzgBYm^2F+FUsdfvC1#>^ZVP&Gi0C4N>@4I z*m2dwT&HKehsj$vgW z^mYg}D>oYhZC0Wt)IH$+kc#2pzz3VNa05&)CS+wptc1M&UOL~anClO7Y~Osi_sfTO zdvo;-#D3QTA&@JmXhxt{dXzg}oTA;)Yu zLwZew=0>TX*25*dcRBfyy3hP519YHJwULy2te&sQdM^%|^t@cRQ)?$=;Wcfw#E}lN 
zaZghyK(ywGfyi8s5;!Z1p>|Ms@mm8GFAnPQ6-S)+&_e_=1@2HI=vG6)h!n33=otD| zs;K{^!`02btMGcu+jaYlvBd%;hl94A8BU9A3)D}hd{xV(s!8i+JQQoYLVJ+W1^3Hu zIw(akC^W!1=0%Q~sP&*Z3sieBW}_49&s)2Hx&Uu&w4Q%b70k=O!#<;>FzEUc;vaek z3l6kS>|VGTh6BQ24cqE36P$%S56y66pj7`-!uqJI^+2G>1cJOJ=s6NI*mZQYEdaBm zrQm3{%WF;hhHl5Y@9i+oa3T$%9=k%;cudAlrukWz3p|dl)`(_vt|xQD^bMocQPrmOQ0J{oWBsbA1ncdKc-ZY7C&)0kL@( zUFmz5K!wyjUJPTRm^RF?KK{wrnRV!GZK&SQ5}qV|ixbb{$bz6OWGB z+G2rYz1ItB2=Lwtg-`n*H;v)7u7K6RrvU_PdlhUyjijZJlJRMN)O@rT)t5*2ws z0m_0Cyv1`^$=9)7mg^D=ZWvIb3~m_mX*Gfi-ej=T5O~ z9PfnsiHJ=&nv6bn=L3SOaQ+ove?DhE=DR8oR>*~8?PK15?@NbAnTPrHqwL5m`@!4_ z4M_lIQ4Ra9yPZ(@KXrzLEnnYS3$eMvqxVNxza$VL7f;bcr zGvYTufxxv6zEDMnG_1}2f_a`}%ubqm>LYU{An0-J+X4oWtJCQ9*c$O1F{1nwP`7)5 zWQdTqYOkb#Yqh2DrXV8I?;Y>m`cR-UMud^&3l5B2e5$#`PS((u)M&Bb<8!QNWk*U% zd}GiDByVQ6%mpi3Bi)t0MZ-XjeV^M#fAgOlDNxcDawMbU8z(tZ+yLLBliZ*tP7#0DKokV-T%{W6U{z=G@21S&QtS|710(>i3pHX`E7h3T@-VEnQw znO?Hq=28o<9y8#HkM?!V3QNUcpfsQkK*Wxc{*sQ%i~R54M3634D77g8Yk9xi(Z8<* zs27AKf;*z|YyTVBK3;EQA%xsR+Na_6AtBm%LkCX+LKWoHR7NyES&L0Z`;t}8U8W?- z`{inF{g8V^)cW4%YXTnA%!q7OuynZ-1ca^mg67zNXlp4tLXf^r-lmL2LaUkKgBGu7 zY}5ma$XABWh0}9CVr(rt>bsHVAhl%z zHI@6ffp1`hR;5zKNZ_472!!-4Wg5$!EONc=uU6P|g^q4eWKC z>J*j}+=|*@CT--_@2j>WN~-!>3y>ASCVC_O7FWGa9{j7+quQTeF|>l!p_%0qG-0@- zL+sNA*#k%m`lr&=z+!~oy5B<3K2SQqy2*l@wC-Ozb^rPA{iOHLd2yJ}*JO(qJklzN zbt*o=bGFu2<&^^8fhdHgm;AWrVHxtQx2|x&?40m^NMP#h(bUoIS=1_;Td!-eWZ~C5 z&P_&YO3=Dy0X?Mngd|dg(|h|Ke4khTs*@|AYejCMKz!e3O@6X@F;)4`bY7Kw8%^7M zYZL>gcA-A=ub@GebWH#YQ@acieIkoR#wEn5tcDvv)q^ZUUOIhKW{wLI3WQ6msjbZh z!V}!ah=TsqEY@$^O~NmvTXb!e@z(j}WHMq0Jtu>CS&3!?HF?$TMyuP~8mJ@~Yk0q@ zJ#yzxhWQyKR$c>d=y9gb=S^ApoYh3||Bhbxnh_9s`^X_0ROZO>zQDWp?BN4tk6agvjnbgZw&dLFFJeLBy}zt*30_?ef!2*cde*<6NBe?+ z&A3e)+WC-p`nyR39=8MbLsGd$2|sUy)6cB|l!J^3H{ysgK%j*0p97Q)sIF22<3L)S zIzheR0dKU|#%-54t2xOl9QkkWYPrM@59^|jIC&Vrt8&S?C#eEqR3La)YOJuI3xXDd z--Xt_gxyN`4TsawTw;#Q>q#Y#|K))MrR(RQg`Vtc1L`Lzo)P&-hrOu&Fi}QrZ7qR+ zjw}(Yj6If-l@6$G8qfON{Jnod5j_$O)G3S`u?_wAL8t~l``mAM3-(l?bq918G2yh+ zO z(ko`yDWV0!1H8dsF$R2MR&pPny}i12;uFD50zisgq@Ndh252DY+~BB>OwrdAtx<1y zIF3i?U#BC~pol{rjG}Kd#MKykeXEYo6AB;Gzk&4c7OlTdo_b~pG+n~!bQ~+l$s<{8 za5v;%47lbXxr>}6BRot5Jr{>y=wCnkfrRT7P4EcsZiqkXeAy7*pYX6D&XhzK!_Dm&jg7jvwQL1x7|j5TJd8WCV0yNiL7g z*B|v~CjGTHV0&PJVUt{Rc9K#OO!ebnSM1da_QWS$XZpWE5fU8CTa^JKxK3!FSR*DZ zqw`APKA0njhLvbLVJJIO6TkaDCZRmVGu!_513kE+yf>0cZaFgB%f^4E%|k59ZU-n- z(|_6e7Ta%U;PDSFDNeTjjB@m&;O1v@(KGPHz(bn^9hnl82E8G&4&ENGwj& zSagI&#P-(=!W`CV)KAn)@}Y%h6C)7Xl`iBDHU70TDMhNNaDtc`+B~o>cs9(k`L|~a zJ}kaxtt#emaP=}<7lX~8=2(sp4_A;>JNq~paSVi}25@XF8{qW~d4kaU4J_!+f0uqZ*B@;l*zt~Bp2Cp>HxNOc^>q~Swf!EDST!HI(DDcu?Jc~he{ovF6b8jWn_1oU}VS*tWoK~OWK)7l)_?MZnJ{cBEPRQ7IU{e2sBp-+i3FuZxZ}` z1oHNOvzWz1FW8s@9h=|B%KLy7jy2LpZH9vxRF<$c%nYw4$&!t_Ap(y?&ioaDeKlsd2LOe zDE)2{Gz`E}w;JSHa~L05CQl3`dHd=Bn*12*_Ck3zo)5zDY@9&U2{0WMcR+`5woe6f z6hQhPpB7$h)xi31yClc#;Se!8w{IB>NLMxTS48B@s$+cF_Vt;8q^@P34xz4Fky};9 z6z}mpZ%%^>ul^0^Nh+W-8_$wvq%&*~FOU>ZGi&jeafGS#U0~5imVU^X2J!%EOA<4*eMH&JPgSi{)b+SF3sjGPRJ;Nt& z@?*A)(g+X^xbJbSsJJ}&yWJiTlBd88>Cht@wBu{y~T=%&%c5erpD&++~zEW9Cgu?yrZ(Zw>c1abXdJCG{#{TYS&|n%7 zS4jii;t|COL<4RY!&4cf=WLMfz?10$f4y$V5BKMhe5KB0jmU{U9m#w0s<2#n>ZUKC z`0P@vYa4u@BEuJfvQ78uyf@7Om%C1(o_!(7m;yzU1%8DyMu>oOc8~uJlc$^xvD^9d zFwOD5QVoJaW%PWzh#BXz{SWV9u6|-KnwPDeS;>r}CjoQC+peO87p7gP6X{*11Mtvw z&IsK1Bo`azEMB_Z-i5~Hx<*3cHl~~#S<9*awE5ZPgpcs$~>7IWw7{D)DLllYr-@t4)&Lb zrH(j>RGa2ezOaz6veTQ{2nSwihoGU_)tL%t{B9wzE-|9*|f(UCo zHC8XwzH9@?Ev-pXaOD)TOOMY@UND{^zolov$slA)fBi@)yQoHOBCrKNSw}oMTcxqL zC(Jk}UlQB*|DH#yNR!OlZS1RG;5_zfn!kido~H@L2akm@$Q!A_ucin0XZWSde zpW0s@T^)ipW;jk|*zA459i-(mL!-MlAN7^<&0NZV84!>hTG@_?_e(24O9?&`-N8hn 
z+s0>)#c_k@L7E7LbDm9&2Z#Ejny`R?dR&pdfPjF@jD*G+xdKcH2H1B!^>z~INpe+r zEdAzXmAb!K8xp8fZmNYm`sRk7|GgWl%K3_@_c4u)mnR1Q0DrB0*hjC1wiKO-h9%;_ z04WR|k2|h)bcW3bOm8af<+{e0N~dn(@+?W+6XWPWkw_)3(om-r_;qlDkneWTd9A&7 zQUekm0aX&nq9H?Pjqa(ATG*3*N!wE>yI6lI-=U0)+_DCyCBrD%dp!9MHJ4vj9lzPV z*3zKwN*NH)$QN}Nvo!pXUpwm^<>B{@wx2RJAE`as5{OtJed54<;EjjtGVZu-vbkz!KUKvClBqUfsk1pjy?S3`zKdC{ zFjuLcClhtoMK7Gj?;wu$Fs2YO6KS;H#vQpxO>LFw)ceKET_7zw@Ah;mXV3+Wfd&C~ zr|*P2CX*X(?BW5*_h_l9l_O?nSaEP(UVgR2+HHxdWpjrJ+s}*kaTm z)+{P_0hNgdQ|82afeLy)MKRFOo{;qQBPg;@R!b`=etO54tY(HqY`*2W(xc@H!!K$h z+3%%?3W=!v9mYwn3QTK=gh{2*;|B%=yrTW{qP^cHNCwV%Zss`Ej0$y0XKY}J>sdc< zI`zIR72~L2pk@I-b;>vx-?%NF-%4|Nt;{<6VUn)j_LBf}W|e`r@Z6yAnsVv>^_%Mo z_tluonD|`Cv)L%6GzV~9CF;eDtmsjc#>um2l3+&H1HG4Vac}}(YlahWI}|^p7RfU% zTIE&b>Na0W!qHcM4m+EETHVb7I}PJ~YOm(Xo5Ac}73FZXP2Day*3zlM0T~6Bj$2gJ zg?txzv0)>P=HIb2iu9(=IzNef-hRz;PGE=DYDG^qH=V`isfGLmw14reH++503h}pE+S?+FOZjLwrH^owu80$vQVR zT!rfck{^N171DNfs8Lg=y}VU$%}Uul?xDTNZrueib7|T;N%Rmv-acI5Br%BMa-vby zZ@xW6X?HZ_W^sl^(CTVCnu=Ac;{E@TNX0FXH#17(N>kcQ?Y?r0j0TG+Ce9uoQf~frYC#Bkep_9rFh5j z_+!4P!rZu?6wi|Ip+no}E}XS;P`ltIw_U(t1LGP(zc z+0IN@I!|!TxISN{xPlXyrkp)aNb;m}U`l$hgmTxq4<92ByTAN33h(p2SO5#&}vNL!z# zfUUosTY#sxV7}HnRSK^?IcXdmw!+>%e9)_Qrvn3a`(*@JG)}BAp4~zz&GzJVNsap^ zs9xVj$@gRADVMK#s~fZ}*@C{N!=1)cnBdBsGVwKa0#%2tJUrWBTVh^dXgfoMw4 zcveYm;icQ`i?lClTLoBh_lwVa6YoBHv~gtwJk;%)2Y<5@Cz5j$)2;yMzOoX}wiJwBh#bDETj{FXMxJ~x^l99SAUjf_>=&Z1W&f8$7loMT$1LCSMF_Vu&aCPVsXNZcU;(Z%%=Y(%E~D|?~UuI>00jgVUy_?2N(J0 z+Kc1#2}3CkV-65jX0>)3Ow-F!TPAsY!Q4CDE~4c-Fz>D&VrQUzeEXImI82n)wQvA! z;FYj^4fxk4UkR4NYN#T7*XDbpYpx_flkKn9zvhmHK3=5ebKkwRa$c(!ag*fQJ^adl z_?P`Rw`RJX8Hy^FgEl;2mL!)e>z0CY7c}cI@2`eUmE+WoPe12{RRb7t2q}FICZ&t; zKFSDUUqX!g^zV2ISfuF_iP9tWa)-S2A9+;S%W8mGp6f{Ssit{|)z0U(*uo2gbNu6l zO;%=^5@u6e4(=c?EMxI+_t|P|*tq?|Gz)}MPy4dE21?QE^`7Cy*44;fXRBGx9-P5Z zz4yXT{?p4J_F7$P8W#CvwXb&JONsJ+xs~awdg1nYl}_^RSc-~?tp5mQTqRqDXve!O z+lY;)Di6E`TqG%@QW~hH>D8&hU86L^i0Um^?okwpqU)sdmn+=ap-{3jrMJzp9ln9O zC}Etms5?BC5FHXrkih=4kzexZdjO-v4tOINxLF%Z3UmC4>?~rg!oU5R zYYV_A{O+esK3#y382>e2RG8z2JTdUWqYUBh;Kyt(sDUhf{8ap7y!3kFszm-4d% z12jKew%8B7L83cVqATUuW^-=3!#CLW@v2^DqutLW(ryPnRvv}_T9d@M&90j?;G3>k5KerLWU>8VBjajSj8s#tu@ZcSQVM$+;w6I5TBMXEYB9g89> z?X2{0A*7KA{uNAncplzaZjuektGGMrgZnb-#yAAi+^3PS0kf;)`;>RZ)Kyhq{g;_@ zHM(zlQr*#D8^^px(kjYB3f4_miqraj>2bTDv}+QVCtap#T+&WbMWMQDhy<6rMUvC1 zO!F<(@9xGFmMUCj%_7be1vjk5W0EldMvYg#?wUi~;@SF5ZVBo|cnyE& z`Ew38bPEG?cx`3G)ORKy{g(%shjZ_L4>KA6%$ITxUTYz<*hLC9l1bYmI4R)6)1LHvzktJF`;^raEyAq*ZG!y>Fp}6xo>hFCsG&T)x8YXz(Cd{{5vepvo!LLoPf{#6sd2)R^yz)+=mh+8EIlx99b zms!{B*DQ_6%$4BigzeDm7D}kDy}uNyCsN}f);ojPxo-}9{qLVEefSl@%)W}Hh&yts zkOJ=4+4m$$1HEQkpP4M?rQTM(G7Hcw2jp%_|FX6v1|41tKeNm5;1z=$YqrFCo)ms! 
zWgd9Ns57RFQ$DqEuh0t&tfzM!*&65T8tQ1cuM=c5WK_EpReaE^fQFXjrD8dGzNo3?QZW|OH`Xd>2m;OPAXURtUqbGXsE zJO`4(PVci89$^n4F?Fq@y=Bz!`tffN6OfaTCZ^t7p;CD2izusx>sj-M|BKq_L&zeh znb`%Kge=<6jC13@oEj*v_`=IVS^zn9eb+{i4H4hZRjseg`ClZw2p63o!xsA;&Lp@Z z7)Xpwas*g0aC~uLIPs|vPQ^mzzqJ7J0xQ=hzdOfX{t!f3F-Dodf>I!HtGP>*kn{*E zj$094WfP{1l8V?k!wEv&Mlf>1u#KdD4qVIYHo2x4Ss9|wL7Y}ZEiLw@5?{EZ2FqN_-(cOMF zmdosP{c}mN+!BIO?7!Dh5{Gq6n!L5%vh%0wB0C14;$JQYb&>&U!aR0CDst6(%qS^p z5|SA-?@Nu0H6Jm5y!2Sd1f-X-A{yA(9JQJp{X01QMjC+Q)q^f*$zW_X*7&a`O2WaX zr+64#4w@H~x1rHg9Yx#ze0AKZd-h7XYw@Hz2uzk68#<#rX7X<1gsrn6JB9{uf7yo6uCLlL)81>rKEeyqSWhM}u>C zJ00?2RkE69J&E}d8isr9;5ts{(Tdq<%08T{cx)hI^_NivJPQP?8cNjG>b!DzUU`h(I`q9P%tCvR zAv;EagMp-pqJOWH^8{9E%_%PYOttLahHO=^&qE$CCpjCMSdAa2lU%}A1uQm$?rT{& z-~2&;T?JX>Yu7Nw>9~#u^M8U3aYz=oOPjrKeJ;1+k1%E zB;<}GVvX;Y*oh4Ycpblp^60?TaFC~4<$EJ0e@uMe{{gY6%`ne~n7UyFAdnCfKwYs{ zQi9&rS%urZMo?GQF7-sO_3c~gkjZ-bZTtTH9{5;=nUkz9{jmc#T+qNx9%_4-0V9%{ zR@?`f+sM#U@cXyi&NWsU<1-VBzE6%$U*5!iy{)(MVaRB(KFO56qVi{?|MnAexh``K z@4NVPF{;#VeG3eH9`_5znDUoMBIflZ8#gxCS@|wAbyfcTg4Ach(jqyhTf_riUB`s& zqlx_YL?Xgo*C|gq-067v@Im>VgK1$?`l?DpJY>a>4r>_Vz;I3QE=k!e5FqEZMJoNn zLk#3;`-kMnoC3gvCF9NivP;EHxQ5?^>y{pHe4+!-a8h+v7xN1 z2V~aPRMLQwF@IB3?Bdt=O+tcM^T|hzQ1Gj%5s+Rg?-y<%4iRu8Xy+YNusZu?FvXQ@z7+~=&jcmPFTwd4{%>9oDRqI;mXfh?Z>zl&7)!_q zOzGPi&MOj*TugiU+0B>ojC~Nhb*;i=vgd6;$aDLO)M@tDQJL4p7RG2%L=wG|%~LnZ z?03;wKk8r6!}y(zXn4%e6e|_W&L&+1g;Y2zPJAk6viw zzNQHrkHiND;^%=)aRGVSVZ_hdfuc2`6#i$hDpY4}XYF<<+t$nf8$UG)zy`bd%SHMV z_zUL2Wx^aLwgf-`U@X3~bc%R~zt4m8k=ZeiaA; z7P-PGhJ>+iTz)ZZlK%)}9#?cVYgS{;&)I!1zuPemt>+kj=lVHM)5Lmr9!qPPuN@?B z!_mG&rv-Z1PIx1|4wuA?MpZAU)~PTd~01?vo9v`Y^c5*5+Znrdqee%G(L zCU_v)>CYj#O1=_&tj=OGSzzqzJ9m!bKm323}*#7`m)?snHWlMZ;E7VM9`~ z6rq&T*kewxod0F6Uj+S-~Kki-N@&9_rIec)!7GW zO3go7blkCgj+eIoMv>GDug$}!xY~6HjbHH$6GGsnP?CIq&pWMQk|_rcykD>aJwH+^ zhH+d4Sd|3IVKU@x)q;{*_>cv0mvJ~^b(I5O4d+7+L{Kt%3+Qp=O0oO$0Cq#7qrKtB z9azz0M(1yU_tNPeS7V_A2|Wg9bC`o{L_z&uNx<2h6RP;BU!P36iiCt8*ORTai?v1E z7LZ?<(oYwM4xI^+;vuTOc8&n`c&E)H{D-?_LFc>IhZi>U$!@#uD!WQ4K9vH>mPo=_ zs|wExsJkWGor@N~0n(ex;6@?6N`#iJkbnVzCH@yT8cs`3g9ICUWZwXy6TtC-CU=GQ z9%&;{8zIjg`zxOeISWJe?m5<}O{elmXyi70oxGf_n zb`|D+p8M2wu5<4Dz6A9CuNs0VV7bQ1wBUjo90Rya!lK=ee~`Xx;! z9|}nBx=#&mwlBcS4YE_-%g0ry3t8&rylRt38T~7kIHzLgpE8#;o0WScl_?S3F`^t6 zu8F@mf`8*8Bgd_$AknQR{r}M+JAY2pj$i(_@1tGM?1<-@pr{g;R@nnO^k;=7s6M9l zy(n1BQl^=jlao(&^nfI$qR0q1lm6->*BIOI)rS|0*M7&fP86>f!{uiypL{**+l9dS z8%$vppWn0G)cQZvV3ymOP=5e#e&_e(nWJ{}a< zWC^1|v#`29yVB-oiuYeH^;4+-vzu094WYiR;Dl^k!@reS$aT`g?C zUqF1o(?;sP{igo3PyhLtuAU2;m6!IEeB&Qf15-3bBQx}c_&AT4dCWKc+e8E!Ue!jT#Od3`Lg+vn- zte$>_mB$f;|Di&$D;v$Upqk!Y0CsRf_PWU5O(>r=Vm<;?tL}VDuJ3EB?K|XWps++& zfy?1=&tB#Mq`$S<_Ge)SupW8P?79hxT_!>cC-;Xa(9(D)93meA1??0F3btkyOn=fu z`RBcGRm3*UT22u8#VNyfyZ$3Z_2X=ANS0jxCuYzxVKfew51rQLy54BO#{%oWHjGxk zO-)S@Agu_|;#uDKuNTk>>vR(6hHA}OTJRbiT#y|~cT6lqBmc773#DZ(`Oj;4jmuVO zI|YX{-Tz*&49QY5eU!uTw%;XXTw|E=W z>Tx#R(GSYM!MflS_q#1`7drfQisj#+M9Z#waK}z|nny{L<4#ga5?~VGIxqd#Db~4V zwf42;f4Ox#{xQ6fZ~gf$_da^FWp=XX&%SQm08Ss^_-f%rhDH{S8sjhi6${oAoZTz= z01cIft+3E=!$a?{4Bu=sSgFC3Bq;s6EN)f=^L`aM6Z)MIB@$?z5 z+{Pq3*BB+~&;FeBdow)CZZZjIup$Xd;iV8a@dEb5zkEe7Vpg4~Yc7cMrZisu5f=QT zCZK`~`_g0Lhm}sIV=yFzBn09TG{qe$#qq>WR{9RP6zmeI=1u+>aU=dl+@6Ycyj-*rSVKaEhWkhdeQ$NLvd^-(+lFtVPAcY0Q6*?!|GrvdPiyVMzsFCW$8zfl| z97GC0n_i#^lK#UMwSsH;^CM1F=f-?!XrF(K)>6Y8-ogq}X!;BkN2$+qd1pFE5&*e! 
z47#^7bCP?!?@+r@Q7c8iHkpvZ?f6Sr{n9s1}+$Q%0y7Q9e5?T+6+d=5bU4?^n3jh4rJjghx$R7mQlOiJl1&fffY4{Wg6+;?S;>Kk;$ z6qyY{QIjt0X(WL((0A_J&y({0bSPs-4h2UEM?d$bz0ysfL$E|_8mB5`I{#f3q_cJ| zf)-yFuSkn%^nF0>KWOibER6z1y!>B9{VGoTU-D^Y+_EB)YaFjf*0&#z=~_KXWx zNJn2E70qx*l&s2--PL_U(UGd-%YA`sW0jmZ8(2mf7SqBq<62eY5}7rKl2Yk>Ucv(KlZ{LL7)|N92pHCvcJwCk-tpkmN zoJPsB<|F<0|B|9T$?QEX|?z)dlr z_&mKR? zRj2*;ENoE(=6s2%XPpn*i7`as+~lj|3y={8!n?MwLdOIG2gu}AtiCc8mFg92T+10y zhSsRx>YIOF_+3`_cUzqaUxau0iZ~zBzeWH}=jmsLCrnlo>PV;rp+;Wt*4I%qD*=Vn zob~va4*iUpZmr*Ep+g1Q!o+@tB0#7|+xsxT&?a-^nr4;he=7Hl_s^WopSiX#OtN;* zFP6T7r~?`^P6K{{q9$_&>b>LhPb>F7ikD*u(6GVHI3cj2VZ-r);~hI+(q1^bLPwCD zc!Rc-*yZjmN|Cq1)qR4q>YUU^EfSC1>qD8uKYnQJ#Oi;?1YC;tKPf0kb?bmtrE~J< zWms13``=3ouQ^W!xL{ut z(75lJ_@tp|WyxuQkMo`P{i2sNK7KIRS^QBc;&47ULzCt&J00OT7KS5WTKS@ed{vag z25Ow*_LhEtLK6x}S9Ptme=V{l1dMOiBjL5z?5E+itx-e;6Mja|=kAVuZ-TBy73gX# zCitBTzK&$OVcI5rCS3j)mPo4#gZ{F;#lcc(SXsGJ_`l@b`%@}@rr)_Ei$?yTeAcDh zYn29Rew5Pvj-QjMOT4CB=el?KcYn!e7uBre;O2ybZXxuOn1T$nRIHx$L`wS<4KpY&Bltn;fZXWM<44N zI_$FRjPFhHRijoXf4l;_^#0OKgRSR6Y31_SBNh_4Q0U4z;g@#r2n~}I+jeNq^Hglq zCI2^_xPGg#dE~Va$qCu;K*kE9lz;3oH17^KD4~EJPGFJZcOc=7h3=w(FO6M2JE2Zc zCOSnLdL8gT)-(Rd$ZSQ*-Ng17V!K~d==Raj;G{j2nOnig?@7w0%KR7MW)Y<7zVO#- zZB%ReT~z)X|0>%5Q)gWgjHrLTfLY++(|7$gT$RbgU3**e^*34$tSLEJVq1CZ5Z6G! zM8&4$C-S|1Z21GymDuj~39V4Zqt+>^gZ_NtLwqcHWM;cg8%dpKB5HIJs-Y7(b1?8Q^iz(RI0bq62LwN}W zFc)afHwPiwXP6v`DCK+Nx?Zq~td7YhRd z0v?8Qt#$0vg0CW50wrdU)(XNA1%R68^UvJTKDoF~8@3-!Hg0-W#f3)SG;?BC@jHc% zF6DhUc<=D=&(&CzU?6IA)S@{X(oTG|@Q*xaTxern8Hq-X&o1b%-6ymq+;=!sf-=@~ zqb0=&Yhr*)KDyv6rG(*zevZ#7l&qDkWD2pj14;ig0W zoScoF=cp_kw{HQULdOcB?%5wLqQUS8X}**KphnU&!Pux{aPRA9gR9+K852R@lNXkU zD{oeq!fQjG^w2-#ah?bXiENWrh8@BGXOH&DMMV$GI$ebJV(YT5s*q5PJ>7Y_@@ZC` zrVrueuY#?p!T&(-Ynt``oW(rPzVIqO)?&!cw>3m2k6AN?X5Y3Sffk8w>+R)HVoh#% z&n{)!TaA%iw%*JfUW}^AKM;lglrY2G=NFX5>g)Fx<_1;idO=MK!tX?q7AW^I59~&;S*>=2|8*vtEe935OKKG_ zexTC-sdll$?f$z9qhO-%lZZZ%Mj<6}(A^iaYda$G(A_Mac%=}Rd0{l2uEr3Hxal#=oJX42dO5a1vYl9l>XSVTj6U$2eb&DR_{jBHw{`Cg+d)PD+fw#JB#%ots? 
z)IGa?fIxSqan=~YLpi8|wlVBV>k2&)uJe6S>i3UGZ=d_!2k|0kH#lkm?mRP52(`?Q z=5SWjP?#CU&D!zbYEN+YPU;XwAvDvBpTbw?2N#&=CwFYOlxuO>YsgrAVs0dxZH~Tb zpkqJTO`hK|hi#bo&l5D=C22g7oYag6MR@)r@iZ}Z44xKxgX1tVVIl#^#UF_7HqI;U9hs78Vg9OF%YWVH=HYx z*&6z5Wxc^q=gX@04lZhPcRiPZnSUOCUtD*XXw)AwzN0TG>eku1*XtlJ*5KeF*OT+ z$PH57-F$xr?WK>Yl>b--^k~@!wS`YiUtw{MBynP@Tr`O$VE~Quesx3ltfR}WQxNhp z$t5Y|$Z{o1g=s=HV>9EqcUe4jmhX|k25o!=cj&^dlK+LlNMwlE@1F~PLBH3i_fn%8 z3#1j_UK#;mkVW@rsk&vFY1Rd#+%&hgzO30H66?BGy0Oa|qhY*O21_I@2w%<48D#ZvsSD@*}d@3MGSH{+<{dX z63o_uS$D0${O}X3Rgx8+W+d+;M$Ev80Rq~AV#HV39Y^`h-jWe#wG7GF1aAA0{k7e0 zYcmZOX}ggd{O9r^IFv9Nz2BLh)jH==AHxQv0jM*G%1J-;pQgNW6F*LGxy}9Wg7 zZ@x|SOcnaT*(vx2+xJ!o7Q+wMi-vkD#W8=1L{znmll_0_H~Qd6CvF(A^IMb|zqER{ zeQJU;;ELaA6;5)am{-@J7l2vw>lHPl;)H}O;T<$t#go>Kj*cm8pz_b*-Poi4LoJ`M zm6={`NBQUPJ7GSZR(a(6sK)5#Z?DNbyGlH*uZB1%+~CYc+=XVM$6bR)&3}r;l zUNYW^HuzEavTU|BVX4u_r{f|zT#gxB5|1kER2TYJ5pq`83^rQ1g+mp8=+ z5eR4T7DMl=H;70DQlG`>5#5R2ikoA?|Hw zE8#~rzAJ;`Zgt)y(!`12UsD_Tzkjchzms}u0h!&9@#|S*WB^XF1@<5r+3uqav85E% z3NWgjiE^sWZAU7wMK`Dud24KzU!WotoXVpl$UjeYpLrj{&qS(EI^fuy#z~u-J*L8@ zp;VsUKfVAv`@!cU#cJmBd_A4`)#o=ITz;{adCkUiX7-0)yQvY&T5j`s;obyl!H{2T zgYTecjml|vH*7}>b!d)S=6CAeJUiI&H8COUXZl!Fv$#7nQn%)Y`vLKiSMr)2>MGAu z3-_rECB($8Ivs9tFOrU`fqCmw-)ih0{O1 zyTlt)woF$DDRFP^(AY3#zvH~ZhTX!Nns}8GV%s6FRPU};`+e$In&^V_MvB?hZG>h& zAw2Ia26e3bV#>WiLo*%G(4WS+5w~-M?H?c73y_{R#=5ZNJdk0l_ zSTiIbusiSUb0Z}$CbiM# zhojESswHQ@CHmJt9Z35ESC?YjxWyY54$8=N*PA`~Mjzjjk-l9bXG7vQa~)#6>_h)# zc_aCOEnJeAgOm6}2M?wr1fg;F_b~@-d!D@Ltx;Rs%1Qfk;Bz)=vCeKUwx3$IzbMww z6A_K^uYWd>_E=u=Z4}ig$JL^*pN;J^`b5y=Ta!>^W3O_mtVW(TGu-OT+0*da$lp~i zu8AsvzVO%syoy1m`2nq`FS9=HrBeLc}wyqhMT_hzXv0MKW zC3whnW!-P=-~pWZ_~7N`kP!)Y&aOgd@OhrEC9Wue=Gl-aiuJDR3)N$U=Foj!vO6KW zzdA&v*KjylEEwbPY{Y!a@JZt>!z^&fyi>96;EWq9x$Fx2f#bDVa@z0EuMIxqw*3`S zQIf;`^XJD~cbAZ3E12T3$)FrLbFkWivE0VShXY0;BEl@1#RWlki~3FLift=7(>7tz zm32QsZKI|B)bsB{pRtgybu_>iv)ZV?hj~#0>~wZ=-=G?Jt=o>x#NP0s^{ZYotoUHM zFg-JUEg-eiC>~ClXIxtlIXrZyN#VFvAV34cwqcBK(=%lt6~5ShZPkh)>iV$)N8_Xp z*_Y?5O@JTyFt@7yS;KtSr$vrKh-cQF$BzOz+D@6ODo0>gAwPFrx>tX=W}4Gdl*p$$1fh%vCd6 z5zwS}sZNL;t9d6J#9wZ^O{qDGxpSxcPRu=>b7#*7_*}kv)o|lJn;hF>#+bYGA>uup zt~|$MzOlYZ{`vx%qYBqD-_m2>lK+R_*x?yxuhF@17+9M>7A7LE{MewV9XxJ}4TJrh z+qQuJCZCab=;A_yA7$~}EQI!lr{aY|wPZqt-h}+o)>#i(!a9gn8g^V@RO2XR=g1#0 zTw2-cD<+`DRPHR|XDJt*z}dx7E_qIkJ?9~C)}`|MV#$5|=%%XtLyT+H%cFi%7*#&uJAdjWy;CiT*K0QBiK z0jn#sgY6Sg9iN1AAaMG`=@XnAx(6e);pT&hx$%2Rj@JzDzvKwUhO>J}3lM{NOB2Ze z()D0f-x~^qQqZxF^)#^tFZZ4FE$(8LN-sT_7b)&&2j*7tE9QXkBd%G~Jy0dM-p0*HPp91<^Wx|C zu`rlXIHu#nrh(I;V25_Rl7~-nKbaZs1op4*UE(oC)+gxXD`;wY?#ztB<0W zhr;1hA9E+nYN-WOc)ALGxIZZeh5F`tEAm+nKfxVkn-MK%*Z3D&CsXQW>g7$f9 z$Wg=o8rr{RAIxTbci+O-bFFvTS;jR)21dBw)zu>R4RHp?C`C+$Jy4@#Cg9mzB=~r^ z6X#HQc|ck;Wn`(83DdA+An+g@)GIj83?<;!R0+_GBm7!X&)@|13_#BAoSUDF@BGB_ zTL8y$Tr;>R=Qm#Ycy^BVEhj>TKdTQ|OGNsfOJDv-OiLz%heo4e``4O=C6q4g_iZB; z%#d7{;968U3LPxfVVpByZ1dV4$mM(_b{|09$q`$SN9P;KW*^H-g8M2Xom>GCW6zfV zcv>Kd_k?mV&)KtUmE#owmdCMK+8%T9D>UlAaqFI_&GJAZ;&87e*VBo0Fmd)%w4m*% z@ye{gC-?&Ck-vc4078!c}jqY(I3sXbiEM zW3@E?jE9jtgxP$wRC`t%wXiU0)QNJ(CsEzM`NNlkx)QbRJ2yXZz01n#3w9A zF3cHWWt!fVeI0l2a3G<#cUKQ2KewAMVS@qB5fBuueW8M%%t|r%*|~HZXD`jIvZ}*< za%d;f(+UcUqEn+Tg28a}MmDOv1Nz#KG(yW5KIk&{6$t>{NG%r@4cp?`oUYimP+}Hw z$L+LV(|wOKdzIBomA6pRBy4#*`ru(78rHZmBhONQU{$B7AQI(R8!1Plv^-URbz-w? 
zvM3-z&e1-SyC(QzZ7(DFS+_Mx-L+wk)Azh@$>u7{Ksa1OeY^D4%W1EW)5J+ZB6Na9 zThLaelZ)joo!0xfPgggAuASC*ZDWbxHRg8?y}l5yE#Z5Wf|s3Io-5#RD7R{&^xf2_fSs%T)ctb?}FYerjF($=VDm=SXJy?wM~7KMY}N*)yrt_kHt}4M!|DE zA_aQwftt2(fVcn-ckZH4?+g`EB@YkgekFC6Ci@{!<70wYTm&#GhW?G?MK)IZE`x`= z+p(^bAz@nf8G=`O2Vd!GPtP2n?X^mtPiTvDMfz}_*7oppPlRq;t2VH-tDnWVl!0d( z2*-BN`WJ-_Z*_AGENm@~-ksW};9_Wf&QuS2NY=~FpcB}IMDOlSc%oeVbCwRuhQ=e4 z*;qOEc=g3+&Lt(M#M{}hWA9A>fSp&e7CU~}#X^u7sz}gsBtS_J;Eja%z0HH%Zr4w| zygWzMn#Ra;qk~Pz1~_2tw>~#D1;{WGgTjRoP#hJ5Z|dEA!Y>Nc&2jpfw)E}ADi&~F z;f*q)*AGcr=+Vu!x8f~_+&g5{J2Nwr`T0Px?bw&iUUmOX?74NwM)R^>UA@zDHz3^q z!|vsEf_GJ5C#_7;`nR_{QLgmmGxfaOj}aAQ(3zJXcblD>2$|DZ{B9qxy$5DJINaZ0 z!})NxAy!=E>(7C@`~|oPLV9Zx$k-rfOiM=<2l;iVJF%B+FXd8IZK}f|vqer3j1yd; zd{ske9RStNOs1LnW?SYxi~aY;<<;0e;g4vCl|r}f1)pr?rx|jD1fw2#aMF3e2-mY^ z;swkEZ4hl}T#k+2+=VCzf=EI`LFWBqb$Bo<9A8*~=f*0t{U{Zjqi%dE_;Q;@+ zg;}f>o;{cU!JzjF-^aVXZUyicjdv=`*P}PCQ8s&>@1o>4qvfLAil|@T-VT#(94)iQ zht6)=+}LscYJ&@q10HG@S37c?x|#*2A>GiLizWA_nzr>{FgKPBS&{-amdT;C_+tBQ z*K$@jcNYTJz?KS}!;WyOI*B>Vx`FL#&kw%ScpG~CJc9K8^7CqdT)A& z>ip=VMI^fF#!IhxlaZ%fj6w0=C)+FSzZOHfdu7cLh9uQu9Jf~+%v{I!OV`5{_!_Q@ zk~yWQbjb0`4I{&fDsCYTAi$G5X`~BWMGJ6Kyt?DKG`=2l+A{)fz|MChSM%Xkwoxov z+-oo~G+-zqQ^O85E&65n<#6+W5N4Ux*PbQz?mKVq!ejYLnl|`u`ZvnoZ zU-6Kv8D@dq=I8R8s`aaMbllmaG_@UbS;VW3qB?Ml835QBlb-5l>bIAM4evv|V`QI2 zxH}K!=qV{KvykVy=wpa6`}cY7Hx)He45R5(7$kv?xyaslBSBt!MQfKpH|h*F5&v8r zR0L?NX2Je*hrq532_=EG7+issxDF%Flr2rE|tO%v#TnQSD=nfq~0zEz2+I54Oc@ z>o7nRx`L=mw~Pja`Py8a$Lu|YZ}%>)?5GsLbV`C?a{}TYDcO@T2IeAA5Ge;Pr_x@TO}+w z`Xud6i%GhgrZG)Ov+R*q<`|%&!iN)>>3Ai|mdkm^2-M`3zS^A5Z0&y1l824nR`o1h_oF0u`HH(w zIQvN=6bYH1v)hh=BJ8iNv*`&+L%sjb?jA4@jX8xIDm-p(v2PFF(jD+PT9JQ+d_Ost zVIbA>bgU85BL4egp_4^4x)Qw@Z=6-e6d)D+2~Sso2ABdbt;yq?dcy znK!x=&n_!$o5BV4`RLGAhPU1Y_*=^ZMi-_CZGZ(ybiEKKZ=^GZt1X0l4&A=_D=VWT zjwP08U$}`MOP+fPkr4TixgEM`S+Rgb`e#f#H0F7Xmp?@YU~iUKtfFA8vbYLdj2!>y zxaHHbs%0+w5x5s-RcTBs-;qtF;uOU!ug>9Coxaw$?<3#muH@khsrUPE8?hP7j*VnULV-gG zFdhy&_a6}E47LWVtB7%PTPm=piauJ?;l6-n{=Gct#RHU#Dh~$@&$RPu{jfDDXU$$R1*(-h=N-gssUw*Hx%>? 
zHodxAET-$EUSaG$PF00@yHjG9rCF-lujR<=I1$T&Wu@zmEXnhbXcXwf6;;6QS#5~& z!%_Z^v-`K^`vK?0EHr?wEWz`bGJvP%_`CXv$Fq|rv>fQuAdUIjdl{C^6d99eT82o zcc2mc7y*f|0zLaFR~D*}Ct~FZHte~M1X~FTbdNrY-w{Rm(!y1)c(^#z0{Y(6sL{%v z@RIhjpgLovC+fmm8cWvo4RZSjJcn{Yg+_3XBC*<}pEzD5d6KoG9HCrSux{>}mWAZB z_SuG#!^4eV>a}iW{&aS4 z|FPSXouiuPWiPAzDQEAA*bDNS#?s=uJ{0_+ckbR1!_0}b<2Rv8{m~C>@7w=ON{dT2 ze%#&qEn?WljqkLfli6@bS*~TT4vecRz{~|CV($UOWTA%-`;dLuIef)_S>B&gWFl*f z6Q3NYYfB-bi;KCRDLuQJn>r>%>T|{Fi|zBG^xRCd)u+k(hNpb;6ysrb6me8h`f&^f z6OoW1wQEGCFrn&Ga6(^lG!^cfQ^j*lF;ubD?^60CTsb*sywhXAsr1y4P)9>2 zK{R#Gv9@%|5<8pGt$**erDY;&tg-+nCE;Qj2rj5|hS)dZwY9}%**vAqWGN1QDpysO zkSA5vX-{&KUk^1<`Esk2-`}#*@zTTYX~WB{x=t8>L4W=Fn*-7nK~>89&j;lgs=Iw5 z_vKr)m}VQ}Z_2z@ADTOrzAEwK5`y94$U;;)w;{}ync}SVo@tE~kxUH?M=rctDB<*A zo+1g}zTsz&ShJ)VX{nG9GxOqjv=AB*&{Tnf390js>M8W1V0eH$wWP8PFYLGckk~!W zahS+NFsP;c5WM@OrcsN{6MPl28sk)>4MRzbkM$+f`LQ_HWfLQ;21qKi+x7~!ADcO# z_gE-C@IEj#HElCR33Nuo1eyr0+d-$-%c!3&5gDV!yd6jZOho{z|4i*aEGlqhWnucOYxGL%ej@WJ|!fDBwt?NwdbB> zwJt!_BTS97rP113-U8Z({0D57*AB)!$dP7WRjA+w#oJt$@{fIEUWt8}nVB8LM+p}P zw|i98duID&Pp!_0#6QLkYWPa)8$}jHzYUixMoT5gzKAkdt8Kb35haH5?PsFKOri-@ z5w8mCwe1L26JWlGH=Yoro9aZKf?l`K7DbvjPYEMEU!yD7oXxs7MfGvo@6Zc%R6RPN zKVK@tS7Wp=lO4ECPOUhO$i&tipGq=P3 zT%G|MlI8&91-8JJAHwlc=?U-lPb3!kd>M~>lEIM^pM>vJ?wt;s^;CNBI|j$ z4pMhNtlDIIZsKOw0zBF2O(HqM4z7isRf(Ua>x$15yMBs!Z|;3oDR3YlUe9~4eJd=l z=Rw9gJB6$RZfL@dSH*nMy2=j*cb>q+|K1iI~nZmz$HT z(C+CS*!14VjVF#3JRj?eL7&i6(70*O2i6qb2IJgHs_JS)RUGw9bY($^3eFk`P023M z!e+++i3Vc7o;CmJq`9rkFVoLa*CV&&=w~H;`;#&TS6+Miq8%S|$Fper2eC~hs>r&BUc@34e?ASidGx#di>u!>) z=+a6hOKZlj{D1ZWB$~de>hFf4ItOKOsj!5-3XeN8e%DnsTQOUQfOX-oW-o-|>n*RK z-+nneN%vtn)(zn%)bNlmvT;dJzyF%#pk!NnzwcH1bR(~O6JPB{MFuvjv4(dcHqK%y z{PA+ZFxOH~O<{6xtOou&QvXy>Iu$jQAlP9hQ=ToWq^mvpkN2{z^kP#7&{Q{IvJhir<47&5Xk_qc5Z(A))ie#Vuj1LDGVx(G7$uBnhjOr#c8zmc5n}kgBRO zV+BTjY5Z!u4!fvm*9FYgazVYS{+Cm(#fOtcyiP0TQCrxn^tFYk@sG6%bE8eGT&^n> zIuXIm+Q$s8y3~vYCpya{=UQrcjS^?)y-8|;N!JmR%4#Y-zn*0>pe?vKd$}KrIG;qc zI!ZFk!Ud&|{6`lu&#hC`%Q!2mK9TzFNieHf{PoK%NSobqR*NmFx*ELvE+Od3WCweb zbMZ06+kWlu;0Cti?q+B8Q)(}kMK~w~pF&LzORNceZ}9W0s_%yJeeb7H#w7-W23R@)z;MYG4K(M#o1jLo1|J{ zzz$<4)>cW^0__7e6jd|}1*N5>^2_V$;Ha|ui2_ng=_)nk%S#sIx#*kn zQWNVZH(&p%%aXNPiFG3@Cg*iv9%(8O5v)9n`_=ra+2u1! zqnn<7z33ew%Xeu>bI7Ou+Xc&(J>r@b<^kwKFaS4gC53zRqCKsnB`zYfWG~t%3!>h| z%<>`a2{RZoOE~ksTFfuh_&CWRA_r5PVG`9hxB?T5EbcMAi?hn%6Pn*PMj&WGYt5Y9 z)mBN4({?uDrl;@YQE$GkuL{B`kM?}7a(e7zFm11bqaJrq@*5sh>|Uzm{9IG)(Nk8q zV6ePc>X6Gh$@xS2Yp5*9O7@DN5b6p9I*uQfx4|spd+6kP)<4faU9EigYvEUVS$-tL z#iCP!Mc)KZh~<`-l?5W7v#o8caM+0k=9LgyrKVTO(YU{3Ii0P7@CYW5dX;7t7q2Pe z=c8n(8mUALf+vY*v{vU%mGcOuf8qP$tVH%|v$h)j1Qq zAyKS%124R%xbDL<11@V0rm$TqVDs3!zumV`B7*RE(!3k2Cvgtq%~vIQo#Ss*#@TAo zyOa7&e;|aLmy&$z!WYX2d&1?l4eHh|Tk);lo33h2&0u@>6ehcTlprpoEw3)IIYg*w z=kE(Q<3!$bxxw>;*Ziz0#EsGo%Xo&Ml+L0RlVAtVkV>b$guC;j!yj-oz_YaG>B+=W z+dE!uZfnbyMLs_NojqTtMH_x3ovMBc4d@>uOsQ`C(5NW4Nyn@s8^IyAW^-bl z6UL+HSp`{|={ISgn9R@rnvm8=p8+UwQ(whdgP*_2MHtH#?3aP7OnNU&ax5Gttdey& zY&Y=d#oq##0ZCiBdKzUI!Oq<0!ga(2CLK4Vf~kwQ3(bLMprMpadd8&K1$MjpG2eYoYI< zfETtpb);_Q))ts&jf}z|mSb@&SWtwa$jQkF<)fUthPBpLwe+fmFE0b0Nx{wr9Uv9q zy=n90hsW*6%lEu15QwJ1=E14F3YX1DN5WIBXZ$NnzVU(fbs{rz)wLo?Ipo&Dzik+s z7#lVxF<`6CgXD;?m)i|bHew85ta=h>CbAO-Xc~;ad8>@ZgV7SWN1$uNcA*0^&g7tQ z>$fNRC>+1DS-TU)2)krp6md0T*zq|Ws0nO1Im4i1cxGMYm60}+y$o#Yv6*8>f?Pg1 zQcC5VtA&6!!6LULeZQFDu?fIn(v*Y`)TAFt&5WJ|ltbf(+lod0a}wBJW_bfb!G7j? 
zThd9xoG_0Mgx2hi4XUnlJ+MFj^#Yy>_Q#LK1xag>vL07^qJ9J3 z)1*&h#oZS6FN#dCPZy?!^?~aOs*#mrYgdFLpvWK(IM?AV`HnJswgLe|6<| z+(_ChbKpC7v<%)Kn*p0x-O%6(VJ8*3AUKk?1R0xG4Di|wacwV>P z&ueuBy|sK3GF+7OSWu>S=F}?uBi14y8#1WbIUCr3mBDjgY{Y|aI)8Io9dEnWu`&O` zkqN@z9edM{1vl`y|EWN~{k416m(JUN`^UJG=JN}}sd6zX`f{=IfV}qTmZ@G_FF7f| zRpvC7?pci=9v=OYfgjukHP`u8WMlq2U76GT9fmT7TYhKZ>mU4n!PeSd28HyblJ~4y zWirr?2x`4FJG$}uml##EpgGR9LNm@WgN7{%9c`1F_T+^j;MdHNRHWqKla9fi=i+|X zWJ^0uTTP|MC+o2qG zd;nq$u81BKZr;^0(xH&O#|U3f@*1C!g<*Pc8iqwf1SG zRplq(;lhmR*_gO@c>G!_w`SgY-Dfz+>XzPz+fx$nG&I(j*z zM<+qT4Mhui_7s1(xc!X{!O>Demaa5j9kKju*p*B`!ZI~N197+TOWh^^T+EQ$#pi(( zUu2p^6vPtR`uy(v^J=X>bCo>g-*-~VL~yv$tjxRbT zC8a-J4CiRI_1U&r7xQR-OGT_p8wBkTlk3>BUN-8sysB4Txclib{lnJec_hP5Y9X#a24j+a| zP`vy0;IY2lK8i7C4(?vTCn}DQD;3&^-#;lEedpPl@_1^PJ)2nFMFrAO|08Y5&B*uS zMODPv-!G14cRmeS!tI`OTg!cFYg`;D(7Ca^H`5V?-6`r8;agD-mySJP0d8YA=k-!E zXOs3%UKpuItfeMP$lUioODlghC)%F;+51r;u%V|5iyfazU3P}#^wN2}rpfumvH{yf zEN_vg07@;wf&}L+1oXLSjAHe<<84uQM zHYIS2!S`N-;ckHq$2u?0l?yUl!eeA$oe=Pbq1%mEmt3-=S>?bInPCqTxM{1#>hHk- zrDNfg;Q5}DgtBTjV@hDgY!&wD((zRolkpPs2aluTM-zl4)js+EI@5&obujA%zjt?* z;_jWZA{YH=r6}bjieK+a$>etV%Ea7L**ICgDv{XKVbl)K=VI1-2@l6|xjM#ebF?^V zPN|x1r{j6kP`7fkR7=SM8RyI3QM3vEm^jci*rX2+M?3+qErQe22#L=?ddsij^JgAs z52JEvQZ`9SKk6Brw#k>dIa##4YP=AdI&t@w*@P_J!clne=Pf@;@~W|YdW_kP0n180dZ03BB7Bjc=T`KZ!n!!fZR;+N$z zd0Wv7fn(1C2eb`OFJkQ`8g<;0yCO6 zFfhnIyM=C(LpeA2QORN5{N}5SsU-uMXCcqj9U-j!LQqxthpcLOOd-2QS5h@$v}D%Q zYIxA_Y^nMX_(aI%JTB0}o!6gP01)gw7{f3uiIX8uUq&&d0VWD;c#nnz@EFV6qX&cH z_-jEKNCNILxmg#e;lYK ztzZ1Aj!`~u3I>zY3g+_AY4PqMO<~1gq*vOgIg!V)MM3hJr*%B@h+fLR4XWbZ}U`Oo%N7{vk!2S zX=NFaK=oRiE&yLia*8c(X{e|N!j$1)uVHVZqn~257OV%k%B6hf_cr2-NwDd_^H2e4 zB0b7A(+m>A{%z|gnX*{x*lx4r9+r6iY3 zlN^k>7X{$;7vdoKf@_-^V2ryb^&=}V5)gp_OU~<#N@wh=OIjCNKix?AkiV=WzY}Us z*6I32a`15mdTDql0ywcJzxtj!n4^yC>gjYtE@@c5aW!z833S8vcl?Q85GkdiJ*>q5 z)3r2!oi1Iwiz=YDa+^D7i{&1hTGG#$$_z>*hN$81J}kT@(5<=bL;bPe z#d)s_k_K#c%4Pba@v|dP!GXmegTb+fZ{1GvTg6wQ5u*V)JAS8fwu3?2;9f`pAEcC& z88_Vtif*YMeVec?sxUn(<>R`I(jTD{6vRlFH0%DHS{dU#gh&a=GnkzrM0D&kyPFZD zf{A^FSHjbvlEUn5+=K!ItFwjyoFo;Oem-PdmG_bHTrqFnY`Db}&i6Qm$jU*r`nul@W8!Oc!KoNYL za#kMk)7UHROe0EcbdKX?xb~^o_c@Rk^R-oUK(I(^M{yk(B!#pnkvXYrZ)nJhpQ94; zB&1BKAhq9^?jeO-OB%ZJtave8O9zhJ_KuF^mr?=2LtNi=d@)<~kYht+-Q1(j+CutPeWf&849{yV@{sB1y9i|)f=x21 z%F4>#hEhTm71hGMaHup`P44;V4Nb^Skh{%%;}%Q!!ZV>(6Jfc(bKrJqdj1sf61~XMTutWHU-#P-he+$M$5_E+6XdYV*VVQ|N;wUjCB9xMzf`&@O!^7EuOgdB4YL7Z;&kIk2UU zTpFX2UDKNB`K)3Y>5~_xz{c3f*0~VQnCz92nXyS`r~}N2AEm(0EhZ+Go}I^$>_e`= zYygLoxLt_NpX+@ohZYMTf>Pvxp?H(B{<4lvS5lDOLO@;>S8znP)_{L2r8)%(P7r9s zlyVI_!sVfJ=B_x9SHXvOVzK`4QRd|2Bs4>`iWWpVD>)8cF7$>tESkr~$<>ug7;B)^ zn?u6iXG~oj2O-koFx(WVmds<%y+q@qvne1JzI``yTK?OoW50k<{VEHkKFfW;KeD5_ zG9gaRftdF^%ZDcpkc$GDO4%fZdE1Y_$Jm*kRTz!xzokn2lQ?q86)J&U$=+dD_X&ti zZgg~1N8d=_oV>BD(O@7@HyF*VG+7EtpA74tmg|}3?~StevhT>@KfQ!Pq2%27aTgYg zb(3{ZQz(SD!dSQ@Fu^C*y4D5TX?JC)vEICVv^EiNk2dR$(W8vul!)b5U<&soaLz!5 z4pP1u+^2xxrsgLkE%iH+0X8?=2PSS?xG#?&==Q3Bmr}FNu1W6(YX5d@8+fq$u3HsC z7L*@&I~VO8c{UROm=fGy&iba3Fu8K5$LF~B$sRjplgv*6$jWPu_lxYu2*xhvAji_a zMj5g=95V#H2hi75&L2V`m5u5D;k>Ol$g*5wQ)C0Qb)<{a;foauV07hOix0>qL%?N0 z%|!K*R(JZX{KCAxK@uWYG4mm;e1atvWCjm_M0ng6#^{zi*|1IZmD)zhYaxdOtzf8%$NN`fB>xS^mEFBQj`vjro8=AC;md_3}wp;xt}9-LX7 zQ67wmTOP^eu3Pp?9-1$nS6|~n#5P-7TlAsN_JUE853@QZZWqmYE(R^qsY=9Ny(>>2 z)+E^_E5wd|8E&Z6+aAl`aVr}<{M}qOLKV}sN?_xd1iR#)s3#zM@5~o>M<_>Xe5nOdH}ySuyF71?Jrr(BaqLc{Q^Irif{5pnCt zOz_jMk)Pp&Al6?f^|Gc^U6Z~i4&jsGz??7nvvfSXCc~`cc|^0T7!c$=b@-%)ke!iD zu|Le6^t&P;{M0xbqIDFvDJ_(+U(e49!*%4z-GNW^q5uX30>uE)3g8ywG+$HvTK<}z z1nbH zs3%pHlz(mw(*J0}wvD+_xBuhot>dCf-@kF!T~SfQzybjkK`H4@m5}Z(k?sbG0ToaI 
z>FypHlx`FkkY=cnmXL;#9Gd4k?0&w_=lR9MANyK~nKS3y_Z9EBZrz#M7u#k4Vefof zW4GUVzBrWrkWr)7heF7$!_wj2uebB+2e^^}l?JrPjNDj`#NT>XtcGn2wwp{prmu-p z0Unr|&!^!ChjRiBem8P`F2es^{&fFDfe;(&^YGZmno>Y-_w>FTKk~5PzFtTBQkQt* zY8Uyp2$Y(@O6i1+=O+4h;2=uxS3?@;pb;Pr!AhZZ6d8$#j`)=y7`UMClnC#)jA_cz5ux@ z!V1TnYk`^GbNZ;F%h%WX ztTW0M_K~BU(-B~@qY>ci(cnL<`pO)|pfG}4plK{-oQF=Dmg~+`%oXXI_zs7J@9it^ z$sfHLDulD=#R6|-Zv1#gK!n-*%T&eaI@Fq(2=<_q_J}Nk#EV-OS7tqWwmRTyuZBzU z_lb$*SUOAC#lzLE^srp0CAl}3G2uMM7T0{^hK1E$iU4t$_>pgP`~9UmFLf;fFwfD? z>$j0#nT1R1Y)-Y6|1hY<9qrL&h#k9uZ@%R>sPU#`-g(ceE-4E8Ydy1_G|Oe#ctU20 zL=&kF)#aa8^jc%UXdLsGI)LGBWgB(UoUI1W?f0Qo9rYw$6?mZ=&+K^`uem*Kxq57* zV~8v`xYq1XDLpA^rSeWj97bcU2q)76*aAV(`vGbSC7QBfRtyI+tU5!awjb&)lUZ6?jCD9CG?HvAWEFm74jBlteS1PpZ9YO9 zg}<*kTQVLDDa*J9`f9g=vxY`yqTNY?2HS^ghi405a7F3L-QXwEm}hmeg*p3$acBi! zbBueMDAoc}Yrsv$8wIJ`pQqC2>lhMLcbd1zcrWAz36RqQDuE}j0MSS@{=AptT1i&7 zh{adZ^h4jd{Ja8#pbDeD8;PhF*;x7NNauhN-JLqwL%!7d!XzNBhnGDsbklnr{{H$c zZTcaF3`yDNvPh0P)M3=ftZUZ>r|U6NtT~<(YdaP#dZWQ_Allk7TaIt+K7uZC6x%AiNCOG1tEiv4t zXQaJ;9lev!Q1Hm|*{$%4GjbY(!y>8Za;rl5G|rbh=fc`&(lC6w&;FL%CXJ^JD*}?m zxcoxQl^)AGKu^b_pd9KAdGbu!zSGLi=3}Di7U^>4XXb;l#k}S#M#F+kJ1vg(j#p*= zh~0SbK8jI8tg9|DRzwRDzi|z4b4vq*jkMM1BS971{w5F)6?zhO=Ql`&l4@?dR2@%H zmNbZ>s(_QUPo7gen0%!PP8Fi{Tm5MZvV3omCUV7o{>we2Lnzo~P@z?UVp!E8RJ2l8 zZn%>j6X#~$hn6)c7jT_=pp?+BwpyZ13YbQb1?3`#a_MpI^*qh~poek^hCvS79U8M+ zb_OjhP00uf3d=LF1(N3V>x*NfqwLmX^^rPf*PwXSr2GMvXk@)gt`C#CKR!PRgt`6dN=Mx+h`J0~K4wCn-3N`NBOp+8^o-L} zI}nsz2}eNN00;uhoIsSV^k(VjVe}`Nv~F@4KQ<&0%$EGzXz6wnXkv=RP_~r?qO;bYuvsN%$^^0Pnit(tFVLkBJeXJ z`~uhqVS5#eK6?ca-BuW=QL{0gh+8C`eG+ZGJ%&K=$ol}NJHR`@;H4=AiFf|NWSm#y z6`Ej50dyc4XYbD1bZmDKELcA=A9FZL0X`0B76z2D%p+(hNg#NJgOV&d9|R5=fQP2N zCuFCtIv(HU+TapD^~4M3>N>azq0wUW#wJ~mz-pS#|8@Nn<)LVru|x+g7N{gI(3+e0P$~* zzj?t808DKhW|Rknb(^c`<<5gbgsfJcTc|3{bDiDL>_hg4NGR_kY!N`dFhv zl9L72OrRpMAn}oF1}Kqa-S7XMe3}_2s+%`FaLe!M(HfvPFK{wOk@J3XsXJ6_X}5g( zXk*_(OaoP44A$50c`C5aY<+}-o&_93!|D*vq(e<3+h<6~uC-g?fi15q#5NC$8Jrg5 z)-iMNyDM`ciHl3-1tivvZK(W~P{}@htm^^G_P1+hpSSIjtR6cg&zUcG0|qt^d(ClS ziki-~g(h|6C83-{qt|hB^WTyb!Bst*8gB3-d&>Djr{sx;$B*~iS-r+RjL9bBWS&ye z3o4pP&_eS?<5n;)a|KAx5GWbPyl`j$l=s#bE+m4a5rNM*2RS>cB>g?>xY!H!Gh`qP zbhs*kX0J%&JEI6y5c^i%=kyj?p_7;O}!GgK#6@iFU`MU!6qiZDSpgaFu~`9 zYAC^(cYhQ?0Ho}Og?6X_Wp-RVC&p0(D@;rG4fe(`KIx?UmKF_IsK35@2w^#ShhtWX zR!}eJA3c)US(4Amim(aTFIcEGZ#sTHtd%r(9@qhOkZkH zJY+bnL=P90k=!K9a=G_f2-S@A`U_K;X_yB~gN4&*i7T=Vje4(jDnGb7^aY>kBB2d%NLf);aE~Yx| z+s8P0-u#A_;hWUf(Jtv7jXhRerf#40`GB@#isNW^8SXtUoM@6Mxwuum*S@T$r)NhM z3E`N9xnL!p{!KfR7)O>~GJJ3M&=@*?m>xx+7h8UNks~5@xT3yLcY0Wvn38{{+|1na z<&>2y1nfY3x;UPqW4;@NaXFt- z=l6PURJ;MrI3LB6 zyTXCsr<&g*Xz>}(r!`e)eE9f4sWn`wPgU}xgxN3haXn)GpRgN3d>%{06abf7@OWUtGdUgiUgrD%ZR#$nksQ^CHQ+bh-)bIo)~y=GYw zLh;n);Gv?81L)8(k2uTT?Ga$&UN#&1&(JsgOjFEtd|bJ*wA4=G59mf5qeq#t)+m|M z1I>XQr6Le6mQ|AP_$I(sShvOZRMpuMG1VViA7?oU>3`)}l2tAF44ZG>waWob#6exk z1$d_w)t%eM23F}Mmj#{LlxZHzECo9nA8hHSxrj?aEVz8*_tpZ=K9(EPDVLihCD?LM zXGb|N3&k`dEaDZC(*4zNUr$fZyJ&JRa%q&s$6|3|YH_M9!E5?m;DhKx4+^i!hMH1q^-Rz7Rw&kMzPEjGeonde!ta7C`)H>`^D=$wg+x5rGsWNY6a>|mJN4b z{eeg-05|j>ES<{B&A%F$Gz~W9B-3O$NPuodxNLq!}9CFAaaX<;^Sng6pE6j4e)Xpo8z zdU=JmDw048P2li)a;{~W3LpQGcIe}0tiNso;5;dQGPvvK!?AIa`pF|bYy_LV#)i^v zzcSWT=dwFg9UbM-k9E!MMsD*dfZa^QxMntN=3s$+pygKu4kRozD}2y=5#C%HRQ?ds z?pf&lz++v&&}`$v_lk%dTwu=C0`taD5k?&>E!prgX}36scrpYT+4YjKkOi1L^a)7Q?TUtmfAm5HJP&-h za)_QVNLbKtN>9(8(~=peWOxncZZ3j8$wb#-bwwJ0qn<5#Y)(ct(rjcQnmV{cot{VQ zSvV!=1yY>$Loyo*i{@oQTlZ+a5)rPmAib#W-77!Bk3b1!l8?ABAFDxwW*`=bd3A9z z&KE$SxaDfiVV415I#@p+3GTYo;kVY-Qj2?8ttWEZ=vMC)gW{3Bp6jB%l~JekvwVQG zLB9|`*8O9JiJ_}JL;dmJUc%y;{a&OEgX11jkQs0dIU~oOXkJ(2wV5^pR)Ei 
z#4=#(4KHcYJ>7_ehkVNmF8-^nk%ajdsg@^lzdqXw z8@?V+1TJw&51T+SdkD;YhXtM$=O^|Z5n4d3WF3i&j^xtnFe6f$AXB2&8~{MJ{Z2zw zSzT!roFe1$aVqU)!3apeTJk@qf+?Ve8hEq~FL*vQ5dZ4-B%o6dJbw?0Sqw=y2;{Rt zl^xik158K77{u3$)kjCxhp`)_o9B0f7K_Ne?5wU`zYXl%?9<7Vlw3(?qRf2_e3|Ts z88ovajTL?r!sm}R=9FzviTnQJ?&x}g$!gg>nn-v2V&`&&>$EY(&1dgb(uVeuX&d6Y zSP?|+7?xVh3Q2#}vnk?$zCdK+Tb#S8u=)$^_FXxP^T&c&W6}mkH^6cS!2Scau{5Y6 zHVgK~|I2!D{%RtEh>PNP09p?JXlrH$*+cftF5R(8iJhi~CI--sw1xM(R9o9E%O<)b z^;R`su~DzsLwTA9JGHikVF`36$7y8tABBY7^C1>p*#E6+fvYIoAe^8AARGYL7LP1M zYti=)t^?}4=Z{I>{A<#96q;r!&zeq5;Cit)24Us&OrAX9_7|_~y6m+!0}3PKz&}9K z2x>duA+gMZD*0+RJ{ubwp^r#pRkBxTyP*2?8@#W? zR7t@;haJ|3ngX<4(XD&_pTmm)mRQK}=@Y95h!7FeLe^!cx4pq88Z-=-MuXCe*&C0c z!?ZQ19qh!DZDj{CS5a#RH-U;ek=IVrF?gV_gZ2(d#6Ewt>k2fXq}Vvb3S>bqHV@aRLsU*Jo}rkAT*{sN_ov<(Ar7ePha!3v^BiOOP^XR$OM^9cQ=! z=K}c6H5tEYabijZL}p;7 zTF;bRZ+homp_GKe`Kszla{)oGQgrc?p{Xl5PhG2ANA2`xYMI+ue2HNs>~`G{CL1#A ztc_idww?a>TZqyeEFOME|7GzQuK>OT{lf?_9-vE@N7y+y?UW>Qx0d>X1ywyr$Z=3> zAk0b}3+f$4thyA9%RQfU6}+h#P>Rg-cKZU4A^}DT`Jz;Imu+oc)H-_!O;j~eBj+dL zNpfD>4?|)jAD&tHP>lj}usM!}0CG^%V7L~hI>9xG-vue~$c*O_Z&7}L^rn_*=E1J} zFMzE9YeVHwgP_-e zwC=N7N8LZYNDCou1CU&Z-?|_k48xGdW-KtV*N!6M7Q3B$f&r2e@Z9|Ivs(}v($d1l z4AzEo2LLt~5Z}5Lz|5nqsGNqcCtkJCBmNDDB&gak)#Hjy`~Jq*#cQ%>(75b~*>e|I z4a9@3wpdmyS7NTTG0+Kx>=#1HxN0nId>o=+Cm~5tren| z;=&JDVc_i*Cm737hYevB-QNhPFW9}*I0s}t4|?w_x6%Y|wkuEm5z?S(juiY0H36VJ zOj7PgBbSixAfOH_fcfId8hCAMHwAMk!h!l7IvmW`{dRDQFUt1g46Ad&UuB?ZTDX2e zXgAz>8cWJi9j4&{y1EcR!AXP3`GwTk{iplh%aK$IMj*PM>|13~vfm7rMf4j44@EvB z@q1pYxhE?{El{<2GO(*z%inN;Fx>)41FQ;C~QCDqLX#TTh*sX)U~yprBx9GZRB2 zvI=n)H*XY!Zii8)wm$>aoq%*(7SX>8CV}{}20@S#gn_zoJ#)ZoOcTU5z6xyP7T{fJ z0(Ew7JM$fukM9@imT69x=``3ULN<8*I9%y>^4BzMyJY+Kk&&9A=?hu!3JUYl_q^Yg z?#1mMBK&n%_?^VQuElmhU?5n@Yna{Mnje2wgyI+AuL`xQ>UNNEE1G^e?YP|vVi`gW zw0mb8xJEYb&-t1n!*vw2GIV7I_78s_j<~ql9{&MS)&&990uEHPjcyM=W`aYfe!wFg z7KvsS_IkO4j#Z+r-6jMRSKE2dK~zDX^`Igtjy7fImg$mVW8OXJ8)=YFJ zVu#z}Anjy9BB&ci76iaa81O)q@*DC{ga8?4dIr&p@}8O;R$RATf~NH+ z;c0+^5v@gpCw=13oY}S#;HRapFfV710|NZFJg;(sMTTZF^~kH}t8PacHcq?VF;;u8 zYM$Q{l%ausxSzXUJE{gs89B%AC_NzD4Y22mX6Pv`^wHklFq8LWH3Gu}&5BmDbbT(A ziCVaF?w#(hoUX*N>n90>%XP|db%)V)O1l$e5}*uOu=EC*y=FkCIrv>n2Ah?XZnp1w zCLi#X(2yhYBTz`xjr;L}zGJr4_1R!Kj1ovekf;Ms&+4KQw#gzD<3&sg`4F(p^YCF3 zlUYEdgdM{BEKTJ^pg{?7hh#LndokA>JQV%zdi#ajX|zjVV|cVDyVmgfSOHRm01%sC z8yHD>FYZHS40l1RE-Iq&$vsj(x9L5ibbkJ+7r2xSn+zHd4J@oIRdn!5p;`a`Tr_>| zZ9Gs0N%BVijnV_doPik22GIt&SgS;9ngYI41JG58{X%)Qn!?hTyM)yt_D`lK@?jA{ zq(*}Rq4S0S%EKcDR}8C}-QAPSg3Op5@nX5%eCA|kwO^!F6SR8_xH4%br7Q@X0V?SM zig|Oq>5j_D=nAzy+cg2(@eXz6;Zmuqv5=cqFZ)7r^g_Z=J z)Epne<)PI^d_(sk$2}(Ax^+-9;A$|2C z=!4ILKntKt?gIii8w9^Rg=pFZ>92rWkoc*lqTKNc`cLkGtgK>@8`donFu*e;gOn`t z6T(oVDnegH7phA&B=LHX1UjRzePR5K zrUrWRn=Z0O-io+~RliuB*>hRY;)~D09BOOTr<%-1J_LR|T1V6)tBLYNPG76EU-#M< zz;d;qQz|1Xc>^WZv>d>aEu3a6-vc%N0>FJr(9v&hZ)xR%LeI!M4D`C+EJr|e0<$N( ze&YTFJ@Q`7R#t|R1R|3GOuBJC-Lj6r-7oAnNN3pf5B{nPzPEHg9iSFkLydEZg06AU zx!f$cmh;${XkJJdH;t5;h}Q+W$!bt=Ac{eVmuS#`Ks)rJv@B(q9{*;QeG8wH#v#K2 zMy99j&!Ut-b~j)1nr6@QQW+Yz_!7kABx5A~I+#QN7>LHbIu|(rhERNl@@_7B;0r#H zxx%{q()^)?dZK~2Z6VN(!m6OmFk8S|8?X!<4x*uWhZ`*-eu+F4)IVvlR6dGDXvtv& z{hGZ9Qp4Am8bRDnA^7UY)O}TO`h_Pfm}4W4b|xride!jfXk{RF^sH=-IIcB{hRa+q zF|c|T{!kW;Jhfv=FbAoq9Wp$h=Oa(!L_a5qs-?d|JqqSQMPNd_#ghI4CN!E!XOPz_ zFsGDrH}`zQkK0~5SoT-|EHCQfQ#trY1?J&&OX;hP>ImdD_^(6_vaDu+h$0!hJ}e@K zz7Mf+;7TSxn7a#NGet}3{I94-FT_qU0)5|W#@DH^e5788d*}O3fEdBT`)*#f`}G@gm3aNInroL`D+OlD-p&GIx5q81v$q4Yd4w@2fGMT z^C6HY^e+m4V-+kpnz`E=06H-PNX>(l|F=7ci#mn#&$AaJYFCEoTatnheuQx3vKZ~0 z5~#D!tEi}+NLJd>j5Nv4m5h~DRSO*E$Vb4MAflQ$^NNBIFHj4J-M(kM`hPV^ZP6_P 
zpYqxm8qZUX%rJ?G#Z)I9)|4Z|#}sI3m}&z-q?oDyv$6&3oANU4Cr2@zhtC`611^C|Bxl0Jn#KLc=g zM00?s1EE`;2Qj7N-PKc|a##FG`9|Hb^7eC*@@zfEN#kPF=DlR1BeHK`5Bx`eb8sFMjL}Y+0a0Xj$)vfo2!9l?o^wR9>(a zAOLd3$vrT7lG=uznGzZUNIn>Ei5}riOkWGa7%5f=SE}cai>5m@^ zbb3mHK+g9h(%37dckh(ZcfVi>?$9=W-&H#PsQ2ri%D+n>W5Iejpo{+U@>k%1^oK#m0y z7_zF#xGXaeR~uZDD3tHu@6ZHav`jp2Ai_@5uL z0);f`uMTZF03H9Dr;uOi@%;DFhQO*t{}MnEa7_K@k4u?OkQM(x#3@5Y)`KI)y~g*(aRJdr3VL!@Dk{&6+lV>Zou5Ug7iNQ z&mu+&_;Tcji7JS{5^j&P=t|g$#ptBhcyR-21910*eiN633%u0=Vp2rb2F^OEb)+LX zm13|?y4-Y_#K1@!`QLw+WGwW{^VLO4PHm9kL!BB9hC1X9!i_}O?~`v{WchAz8Sh@4 zTP%6fb%5mPfkux|LcBlMZ_o*lmEG$MBpMs#6WJ_o3}G3B(uDxj_x7ukLlCQ z5umETyX1qo^}k>5J1VLD@b@iX+yA{3P!o+#=nDx6A&#ZKC5R(}5neyX_sxOm#=u0} z??A7u-}+?I_0&x7>k`Q|C` zXd)u(eLjA0NS?d}tosLaFV_Sr@vo5~4Xz2N_MkIr+ME#VLj7Zo+C1SHeARY~Dk;f9 zPYZ1i185x3qb5}V7D|_MmV-~~)F&=Crtol5uD{d~e%BkIiRl!04dIOq2E<^8&_;@yu=N0Bco%VFFw@HdR zm?#6!KgWZ8=;7f3&+|+F_R5$z#{dU>!swaL&+h^1iBFRIk0WUoCkkDnzebO>4AkXI zc|Zyyi{R9$iziNpGe;yIpzMb683reosOe*TYXg`jPC4oV5dOhm9ryW}7SyCIm!ip{>K$d`{i84)mT? zM=XXth{cJS!|9$BU^_5&mO41A04fkWtPqoFQkRv9bn+9Z#5>r%>)f}Z4oFY!yq!Di zfGWM)AiP-(kT7q}b|tlw6oZ6R^2Dz-7uL;^ zEDkr~IYoG!vLE$>QgvjE&S5%RknEeTIbZ;^lY;{SLF-!%J!}eqK&yLMM}lQTD1r1D8YJvS7E0k;~bfvD#t2-}(ZumyM%z6x@?4LJJ*7t95LQow(gZpt0YHk-9 zkj5bX=J|xvr%%WH-3cQEiT@!PQi^C|`H9kLm-WE0Xp8#GILCdN6cIhqI_H-98e$9gu>n!n$@6n>z2 zs{d)!(cX#^*L30h7RdeB4%I+O2F`pb3NE-)2Pa-=UjYAC#eKn|jXXH0`9?WFFc`+4 z6eZkvvE%yQsvM#3Td-Bda8M18WUCoGh|;ug2<>@^a3~fRxVX6B@eP7sB70HlItVU$ zK1@+PsCUZ=%lJ8=F2DmpSDq62jaE-Iv`sAz`ed>PH>{pBn+m3C>Pmhs{=G4nkk8 z9%YO@hld?>7aB4w8eBC5D@y=j!2Z7n9h|`*OAu(l#$PseqoroZ#bAmY0yqwJ3}byN zCSab#1LUd^RV<3_m$rYi)q&@Uc3t{d4rZM3JHw4e>V^&xF!8Lmh(Ie2*Lx9Oe6nx> zHa>FkP$gFW$fM6|?HZJY(6ITYeqX9*^e&CUHFmD&>oKKfW-oKf@MPx;^meEu5g^o+ z|I5_PVo*50QOFYe!ej^^TalYxi0U;2GHME`45+Y4FQ|h!*lfFFXyxeW2l$!n_77*X z@b%Lz!C6QwcPn%1V>x|GOMSjbAqrd&sSa8I>JJy2U04|^J9`EY-q+5jPqq>)+d0oy zKh#H_Qv|d84o?RoCKP`6c3Snpi33I-h|ej+n`6l^T)}9NLRwZ5te&V_aq6sthhk5q zWRdEGj~6NaC|-b>h0dr&Lrz(;@u?~}K%A@_z`O+J9a=hFL{ATH9<%Q$_cZZkpwoc? 
z3i1%>5lJ!hf{3pJDWQQ~9C9hM>9eQ+i&Nn}#1^>!M~^0$rL>cezzU3{xl{j%NvZX3 z_9rpp$u9@gAkMAE{ZmssI~V>94918f6_tWJig2b1wD5*ru9L>J^ZIJVnn(W5_c8-_ zE%8q6EwC&gYIYbggp2_KQb0BUO9{+AblcGfd;e0(t48DLAd>^>eXdNm06^{lw7dbH zH5DWoFbzO-muUlOE*Q0nKLUcdG^#i8bP&W~3liJc0+m8nkK8M|@a_L)IcmP5Z4RPn z4p(;s^GC)&#GqIZ8~^M9GRUBoX~AOiI|cz9z=xgz&I@G#f`R*b1cM)7I!o3`K}Gf4 zz^lWvP>yVW0~yf;4zElY(y;k+{7HqQf z;qMTqP%s9<6MqZhZW+&d7}!D!)=%K~q`;L@YC4&Aqv$=A^d%;pTIKa^WjzBxiE|5c z(NI_=j<~_(r88eIn8GnSrNElSaXM+{CBKMv3I$?72pL3@c!kdlW(my9&51*`A65}q zwZ6TLhVRm{QH8IXjAa3{gpeFi)D5IfmtpJkkf|X4{vXpG4#{Otq0S)6G06BvLS5(| zho7OBiMc`U0)T@w8Q3z-*0-bSK0g{bw6=PhComQKbEzX8MX*UJj0~uNh8hv{lk#{N z1_Xsd9ECwKm#;H`4IXebL)$0F&(45kPqN5)@zl%_{(EBRmxJh4KI{4R{u0!gB)1_DrU(OeVOm z3FJ(6!B6CP1F^<}fuau@qEgQy%fb*u{|46M?;heRQasU5!Ch>|UszPcgfyde3U(zd zxrl5d9KQmZ*Md1nRBU@6&!H=E2pel`*CPTU-uw8D<%WVaxam&RbKsu?)x!c}Q-iXn zI@&cD6ilX&qanSSZn=WoG!nVvptPj@r7amR`0D5EtQdm9AQNz4L=2SBnb3BDgg;wn zGZdjcJwMS}hGPodLD+9BV3K<6IH_m%?kqU@s;q3#aiX<8$AxCST*0$A6?u>esbOhOb^DLUjeD3vGJn47J#mvs4nG99ne)# zQ+2?m%p2F@-Q3v#kZIdt@`f>NIl&H;vm8)B($UiC2i%Js0I)|CH~k*AP~2ji@x%zk zR}hTmg3-n|A9N`gupw;LZ=y=(crUxWzMTm=KzU; z1MrB9$u<;V|Kx5wt?Xc(IhF?Qv3P-?>W~4DBLX(5GrfQCZ1*f-Tso@miRXQtXozyz zQ0Rf-N(rn0@g%{(ARcv(qqaFh%j`}aouk=&sVpxqa^}K0XA*tJ>_;zk>5rG3^*RcXd3Fe$AY6d=q;iCYH@ak~Q2R z*@3desaIub-u1`kkgg}j5n(uwa;lt88C!?p^>XM|iDlD%T+~lN_-mI;2pDP?l=E>!x)BJ4d zYiK+Eid-&<+f3o%EsIeq@k^AG5jA|!iH zU$52^n8!6g5TGXgTDEPB&xMwn$9MmU-gebTC<#eX`6jZ7`r8ice@hrw0fKl~w_y`b zwBdm*)TRPPLSuhx^LIv`T4CGXl^U~eG_9<))vu8A{b(QZy?^`eQy|^8^S1P#=?Z`Q zoNHR0`1$h%j9E)FOqc!@ELjb9PjB)492KX)rrqv!qr^TK0i&+$Y=G0r>9&_G-`RWK z>J=pyPW2G0)e^Ky4og^AHI@K1mVKIsDiC0fg{~F8QRgO{Kedtt$FE(rYW>eSMHM_U zn3RFAHL?{Q9qsEy!DYW2x+}6Q=Z_Ek-L4N&6edqmh_uD5nDdMid(ow%a z?sT}nl@$}q5)qbgpZvdddw%&f+d|~w0Vzt>rS9CoNeHbZPDBzKiicg*nW6CfjW+>OZxVnm`iIlVaP#b zA`!R>5)J`>;5~EnUI+~*6(^cyaLyCW;O6AaRdnRHr+fo|UVAr=jTEAz#d%V!9J>fL z6@AS5gwZe6A0xoH1>P>WOn$vHVykeTJlxo3HK8hl5gV6pu)3u!IUXksrZaRtn?@KF zuS3x^8-TxV8t_BSC^8Kq@@z@zB))FuW3$N=e8zUDl4NxB$L<} zw5n95fC;t{s&s;^^weBs?9o?Pk#VCF_@W6>sySsVWzbrTwtxxSWWka(T1eQN((Kiha5{)1+wF)No(@C7c{!zd;l zS>GZ;v)$fbVeKm`%M0LlO+z(!9EC+OXgcl;kFoW(Jx9R3Ga};QYr~mKd}@h4klE>n3WL z6hDGzj(T(?kNbW`$=s=IGwtUYBZ~HnEx#pNUOLV77Bw+9G4Jk7W))2tmB8#L3Us$3 z!V{mRi+xOUj&nxhumWyPj#4OgMSr4R$K5Rfuiif&vgblav@8_DTZ$9@%<|b0-=$-m zI33#}YSdKpVJ^y|{aDJ6+o4&yWR6SU>S@V%MXOdg^-^lypaLfTezn~e0ivie&o%#^ zQY4jQ+R86GR$C*n?brZD3IMlzcYd#tG_WQS>Ro4T-zB0StaS;b^?3XKj1(vDZGmnt zSr`is=L9@|PfQ+V>3Af6@PxHVwxi<~rrN!CJES35=-RD2oQ}7wq{e9oNpwJVw?29a zF1F?|LEW2jYMYX_-8qZJccMA(GG{BtJBLo9@kOl>Q&y?vLldTr1ULnw^s#>8jKH^J zK5CvOLqxN8rN9eu3B5RmdDXF7Hx8>l+@}#?-x+`GrX>Q`fj(n*!Su!Pn=sn;jMEHW z3XN;ai<%CQYE&GdQRt4OcT#Va-Nc>3LyS zX45jaNa2;o&MV`;7#djAW$)5&sH2xltG|AGOWpn|0{?w#vt)1g;TGZimw|7cTaRDKZeLNC=_Q`D$$+%TOgTaDgiHS`X4LaTbo(BM7l(+Fpj7PJ3LvEU+eGto0f~uJN@euuHnjY{@3_i-H$>6Gj4hFQOUydaL^89(fw^2c9Fpw@YRJ@^2B0^TVKI(;ipw zg{47;j46gq*O*_jQIwcQ*=@Hm{h*!CW1hLBFel0YCeee3mT!)`hzlkhC`N~3c zY?RzC@<(y@M0({fmFp@Qsso)w@+t~O<@!@!>&|xCHw^R zspZCewVhn~!O@H0mqwz6@dyAxaeO9)renJGbulR|dLzg8%OtI~+_BHil~WaZXCGkJ zhKGMBBCGR`In3oq>n^rED&LsviYLmhcCtPtD^`(@F( z&ThrAx%N#Q{SmyY@uOb7=m*AQfZYd(laOsXk&8$N9*`iKG}HZY0e3BrRX9Cse&L$4 z@(0@pVXf%$Ff+3N!hr0ehTRuI=mN3+1w(9*4o!Drze_kR%5CdP<`&z{@3{j5iZ7q0 zH96=#*8U{V`aLCY5K~BA(}AY3v_8Ole8&llmp*1RhlUq=@O7iA~So=$~Rq8?U^Cyuy?wC_I2X;kyw z8mENXK_QFEA~PGJv#Hr-;CbYQ4Nhw`Qt9ng)af#1;aM=xiFEXxq6lDixu(g1JxunHJjo5)JdIu|$Tfv+h zIQ;H5%A^N-hVA1sz><82;|3g^vr_J$7tK*p@Fhon@s}1%q87G3k++Xju z+%+niFtH82Q)TfkT*EacMe|83V=^i-aok}dqE-1!lE@H>D#+Y24W9yi+jLcWu0hh#AOpVQO4jEj_{3rEpoA_ ziy-%4ZyZj5-Hzc;&YMz`C+E$=`|XwSE!EWSsDbV6xPHJg2YQ+ny|F{ 
z{ILN`BPKDreW*27(Iu-F=8rz$yPqV08cf5V)_?y6K=`BnZbPVN1^)sXl;j0nhzgW~ zVE@K|DVWyWjf9ATJEpP4w-3hB+{hexrSSV{KfSK`{k04k33f=Zv+q;T1@E91c}%*- z=C$gfQ0SbV>rdAPeF@ZAFKMJW=NZWq267A@+8JE7VHbf-UTVBWS`CWGL>{N+nc#1( z4zn!3_$%?k5yyJmjWer>HKj1Qi17S*eKDzvM5Jx(x}=-ER9a9?xDwZ!HLxrhQ*>rj z?%MTN*yydcWI)S>3`vWK6VDBe7ZUQT7h|d(q#Zeu(zq`8xK@MA(nlypukMvmc?-7} z1x%(z!X`tJ`;OEcWFQBZWXYv_FdmD-bhddNbF>GiN-M(C6GG$odZdA|Qyf z`e}AfzsG4j!Bv&-|6G=g@)Say3BPywomk28DM;BPG{k=}FD^{dx^=BX54oAT^XK&j z`Y37g7*~dH_yBWLyAE@$DMd@48=Trz2`^qW+#aaYaxvC2$bG@%nS8`o!YCAomLsOb z@Uu6P7(rcnl2sgFYFqfmgLUx_q?-h!&{2tEzZX?dmK@^xydIWG%E(a&P9CZ0s*caQ zKcTzzwL*XF9YM4ejB5wiA{$Ti0z@z~_crInsUgwN-6vyF_B?hsGcmtlHC_-O*BaLN zO6v1LLUHA!>sxCGw1mt; z@O*PvITr)xbethNpN5bW=mmBTP34$NnD2;Gx52%;-|twfSrh-^ikD-4PM0~nti;V) z=!A`9f1}L6?Gjk6X02WKHPQ(@uq8{m!`buO1tT3tote*pncf*XX|qn{2oJ*ir&h?* ztyX~UF`8eF7qsFn39{{?NrjE?rnVUr`)D}(x5_;bbB95(Aw$NAbe9OY11Er`KyTwW zD?&?mz$8q?feWVvj; zo}aOp)LN{h&?8IcE4@)>kVHN|(rb94hvieweIxqgS)^PHY14RDlG%>t<%%!4c)Es) zRVcGc7IdK6r*V&sCnHA^H`oe6=JeI!xBA97hvZmd+ntd?MlHr+-2t(tQJ3Qk7-7{- z*k`Co_wwmqeGdCD0#jA!7%liv2J`9Bz^DE@*cu+PQejs`hvD0sy$h$T`Z+s1*Dw92>uz-O*d~=LGsK}t;E~=&85t23N5dy$s3a-<`~c^{`Q$~9CRT_0I4tMmnNEpY-^0Qcd3)5K{jD7x|+HvNO&R@D@yse_wcISwgX)>7Kq574x{&pwv z>2vNCP@BBDcT}~8-!LI1&7;eGFo&MPtLG04XtLiF9*kvZUBW=?VJsT~YfXWa#6g;a zA&J}bsr})oMOBys=C^pPrFH|x9G~;)eM1gKxmbxwC#UOjvXoGKZI;j1PWY|B+={YQ z9V-31Qr!BkC|GpTDI3s$W~knOL=7Xq3qE}fPFHr!RG%6oe z7hm$^LIlhSdC21xnuB`@TiyjX`fKo77cCw_Oa;TSh9HPDFrcmwap3b$dSX&S8lf3< z99v=NnH|e!%(~A-UXc|@ISTN-n@;j^YzVc8D1FVUnyV{-_! zm%aIssK*wYhs^gK|0+qojbW<{wG%tI(ieE0pCN`d*jRGhn?r@e(f0Vs;&_3335vpp z)`X0#v}Dk!yQ|IkXriKMcQ+!=n>nP+@Us!Q5^uGO;W(GgM70cTe3&#sy_=|SJ}v-< zB+q)X-mLu`ksIj0ySx!A<<3}JB<2&;d~_!JKU#oKdjc>2?#{l<{@}YI&jG#`B(RUq zl)~n&-FF==*3_}`R{BV~CSC^Vmu)EY5jS^C)kDVt-f*){V3RymPnN|C)Rf!o`zTvM zLM0!fu>-Mz^xNW9$7)B}50WVaXZpUqU%=4*ay{B`BTLLk;6nw`Pb-X54UIT$Ew(Lt z!Agh1oMTlFj?F9%YNi}Ve7B?onWUEn=)mDN6t7_dCp{vyyQq=*Ur}pk&7j^5CyIjV z+GH>|$e#uf;Ne}YEQbG#2Oz#_7zYn1Lq$hC_I6qgB~uHu%I%koGECH01OtQoXSbCm z4sIbw@QAb85_w)tUxHJ$%(OW=+`ujHX>qP76<-+295T?L=M_*6jZoh+8mq#12jgGs zksZfym}WNtd2&rWUmc@Id*lX8U@M_@%(pu#063>mKc}j&q71@^M&n%g6Fsi0joNM% zfNhLcA9PU*3V6iW@0bawSShm$dhoA62Rc^c9_&9(f-0^`SL2X^g>GNFnK3x#vVJkck*SDx61i0( z8KG2O(nioQ$>c4v3Y?o}F}#jjr!Ye`zlEe^#m+N#u3) zA6kV2K1W$S+v4G<0p^i21O$q0ZPSsv@ zZhY5>_%Jl7lfdB^1fvevBuW5q{d1rA`RX%n)(;Z~7|hqu+3rL+K(|w2G``#J0X0&1 zB1ccs)cHkxdwx+RgW${ zxIIC=v!O}4>15`OrOu~q!RAoHL1%3hcG91`(UIstQ-j8-?-+|sQ z_TX_g^q@#f%?Z+~v|Wo`Y&%ln9FKM-^Gz7WHU@cP6fmR37%K6AATGR{wYRvq6K+t4 zy_gX}{okKa$_h$EMmKgmXGn)astM-+K-}y*;Txx3pJp#4rDHOfldpE2rr{Eg41od{ z!Q3Rd43^J~4yChgcv*DEXQ=wkIfad6k3wN>TmuLk8!2>3&Z}=7N1Naze*{Sey}pE1wgjcjeMk@;UI|-cFgT|FBN3L5IndvQNUa~$`3-_ z8Y2*>w0!5s_EAYRtpNn4;&O|{4V%Dsq7Fn-gS80Mb*AmAU_g?frzY;Z(9s=s0K}Fe zSe@4UjcpuBy40wo^Njp|^Q|~6m;@xZLZe%(WKmsH6_7**W0o>N!WW`-mJ0$;rYt$1 z!m;Nek~&8dW2-x#b`$qV?z5!Yi2Rh zvkql0#QPtk9b}+I>MA&1UMQjAtJi<_p2g{kb6YQVo^}hTObXc7-X1>lxalA`)}eQY zyq&mGYl&iV)nt6WbcJ?Ah9cf}hTm&5DmPHl@eTn>M|j_~St0IqPcuk^aHI9lK)s#X8X8W^&!0deCY=GVM%_30%W%$?$a2+swWAnJ z&FXbWjhp7po2K@^`+lQ#H%09Q&pG%L26S-#Tpllr-@P~X!3KB z;{tlG`F$`~6+hmsuxXquh%eN|T=V^K~~jb>AQyvE*g33_{FSSqM8 z`k7jR!E6YJKS-hmchQGp6p8$c1>zw0RVvrRp>LD#X??dzv-eFVbD;B5R{Z_xg+kR} zB~-|?9q?og>;)PGJ*BI>&t5W)vS25zhGP(0-QWf6X4~<42}u^MN#Q$AJ^6OJ?Q^(f2LyJf>p^t6KrN zxHQEGGNAJA-7hm9Qouhw==1%3Ug=FMd88txzA%mHrv<;XU-p24!eoCtBN`i-s4{Y) zPQzIYd;l71OUIbyeS0u9gbA#VRF_89Vu9Ie&lrK;H{v4tmWoGFidI#{BPSQSrp|!$ zuYaCxr}(2s8DFT-^Ybodl-b^_y>xSbb3)W;{m)MjD+t#w7-CI+^yo_$&Gco-wkJJn zBb}QdB?{R3U(J2@Ki2L4x0WtS6BU(~nUR$!q(b(t>;^g|DkIstG-ZWLCMlINjJS 
z$1G7~L|7T%bCoGWE)W05Se!{|MZGTE4+M?Q4$l?f>T)}-4^vqj zYMVdx4OElc3l!r%JVgxmOe){ zB37FF?mI=={xmdws0a)>l!YYYN|IZwXW#}Q5zmwA9y5{i)z+;01^gN`jj8c+1=AAK z;Zm2H1utA{50tyW7pS?N^kx7Ho#c@>k)G8HUX-(8qO*GAGqZlgql(rLE?@a7&>H;( z^<3Su?6xzp-){DpoNA1lgi$_I)-rG4zO{dj^At1?Ryxe7Kn}J02KDWo9UeG620i~N zS$PvpxID1}L}8?Q9)GvlMc>g&DYuHJQ9L;nJj_G!^d-h)<)Go7df+d;K$yTy(&jFa z%-Ja|IZ-TPB$M6?%m4FY6XbI&sow;sBED zO!r=U9T|zFY6+#7oiVYF6qag}II%3d22~Mev#MX;-#vU@PR$<#h2zuU359-zy;-L& zSVf2yDX2C=7X-~&FMkQM>BU{ftWxrtSCc7fdaBvAnQ>BSZB=OD{P3b<-epaN`MipJ zX|R^}PR4e(cI1B~KRe>=Cpok}4qAbT`&tJV8m;YHAVt*$`cE!?Y!Ex>=12u$94k}T z4FDoG{ZZ5-{xh~q$sQP!bafqXXKN`f;Eue+;&5dt!|OC=C@8$yPuq7|ph@@jh7axM zlmm1wzc{esHhuqI9p2lKa^k%Ivs(`jDj28!Gmvu8_g=<5KFb!2&M{g3kGBI8K-1z* zshzfsFW`N&YA|e2pSrZFZNIB9zyC8O38qO*Sj6l+OBcGU_1~5?hi<@iZp10BhPQ-V zAXHjX88c*Fq=z*QdMEyQh9PO@*&v_T($dU7cUq?Yj^1#=5qq)A$n@rx&gdw)-l8lW zm@fB?vi#z9@)JPOAC(bmU^Hs;Vx42B-TCGZgKLsNKH}>0c*&9_>dWe}7sdzMASv0x zGB(gF-tb1-fx~S+<;x4c>@!JFQ*2-{s+va2nLE1o5UeE5Pi(zc+KUZJ)qJdq5mw2* z)IyETRvbefk}Qx+*oX|f)2DX`O*`oL=o$sg+U@}kdMmxsLg)DL;~CK+2Apg!ovhVx z>Ggmsm1=vy5n>yf2W)ZLhy84tNf&7f&sFAx=V>S}(&^Xh?( z`p-U>i;jp(NqpH_fH;z}eyJftRXa(DY;(d$S_NYAM7~@dtHgmjgt0u4W~2d3lEk4Z zP+=BHgp1lB1PH9Rn~cQ zPzl-vqQaGMt~4`EW}WpI~IG zEL$XQkaH2>Q4G$j>h2mEQb8RB&o^3awKb5W-}GOz+n+kG!w0^DQaK<^(n0kaTi?se zuk#}WRb07pb*DyCE@Sd7_4frk?3O^&sjp}7&TnAf&1m67K!XiA9UQTu-9_Azf=2zW z4{JVSdARyBaURXR5Y-cj26I6~6N1!B-6tNoEC)bicHvXUvFe~b;>?)ToPOh29QZHy zzesYwzq>7^vNGhyI?jzYI+5079$WPWCd|_lTss7E`9?q`6+drZq(GylA0#Azjkp;WC zWM5@|Sy%ZiV^hf4^2AOLrn!HAt?m2K;;-@mQZ-eqF@pC4xl|1t-Tu(_vXE<&#OQ8` z2S8d?wtvJn8AG~)pDbNb1>lpY0RZrf*Ry4u8vN|b7hm0g5|=^2JH}6b4=Z$x z_kOwkqas3?9EM2Y909D$cnu!fCg&J_^7;#q7olC8c#}uz)Oy>}9XI8(5x4a_04B8G5#+2CGQ$p@T)MzFbiaMvx_5fw=U!E>^U2la!HnWQg;PJyQB;%i^em%R zsnKpfniAr$55T#m^iKYh*xsw`TgDhs?bbbYc))PDwtv&t!0lbK>G<5PxxC5Dg|@lR zFKucAUN2_|!byKYC~k-G!&TCoUOCK8Ru-|P!CdAY3`)?OJFA?il=Nm_aa=$Xsr0|y#;faldKKTS0#dsN)IZTA87LaWm8B#n z#?DL*lJWA5-b;}siE=lW*ZvCuA`BEW6tRvAtsaK7TM%IpPJrMLC^l<9wJ8N-^%e&% zVK|L?y?l~Q$wE>V*}6K_4?Pn2%y>y!L4+kjtxuNtyxQ~|D!faZ+$4j;e}g4CF0D zbmkkmT@hZse<_}7D7ZftXRI3a+&dny+CK!GZB!dDmg0VB1D{CH_r)rXt?vuow-q`W z4^L+*A&KLI7nDe;$ubVL&)eguQif=A0LANb!zdr;OLLjK=GB1qaeV&oe^z6j+}5-eIGkJ@8yPo8 z9@Cd=Xt#N;c28^@e6+pdKr;RHjhN{xg7cu2;G|e|)ITYI1_ct z3GujeRI6-hdNw}*XG%xgoJiBj*TfJCt)QQeH*eo|5E+Fq>d)I)pq|0o{AbpTZea&$ zLr;c40jWbU=)mIs&yf%leOR7uRT-W{pQJy}L+^Vn=Jn(A7W%sVwRel0`+>==g;&OJ zkvH@X+ub+GN61)+q0pE&l5f}ztHly)C4HNZz! 
zaa?n~Rule06xR|YTr*{O;`9~Kn#sb&X;}Ba--+~@a*LoH3QPJkE~q?9WN*V|KxW#= z2irdw=`>8+JHl5mq!V6O_7cRDC+WXah>1Qh#2xGw1L=@HnGA+m)wfDFuh70f8rVvI zlF+bF`)JN`&gBgD{%g6kbc&SkEAy@`?uP}UDtIw0e;(t5yJWZQ(LWym@lcuU?Tn0c z%+6t(Um!~+b9y&@)uR7ztS>A2`q~6OOmg{CF7?HyDGL7e>++!VBPSj;{9Gm@DwiCZ z2IY7nea(Pys-Op85i6_J_3JL5M(Y+5GJfriAU(*p$~pA;;Rh)!t!xDya#GOY(Wm)8 zz)a>f5c5}9mf!@^`!n7+0x{kYPSD>HHTd0`=2^KpXZpwD-69v`D8!bpZh6aEyZvX` z01jz(H~@_n{nufe4?1kzvVL&_P~(f!7wkiMXSw=y5K~NtkRlvdSZZqPc)H1`J<< z``~U&y)uII5z*y!M#TlLbg-0ked#cA~GzF=Q)BHk|0kUSI9H zjBpv(PQ7UFCPAA)&<9}(2wZ&6SLX&b#Iiz@;< z1}9tct}YI6{l^D0d{6J_aJWSY<$*`VFFQ6oJu8Tno~j@yBhc9X z6CJoHHSbkIo%Y`Y@uxH72%vgZJ3F7}Bg|(G(IyOypep7wiw;Y^)K-Y(kr_^9aV*Zd zJURIw%PLOF?_b~3C%+(l%CJ?Q4DjWWC!_r7Hkr5ATF>=9YO24}02o!=KPJw%pxgT7 zZXd4mv7T74szu|D;5U0QU*tkaU@iBRd_r$xiK3AEYqDUTo*YgY{(vw&J2^ccDZw67x2|GA0*u&{HV1$>87x*gNA{L_Y(pF90BLt1mQjh|nR>OX=UAn`gU)O;UuEJ*O0T^F0$rJ~( zq=4B;Jd`)}#^UIxZ~jEsO)@m!R;FM5`PK5ODIj#m`DT}rThxy(fn+zMG!Ac;gN~l) zm4TRQfx6~KdFb29u2a__2q}rdtO*IlII0OB%Q(Sx+%)eh_j%rKr3YJ}E1!8xlL?9c z`Y(`XK6jG?L^_Od>Y1q@(uW0Od$%kRhiCTC6p_u{Q?v}=5cLII8p#q1!Y31YObt1K z<9N^)cFVHMQN|M>8T!rX(Li~AC^{h*P9QeWkA?<1#kQQ{;!4AATB#^ekSR+@M;e!c zG-OL7#@oYsN034=Jh=&hMKxd=>xt(LBzq;mC2Sk%*%{fd1Q*d{U#@7K+0ttQF6KuN zqa^xG*q|z>fQioa8j|}bUnB+!DI!@xk4I%pCaKxR!*=opb@hBZ-oK$vW&iJ%T$g)q zm2O7}$|GY6-p^Z&I9I-3-}1cWUQetHBTnR)BO^KU6*ZtnQN5mIJ^PeWS#MkswXih3N^0o>*^YPpb=SfYV)V$-)0I@ zsjW{=ikRImRJxQUjA^ob+~Uw(ZZInsdJgHfVfXaMY%Zc{B6TGIRLS41q%8*}iAWR~ zD7#1=OKJ(y5Whbx#lq&CP4xuaI+5aTYYN=wb<^*++kQ6&+Rbgi`OkpZ&Fjy>5tNSr zuBb(VL5f!D5hNKss+qiK9XJ#PE9tX>(@zj#m zR8UN;MY|qji1|S0k=X%%A=sFG^1O(Q(@5w*_nrHH3^3BZr9duRCQ1xU4K;A8q~kD;zN%=d~zi3jvRbaFCQev`^zE#yQ*N)L!49*dhNy21YU*smCc zWsDK-mpIY0pRFK?^Gpk@FccSJoax|)un|lSxvXtnCMLt%d?`I}h{eZKtKnXvAYR=I zaR2Zr2bgg$rcQ+U#AnQ@BvmKImv-B^0eDJ)mby)9l>2?@P;zEDT3H^zAugQCll>sU2M4@W(Qgp#B-0>A8xm=9S=wsi$8Hv4H&KVkAKz4)WycdmSCdrtl#SH znr{)2`G9(@YR+v^u}cWTgD?&*Yzm*& z)r6eRDyk@Jf!l>x#x|yqmcPF;h|qxH@U5Vrb=36=aZ_na&YTz!3}NBlxf7A11*a99 zBjvG59C{(4&l(dy0krYQ^>2{7D|Bl{+i33&Qn>W3W`20VxqBi?De5Cm!f}Xtw#I|JgY{_xnLMTEyd%k@N{kYZwsuY6I@j<+AKa+j)62H62 zs}hHHNU9)iO%{V86n~uH-Dc2{iT<1N15GPf&-D4)ju=W zqkq4($$}g03b-Y(^5rsn!&3Gca{-#q4RW|{yZ{Q+%B8w#7xE*0106*`YNQDfG*?nR z(kcI!WdF_Qs3EvqoA=L7y@U!gv=9>TP(kcte7&fcA%@6}iyMrt8eH$k!U1_YfubK?G(iVs z-v;}pY#S{AW#Ubr^}lHMwIxZsSUB8QRk)1UOJE|5a0)oTorpIv|1yr>*0mHmJp|_( zzTT@js;h`8X0wF}n%G|5t_dMDB&7lWvq2PqEEbW-QGtoDo2!*YfVG;IH>2cbV z8!wrN9ZdLFM(O&QgdfkOIW`D%mU;a{T``m z1+?LVpEeJz1t1>0BnKNSr%N~KHjC>rS(mY6nSnP~v8x7rtuyGSz%%d}T-`7Yw_-6I zb|95M_LA^p({RQcFnN+?g6At=xarS=)y&f^4Dm_=;hp=v2>UH80GOTnJ#N5Z3+kL> zO=SjHUODyl{MLFK2vh~TK((AChd(6`>VApeTbX$U<$;yguFJYIrxY??QCUh|^Yh@H z($y?XZ1H#Dh2?{cI-?c6$0MAe54n`Jz7p_e{Geu+YxaYDkuzcS_1_HU<4q>=c{u!) 
z?3XvfqN*N{k92?P27lvI-GX5_5*5dy9^HSu)6HvE!W|jz@zjln1W!qDWap+wBHTGB zaFOeE`a6`xW6xhs>ipZsCke3`ywrVZu*$E*rjkaHK@%N24$hL~42Vjk-hMi@sbi)L zop~ed6VxMXdTicCuXV)Ujod2T&^ea8H>ZV(W#=eO*mz z_ARNVd4#R9Ye7s((n^zKVLt88A*w17?Zf|;;od&UAQJ!`>e95)=zBk$0aYlbSI(N% zQg8ULiA(D;&%vD&bCr^{YuChQg*W;Rvc9lqy zc)FEJN-F=Q?7{QSek7IO@k1UXp9hW`Vhxns*O=yzMHTz0vHRcMXexg8+*!pkklgX@ z&c*6e=4C~?UlU)rmazv^Z3k*ZS7~Ob+udf~4X!2Dc^mdV4$_j@wqPA?>Mv#GE#33b zoNXlMa9M(gBT8TrtdTzNYZ^YdZi{k4nbTB_rCk$bEo#^zS4O?%sB(IVJP%L~p{W$^zs1>V>i9k|N#)(}hLe`Mjqw5@nW zZ%KlpFzk2-cvp;t@vS}@FSPD}R8#j^pVHS~-ATXQy4zw$@@3qrA#kSpmB&SZR15(8 zUFOeDR5`>9%YV&p36bZh?9U>v0&@uotum2ULRF+Fy>|r8#C^R}wZ41=PGZNwF=@JN zLE_r=>(|Stpswa#c2%b~?%2n4cmx`pw!uW_(5eR$#oTCFXzunfu}z;EUwNMMDIJ@b zuyTDj`bGas)tkAMC285w?a?a$B;1e>?8395s*uc^|9N_y<=rs;3>r-I;(iNN>!Drx zm3FY-v8^q$W0HuVid{@@Qmeo)>d&U05<9JHYFIITE5Ps09+XljcWQvV!mSZXDZGJh zVpp0xh376-Uy-MIRZ1|7)DFV}^B}1nfbFrSJb1p~j&tZod89scrO;Y0LA0x-z@fgq zKh$NcNLuc3w>&wQP_&*c?V^d-XuloPAnH|~T9r~!Irn@@CRfp!5v0bkV@g;o{tM5O zMcAT}#;&?|y!%+6M|&>55dXS^w!-?omoV{s84Aj>cZf&H(>BS!f=bEAJ@v-!AfVej z@k?;bUHtVtZAv=3^Ef2&hom5DPtWRD$;1Ut1UGhUmlJg{C0c@<3K-KO97@x8<%760 z;T{Mn1L4b!k~ekhJYNPl>-0e0Yp3>EZm$S^bhPa25~^GFps&XS8Pz{UwAd{8tdb9R zaDb{%uNW3ON@r`rVQgiHs;L*%Wb+6P(v0it3Be)W1rF9GYYAoaLq&5IO7rZVS5pqB zel1gjr#!gV79o(95_jbb$@U_;h|MBL1k!FzRfQN_Uoc<4ZzBE?)Vmc?;t%-izrM>C z{XLq1JYb(SSB2!Ak}e$MSmmTEX)udEJhb^|fQ_&%Ttpdc0V}Z8(!Z#61fQo=Cd1ar z*X_P$wCDlNJUH)RUW3PoGN-LpgRoJdfG3+CHOs_i{{dueeA1IJ#qG*4Yn?k}XNcwj zBOK`YEf}mL z-2*|#mOn4kdPWfldI^mc^Iv}jnNWo`1*#TZ`@SoC7Qh&Lv>L=mAV+sf?@~9WcKsk1 zuq^_k3=BbMaFufGhwI4nOR5#o>$0QHZ2{hni}TG>>eAqp2%U(~$FFMm39c1#<7f7( zLF@_i6cDjpYmWNNE{}*z7R;kxnL&5Ws)}^Pajw+K$`Ay8g9lh^HFo3u_Q|k)^Blf5 zQ)dgu0%`6+ZI)m7ol!lx%qx8YFI0^RB*McTqEX~fQY8i5fERFB-h+gnh|-Q6Il{W& zhcQ3)_P*%bu{30SJ&=OMYA#cG*r)V?Vj?-#x&B~qH$oV4s7vp)4LCwmT`F2$kd4%%W0P zdBZmlDt58wg_8NEGrS=^i4?;o)M=7@3{(*IM~7FI5U+Vz=YC{J`})&PW&Lf;!}iH_ zx|3g8T1JI29r{JqE$jIkiPy8eF2`%g(}$J>mO$pkf;Zoj__XR;`22jm*vTo&v1~D- z+}P;IImSqMcKbP0Df{{f-X_#RE84l*-op;eG^5SLGSrPW=It9}l5BE(1!PtK0cA$z=T9J*eJ zHE9JrQor!efxmLDRktg(tRl>}Hvgni%bkOsffoSRpo)#); zfig+TezSq$+sO_jYv>ygF;otv2x$mHa(DMIrsyHlf?VIfRl3Ye6-Sle0?YAY^S_3F#r6OJKrO$+v>vf2VUI*lS3-M6cE+Zbm5c@X`uAaQztV3GXZ%djz90jz z|NdWtnJu>X#2uLp4MRfN+1c}6aOi)9rKfc_M7Cq;k;BCKWjYOqFCV7nunip?_Fg@^ z{s6#P;9*}l!>hxrbH=^k1e^5o20l7qtK1Px8fnmgq|k5zq6m_+ z0p9HdXuWmYR!$FDNe&jk17h8!pdo$RquD?m@mT8xKkqg`%TJ)oA%gGH0=bBlk=f)T z!cQM1_{IL~#>yvCLez)P!)pfOON#FfR-YPG@&vXcrkio)M^eapdo$=npyh$&l%Fjh z(JV#)k_>wpg~8K5wZ|q8U|aRDs(%=KNHPWy2Fg$^O@~+XNQ%!k+PP~ zMYru3J23R`mwsXOhV$wfdJY{TB9!uGuuXlK?i>nfq+PZ zAbu(VkPZ?`2XcN_}Th@BgDD-ML1$G|{ew9M&? 
zOzQ1fBAyKUiN+~`Bio@oVqCGZUVic)rMMWD{{44c+q5>zD-6@%L|4PzR|0)OyF#7i z(^_3t&LF*RxO;f>j&;;HC`k{eA2Q}NCWg|^W~Cp^rIK@kIv8H~2dgtaJp!pv10_tv zdEPd1K#qgzdf1u!>)-_TUkEN_vJ;yBi{ByFxnC2xYPKm@#i{Y(ke9a5pNChQe5jP3 zWer9Xi4weSgD{BdRcF6QB|mQKRXPyOCqKSp!4t+5>f=IL0(4-#m|QL+$_&nfni9IR zgns=KWrz@rfnf^~j!Q0Y&U4c`q4gDQB-*oF5cunTPKGZPpnp}gm| z72{{UMyFYbztldhOI5fYl^d{p_Af6X6I7Quss@22c}<;&Jm;TQb$dUFP=4jW*1SV6 z3?5IJ2S`xFn)swGs)Ah8LJ%`xR=88MN`-j1O1+@hT^!qU&_TkEPjEsAb&g_t5+J^R+)`k~EgZ&UMm1G`P zGbm8^mq>0wEkGb(^l~e0Rf6OR_xBQo*6+dICcTL)I90j=z5|TUNKA!%F2~lNpZhi= zOFXg`9h~YR);t<3R^CL>&X24B9#d91;YWt-92*2wmo%&!uP`93X{v$=W5}?>2UT9w z0nfjoJG1Z)!>U2Ig~QO_c5tloE4V_^sy+i_^T@X!*6sI3W|`h?H-fXRVe?2|@m}GS z`|zk!^a&&1urYhp)XbC~y6OzT$Ne91>R7XRlU~-nzPc(0SPVWsU64kPnoig6-@gYJ zOrR`>2I9E=Lm&%^{e`fdk%lN}A;b$RiAU%@V4oFhSF!sa5bt3Lwtfy=y>~UlS@0Wb z0;~uwbAvVrnQS$ZF(UH!Z+viBjz4E;rxE~Vq&8s^94s#{)fcwD{wEZsn;-@A2c@(( ztqRo+FgK_`JNYhFTHc@ZfkS1%e(8K%Rt#jaRZ%e7TW@=K&a)Ij2?*0wAD>e|=syj< zhu4&$YQy5Vg<#wOW*KKz00K)y$c*qa=}2B*(My<(5C>j~o6sIR%(T<$xmS8gL|!=- z@?w1gicx6rY>7s48@%@rT-)e-5Kk43MEKA}0GglY5tN#ED+j?50_tv*kQtUJANMtJ z(GImH=8~2;L}mkt7*YP&%YsUYRpQlih)0Vef*hfTN>$JHdc{j!6`PQ5rMwGPRC_h_ z`%>d29-s4d`zfz?WYA<^6r$TgE;?lmI4}Kg!I(1%j~^DHkR=XQwDpqvAIUqn#C6B!)m zvl}io04}KtGv*Mk0A)m9h@D>Y3rh9M9m)4wc6)%SCb%q`a_J0NOdth-KE2?L!t=;v z4tO8ThG#gDrLU1!w8QxZez~+CzHAVafux%*bP_$dHINLw3_5E3{95RD7SSX(AvoNFPiltc##h7S$k|_pmQh7IYYr8DG;BhpU_=2 zJyvwE<|BT9%11IJI;8uY0tY|^V9J_y5PjTJpgtJP^F+x^mzrRm=Fk_(lXV)#{?Dh@ z+^`<+y!L2vbFY`@QpAg%$=)K;!m6h{N!$_TI+}UyBJj8Ff@VCzzBZ1nk-ql$Yo<#W zUduosSs~k|jO*9`knfS4bWo9*G#N&FoTTK~Fb56dIq;o> zhJiT&_-~|1bVly*s;ZYZCmTbuZbK|*xgv!~<}h@j=|VRNR*mx;COiR8QtDSZLiE}G zcrIGbQ+C58{&*kcd77e^QNOh0L*PLm0B!5I)hJ_i@@j0i#52{-X23sraDN zrl|THu0MyegDwlgx9D7{IO!-#GsJQjxr>IAOQ5D%{+R3#k?zDm~)-s|9|}crOTEP_$By_ zU<@*Nwlop)8Q!{dii`YF`!J5q9yGzyO-Zf0xWfi~RB=DOQ$1ZvNqz)v6o?ugPV(c7 zbU`lU)@*6J$^ZE84>|?P#1Y^#4@sJcqE5h!(2Rd{bM!w|9a+sK_sBl2yCyDQSps_J zKavo-JdOSni~o3{NY8uz_d7;=7PSoj`Og23KQQ0+cpK z@b1BcP{h}16(U5y*NZ)LX$DZk4RrYjlofYL#(}S}V?<$rFST7{Z(xl7=VNs1D$K{g SKHHOTt0-z7OFeS>@Bah0Monh` literal 0 HcmV?d00001 diff --git a/docs/_static/img/sb3_loop.png b/docs/_static/img/sb3_loop.png new file mode 100644 index 0000000000000000000000000000000000000000..a0bdb3e6f78100234874f11d056e7c5c50aabe5f GIT binary patch literal 168611 zcmeFZ`9IX{`#w&acHI@CqJ``fNp>x=M;QB1+4qorEtNY$*~U)UXY9LSXhXKFL-w*8 zTh_4+-}9OKe!buC*XLjO`r&@aJX^LTT{SAI z{gYHw`=SmWfWKIU@xnhYP|4rDrQsSkGccid+9HZf*_1lN^fu*S2FKsj4>X$oszd(M zk(7Mq?X`!}=jmjcbd;jLOioT-6+KfMR~WT7=mOUT@|@)6%$%cLS;*KYyZi??1mM?cL-4@1LJaOVj@IyYKOXlhptG-bzg^@$W}OFZ}UI zsh6*JSRXbKiFQ`ovv-eE?xEdJtTDkfw`%lNmz)_olbV;5r|qCtFz58ov>N)^@gistGC#K-K-@pf;>OiIP#`Wz`fIa!0P#}`)Tpl-S{g# zx7EloyPf#vzO=yV8d6zp(1)LFFp&cb6|AWjht;oiwpfS@u zG_g(ZBJ6^m#_A-e8Jn0!cdl^mgdLrcQY9O$X7?3(gw#(vOn$sLV6|trgDQGD`c|5* zL*cjf9;h!EHF4OUUkW8(R3Wzx&|iKP(n_h>P_vv>IDF(VGD$g3HE%*5WOIvg>)noh zLGtdeGNGN9y6&btoL)C=aEgkGu6dvtE$TeU(UzQS(%aHuZ2MXNP}V7P`}oPWHUX<~ zp6KiLq03VPvU~RO@FU0TXP*YVpbxJP0(k-QyWai0x+vhP5D`j!eZ4#}z=4i*Qg zZ<{XjY{KkXgVi4V^Fss-Z8^*6$Yx0B&Nah=H#+T?vR=;dcVXO@NI~NZf5)H0>7m1} z+;7jCo|~;0o|sPG%-^IZX6tD!xWDlCpGGzcU8>{6_X0CAvm1tTe?d6^7Uckaw}k7@ zq{s2o-8qdd>fZGHD*OZFV=zWvqdY6=d05W98^_*Wu4aDsyz;pNZBOAFozRGAw&+|z zwvk(}^vj>hv~SCWNmQ(Ui$Pf-FaDa5<+2~m@yOr5pB%C|$5m|EwZXMfkEwaSe&l+H z$t{CtM;XQMV(^w-85Rq}pLzt%)-U1wUWgpJlKVw(q|a`Zu$OABr1LlK;wE{v)HT^( zk4`wyC`&hA!I5I7k{~}M>E$$}f-K`kp`!G>^^-`pK#X~bYJPgEydqnXe1@yLE6Mve z*V**>I96MQxJvg0g_R$sRo_o*F=(MJj~qJwE?G9%vT9C!B#q>~RmGOWm$Nv!a4M|( z;Z79=-@QM{dQoaTwleXou%-U{KCRfjGR^Pq$Uc?QG*qAEh^-A`Qmjor8kk!u6qcrv 
zC`#yDy7K7$^SR9z(i&`MJR=?g}-R`sK_Hlco?OaO0+!_6$|VMK9xdsFx14@&svJ9|t5{#0%~npC z2yI!)X=_J)t*Naa3&fyW;u$+kC#!~#sarO}_;7D9JYSaMLB~^Musb_nwZl}iYVtyN1aNl?A;K7ARYdU zgvXUIF_`I3<~<)$w*=-HwqD5|!%V0oD#E6>R_@`tKO`~*({y{@*Pt={2WP~M(VV8@JDD+B zLT(E)FYB$`x5y)7^f9y$w-!ooz}wH&A&Ek|1~$6g69Ku9znK@uiDx-^IKt0%+kN|b z=v2*@f#jx)dTlz1?HKZj>Nl%4v1(kSgg7rb=7OiO7~&rhC}X(HyzMdcKE zxl;qvob}yxTF=iOP+S^suIY=da3!+xD{G4HzvFS1a{Z9SCBFklLnR5Da)oKlTs9d4 zWe<$j-)?>y?YA5%AaUYz_JReE9NsWc;^mElnB#O_k2r{lv6He zk$na$uY9PwMfxZNztE){vV)cGm$mrN?=JfXf9n;pGWpT^XiVUB5p$ojz4N=a?r zdM>9E#TY{bjB5($+VLh<1BMKGS!jZ0K7RydDM#R9Qo6Htk8=khs-~GlFn_;My1%QM8MRW^O3#Q$ctTT znk#2!hf0U$oYpnS?~r4fKNKwA9?IG*Hq48D0n2i$x5zSibA65j|H9?Lxnz5zx+zT+ z<6JorkMS(xe2q)byzzVjLBRFrPlbwE)8?4^plRW(-tmQ@kVf*ap=s|y?GZRgSBk5> z*LBG;Rc!l~JCEB?7wgBRVJolZU+;KYi#mkpE#RD5FB<{IRuAqgW2bf5n?c%L5NLV#ao((%GOzkwnN9jk- zCf9iV&1g0>h~>uQUT54HUJt}JX6qE!K1KsDV1QUy(Wx;|f!DClR{ND3M#wTOHL$0? z6QX(l(=k7~knKKF`%6U=%l989N?;`Nw)b7gg6<&aE1usGHR;9BKjKBUjsvCFslK5pG-O_? zA*@bi6v7l>xDT%71qYTPx11E>QBW&QZ-DKF3;@aM5M4 zz7kQ1^ygt{#IrpUd&PuNb6qNBDr9LyHEXJaUY!gnCO#fQB^B~`C~ zyzR1VO?E)+W6vdDNhc5NnYeIJr(vflgt2#bQY0Y%b%tCg9{lF@qiPqRyeSdZmr8oX&m)kn339yIoA@bYAqyH{)oN`o^rm zbPFk!LTwD7G@Z4^+dYL=A$L~;ipsKj8J0r{dedFmdL%SO2KE(Fl%yG*kr35w%SH3D zo&n@LFr?D;_nS8wol;3dzK8p^AtK>c^}81@lKsTX$RDxNl-1@w`uU7b$3B=4m&PjB z)z73^t9sYB55~pv8^d6sAMbqr^2G^PCw{ebnem3jFl+M=0Ycf*cSFC9i6aF%wLGx~ zU$Qy%6pu~CS2>1u&Sq)`k)b%aZq?NUr+hi1Ex<{^akTp~eTYBP9`}xJr;&}I8`bk8 z>2-QSRhmacc>yOzil(LdZV$$LJccX`(HB>u_4HaM>yqy_*{Hgac4+4z(RH0=DOElF zzUKMZ%l=Au4Y7mhiRT;~Z3XU&q%V?3M2 zl@~uo$G@FpWGUTHX1owSaa+R#fI_IM6s$PM-Goc z3>g#9nZsE2;j?G@Dpc;Dle&1sM<>IIcbwWQ@|xnSqs*7!6$+e#ri@GWyX`+ICZZYh z;`+O7W)!+`;m(Rs9K^wzjXHpNSxc}_&WS#e zJITPNu-I-a;=HWAus;RP`;XJDrGWKT9xqZV$R?2g3Tzs0$ZuX&E_IVHR`8eAgSMFereVy2cHxD@B|{J!_;TC=HFFCN5+2r`UT;0 z@-Z8~t`8jO{(*ir?xiIetZ?q&w-VkyWBcL9J=J#AkEIMGeKK7Se-Tfr_FO2)GAPHd z?c^F(@X3api-07gv4eJ*T@=CvQ?g@AmAVR75AE@|=7WD7{XI_dGQc_Eb;_uG7^>9F((z<=tpFWqNmsP`2udl?qgA(UpJf27^uNWHOqxB{CRsFoes~n^<+aBe8T!67l?<_gKkle6QQ;Xmq`(@|{@C$#Uaumv51=GDvLJ@IHRF zH3X`l1vV>#&*Bwt2NiYJITe$xEt8n^E}0#*Jvk8)2xa5H{2`=9?yI; zco*RNUT!0Ec~~rv*n7uVys5PxNs`usMLmywEnn+eCBFT6!arKo*`i%drn#%+K%Re? 
zlQF!?{MRe-vOe$!NnoEv9Bl9(4{yi+5#hUM4*;DAl-W=F*N4yWy;lMC6{_{-&u?MR z|LV8@$ie&@#bZadN&fk4o=7yJ!oN(j96D);Bm;p^yY!;GB!d_)&7WiW2FK&O_B<#k z9cE{A0=q3&twAXWoh`I7KXFHyUJS@#O5xdo+2acqL5STthI?|+;eX$^-kRR}7=Wd} z4l5Tl8DzUA#Am>SKZl*ouv=8kAI0mH_8_`p;D7YLt-xnt?8s(;b_w`z*JPn;aC&`t zN-=@LR#}Z-+$06D;!$t^ygiaUfqI$$E^jRBnkHq|!$ecam?mN|j@fY#h-;ApWQ;<`*L|YOvzF-*&6z({W_26kv){(3% z^Y8O#;fAL?Is3o2t7S0FKk$>sH%MtiX*8s`)!3L2T+xf;Kn^LrhB|jfXLWXmHg6#A z*9~@)1H<|V-C<#NjCy?$zxHGU&6`&s)E%rkdqe>+_7I&); z9=qs$_dh4{;;-iv1veWCiU8r!DjMW`KX|N44u{mq7Ad&t^!Q+1#c;yv;5>-C>~0wSN_+xsLc3~TEBis zy`Y}rKPJ!ylkJg}gC0u}K)H{xIkAttQ1303BSdUQ`RpQ}FYkUn0i79E&{17x;1P{9 zJwoFs@_yg$3%cTtf2?d6a##DcYjY#uEv=wdvI4>lgv6nJYS4oZTy*N{{Cu+YCn2H> zne;XGAy!uvd_-ES-~v--I>w2bZFm@&k5&fF{*NorMirXHFbb$ z@c;|+jl87CRxs386UKjG-K znzX#}?ZqB%0nl?JBMq!=tgW;^yS`^R&T<>Bqx^(8%wmCo@vcTa1fK-LSZ{Xv5MXZ3 z)7`fb%x&^P7F!3V91vs#9f$rj!IEaN<90&Uv4DG78UMV|3tdgXaXtr$ld?*FSj+G0 zzGjk4>akKkipZdjmFFKC8&iNz+*YLR19V0Rh_|8p=B zJJ&&7gGeh#{$w~?ze*X22Wdb_^ew^F4=4NN!9eZ=wL;sm`8u?Ar)L-5L9>B2<6IdD zRE7Hdwl_gSe-q@leffwQ#0+FJZNyFJ--vX2$#BqW&PBT85U@c`Hg%wc~rs;sP}4O%r4;}=1uaqKvYS=&0`y%W{tOwXM> zK`V#wQFe}}z~Ov=DNvw)Vq+i}#2BPM@_c4VEi??RNbCr??~7WTn7RqGyHjACMOzoY z9->cj>vovmxNR5PQ=iZ1)l?Dc=1xq>=(_u7_}1+p?vw_D6=+PNOiC&KE9HY0!K0^r z|GFqsE-j>C@D?<-np9Wj+h=YV)@~3<(wN8*Xfzy6MKoN$omW52ji`i2KC!b~*Z;bk z272AyTVm2q*9z32*TTFKns{3(Bo;3|d2(hM6eMuPAu>y19WdSKF;J29Eo{GzDp0hU zg6^firZDA`Xd$`^JD znmMlfM$WCntPuU?C(xIN9Mw#Y2lrw60z|c4XXimzn+_gD^nqwFUgos!wU(@4t@~+f zJ>?-BQz?zgCb^+-+u|J%+?@gaE|vgbX{ptif0Hm-H7fY~3)5%M{(RMiIT!cAP(a5v z0gc~%Ronnk)Hg>9tLM`;XpYe1mhbTj#H)yFQ9DIl+w-kg)MlCVJK#0zSz;b?z3SI> z^|-4_GO?Zvm&u^=ij#ITcYKn`_>KaYJ~DX#+KGoJ26?D?>CMnpberfny2O&&rw9@f zP%thL#+K%eRkY3zw;si(SWK!&UiMvan2=eWXsO((S%Y8FGljZ`>t9I&D#!6Un%4O|C^WNRN1qrqFo4OY+Vp>6rWVarzg^sX-)Z~$S z5^KT0s`SGGp@Nc;>gsRHm#YPIbU-Kw9J19MCC&QNA9ufanEbh5zgU904=Qq)>LDcE ze^9(m($W+Zb=T3$vjdfy56#1d6Urb>gUc)`;kdzo@_+-hk*mx<9Dx?LB#x#wa}_D8 zLQ+}iNp_wYfcE^Fjugu5Y_qMV4am|G@kQjMCqq~|HxNucpMWOwk_qU|9%46~pN5OL zqAj3#CJDz2w-aN9a8=cRB;lB4kO(H+*~`Ld;L*W1<3L4>RLd4W48`NO(Lv%1uufx zoYAfVMK!WP`OV)1xtd~RZeG9Mw)5peP*PMTYtm0lH!^3U;dT09#3?HeL$3hWwJ)FH zGnb7j8Vc_fF9qu^FQVe$yg4jR(JTbOHIHt_6&0eDl-^9;#4Z&&esynflhS$qGD`-k zJiF5`h5+S|YxnzCPWepz192(THcdooP>lur8`V$-5li&rJ0QAUlB0X+PIPcA+QYCtP zOif}gu(;37AVdj7G#9_Ze#MAFzKwWFUX|~%my2ylHw-+=A_|+UZ&a5w(_krIWpMY1 zSD4ZqIo`sxVfWxYEzTlssMbV8yXs^;aOrmeWYNLlRqh^(9C5J%*P3@z*KzGID@_+( zone(be>bTovA}(xY%siJ&4M5+?|ETsbF)tf$*q7;@7tOsmaEU&mode_Y?i$$U9o5= zAQJ~}gO&y)ae{;Rm8-$54caq9wHl_Q;Xq{q9sD1F`)_f>xPP25f*s0LJ-BuG7=ury z4e0dUF??e=8xNw)piJtk%F<0AhnXvVg`HUsh3i3y9uzGG7vCbn57IO;c}3|c_kLWX zfWJn(j~6*Cr*Gl$&YL)R<1XkeYXbDN2TxNEGB6zTQ1e>j$U!DzfcZxr&SU>#4}y|Z zNJyT4&Vyte;&njpCS($&zuO|#Mx>aXPSYsqkJg}%;Z0N4Y0ghjWU*bn9{@ogQDB2= z!mJBg%WpzWWTrek-VQ@M?Bx1k3Q@;5dkRkP%=@u&5aPD_<~2bO`|Po? 
z>e}k&VmDCRc$E3bPnzV`U>G!g46i`2=7HiBrbIE@QlOIgdh@84Ts#Pa(wn~v6c>GZ zv~%%^ypdJxWf_BrXU;`TMbzs1#|O^^!dz0|Kk^;9Yq7kEA7=ll<-{oFtRLqifUw$P@KqebNL%$jGeUKUiP%kD zMaEqrWj+kZZ0px^v4Y;$$sU6Wz8hc+cGAl%wjYc%b&G$(&DGMUr8jtg(==9Y=ZlqR zz(hnr%06(z$3sru%j#~DS+_Q+Z>;Y3h0L0B-()j$?D3_gt{+T4*3Md(oboR#+!?t5 zo$3Bf^-TK2B9z6Gei)$>5*MZhBL1T<3cS45>$zYs86pTNU0d#aH~_-@Xa&i+ED%hN zKn$i*W?*~AtfXMzKu=nRU?Hk9e9C^!RwrMNzTC>c%t^N7v=bRT4&n4`k zzkci1_1i!A&z-qca^|1o$B&7B-+lLf$I0T7Ta8`j1*kA{n~&zgR&62=3L+Z$f0h*G zsk9}da>w>^?5mb=F!zfbWRGZOnk8g~7U+*PMAk;m_KlShS5(j|b2pia`96k66BcG6 z>{_l-AYtz=G{V{$3VHTAx(yYHH>`MqkUvuAiY8=!&cZ0hHyr22WMl|OY=iD_SUohl z<@#bql+>rN)fFfyTUq3lpRgSCGzZfouD-$AC4J}N{oMBhTQih}&J^r0bj>Y%a}f9y z)oGKgpv_c1;4qDk>udDGREaT+SG!*w+kc+Uhq?`$`P5jWtcd8WVrm^RnE&%JxpA+H$QTD z-YWZXj|xrX`T5o<9&2mi{KPBPX#0iQkNov0Qc2I_=|=Ss{c?9ziqbBHrh0&s9II{> zdkS1&QWdii>?k|ovHW&2%o|I*Z-Fy}J-rhv^nkLL8;zB`?&)RdrNTkg6Q3!Zwz4zs zT%QPra$b4;wWB{leYwth4Hl&rW>nxF=!%I9`i7RQx&?z^bf^hNu^XYTPK!r4(D7mS zmMofMP!ct&#Aw|x(I^HYb5kX3{ZBlB5jdTersZ3ZHKT6BN8xO#2C)hK=(5V$o7=_Vq#n_&JSo3enxDkyCHF)BVNg4_&my*iXy5 zVrt&83+vyxQ$J_H1uY9EG~=NFJyddyE|}3U3xx5XO{MpKhT`$IzqwzLSF2T zF(VO=b7v|nN_18$HOHj2jkE?9C?K*riJNp=4%N~NE27lW+ptrAnD7(5e*fBOPFTyr zR$L0yV;foBS)2g`-t=Eoo87XTG!7Ow!f56imR`ye%=4=k+gm>1K?}UenWpFeHb6-7XMshN zvYS;YjGwp&mk#|In&vd@uQ;ELiGaJiTlo?OIt%OnIKySV<^&2myd4xa$Rf^59`1r3 zAylmw4U#?nbno8}-E~BHva<*@ZiF?miK{HMn8Kei!xc93lcQYly+V&y_Z7%3cSNT( z;56SG%`GyWP>(HimoD-;G8mZ_|FoApGd;o|wDo5`jH9KZ%xyv~mDn4N#q$X6Y{0x? zdb8#BxF0vW2o-)5P0?j)T(GvGA>{MtEAz|qC>^`GFJH|EDLHJi#U~~S#qmA6isaSs zZpw?C^2+vaJD0tv4QNr>1G`uE{wklX;4QY>*eLv!h+!_uUYy{LSDrP*6Fp~Qouo^% z{CtbyZ6wH1iCc7ZeQet=K`M-sp7wn0V8e|1iT;9LJfb<7hFpL=esJK3FSz!nVtM!@ zxTDYAKffuG*%xxS(~GpQuMO=VZ9NP7&_SpwwG+!+hmk;+-54=Z?!6sG*DhXBG~B8t z+e2q}+B7jMI$Vd&k|~L7X1e|>?890P^{vZN(fIVTgPiw5h9u`2&4ZTwsq(As8kmzzBV)VGRTo1-7Un+G-8 zNf=^0HU?|V>Amhgde6tFb1{haCoe2b(XPKl*ywA$0L<4cMu~e26fo4oOQfJ(KbgJwh5FR_o zZG9JL)Z6)T*Q;MXFDNG#HW0~&o1@O9PYX)o`W<2&-4#WLu8L&xaiwJ~6(h9!k65i1!zT51QA(jQZhL1@=&v0XMBC@27^j-VO#N z>JWw>jC3jdvc3?}FQ_=^mV!b({ALvQe!jd83mJ2{0I9XrkIpX_ljJ9wt6QStvGK+> z3AhWU>hGycBmwn}h5bEM=&EcTeCkM%U~p9MrTsUL=x%yLQrHvZ9PhP|k)5q;_4Gz} zcZ7DNklWk)*z~ZZS3z}bSo~S!|H6rR0C?f!uAKvMBw?1l&7j?T9io1Y6_iYz92Hy7VyLSzNbB_#DFA&^&4U&7;BKF&r_NI*#IK)S)?#LKh_)%|9DJ+k5_ln1imWG|lT}~(EUc7oy`|aqlTI4(mbHsdr zOH}G0=8SBd7#nH!UN5Lv`lFKXNpx0HFWBPQt5-6qYkO!sN7dFQ3x+1tLnHQZhhExn zmWol)i#`jPtDM-aL8sEK-?ZTHXba{uV`C%5yISBP1e}dGxVdC&x6o9=I`aK}HDkjx z3Mo9O$?N=Bl@ywpooppdxCur$Sby+x74FE4A|y}I{#dBhd3=N`^8!i}Z)xsly&TX@O{V15fDV@8urHLblp!WLZS za(BwX9EzvAx4<0G0%JwiJLpPY0f^!J&c_Qn`L@IGlS>La*Uh;ODi2RZ7KjCn+8%1z z0`oQnNp^5>%%7I0yY}o-9txAs8nn?HtGX&skG@ML*rGeQwM`ea$_=#?#sF@eNf2RJoEG0X3)R20Es8+Waqn&=4U^S{1J9NmevQc zh|O=&WrzE+=nlFV|LqEwy*4ahU5Q?^#d8#li7XLJVxJzrN~Yjn{mH zEvZBYKp<1OCA#o9;tzl3;=XcjCe??Vx|kythW?F7v63oVb~f@s?EK&cKZ+Y-ZZ>&( zs8z15vq@0qB_x(ddE7=PEQW!S$#-piTzfLo`06>@*P~2zNP@AbF018rC-#B9ZHx#C z9gJkM_M=O&X=;@l8_+Nk>oW3kpz;h8yZ7CP53s?INGd|^w%{)LJV@Qg_-fViI8&X? 
zF1o86|B58SJKZBNi$&riU4eS`lZ~2nHqVBzLjoMMlm*?sS{VE?BlU7&#BHP~fxK@Y zgD}H5+o4SZH;^=qaUz-mpPF+CE`fv9e#L#oTKrfIL#E!@4DPsKNDc4gW|lF#K=Nr; zFUn35)_0R0XT5R5on_BbG^PK(At&9JwQc3d@4S8E21BHN3T|oc-P-`m3HJadCrB;VT-O4sL(Nz>E2#f0q-vZ#)+L za4n2mY~6-p7X8U^%sxagFuvHsv z^>yTD4v<=I`?inju_5fn(jkN)HROmF8+xl~1Z##6o;L}Y&uLftR3`d~Y0|YEcOv<0 z{?%4IVW7u5sMbYx*jH_S`LH*Hu#+yfHsP!UZ0lN-Rw!W~-4mIAh0`7GV=(X_OhATu z0|_bz77lLzZ6%Kd$7&z1!M7(ukiT8IzvpkVe|OhX6Lfl=f$F7>Oy|>dfyXJ;SWeb^ zt^I%Bw=~@s0|qt;!j%L8!-W^y&q=U?k^U@e|MFJw^FHN@^?afX<6|MRnHBPMK8xE@ zqc0}NKVZx9R2X)yM{?C{FW|q+g^!(-&a1nU&>W4a+Wum3uRhnmPjg}WTTe>5LW<@0v^p@U2%8;;$7b{jaI)r7Q7|VbOz(E{271`(yF<-|xTk_xtlULyYA(ge?R9yR2(>9(`5Q-vo+?LP56HoJ3k)eM~}w_{XL6d%q1+E&Gh-M(>59g-ValcO}+o} zNM4C<{Mthl?faC!`Q`>^oy~I#`4TtUetP{aUbnQm)HAiZGTNcrrJOoYN8zAN-bwGDwB?$=llQXQGL4 z2w@Lh)7^r_#GWzYx4N;Ae{aLy{+ra3;N)k@Q<43$0o9D%d8x+&#I6;_n8Abl&9qCzl{E8CavF)Ak z-4vXFcRrFz**=hg*$vrEhg(iG@tsdl2-hkc7lapX(7R%?B}(;(7PBe-&;FlT$wi{Y zk23M?4>CekZ1qDUZ3-CL&-X@BWhY1hfsuvsZYa9n7mYZ)8^`pT0``nJ{cA738YpRk z&F4>s|NYs9hC4glzZlr|x~NdhZ6~KNp|F?l4jh>JwC2Gk=@z?C_OCN~=70QNiF_L4 zcypXY*2XY>)3L6k_#e@Ef4I~yVwvbpbxIcojV|(YVj}u&uD&&Ccn=FmD z!sbXS-r@i3D*Ui={rJf8*Tq{)@4{=>h^C136(!hQx&7TzX237-@;$>7Qb@J=<#ck8 zj_t{oW63HpZogaT3E<6O_!dTPoAcvd?Vx-Dfd4^a}~Q*t-4ZqWcN5qZ`;Kc_dm+u zpo&+05NppBwC=dL!`<=nv#%F2`j))v1C?R3m0V{bEZBE^3l(6MYFaH>SpV&2Z!S`( z@5@FpJi!r9HzNAUf!r>8dvQ(d>eII~`SoHO_X?&0{`l4f(Hk&rKb5IE@zKc^_1IeP zk8}xHtf8$+JJvx&a=69iNb20lNX^Z3zW&wR?+onZrRlMT?#xmho%|Wbk(%lB%=LC= z^v~22(+>7VF9cS&*k~b3w&yMLla_UrWMO^Mh8Mm^4xDt&c!49$c`2E6@jk(qLhIm6c zIIk5~wpv9s7pHGli_|Yqq@A`Ju&p1QHI?hL9I1Jpi>L~IA4+Yigs{nhoHk^mKL`rJ zpEqg3X_-!PEtqwhlmQ!eL~GkG;4aE;t}INB@)J2$gW$*wCY`v1Z9SYCx6^d7GFn!4 z{w`H)S|i4GGTVTE%mw_6(hcj%YGOlNK56P2A0I|>?P;O|nd4*bnnyuLVs_$a$dBgl zs#H6rS(*KHL3<5$ZoiP>MoBnk76C@N|MD8u8B4u|C5hhaWvPn>)*+?wMxO^8yCJ=( zvvIuDkRTXkqRetHxgs|=*<+d4e9Wb@OZ^mSbo|W&b7FR;Y|h5j*3t>u7gpFemmZ%l zS8C(EZ{gT!*C`Hpjz13m_jiwlNt>?5Ug3w449IIa87UJC`!Lwqs5g0t)YYfRpc*~B zvC~)8PuwKdGdbPxZ#F`nRPNogq{@A3eM$YPWi)60j3Ci)sFmYK4N-k9 zFLCkhX{m<}v3qv;vE-UeO2{1;<@oTkdLTMSt9T3h|Ez)Et!+-y*kZHtAl;QPnRBtQ zg2NcX)kf7@jZOZ4rl-kEo)5AKIGJ(S#f?*iTG~Ngunv0q{!qgBKD>_?1ej1lXXyCu zr3{wE)4ijgKP-w6r#h*ZM{3?S@yb|5t%*8qaL=zSl&M7FC-c6?+7T2Y-WBtYscb8Gvd$E2ORfnU-LboM7jog#md96trUWZN{ zW(hT&m(PB*d3a=5LRlaIEwzDPC@VXHXIr1jC_034c=G31qnLtee-iR@DQ02O2I@uS z*o)X&ojZeqg+3Y=yE-&8g$qnQ^ke$T*CZ1J6z_ygHt@X>UB2&S2%)zoxEF!jBA!M5 z=lK)0X3Isy|ABBe8tfU0U5_<48cOZx{+t_hOP;H)rLZUX3ZMG=lFDJM-DGntPy$P* zuYCIxsO{Ky@n4I)cJ|NMiN2>!u2lHzIvRz;N*Sbu4;>t<(Y8RgWb|!V@bGzS3+0D9 z*-ERWo&cv*RR0MzxqPrWs!(|?&;1+O>Ojzk)!~U?p+v>RA^A60bizVZ%vpy&2MWlw zEFW3mf@h-o5zT|=(IjJ?qG{ot3W}d?3B^XIU`T~0%S$Ie4fVM^1)^^T{3%&deOKQW z6A##>hvx?Iff1!eM#yCw5J)4cmWJ=&!30H3()ys^zKnkfY-is?ZICo67<+bFMUe%Px5Me*eXj)G$P}_YA@H9EnAyYE>f^(9*L!g;hS79K`DvjIdh5Kkkg7vG zD_$!pxR4XobXFzH? 
z`cNuGq6(7S;14F@Yxyo^@b^H9>--S%x?@gvudnS^ zvHt0yb9XHuJDuupTQ(bIE4hHq=yAJu$(j_Lcsoc%jl}$l`}E4nLfHOx@#xcpw!m5E z?$SW@xXW2=%{q8HBnuinY<>N!v>ABkn1v-}h#51t$gm6zFN3n^jTg7vN}k|;$n}%1 z^hMI+c`p9?Ju4Z5(r+6Fyy5H~qL+?&e1FBhk?~e*VogkC z{1gvalp0yQq{SRQ3&9Xc46VzX?c|R9nHk>4{OJJ!%E5K2-uX7=;53!9;)=^q zE=k0Kg;jNp%FzeNYBlg9H+!6i2b^PH#Z>HRtWvGBx$hicu(r%_=ur5}(B>XTDI1*U zC9BAhLtAtIoH}*3^as`Hrg4%dgy~Czhj$W-n#(O_1-JSPmLNhojaGYoho{@vC3g40AhKN+Weswac%^ARf|o(~{J3XvM0Z?@;2J1e2(vSx^%wI-C-#gyD& zo!NR7Ydpn#!hl02IIif|4B;#DCu2|k8F3yb^K!#2b9m6uDy;4CleWQ-?n=@&n8`1q zT$)KH4h9!A3mI60IS283Bs`G43Ye5_NafzqtBaeL_K)}WtMD{~qme<}A~?r4qZBsz zVd*{n^6993sHL4*!mE2<)JbP~59FHkZGQ1K?fzBjM?@1ft0un3HoGOImGL3xy7*ln z2FqI|->*t$7IIh9$|U8P#{X`v`1ai=>Tlp6eSM>O9=sqD+Anp*=Tl$p z&}%f>EYxN_9t-<7_2$`%GOJ(A9%6@?{+#eTM%^(S_k2-Cn+&&*4 z(nI(7$-d1P^Km|n(ejXve)&Qk#~Xuoi-lJO ziGkVTCb*^K%+12~+>guUmyyJ$ks4pTl-#q9l06v9bM>lz7|Nsh(@>V=T+U$lAY|I2 z?qYGR3V0fIK{%b!B$<790ZJA?cklh6f;%+?N6PTy11)i^-cy06JvHnGsH{j6D z_G47yy?78ulF{#Z+hy%G*-&e}G712Y%wms=(JTG-6KnnUtCEn!R~`$Cdk-f;NCbs@ z;W*2`xp_9qEO5sIFx1FvR6~E+li35b&=)V3^2w(hQwW;sfF+&wq9M84o*(KWeL>!G z>BG#k#p^$?MS+;g50F527uv3bEQ_5+>nzuslN&F}Shl-Q==djtyB4E3Gcl5RA+QK# zyR~}~OmFuaCEK^hGXx3q7c~6wdK)H{q2mRaaXHp^%8JdA(hn6cWZ0iJt~rUPCY&iX zAUrzx0v#D)vbYj+yk)kzQM9jVx@^w{|J`)sa4bbC&{jwmg5=O2qk?-ScC7al0OV4R zRA!cT=MvYsBvDk&11{FMswX437dGSZt%2XH0?oKARkf^hUvp&2Bw)d6f!4e-alVcK*BWqP?2i@{BKcXOlvT= zQ}{B4>BGWYb2oAb1QeF&$zRy7CLHP@Q&O&At`m3uq4QLT zzbKAoIXpd5;j{U;(Y<oWPXcXi5X4*h0hAzR-bqaet!y^!Nj(lswl zey4lJDl$cg2MYmzz-oWVOrd4zQLl+*wn9;$qIzo{U#)u7gU(*L(ZxpJ3r=6~_IF+V zw6e|Y#;=K;a!b`QTGECA_o?BJWZPNOgb6Lsz%YkI>VWJ*fcJ#2USJx zxwuoCO;c9U3BTj=w~t<~ofJ3J45HM$jg(xna!3fn8T!#*-XV_Kg&>l)%h_s zhQ&wdSu9JbMpTwIlD+GmtZl3*@Ufe~t7jb8S^8tI2)Thz!hDU^I$E1sBw6{HdhN=~ zn(?kOw*l4tfM9^#XzhOU=ArL$$HgIXX{7Lu_4%hy9~NuAx5SX8=zJ_-A6Q9gol_MF z+i!wFg$2;F8pLnI%V-;ahygr6=|TVAD3pb&Zc4d_N1I62+57!piEX_0ug&{^|K6ul zFs<3|)Fz}NTAvnhvCY7lnzw}{C(H`>)~T-)Ewu;2s`RW;EwJhP_Xmo|$jZ={(UOZ# zQ{$1Uzg;4iwD4(W^Pq~lw!Q_x6t;&<0*fw$)kB?aID`9ZlUfa%?8i*=%bQ)Mo9a>W0xr|wr{v^ZqI(<1)iowL zA%$!69te}o){znOs_3K*H_JXI1c^h1pZpl#6eLvR9USpYhnb*PMGo#+Fu9PESBAv_ zpV3s$96sI8{q>JqtloS`r)Q-5p_Yx+E9D!9`_15tM0sjc=jECslx!TyjgT>{IZe)7 zmazI>CcMMlhI3t@M=+6t?zbCMBZ3uLS-BR6Wa!500L16pvs8a6Ou`K2r?bmpW4|Z7 zP&$Mbr_KcS(wWkD@G>LivFMFEkH0kLOz0#E(KT>CexLlbz%6=jM?PK3`W9s@o)7On&?$6I3Nliv`_$UcLEVSq zal%ZcWSM_J?4l5!pW?u`Qd5E!orLkKoEXJsH>x5_>8Qv(91ddc%aF z5|}*X!3*g#(c=y{YSXD~x+pb?nd3 z(S#}K5srT@6)1Cr0Ps@}_<~tq2y3+)eFfe_L^i5prgZYjX%tcB0ebE3#?Bl3hLR3- z9G!QvV;~PTFU?W#Zoxk+pCtD@e0Dv4DfQ?%#IsvTN(i%xkaMB>GnWAL2aqq@xX%Pd zBC`l=rFS&8hoqds~0qqrIAlQ^7NvHC=(?&)i5}ov9F(mPs9w z$Mg=n<_q&aWZg5=92TP8@0UbRE8ybtBggCHX{@<{4xspxRRQBH<21_)NP9f?jSn7! 
zvQ^X*Qw)|r(XKArCwk+RBezUFksv}tuTh4jQVIt%A%j%!q<7<>$)AB+6A7Ql>c zGaW-R4?ES)=)qs)&{~d@9$7A28PZnGSr4aw_JaB8hJlf7*!_Nt;8jV=DNg zP~^Zrl$C`p4|ygIe9Pntb&0TI+DWWTX%*kC+bDoe(8vK-CcO&4hsv_*Simwc`f^tBK}mnu)x1mjS`C za%2|rY9VSg9$#q@DwD1kPx>Boo3a6Vp#gf%!e?=To@03Y%tk^!Aac<=)$pyEE35$g znU@28b+u4c^A=RMi)VRCAHRLwO8Zn370SqgAP@@_O-vzUJ5t+M6ZIkaiEh&S`fZ4L zfNa>$xBEL$YGpaQV8OR~bt2lfFDys|;5>6r;G)dyCt|%VRBvQbOm40mqwNUY#~^04(uoA<15?IM26zrWsL7Ago9& z<$mqdd&wg$#D$&Lvoq2b7I)D-L5IySMvX9Y+(q~@r{LgVIE1IdzI`~XL+LK81aSjW zi}a(C11{#5RTfoxf+YZyh09>Zi?QWYH{m?rO|zKEIIZBzid19WxOb%k?pEN9F4^LU zs$6@MvAY+*OWXEMPtToB-gm4>;iM|8os>m)xFaVF*(k`LVTed}>0VK;ofG*sI$rEHY z%OWp5UtO#nDSu?nC2E;k6Ck3eCXm1Zbn~Fe8~^4mRHvEMF;}A7ZJ_l&S8||@8fB0Y z?SdK6yx~}_vQVJUu(*D8#nJzYILEB-eg6U+lup-`;gZmgh z0>R~Eqg}FadxC|X8zBIxE(+YVz;}%;whmBl9zto(D!u4loiD5Z$%NNKpeAs{A3)-0 zR*g4Ofwby8@IJ{F{K4R;M`q6|Oh|hQU45E=lW5D9j_X7UGm_2w&gpS{nzf7J$}d8@ z>08{oP7!EY#EOOw2rm;1s!*6b011lXbHs41dFLw#XZnA8Z!Vbo!r>RhzLbFrm;xFLUoE?;>?%gV5!INQg{2s8@)GG z&nfCQx`rm5ji$tcEm)6t0{)gcvvWU#7>k_^U=TK4dR5e{u&QFl$V**p>XRq_Y3cuy z{Juz1K;`aS^VjQ&iUL+r>q$LADZGYPh=vI7F^vF?Z>A(W9y9v-4QtzKXtxDQuM7e0L0_n~DRcMsY?;&WtK(Zfce7^G zAW!Hm%t2zK#5won%K9P0=E~M;)H<*zbK+UDcJJb}Q_Ag#9I8ukeLWSkshwTMVds-a zZAPV9s8TjrD9q=VC>bJO29Pea#NN>wJr1<2e~vG64$W8P#`;+vVTLU|VcTK-{fFMY z2RPMnOv@6rbnC(qlmIx$zFeCt40V(-UAxXMl8=|SUGAWJF_|@crD`wT7h=60$Qgtx z)Svw9GoYiRz>tl!1N#Fzc3sKIv0c4u9xi(h{3+CuRc(x}Q6r7`f!xhyO$)J}WA37R zxjz}-i$a0g9D!fSxtf)eE{O2mMRSo-*kIpZ21v#SQOU}Ve)FNhStq^g4N!qxNVk>V zp965+j9V`F-emKJ@GF|$`vG|D{%XQyvSF`-;Avxz8{Y|VHFONvZYfnCW3LBxaOIG>aq zVWlq9sLhs>H?0@;9RMxc&FoC#mKMuD;<_%9`)fn zY}DZj{&!pFpU(I2Q!~>gQp)wMIu`-jRNTW@`vEcvGb9VAMMkUj?H>cC$SKGS7~{qT z1k%Lafr9Q({1{Z2Eq5;T*g{IkOvnPCT~58GcVN7pP|-VMql!-UM)<3NotZ#=fyaNm zHwZ;o3Am7zNm|eAm+cr|-e&~Bb`3s`XW~cH5TD^j$q$J$$ZO%#UrRJq*AW1i$aG^# zRpZ`b*bjpFw*dEk+GXt(w*C<|fcTCz$ewfIQVnhSTnQA;dHQF@ZRP6E5ulZ&r#`uG z(yiKAVR`SSdHeZ35H3XI6w|uBf5@Kcm~?-c!Yx9zr46YSynk=VOmr0Gg(Cv-sB7a& zkD>1$?rR%5`tXwrFg|3wV7*Rm)SlHP4r zWkQ4=R3Ybftg9vW56;iW_tGM&41JD*OALrC{5d!WQj(;eN=WsmTUL;KTJYhGb{xGv zRLEmPG-k()!6^JYYX&kUzXvA`vf5k1sk*4QG&_+DX^a1-TO16?^5P zaqwT%?aFzWu9W*hy67QNheq#wizM1Dg)6d$WT0?v6pPjqH#HS#Uvet`f#ci?gDgGD z^CgAV^r>k@=M?}%k8}$3OwNC2U`^_&Nw-}j`2sOSe4qfpH5;aus?Ff3wGo*hxebVxhS4&kpwzHAh=Ns0GV9 zeG&o8UJ7KN=@zwb77!((;#NC|WVN}XK@t{qNEHEdX(ny_Giduwv2x!CsFZbWd6Lqm z(_1ryqebe^2SsoOzN#v=+EVGl8*8$??eeIgAuud+aVZj|cRL#PFX`=673d|*5Tt}I z-4p-E2BMpe$~pB<1lEk1-zdS`h3IoF5Pw8bs(@dqt=9zDxOs^RimuK*<$v~MAV zAS`Y3Vf3V4&>kjot9n>RC2Dc34*G_sCsm>%bQ7#ZO@aUvR)nN&x-56;fXQ# zD}pP5V$}Q-uGO>%IvjRoBdrEzW9yU#H~H?nr>XoC>|~ZyAfx#Id3ZUUSb& zJJK&^8Dn&Gqs*l-py4SL$s#AqTaA?>Q~m&>WFZLv5&So&N3isnoP)q^#e z7t!t%fKU*{8d!#unU^r7{nJMg`BiITeD!?t;LEalz=qxfH0Q{T%>hoH$6)?Oxo+@A zg96CnM2TqF)qqpQ9l7KLr}*i*MsM7;KZK~~FKe8d*8l*i1zeyGOM7R&a=vyx9?4~2 z;Rx=96o?8A%3Jj~&R-z)bL${W*!O8P&KG&2sjznd`Xgj@|6<~*Gtuz%i>|S2t!jmf9qz1 z8Cq`3R_V>)g2lr^>;s@e9dv9AteejWKsqKod>Ir&hux>8`2wd_mgmE}%YPzkf1@6C z;T|oR>*BADT-wh=?7RrwD@v3JMW9tUwBZnH-UDwDo`~_{v5GW<;{A6^9wTe8iDt`W z=XBG>;qh(;1m=i1_n62+nm7m|C_wH$z@~CxdiPAQ8G#samo4?a*bLq)&0=*v-gJ?~HX~R^ zBx(u$e5eRS?kQ^)GiTZl^$R##9um9&mN11)Ms@+Ris1J#ygUEMOX6wDPo9*3)XA~SGB)L1;;py- zZFk^+kX}uk4+&~sr?m$v}2 z1wsPBCW4KnxfNg^+rGs>ReBO{lb;m#YDB*|lUjNFEYk844%kGC$X(dKM4IbykJ*yp-euhqUze)H$FO>w=owfjtx! 
zHIbpmzfuCFm|Nq~V19K5h8k%gg^VM-^2|N!^KpBYPS|v@U8elv8@toMNH@plxt?~C zRF!Lab^9dP9BxUfj{qg^sJXn8>JugierC#F4$E3 zE!&ChB(MUKz@umZ;$Oxj9RBRSS0AX~xy@Tng8m+CZ~G0{w0yY^2F046mQC2RM5?SK z@9^mY9$F;+0n~#Ru?KTo0mhDcw`IEGt}f7j8qwPtUl7XtxDeQO`?{JuMP_+#eI+^au+Ik4x8Lq+`d83j}bOLjN+{=oL}<@MYfF$aai>rv|j1ij)} zlnkrgP~f4V=KqLn6_8$e`MLiZV}T(NsIIOOcYA+n4$XNElRw6&QZ$?Ez}Qz>RV=0v zb%~d)!Eh5596TAQBOf0Z$jAXUgchJ>;FVX6e!+)%lvH$j0x_C1l5j1;pAry$iZ&sX z6?cGug&Q*33?%mvNjuNJ;{&8!r?=$E`uk;pK?)f#drRFP(%)a~_4$*f{VpreX--#8 z*}Z*)AW~_)Qx%Bk(!XW?PqN)XNR0tQ&kGO5Gi!+n`csu%8s_p00ip0x;$O3=F7XVx z^KxB~pJDG)w-LQD0{IaP%K=#=kdX!Y(ZU3+l3~~gYZIvnQ__(?y6cx2uRC=N-U-nF z6<9bRT;DZ<%+;XM=?BTLXDF%&7S`fqB&}a%VPcDxLxm&*B`yDwwwdA7$ifGNofZLd zaNVU^4#qoPoQP+qoj)R68VE!q15-aWZdaq0bSvY^t8RN_~}+0D&bG$S{``qbDv7<8;-u)H zLC~rS3{4OLsIntwXriE60P*kZ+uidAX`7pb$b0*^>eEcmnlVL~Mcq|gTRuV{SOh2# zaOfapWkU$rs?fE9y!nH-bo+4Ug##&IO3I<=2=WV|n!#MS5C;`0|S5!oK-k0ntFubLZ0!MBk-^P6Sk&x9LF)bZ04*4oY;8w+(p$&fg9>} z_+{t5XVlkZSiow=;?=`hVSrW^(6LX0Dz17riMW0Y0=Ih7?cLyal7>{OViYI#GLlHudhOQ z0imWost0^}gVCxsR6hCgOAOi%M$UWtc_}!n!?wvTrZjZlyLSc8{k{RQiGZ`SST01I zPr{MfED+mYcc0I+7h7dw2e$i4X{$4!3N}^h57r(jqQaBQp5+Z29N9|@TLYdZK`Sqo zv{%0=Eqoi1uC6K<7kHsC(f1W?D1sIab|NSSuq$ISKznuLlfW(q##{_QUJAI#v~$U% z&2uvVS>{bl)ANh^qQld6?SVTMUFGrtn!^B(#7h?Hz!?looP4{N4$vhalV@ID)gt33 zSh$vYa4Y4D0F#6l169B^RR>}7$F-XiVH`bxM9ZV7%9%is(c$6rPZU|+2hj^Lb=N33 z5D?V4tzq@3TYDBt@C`V#NPhvrZKP8Oe))*M5-il91?cGYcx?E>E#=8yrgV&z0>X1R4(je*nRRKn=gW!a!>!Ah7q& z&CiF1*j%mTz{JFBS&Rg&Hr`RnI@GOGO+xATQ6VRn0S_CBR@D>~R{`;wGc~gU=jSj_ z@H$iH8?FgvKE6ADxE*jj4t@Z4>Hw-S65nJVYg4gps@eX@5F~J=%?Ztsd=&ru zZ3)2cp9Fkw@Qoxhnn^kQN!$VlEEg0g@Pn;DovSz;g9BJ)gMx~tP|E8;Jwyc9Fpym6 zsQd0niuX@z-bc6LOd25_#+yV~(Jd_b1Pjs@7ul1Vo_??>Hgd-wMc19x&>)Wj8*G#c z*c~+20E-8|)^N5M^pXIcvI>M%RpvKC(c#xq#(S>&Kwyz~e4 zx8NAQq)LP&;z$h$a2qj936qVuI1Cix<5oD}XK?O-64dJvd>itLAQ!(Q z=~9jC{N+)9chudjgI)$TK(v7tlfwSRNpg!+1o>LCk=O(`OhlT)u{hB}!H^_tq|9sK zaH#Lx$XdUX-j_(0hENjs5f+(X^chsBgJ6xOHO>@qC7LJywh8~+i_%|77dj$LfqX(y zhIifw8I$z~XvFXjQfT^uW>r!~e_K**qE=h)y0_+Q!FCK0z%IiIO21 zX);$p?mV!LDBKu>V$qF_S*DdiufzLNt~xItMh;+yMvAmortb3u z)W?ZKZ&BltDhZL#;T(aLpaU&3x2JMCpxOekD}1Cj9xz4%f?tHk!x5erf~MO*Ix`{k z5b0g2bIWW@3oqgUjaZ_^fkwz6D|?FMa?E=x0i#YQsfj#Tn;67>z`8KNqA0SA=G*WH z&vXAi1$0FB9@O5ODg+m4wK)3MQK-Y;fu#EMT9_tqUbLU6{_wel{PI0y(@ZR8UgDV9 zS->pekp{nG8jpZ3;@&gVKOc=`J@A6vzXbK}x-5)Y0k-msGFWtQfo_D7R1bR%ng3U& zS7UWT6rEH+xY?y{wE-wk%f8|{lyC5RvAnU$67yaQ=sWYLr+OAkYd%mQU#7o&BQJeO zO!$&;1k)wnktNmZ8E6y$t*?E@re=E}25@5{Cc+LqeUY7_u{TLHiD=N5_Td;<;e1%K zz#hNm8^T8CM2{aKPz4l(tTi6fPuDzkesL=4JUseZ9(tCb_3-2+VL^2y&@?)1^a4G< zEix=Zh|AV={I*s4wup(d;4WBw_-kc{e}t|R=7b0DL!tZZ_gWBun)o5XjU8nLED;RCw7Z?%tz_?!Jt-OTme+D=k$L6$)kVj{N8qy2M_A zAHMZ*SYKOAPps1I{`iEMBKMx>z*t4%9{RepaH=E1#(J{1HSL!8ej|tVzm{?P$dIWh zx8wVpm-6rSOL|EE_;RKs3jnGEBH?osH!_&s>uiEunUy$_$F9P_>K|M}&rk~7#}B;g z+R<$|!{R8~4G~sp^iP;`4?w|BThSx4@&dd*O497z#u zjyTD?N-t#}wj|_ayD9OTjTudE%Kqtkf{BE^vKi|qJ&INi4k&WNKl%EBC)CPIx}E_0 zenKnMUH?B^fVFQGHfQFw>`6_a>kdCOXrlC*E1^kTybND>o%u6rtC%{U@CxuGXi%bQ zh0bw!=_%W9vN?*z<`oo>9J@ zEo@nE9T)+DdPc&E%JEy(m%tXg_sA^&!AA*LdpPih^oa1UW5>f2U`hDaBF~WUUCW}v z|K!H4UhR&A94IKP2osg&CU?`KgdnIzg6Y;}I5K2r{|Kh<8`sh8)7rB&*`Rj~MsSrE zECs0J*LOMkMbi>HBw69WutmJGbvc}9P`Y{yhn%FbLf-SdPY^8vO?NA*h&;!{H{Eop z+3u)4QK>^A%;QNVYMLl4pMsZ&ML7DA5Vsk6BOb3bP0YyX{3w8Y4yKCJlfhOE>Dx7$ z1kMfqLwAm%^RsPmcLErYH!o>}sCmb|A41v=mcl${M3Mfg*S;HY>eflD#e?pI=X^d< zN*XN4TfgWwvTmnKr3Lm7ZO8%5!@cJbFkuge-$=zl-a6(BeYrHkh_Vy#W$RF2z#mQ6 zd=mz}qtoQ5b0LG~n^|PCa49{wFg1|1jA3oMRzB z`pT*uYn7wOqj2TM-&-*Lc)Rx6Yw^>H~>7>(bq2YsUH3} 
z$ntd-Gh!=;>QrboSxPuYa2ND00mLMYKM=x*2^%C_cwL6DMfXIWgCklIjP0%&6#Q?0#&pDdzz%l_bj#>b{71j;Rfac1fXYh2tA68`dMqTSLyZmPtePV~oh>^e~bg z!e4P2SSr#gwTbhK&|2N80z{SHYn;F%LPW*TX}OOIf{t|EDG6rC*5L01B3Dag&yjuN zy%!01Fi*M2<#;kA&UrcpXIQYrY&hq*Wh8;lS3BVMr3bX;V4>G zXa3wT@DPY@!e8VM@YDHc-6SD}bMN_4s)6sseVVLr92XtA^LWc+_EhSOEZy)`aQkwH zQ?@=29+k98>xRmP`wDH_GSn7%%Wtyr@8Ns;ZQ#fb-`B$mDvrB&ae-w3b;|qT!)UhV z7(4^dyC*gAl`~7HI;SCWCK&C6^Vm6o+Xx&Hzq_anA>QC^{7wg`>_kyQ-)Om{bvm?f zl4g1QeHg*hsj)Mn%rP7HyO}>|fX`f^J@Bj8aWZ=4xN~fuW$s;4%(N6Id~%IYQd zB&8(wRLhBFRnC66#A1I&IxH5hCApJD({Oz7afgXXW?a0-Lslh2X zDozD1sfXXLhAw)w5H|%n>Nb%W4dv&c^-Q{hanZ>+f3CM&GiOJ{&&k87T3v^^x4}Z+Go^92*Oa@A-X);OJ`NjR=!3~gr^(h^S5&}tF}4+ zjw8m#sHZJAu~EOhMrwRW%pQ_gg6z}V{b~F2S;Caf+Y9 z)55WJ$Kma}-E&W0J(Rmx@>piReCH&Jc-AfmzqIiF^F-+Swvd>*1PKCyB%}GhFgTc| zapyrHN<-GBz-Ii<+~s1Q3YYn=l6|-m_nI~tGX6aCo*k2KFaVw#czc94V-1!UPk1( z;~O^I)g*JdgB&&{takKMtp2FPTH=$Hr6qnEaCTrb#iW?FXA&tRMo=_VYP4_M&c{y+c#DgBYs;RKSRsDy$lQc zm1CO>hvmIN|I(`+=5#4u>2RNB;Yb#cUea3eceQ7Q#73&jTs)P}R$Fk*UnRrYrspRF zuAZ3b{iGFFm-uM2++3!-e#gJ5GTIrlE%&47RIT$0!_=p@gJL)vrX-n7eiAh)B&)x|gNI;;wC zkeAv{8@6F|)qlO1<32E`#Ntm;{TTCp#P-4#=t^}{hR@+%pepQj9Er9$5;gCOS`?BH zq4i0wVw;GG&^rM(SG?qsPWYcuc8WeC{MVPvLf@Q`;Wrf$qDV9(|DI6zr;eqitu1-^ zY1`zzWc=$kQRmC=!`8x=YVW!Zmw${Y;-K%h8NJ#No?3JOM-z^uJ;C zZ6^fn)-U+bb1UYlYmwt*d}S#3u6x6JW4pYR61G$0Qlp@(wKQpwvDGkpOoRSm5Rrs$N=#lx*a(ZgJ{PY%)rJ>>?5jj&^eu0+f#aXQ(7--^_ z$#&tkv_XkX9?-ve3f4MHlf!)KQqYf?*p;N?MhpqdG&^A6{AWPCC zja62r32G`uKgAaA!cFI+6>Z(MnO$XBA{9Hg){`HBzNi-2MpjFSohbQhzOUkaC$C}k zJV3<{Emg+Ia9VZVT>|6+1SN{kiEgvJEf^=m1=C zlm`u8f}Ys=z^T9H3k3@rWnRwnV8b5L_iNlTH7tkGT6ClSGg>f{miWPnX=bt|)5LNk z_n-upi-s;H1o9) zHV3oOiN;{!TE?72Zu;vt+x#j?NZ^-#p?Z#jVG&dN*5Me{4;m&_v$x+%0TCrH5FH0o>;W(cRxWo zHS_>3-q9+#tn*2?tShLi^H(BGP{sjVJ6pi-3b zN9`EPzMex1Nq>u%E0y<}_nzz5XLDI}B*KfZ_b&wW?W(=q3|xD@W?M{#?t;7<59G15 zq>GAGIrUkBg*!}4R!%3_MIl9p9yDEm3lfn;5^}`E}xva@sITp zxhB8Hli+}8J?ieGBiRNE3)LvBxL zwZ0ZZZ>%nVe^4tZyywU_y;c*&c+pt*Gq&e8XhCP}&@q?H`t#**pIFmbe2g}dcjqdW z=>&cSTCJvjb?tV;MvWECs}*r~cP$L5XJt;!Q)WD6yhZ#A#A)D(5;5+a&|quFR?)74*YE?s=sY6WY&CA&Jk zU$G-tx2ac^LK;mqjKvh08&!we#h67`GSmABkmJ^7(CD>4lqGz26B@%6K|)iGKwg|#%q$#8jaS?pk7srxsBk09+iyA=_R<&8OYQKB|~0*A*m zO0`p}iMGK#(M1|B4i^Y%MM6kOr+yyBzwD#IXIR4;*_fB*@mwzLasNSoZ-I~V?{9U3 zMBB%H6BG8ljP`?H65m|BXg50d{rgb7R2Iv62rFlrR+u%M3`_9U&6cX>V0DBr>rJrd zI)uyw1b-SGBi=*27Zay1_=p8-x7gpL%)Dq3HT?P8lpA+<%C4Lg1Xd9fr=qAp^vW<1F6-ZHozi%U*+0fiy<)+NQee)iJI`TrC@Q#A zzkj@>@$Ov{iPoIUgKwt;3iTEUG4E~Z348j=Ex5I|@;iB|Cn6&w1a@AWkXnhm_2{cR;z(Wgu2X*r4~WiD5FHgkF&Cz>Q(B?oE{yGT z3bF@L5pg)4gmH^$ZpF(l z3eNUq=%rh6%_j`o&@w+W9UU4jlgdhl!-=`ptE}tW|1klR^4(*NE*+JN_+{_@z5Kln z2?*Idlk%)x_fgHlwOo*7%=n}P#ZgX4GW5D!)S_{_SFMr+{^&S;-X$sd^_=i~J@(Va zhoU@Zl+CH?SeVqQmEyzJD1KtC>IK(Ye}`LS%)$0n8Vn~OM$b>Jg?~DiNu#N}p|@(i zoEjGM;_3XdgeFL;UTkH3aAZS*x)<~e!cOo0zYO#dDT>zy*jm{B9{f3HQCBhG$nBh! 
zmchOGnN-RjUF`UoNgyjzj_v|Ped=?wb%9cvbeZ807xI_h$+#wF_H)Fq_9}Mde5Fo0 zxAxLU(3xJdf#i3O4chm>xe!qGYqaxO?rbSMSeJT?uqS<`u&e>;F-Pm&S3dt>az@B zOfT-0J?dw6n2#vs7ba1C7UEC1zVb zq*=qey{#x7>}eVidS1%nMr2I%1+J2|ftj9UMlH3}EH+x6r}kuhq1?LCH^r*VT109E zrGx^)=2TbK)^z67G~$_iCNrNDy1*z#-`j@dH~BSCupb-|^TOvs>3@~Q--I;wwP)Nk zdt}T=P`+2rW5{bNIGdvRqjO{?{G=^yEhpKP)SvxQ_7x`ay$QqXt4O{feBKtK`P%(l zQ*94t4E5y$8g35}o0Vs)CIj9?+XXpBGy|%1Wr{AL?5~17D9CWIa9rIbj#H)q8joT- z#A$Y%<(A^=*O9!b6j8snfm6pCtGuGO1hHWBOd3nV-~K~jgg?%9t#!gxIMtuIvb4E^ zTS~&em#Kpm)mjRX+sz20DwpSNn54$|YB+-(RhHUEJ@o0c;H*x*&=S^??B1;4JF&C?*D3L*?E^_PEM+jx4Zi;d2h)XsU zN!Ox3d0lRDrq2b>l4$u{K&vXY<8e4XpZoRK{LN`gYhz~R^)lIA&BOUvY&2^?7JU5B zzHx-qPv2cR((Dro3B{mViBa&Uy-V>BQwO9z{-K-+A&5h<lVox2Yst9MXu@ zo1&JR4XUQ;$(N7t@YMy7NqX5>pxRH&5=jAS8 zyd&;?)2Xp?!jn;FjoW!C1aAad`R1DGDZF?Q(`-ZH13QIYr#@)dkTwLk!LAQ3RSZ~( z^pBbnO%~!`)bAFs`9o1sQpziMT_-uFR~F@ z(TMUbQDa1hQd{>u<)H}xRxR!JP*mEw5M7TTRdQx zebv(A#cnvi>16t9#jX(C9G1LkvP@BRBg*%J$i0@;Xaxu);ikQo&%4hx{qx<_h|$3t z-u#9Bf5=Mh_b<4snEQ>P^~biXM72SNj0Isg0o;?r*u& zfkfy7hW!XFRHbprk0^151%`y}1}6c%nVwU~B8%wTNrkvX8@TFJ0j_o^VzEQ@ZH6GS z3=xpSBZ9^YLH^Bv4#*7zD#q5frdmY>-+wb0{WBN0IyuD+H@h}q0*%a(X955WLvN18 z%H{N=%_4cDWJuk|YH`TSn@2J49-rbg+~FWbSr%GwOhwiAv;lsTl9XEeW%c{Q%NA!5 zTDDhW@2V|wS#F${--`F&E!}DR^!nXWPR`U0G=|PYdg0$#9MDM1_3J}k05@EJ z5!z50NE4xrQQIt()5+EUeOX4=jqpPA!dNqQTX7!-fv~{EEywoNP%B~@TMChzSR78kjw1{(D_1oj&6X;Kk&X`!s znVM_O%Vx?|)y6xpR|FkkaWxbs8OYdoauf=^5Fk<659I12f=*IA=?Ns-KCZw5S5n1a z!_EA-E+3TgWk!z@!@7O9DA!cZ_;GqO=lB|ppfxM~9dl?bDNg`%^Be~9P00>ZV?#_Wg;)N#$)+N@9 z{gLbY1tD-poMb8&WtU=)|4f?aaD~GIxTh`wLa#UpVk!%=n(fi&)!+$8 zBv}?u4@;VOVd!OuLhk_{CNR(fA#M%=JEfCA@IiSll1X`e&z)A23K#`DKlbl25TS|g zN}MD3n%hfW%lEYB3j&MK3tI-h>ZzF-YL0;`enz4&*877FO?L>5%}`Y@%P$~E{_Jbp z5bF$pbJ=%fh9o~6?o&FQ%3)jwH`5of83v7B_zMJgW~jFC#sFaz?a%sV16&U`qLM10 zl0BK7F2unV-{@DNLj2XNCre|>WVJ5cm$PyGQ%uplFRVuiczj?X5ET<7p95gYb(T@c z(%Y^A>ZP8l=OiFSi(>=G%RpCC1 zd`{CV8gOlC;9?Li?+;5?+H8v53$yAq2ovPsZl55`wfOQn2V6_63#+v%tALPzm-AzI z_eGNVn+7wZLU*$V`RBStS;zn+Xau;VJ32Aj*CB}MvuhY@M1x}XcuzlY3F6pV7!sa)+nz~lGX zB2X2~e?y3&fMj4j@iC5EuMt^j{_%VHe7h;Sf(%I86TD4>Ia~}6Fa2XcdN$u2tGN~@`41<#4S=A}{|pE(kR@e$e}`28&TRDm zUWi~w9$hCYuCI+8n%U`24+TUNC<_2==DzpOZ4QyC(h za-E~LT<0PGGEV&=k>^NdH>iMFvpZ3s~QPhqpRo)r+wPFG?nr^4?Q z>Ve+{aJ&Eo{$sDSdW*YUb%gDc-U#1f45h2iP_^CbLtzxe@sr{F|4SB0eSMp7 z!3FVJ+-+kp+F2VY!2Ekyw1S)KRP0kvc3ckSwW#XMuE`$JHrz>fMa)d{zlPOnx8zi9 z>8rl+^w40@KNR^N9f3mxJX|O4VD>o=mm;`al;+<lRAl-^s5xirIv(_U{cG3zs`ya*t~wFs zbAL~<3T|}2_%D@Q&r+%ib=A30F<)@FoI6H6nuwZ^R~-MBn&i4@B^y(*kEwjp!&e^C zM=}#M{yqG?@S;27+1?M1*WpWx%2fcE{?9XBfJL!uReh~LTn3J7=b$|JPs5vQcRGJ{ z^WZ$YmR*aC;#pu;D3?Wg2pwj+h*KjVUHZYeUHSK_nGCOxRIku;zO2Bnnn^EQM*S~~s5A14-0N6l(Ss~0{4@(+4g{O*5^Y1t zYX1B`HEqw*+B`{vW&csO6JhjK2pzQ9BHz{xPtw;iBjleYK4HA^4BZ_W~^S z?~hg;C%i$%K9ESqfOj>$zCIRo|Fdko4hti$Izl=NmkyWhB{|V{FK^DjO~9U~`oY6& z;}e7uc$d`cOFGN{Kk{>Zf2oSFu~5A3&EYJy_Da|Oz2O+c6zoOi%rco>DgnXUUJS6G zy}pp%|IffW<9n`FTWXZ$-N!3hVNGq#zkSHML1&GR>xAHcY|a2#o*+o-fA6`hRS1fL zy{GMx8eUS38^f@?|1&7hC+z3hd!j0m4`&w)Mv(7gj33l-BJf7;+Vb*xEO=@pM3nv@ zdsZ0eTOW_-m(BUbzvZ46T-J1*m|19(!vW6swJ_aN)$Kp}CvSN$jFA+6CAN9AO$)r= z4nQV*)4e*MUxd~&Zv8E)d6qw<|D)F*J$)~kHqHra?9;)wrBcm^5=BQ5aVG&K#x^$zAr(uCQ*Qbekj2IRY7Di8veDMgmX|Xsp{8!& zJ(LNrnTkhXa|k|%8(Ar#)ixCs%)S6h{MXJI3gOJ-`@*|OI1p&X|2|13%5Y=nST^B2 z;T{x%?)#m1eFj!Tr}cx5>qLpMc;=yktL{!AoDs-B4b<|@&y6{k`ddf@-ad*x{?taN zfcf-C2^4V|CSo~x%`L4jPY5M47eYpH z>eQ*+fxV;3(i@pWG#e#xiT3oT39ON2{H6Sn?T*;o-%-RsO>@^JF^(f&SfvMItIk-M zZUTJpgyBJN`Yo9YMR}$Hc>#IYsh{1j__P%=;;xmzsTe3SrP`zZ$^#`ipVHHFZD05Q zq9Ol|sxQ6=|4&?9SB!Ys=?FbdDGQ=4`yJ){@}?Y30;50ugFzO;aHsZ2!hFGop)7Li zJCZPw*U3JG)K+fh!3)jSgzQ7UmFNis{y2X_UAwciU-(Rx 
zE{bwbIcB-mhyDS;lG~{L$nEv%>*zbLEKL@qvry&j>H`3osOe<&kMGsP6!!C+45z!> z<0ecRoU5&U_R2_0A^+m-Spk4mGHVeq4<_+w^W5I;YTMW$9oY*wnX?0> z8PSwAL<9td2%v!%_BafyJ!)ykT|;Y-bfB&13jOxZS*;JDP*w`trKY$+k#9OLxV^n~ zeT-qlaF%I%tKb;G9Qw^F4k`k?X5E9MHkfh@&#d!4P6em+rnW<-R2S7uAQDF#oy|h03hwjf?Vt3l#){MxtoGIR>j0&6{5^xD-J#>osaaJ*7t(z+jF!-lIZ<$m zx&q&&W#BBbwt&bZB85tNEd`swxXt|G$>S}ASOl{$0*iAfq&qP~tu3XWy1tm?!%lwk zx?(UpnUR0d>5YEfA4sL4yc74tB3ys9wn>rqf*Ws#Oe-KH2EExYk7o@e+0fONny40? z+p~|c$R{s^x}-n-jP78sg}2dmi<^rl5N9O)EAS$|EYz+7}b|tZ~oHQD0{RYE>4!}2x zv%)}fo)2hAe~}&|)FGvyz=})65hK7I+>i~VjTe!~0_UBNJ|A_F&p7(fDu(gfMM3n? zlc53iC@@I+jQMoi0Gzgs=5nd*5Zhq^AMs6UV+2CWS z943DKdbh2`OTG3fQ1W@W!7*>6tY-b8%tDsrMVP9Jfh_@rj}s;5CgKr zYAJV>lwL5uEg)2Fc4Qtdz(Y6|evBe~(>>3c!g-USp03dC!!BQ`AP)rrL?#gC#hNnP zu@fg|g?rquV#7;#;^9qS06tBw4glL+>r0bnwF7*Z7^oz&PMMzW>grzR3!{w^0$NF2 z2$M1+_{j&5w!KhUpAmh8j~Q&Cq(X&{2R=o=9em@;La4BvVvQ6_5pW4v9G;D9I_f`T zC>xcY0-j7;Fa%`}`SlEAdCc=qfDi+W+2P$v<6}DBsPoI+87W?vETnEVr9o}^*s-=j z0jq1eHY7hXJrX~6aZ=<7y5D1V`{11EvyACfKr1*{mXtif`$ zSCfc#2vdXFhxcVC=mnha>sjU&mUd&MQg4_%?_@*cmd*(~Z+^DDI(G|AeGvpkl{)7m zs>-R!@fX6vTXOg<8IqCWQlwT|kN{tm8)6z!~=go(} zG?*R=w+qBv4?lgCF{bUxgMhb#)uVfY0)o?XO_>=>&o7alq0O`s0%{xRQ|rLDqKNt6 z;6Qn7{?lO-HdQu#Ni2z=<0O06vo5Sp4r|X{>8v``SWeC!L(gpI!v($ z$_y+gYmO-d3!D}9B^qP-;M`0aWOJo4rZ&em7rWNj6KKKt*58&{Zt?4|g*1@L~?ALocrKvGbzrAjkuEsIgeGyZJj_f9ypg zR0v&{j^WrBY0QjAo{g~XJu8(G_wWY(Q^WJ^#U{{e9^3qd;RvT*qJfrZk&gNF0q;wi z)_tF{-{uKFx^w2l8Cv63o7Pbn6OaDaixmTF3!_c;lN$BKua;M-&R(soPVuXKl|6Pu~jqp&`24Dh?6ry`Fv_Ngja$-C1Gw|@@M^_>NG=vvW6 zlBk~&1I9e^dvh_A${;XCET}4ZMN(VE!!o;h8E_{)g5Bt~ER<(0&YF?O{47QR!)?Da zk-EbCh+XI>{THSyg?lTH|0v=#)rw!|WYRu0W^@b#>_n#G3ZM}vL=R*1 z!PNx@e%5~aN|9<_bUhf&P`GMX_b`rT6|$9ciK};hf(Sv8inlQ&IM827jl(PI03`P0 zX{*>q2!-|Uw|%jm!g0lKQ|7(xHH6Ef9F`oqNE=0Nv#dCW@w#fwB{y;4s8i zgtIijbn+2AN{&4MWS0`NN* zT|`}Q!6)8AJjW<0J9t&u_gvfOO*#UC$J|Df>;VJj_kd3c@bv!5)Hl&%j?Z*Qnv-hi zJ@PTLw&3fkg?Pll>_hnIpJ$@Sq6a=ANRG4xI08m!Yoz`@=@UK;LWfi@;>l`_5hgFF zW`C8Ps6@txxYw@<{2QG~`Q?CrDU%%C?cfWJV9A%GT&LqaRD9~P=D|WP#Z-HbL|%x* z8csf!ykJ51$A07Y*qAs%G=ma>ndOt4J8N7o#+N?jqq__Rt(VRs3 zkSar1YqeYmF{+O{CQQ;%ZVHp_ihWzrd?0F z24q8|ZUUSv5V)F>x&lYP7Vs^=dx+mn-omY|_DG5nv$;G&jHaOF=1FjCT(ug7g!HQ*R4H~VN}&GxNSR*;Y0)V)vbj%*`u2L z8Xt2oxAnt0+6Akc!Q-UVHNASED-U5KC>QGN?*4b3vVyXZu1a5cBpsoOJsP_DGjZ7n|(2ekiKS zOB?I1vlJGAtt@e!L~41F8XTPN2y*@UCvZfQhN{}&aAfEe|+a1n70qg4jS1P9=ta`cWB=z>nUx$@PTjPo2{oEW?!L3~WB@59zPK{^izTf=y* z&Lbp^4(@q~@W!#DKWWIjp8_QxH&@RsJsk2FDY^mYa(1pH3eEmJk71-3M@sYt(%0Hs zH-U2XOkVw)RKI?fBX&_dq8I%1PoXzq>m~g``Hk3IYcSxFMp z?ueX1tZN_Ag<61cS+`0OgTV+0Gy6y_KtaRfmDVc*P>6`xJTWMNHVO8R^+0(Zq07rM ziY}X0K_J9@tyT{r*obkbhBk`*70s{ZO4e)R{6#?W65wF8WSnRR$2i{A?PW1SD0_19 zg4d-TY-bL6O2g+CFDR?i@_Y>6)e+^U^%nyPrrmobxrLIdQ}FHZ|H*`Gb>w#$U-UZ<2-LTv*@Ht6at(;U?8oX-hZvrBUmRq`) z*9^ovBj?${l)T9UU^4KkIIPYqg$KlwkM%vx+F!GLV@910Ys!s!JvUr1@Mpsm!cb~B zp6T`nCk#evE4XhUdXFSECACf9aS)Vvkq9Vfx2HwZp$=_lY5l7FGwY7LBqZ|Z_sS5? 
z;Vfovx1eom378Y!CW!sWX~_sLn(^L%0{<{k(0ibnmItP0WF}^ZzVx1efc_P`LIOKk zL`+|On}?!ghW0LIp)KS6q#cF`-K7*H_kCNV4DD0o4I9zKS4aAg7~qhX9s7)~6TMAl z5r=RHaU?^7S~Rqa_{PRWw z5?TxQYQOT1#PUDG;+{rUP!dN`eN29zJZz$ks@F~r0disyf42I%k+)uTaK8#zy66CgQgl27gU~^=o-trkdlqOIImSR!_8K{W zBxMT6AZ$Mx(>(qN*Pt)cE+r(E@CZb#77g7mttujORGOq5KV%|r>)>oY28T(~V}xj( zMA*0PbUOQXrnyuQa#|OsA!!`QZ)b^u-lT{cEA{t}cUoMUg|<=znyXq13?Fm#XO%G` z<@iq*yJZ4-2avylLG=Az9JlXYTDC6$u<$!MzP z-sVQvkOcjDCyz9vleF_ycG~-`ZOFVr{z|u+uZ&D)Tpfa<+^`NK zy^8J0QfoPxss8-tVR>i&8$;?SMDBuh3VK%7<_W}8H}$(SGSj=SqArY|j$nu&c z&(#z@Tj1!>t=KaY5tx-w{0_hf|Iao0l5(r@~@W6DfxSt zJq&dx#4NpgQ9T?DohSl3ZBZx1w`2@DIqi!bTRp~D>^Gh-XSmujFYvGmh(d&q7RLB>|A+y*ky9{YBX;|7V8Q@9~udyA861xcW!(_B>%AOvl{-p4#;rPh2uH<0I- zCk&`a1d(@#Y?1;%ucuC)Y*i;lFepLcK<=9^;&JLq@;&M(l`jz;=vz-S}8 zQ%8S8%v_^5o!b5j(qLyN&qy&66hRH zo>(tGI$z#T)J*ggc)ifRlK^Rfz2z*fq0v`yC3#<~1@i5Si`M7cVYqg#u>4x!(0z;2itfoxwcEGh+unTi@Qw z%D*Dhay++sg&n{Vykxum#$@FM@;5(daFWQ^vdtQs+u#l zPswcR_#S{jg7)sqm}{3q_QFnxIXXgyd#f~}U3PyzcT%VN5yaF!t63F8U?x6037>4* zh-XunzR>;iEWql}z*N{gFN)s$ko4{`=Vsc)kIeaH#v`2$2PcDnr#~DM{0W7@rBG-+ zFpC5(hmgDHAC7*a6ktY1j#$+LoR@J#3k1J9P-RGr3^kaBcyvtiAkLMn%G2l zk=3atJ>%;Rq8nZp9zhiSL*hpjxr=5KcW2Z(agMvCk>|?fK$iOYtXtrEArLLYCvGEXfmuc=g{7l#0RnsWHX1%tE?e=$Ltlm%2%K zFY6N~CMHeq3UVD7h`2S8jCIBnjB^3FCx$D$SIu>SE7xFB7AbJ?321YfY5ZevYt!Y>;B<^fS}oM+vgtxP829OFhfFH_NzK+TC}S}6hJ^~i{DZLdo`#^^%d1qI zbUZMSWYQESMI1=VfOB?FTGSoZg#P3On0}g;t|3KwC0tQ)Jr?|#m zxr>RP3w>)VtAG(7z;4$(9;g}hD4&%HyX}z^afzHV{GO0l&u0(88*6vU{Vm*<4~`tE zCH>L$;^*ik?<{W1g~^jLz4{)jS&?8(BmpZ?WO6h|Dxgc@f`}R z8t5nQFmySGP@IRU>6q6{hSLR-sfsHvmG-W^4U)U=BiPKBF>njzG}u`3z%n%A_&Vdy z-i1DHy(g6nkix|Gp`fb^!4tI@0f?fuI`|w1{Q()umG|`CyFuP_yh}nbfA(%i(&Ic& zK|*DQevQJW`V;|t4OIu*Rk<7q{YC_|P|N$Bq4jqH!vy*970xuHdrw6mvC$@RTm~-d zliY_rQ!7kAm;?hp0&=dqa!u%g840*tu0o_uDc)BUK_>Q7zdZLIv0=hCKA-4>8cw8; zqwmHB@H?7%n}PmGU6ofS1m45js-B-hVt~(tCc4omL~VE7zIr%mn*xmZfRM#5;aCyC zJJS83_xbZooK6RaCNo%a&+5h(8W=hd&xZ*287Ms7 zHT3>|aAERJ9#D&C)KQM|gWx>2><<`hC3lEvY*&T>?fzl%As*!X(@KUvbdtsbj}tl; zY~!oeIVTniZC&?wOID#1(O|cDIif_2HbMa7u3B6$`s=wr_$F@2s+)6E>H~$w+_z<* zv$J;XdDgzL*e-y}GzTN*7ks+<^LVxPU%PXNWNK8@_R0b-kat>QYHs|o*ZG@&9?PU- zWM|OsG9*z5@S6f??dRB}O-qs*V9J@y0yipq7aDU9yHwU+LgF=C3IsnO(EiYbHcK6K zB~ad(ClqM3y`{VHCbUPxRc$>9iKHIaEB_x?Ul|tF8numuM+G?winI!n($c9&r_wFb zARrAh5*8>@(%s$N11cq50@6q~5<|zgHs^i6AFsd8;hNdA_p{ft;=b>-o_n&tg&gyB zF=;&RpmX(g^rbM2JkH!teD8gyZdsi00Zbe`r#V=arHBh>5DclKzl`#;*?okG0)S&dn1c`SLI zszt+Hew!50?Or`VSZeD%vo4HM{+z=71xR81DfpVCQHWj?(pmz;9iu?#Q<|obJ%Ext zhGqkr_#blM{eZ5$sN)bf25O`kbfEm#R-jN$6PUBFPg`jv)ATi8=oNkM9m+AkZv$y& zVJ`I9nz$A=2zxx)g@3Q)t)ruR!=R86^Oja@hYgzG4(7UrxL7~^kI|1@sj zXp4S%9Z&hhnzI&F5%Cy6psNajt?4?AA`B0;w@f-n=2sAOD3Gs8?n>kj*YEas_dVx! 
zCnsMdFY`Ys(SHbuL7kD!e82z3^L)MIxT|mFqR_#hHEN={X zEbp=)umgoUum{C4@!vKf?XxmWdqw!LPCOgyfyD#)=|4`omFNEF%|auQ$(wg$-5N2F zhqvgpyOYj}v>usP1bYZl6oivNOR1laEc%};NO7)4 zlbHOi0*VGi^)ah^2M19))+>o`@!Y(kwZ@+uu6E~LpsUmb{9)4?bizmrMQ~ho+Dkey z{C#xh*cU<9iS^2BoAJkSdq?JL%3qqTTOomKu?BRHFSuw~8C&cFbe!euoR$1k=m$3k znGceF>qJ)vfx-eZ6ed3kp)epedO#V^cPJ+k*?GsuXN=_W0u!wpLL3}^@%!9=Bn*{D zXUZ&q{!#Nfow|QmZv~{2r)Nj1bH(2L01^fSxrs@%3A$M+W@+Aq-IT}T6Sm9XK*U(> zil}}!%Y*7QITI}%CI~%B=gyYtE^gkIcsKqCP~O|6_ggb?^U46K+)!}I5E@uU((WI7 zS9}!!#{Mp_d8m@);6~M{#b~eO5GTk9x>dBQ% zzRA7vVHE#p!!syto*4P)5J_J+!2dUf@LNpD>5kx=zugIk#*Le|q8wpD z0GIx=0`HgX^Rk@6AXK=GvPQ6001QN-#n#m$A26oO;B?Yk*+`ODz~7Vsf1tP`z&m18 z_cK?0kaOu)(*n|QCIGah=|33wEQi;`OS0b{36E!yu6#_r1x$;uvv)MC$;imgoqlsG znep(rA8B%no`KWy0Oub;-2S`S!OW3={xbtMWS8(W{ntH_hWr{P=ffKq8vzYYy^+4% z3f)xDUjUu(*!2;8wxOOE>b)%Eib?w=&`~RWM8>}@_Fl~6G>)y0YV z9s45tJJ2fIUG6m8vWdd3Q?4E$N|z10B8zSLJuuue>gzw7q?+%fL!oBdBz^i`3@bAazST%;m^lD{*e0X$rC zr(b2f_i*sl!EAgFTTZ8K4T+Z>A<}vswkOc=Z0jWipt9j+>B97CGGC{uVjM|LT;{CR zPj_e&L>frncWHzBYlT*<>L90Q0I_`;tAlbMX~4)j&@}!#AqYu53^>psw5*pc0TGA& z8rrRXHsS{)jUb|O?LtH5C%;)?8J-uzYT&MR^y`pED;x+3l6r>4DCFAFg_dCr!splH$?0occwP6BKT-eos)In?2G@dKWBJjI@YG3he zI@5C3g)7a};)1@uKH!lg7}sMjBIb(tAs%1l7*{@JQ&w@YW?TH&+cKb1s)A^?6%D}% z0bgsuLSnq3A0MFcm#^Q6#>d|h$pq*zYL$SL?NhPuj_KxrLeG=OkpFcQe?30`m^ut8%l9RPJ$$i;0q9!1N55mL5suH*xMGbS$Cl6 zg2XTufCrami!kW5lP08FT=|nF3!1dYh7!;_D%z)f1b|X-lREeO)@vx~e6isynpw`L zvHVU+poWNY1R=Y@CfmPryr-bX?>X`S>gdwNzjXKfN)f3ORJ}+mQH?)e>qz(U&h?d! z!*bbp=;Lm;bKe7hy{XXo&B6vt9v@r%xSyeb+rx1L4sp}dqYgVi3nHvBPm|4zK^QHLObSfG$Z%!LO zc32(+%t0p1fOCm$gMPG+P;nbJ=xXaS@sgD71On&p?jGAfU8v zOndX6r(kz_bS4x}VuMs1Vf~1%UILTiK|OiAdQk|i<6OBi*x&;$;3ojIhV;HoSL6Pb z1C~tdVd9IHrymm`VF8x&6SUhr2ZFk_HZ)gV!RfkTj>BrhaKhVm1H9jaw2%P#39$Tf zF;y;6>i*qhXx^5_90GG;0QC?9P%H&@#74;UZ6Of=c#!E#q@ir&Fd66)E-oo14iY7Q zN4Airo(h4@LBK5?0k1bu_$l8MM=SBH-#$z*xYD5oMT6;iJO20jdIr&PMNr{28d_QuDEp6eua{UtCUf1 z3=5w0LEYwykf%UOnJok+*2MRr%XjBiQ}v(gJZTGKNd^@z$>%}nlFa~_EP@lujK|9< z+Q)AyNDPVYj~CefS^Zv7IPKDW1o(|4a&N#mkx1)7fkP^2cBb<}2HgPI{q9Ntw7e%# zC0Fhr9Q3Aq4mL9!m8gnx4Hb4fdJ3)j$W)zo@863DbdewIuCo5FFMYey5YiPV$Z0XP zoW^?!KKKy~-pY2dW#C4x&i95U*H=q=F}+uMUqKog+uTmJBL|-57iPAw?t9fhFP!xM zKz-EbkhvUy+9xyIQ>MQs{1z$Dc|MPTlj{lSiK5qHd?-9#SBZoqU?pGRr6+d*P|8=2 z+iE<@M}UYe4>sP;LZq#pI^DNK_rBRZ_fJy-t!LbvFqheL2^@Q+ z5|!6pqU?iPr;u;H*Hd`wXb&3C?}YD=O@q8~WWAv0t0%J0bfJ5~o8BEQh)C%`bGz_{ z54+!I+Em&@#CrGf8;}gAm93(#hL5$JO0uxXpWXru{cqNt*PlR`io@s=mI__zRj%0! 
z8=ilLL81W5=%?`Q@3YWM`YpS(@4;_|qmk}4@RP+cZ2*}d3^j(1dInL|#o9 ze^PPUzk`K?knhzmnwBaR=nI6N!X7yF@~H?*!F4d;JAzQQaeH&}FII)QI<fTgK_NkL5nPrnmk19|mbEGnZ_ z#er%0_JdWqq#=XimWeJZvAgFkqn{kKg|mF2Bz%Mq_ZM8dF`wQpM1R)fpbva`)5y36 zOeFemh}0ADo+3ahr0jEP*Q7wdY9bJmWZ2xF$Mr4Q zx#0`8!|zyPtGfYfDghb9aAZX=bPJn-_LQiaHtdIq7otS&u_VRGkw8Y*p=QjqZk77n zoFb%$$xX<39Y~DLrDtxE@;H%$Pcc@vT7!%V#{ko)_VwFAhO)O=AM6ka3mkuQnjiQy6yVlE!@^z;0u zq35_U#03MdaWA9(;5Bc5S~-er152IE>Rr{zXBPU0EtdQQubt5VfR*iSjzIxl+2Z zr!br_GFEnQsa}MvS0}Px3W@;As^52@z+Uahk}BFDSM8Ea{`&~jLXx@HxiVgFJW;C zm|-nhd3bCzOAkGK*Ww3OakG)N6n+F<0xv{QTbkp4{XDn=T6a31vfNA?WelssADfR^ zgC7Y9sfbxY#}#(2=YmT9C7f3uokWp24*kpLUT9aW73qzk6uGKw!fX!HdHMM>&n~)m zxKDUy<5t>oa=zol2Y-&ekxOxM8D~KX+1A&r*qHp)8}7@K*{m)3Z08Jx)aV?X(UJ$%m~Uu_hm;+n3vB!J!ESRJFpjU_d4k z2gb>C)b|pPRr7E3m-E^e!o%$qPG=3O8cce=eZB{C^663cpjdx~iK{AFRi>5iS?RiD zeobZKcInWTt%^Fcl=fZY2gY_FUf=ke5N_vK@#yte*1Emt0Ad;Mc8+BY8Z!A@{qm?T z|B4}DR!t3b(==QU2o>|-qe-=q#gEfal#k3ha5`U*gKUiaN|O|QHH-whKegFqx_)Hk zh}G7~Ng1Pn<9YmdXS3|Y)BRZ!oNGz?RMcjKJ7*+e_v3h_Tl$it`I7djsT(8=s%Q5( zElirO=`=C!mSyv0cx9hAD;pp}INQp6t5+pIouxO{Fa`b8x!jI=Y_stv$WwJ=S9CCXa#394L(G}!t|GmTkX6d?^hi@hv5?HR}Npm z-Cn%br<&h#+w3wj@QBSa@=v{28>`6a%fB=SEy;96uUa^+T)q0-Z3HL7+jE@1KY#sx z<;ZE@Lha(PWf8JFS7ZqTXb~6SIbOCpR4TcCE%p(+G`5P->~c0#LQ_Kx%Gju-A7uoe zNs=`4z@;pWByppK{g)V5Jx^K#)X$aye_=33D;XtUCMuAiUyaJ`*)78}XYj7$fAx1} zKV4QtdaB~7t@-5h#Z3x+hrmkvIhpY=K|YVQ8lqdbrBBA*@NI{S^=26}^^cqv?YNgv zu0T=c&t9dkwt7NkYdf1s2a^C?AMM-)YrnW%mizL})YWyFiO+TMbxV$_-S}$S7fUmG zusmCqw6=w|LHq)$y~Js4KW|XyrV&qjze3ju9}~54ZrbobBc>WY_0;8quMgD&dy|f@C9dWi9&2wRMVxDC>^ z>n)b-BDKt(t!%sH=(zH`XY%lT)a9RaR37(N19n%-0 z2`Mz`KO@(>B69M76xsx2+T~oxpm;xJ2VVL?3b_)vyuNR#g*m_XCnR=d18O-{U~>5j zkI}JF$SC|WgT>KT7Dmb~S-Y;qTl!}k%Z90%?;|!nGGK*YJMbG*xH4Rr-TUQlc$a=_ zR%mwCsP+IHiT(2BOBxn#g@c_nOEwxI*Q1oF;<>I^n}Mne7tR}Nk8`_1LXbb+Tb{bv z)>Zd2WA7Q&=vvkHB?xD;)zcF#@6iNuUc{k(sqv54TV9M+A^YL&xl-a+gL7r`RxQd$ z&Q_x;c3JB*%L6&V&Z+Bi1NNJH(zZ@^dBIlZ3%qt2^LGiG6+@Ur^w=xfw|>kQ*{s)E zh&*P48A8QkRWNpI|6pM-OK`upJwGyr*ZoHhX*i3XR;r`r$Z#4epg$j}|-wec%IW!fkJyHmR=e?C+z6EtbW|TZbE%0nXHJwwVbR?PQ7GTJn>V ziK4mGb@fpjt-tKiq*IKLW!aQ{oFFR6nKSvbB%x-pgDjGC|H|g%^s2iVigS`^NZ`(QvOF7O2VoTh?a5nDmITJnQHsh_ql8Q6) z(IRb)*N9VLYu-NXypSQJW3L(iDT9I5yWD(XE)^Av?CkE+#P624*;f~cOFvuhlErn(;8)+|5O0wYV%s{DW!W{iI?%C`x((0n7k-iMglf86V0X>cc z3nHRw`8v!X;tB;+yKURNFPmcJ@L;bMCTV4kS$S@PjwUImID)^g6H|cC^0RzG`W} zq0b-FlGEHQMUBO!UxmR$bu0rp`8g?aNrw_lZyJ z($(K(Vmi7yx0d^^FF3FMejtS24x#q;zoT;k#PCf{-n3qBXi@(PEXDh_uv+HOu3a~l z!g@nhj3(YU4%*Z>h-(}z=B9pY!7W>L>KGW^?`G#{%jJUqSk$IIqF@1o_#yvnT>+;^ z%t~2}Rn#nKOOsu!fE`u6dJSnM8LM{8-!7KGBTHOZwE5&DAw!rFBCOwpr9IN z*C1|@j>)~+;2_$?OYvaNfrZJHsYKNRxM^DvO27Gcem7Q zn!DLK6HaklkNy@;x#jEEhCvfnRL)qpEgC3+&ue7N@`6!6Vf2;}7@*G}Os~?i6f4wN z;Aujq~IM82WTW8E?+Sr=CKW}mpD&cpdgpt$+B6F7)+>(S>qJg3&R4FSID+fBRJ zjnI@U(x_6E+X~K0zcYKQsK!0aws{)=?V^p_>`RR&T4rEi`HH{HE#)%*dSi1tXyfzr zENpI{-8Z%C_O=jPB3N2AXaJK4g5Qih>flJrNRUE215$Bma+*pX?yOoK?JZIE=x#*b zs;~P{b|n;iqng&phGDa1DTNRz`X_|rS?KWz@}R5hoiMQ%gni>r&VRp>uQEuGPjj6o zr6xxQgqQlO+z#fG_tibBvNZG|l5{MeJ2|(x$}^#+H32){XwY9bsFqEa+xoZJ>o+@K zEuRDWsGiW;>LiV$d8|fhR(}jL#H}n2C8s~^(#`v4vaM~Lz%>~oJT}NwY+Em3J~RH) ztQwDsyQcT}#xQM(nmg+`4Z6AGA{P9y8l(QTQ}!ywAJj~BO@PX9bG^2H@t6Od=eSqo z)rEPJ2~~`QZ1?IPvsdqUbdikpMw#jnIy)>kB`2kQGJFY|zB3|x;zjX!a z_xEuJh=SaFuO7Q>F?OX-KS(b4^|}2yy3}zf zGZ99*5-IX(p+Kg5BIKxgzsUYjAu@_X^@o6WH2L^lh%u}&6Hu9bH&73vrDYiBv2SW8 z6^XMzk2soc4^Y~JG~rp3+oQbRKh*B~`@|b&GtbOdE!A?L)zrZ*eIBHYSL-~#6YI09 zHV`}!HB$60pgaIcwhE6@zl0l2?R}nt6u@@tp2=(9@j{c%vhB}H$r^dvR+%jZ54{@j zp5cvsg;%x}1=U54W@(erArdsk#v#X}ubr4RUbk5zAx$0~YSCoB?fkv?yluvO0uOFw 
z<#=dqFjIK`PcKKZndA=vDe~Few@EzeVA!e;f0gqx3lyqRWM7A|=BLimw$iEf5VZmsvlQcwR?_x7XYfkVcN432R-l*t1q1)tYgGbXSe z<#w+agaIIcC(QnLq;JAk*!KLlo%W7eSy3DD`-kbIV^mCKr?N4zeqG8JaYU_bv(}CN zYBgkCH~>S>8h`B=nXdS$Cpef3EsiC%)V?r}>q8lbK;Y=}qIGHRK9phLJ45G6bBr}{ z>y)4vyA^O;T~*PVSV#W6X8urlg302q*~-1TqoXezH1zZ#L&YYt@fpIsb^0FC(eboU zYlR~7DypNaeJQ!LHcsA|UJAALwkBwR)JbOdQ!9Gx_HFtLzOf_DOPKU5F(j^e*d>Z9 zePLV`323532{rn4+zTvKsnkcZy$!>xvhp!_Lxr!P#GWgKv@M$a$pbfos_)pHg>kXM zm2BdCqrHMZxD(4uQ(44u{Y?$kVZ8pNRouUpkE_k6(_}6j{~F3fO)R#~C9pZI*!&^L%?Y|VNJK4-GG4F4hL|?dY zVYcVJWYq3BX6wW&Z*8}S98zTVJH1*@stcTaw3>V=sgX}lP1FjU#JaemT28ulnNE(u zfGkoe;0M(z@xSO3%finF6H=*n^xXzIoO?C^1A@uu?!WP>&;uAKpt zV`vrr@*e=;bR4YWDD6g8joUe9kJoT##F2c-BN?NbHl=)a7Lzy~jzvXnrsZPUPzN6} z^bD;w0h=RNo`~o0sChh=C|dHqH$TAL>hHBu^J!WPwt~x{Z&{j(m@M7ZZ7Z$A2YSpZ zv#!wNA$x|HH!M6_80;@>FCLicTXzc zEEbU;wB5r{CiSJu=Wm|$#fnTqZ40qU-iv@N?~ZJhxY9L@(?eFK{Lt48H%Ib>yqp^g z2&4&vwtwtH(%5sY7otoS&stIP`{eOAS3rv&&R0AqrJ@pYKWD86<2_!WRE1Gmey*LT zRVh<=1h0Y2?G*ewc8T3oBL@K?woZbkf*8C0Bpe?PU^3P9)L2AL{w9O7)gi2fY(%Dg z^3G>p-(L?wvTEHi(FykBgN1~CLB5~{=0ZtJD}3Eq9kE0`5a@Rf+N*GBsmoq5(7W;1 zf*>~)?HeP2R=B<*a(BOuG2Ch*Z_Yc9T^*@eOBb%cSGDcy>-pmrV79tGUd6S%J)lmw z`ukUT=J)g$h||&c*jGiO720X_86H}>pYm@NA$DhVOnn0d$=d!>TmB5RZiQZN?kzNw z6i45>MMP3_gB{cd7^}?I7UM2Tp4vTx!FHbEJzOwTuxi(1k9LCbfayYCamQl2MQm`z^g2B|> zYW@XoE2>ndai-uF^4?v>lvsNqCsP}44a zTG9jj%uFY8_E#SOCTT&6NbvE-Fa*7~A^G!Eaf!`M5%gw`%!u38fqX|1tB)SN*T)`A zq$`A4%q;R26rDmtLs}r!!gC*hqRa;y3w3$xq4`9d$ULUP^*D`}f8o@+BaZLzeZb{0 z*ZU>6ZWBcv9a^>rM%+UwnyR>{ADkH`0^6-t<0Lo%&|ti+#l*Bu&~I5nCnlNCUL|5z zcA^`zL`DVZtmm-FLeyK&@utYA60U;e(I8wQ10ae=kE$>d3urP~W;q_I$121r2fk{v51Nz9WikF~^NSDU)nM>p|l8BR8 z#<~X+<1)KnH#fKxNLpIC3Y7&>8>NVoPT?49 zeBCdya6RWc)T>v>DJaO1WX)%KzkQL@q|^|S7jk}=47KK~`5k4yuKVZ{VFJ~dwMpFS z)+!E%!PB=d6b;)yAv#ez_SAE($hLAFdBpS57XdV0@~uW&#K{ZZs&^D}-EJ^b%9Jhd z&?T+7gE3&ci}HNwYER3W#|%D;r$QcjB156wdl}=g7E>9u8*{Q&GF2L7-Q2Y1=eIV3 z)ZvG37NGVcI~tkn{nj|yPs#E)gQC6cLDB$!TK zEvKAzj;P1|Nm@$9)Ah8J!h65k|MY$nZ}O*PY7U^tci=68GDOO#>nFkZZJYrEcq2WQ zxg?9>s`)+czrw}IzWA*Luq*FqJ%*wZZAw2*Xq{E190?h_UEZXv`+43H+1Q8ZinK`sUvgv5q1bS4s@+ z-B2OqR=B?W3F*U4INPp20JCIWy(a&_3Lw9hZ|5%Hvm}pg9d_$qH`lX-!9Xu5qIs-t z!&Z(2KaeUk$0W=I*!7S-w}zp%#V!#yS4jsTf%3RHVlTZ^s%mzW?l#SRV3XK2e+zkR zu~(B>e^F=+YD1=QbVbu+hg%^oCdY-@;MKcFb~ZGSSoV-@ifbD@PK4B?s&%9XCU-QV z7^9cJ-NciBagevIn~M2f8voSg_4?Z>F4^+rwLYTZdQ@bR5PR;<5yKab+ax)*n<*_* z3xC}^amPMu%HwaOkab&9q38ZW@EqOa1zZBuIjvBV+**!hcWb&#MF7P}Zw}J-c=HBj z;+`)6VvQ4BbrSxVuzSo8c<9t^q*AWZ1;mGBWY7mMn-dQxAkYxS!F5j*)RG0uok{~K zy6GCh^&=ljR?`u@z^toO71@>IPalv8Nw;Vm>OdYhLVUSaiCi5Zs(;J9tWoQhlcS@!&=#al&~rQciWSbM^fO8`l_}2~vnVYue^90icfoCl5h!i87+m{~fYRU_*i&?Mbso6d z6(Kwo^k&@?nK;s{J(`k@WOwuWa8bMKh^XI6cGdO|6D=Q?k;)N0MkEk{1$ycwZ=LRJ zb;LI|k4W0Mj%oh>a@8lRG!~3|``r!{x}bv^e}=)*USwvZzZoFm;Z-@Th`Zp`=`Mdv z1G(cH?5sM}cZmCX2t&KVKNMc+cfXnnNhm3STDGinL_SzkOGl>Q!z)*=KdVtcOiNE4 zv3)XiQJi^oBw8e_4!EINVtpS-K=4R;GkW#L4n-zd{YJ?aJ3L-vTU9&m9EXzd8)dwV zggPZkH@!T(d|Z%}TGE$D^766$4=6<4x(g>k9QaS0V*H!a4-Gu}9?5$#vc#+;6j!tP zCChu>SPu1}`sr+Jti3lgEFYL~NfSDH$adpWVWz20Z_-0FT%@+lT-DQ)mZ5phh(1DEb9k zO3o|1z0IOsAydcZZYPJ=A;8s%vnf5-x$l3y)j)@ltb8yxF?#$%DeULLi~kG@op@Aj z2b=IY^Efw#BZx>ZM@s6PqdKvAWFYTjnB|W>qGY{oT6_FGO<U3s$iC{Zgv0|R`XCfj%Vho3ng9YJ~-fyvpSDfB+ElN$b5%E|vY>z?>WFS?KY{Q%x2b--|) z+&diU9MoQZ-O*jf(_Vg2gW>ZmJSZLYelUUUT6O>H>+^9gn+u<(N9L<6QJp-jc=>{JGsH){Pn37T;Yle^q-E6*XIzuh zHQm4bFHHmz{1 z5F`Fa5j}u$?)2l|x$40bvi)%p0orc-xxs;+dj(l6-gl-a|Ewxubr%INiFcvY&EsjZ zeP-}AsSH3LQalrgw#N0S*_3<))v-72Y6)#q$Mfg$I30Hb3ZgU#cPy|N&=Eig4efem zBrh9FspzR$zjnDTHYLf}JVyn&&r-9Js;ct{8gjJ3aR6b7gk14^@OPNpM@wxcLqbFP zmwq1&44ma@(h+c&#)DIZS|q%F{~XuWwvAT5jC+x>DLjAAe;3l?ElO5JRtz?RR3@7P 
z&=*|va8&lB=36}+H)Ino5A$qTd|4Bu0OEz6zf07r%5AwU>9)OBRL`1tt-%;B@^Tj< zd=mBB6NQFWM|flaUU1j0RNq7GC3gTgQwEEk4o1y9|B_(AMkgP8?;=%7Aj$_IVfrs?UXZ%>GY^WMtj)9Mq0pjIL zExi3FPYy@iB~?N}8|jU^ln~Rs@`PbW#o1mvD~}Kpv2FfJK|kqL*>AM>fkMOY;Gh zCTgQV_^j#ltgPx)7<|`jxjfS{Cq$Vmq|yNHyAkSaj$Tfp}|Jc~<{wmZ3Dtu05b5{MoUPJa=9QP`M&oIp-^71aF8LeHZgTN9l!X%h@ zM>hE%A~o0jAJp4k+}TP8WhyVdo7FPakTlo8F)In6q#2|6g+oDJ^3)HJUfeHe?pLZ$EBSYg<_QV7rY!Pweq){t`~ z>g4#K4ReIXWvzvrjfFFkL)(0AP;N`>=%>s3X>C=P9y6%sA1O?r3# ziWOlN>QEzznSx@RKqOh9{c|1?(P}nQbM4bRalO-im)HhB+AvFU-dY#4`2eQ)*v5sz z9`Dn@IyOhh-?4Egiga#P3{tg}-D(qsuBqWkl?mw`k6L{ned1`{_p%p=8!wLDQjFE8 zqCiVx2M*Vw1h`QeBW&-Rn}JQ>svFAHD(hXY+{ne#czqKo7iv12~ES z`%uPfn;qzuzmJw!61u0Pr-JaN)${(Rr`5G%2QC)VPL&(c>J`h2} z%VTV*xk$rIFy3mtBqABXxrHb6M-GHZa`HsgCi$PTqd}vf<%5VH8k<@+M`sbRFA{9^ zPe;SZaoi^VUHNrrq}6XP7^hjB-*jFN#c00qF6={R5$zGWbty z=(H{{;w>Qfm;pcqDNBLhP*I>1VF2)&c2GMJB2J4`S0Ln|T=9k(<#|)TuLX4DrP}ZJ z&NBK<3jozCP6mNA400z`5%+f0F{^wi@;C*>4IUeW(t5!bRgrt8)w6{lmp)}R==r=U zTL03+!0Dd=YIJpk%n%{@UkFjSl(g+~Ik( zgk7O-M~`*D;##4Lb{1ZK5tmJN$v+kwkFf}U1jkFX&dEp59+WPlJVrpCf>h`az!%Nf zD0yF}AZqidUEd;rA$#soHB4@F2xc_Lz%8cbRT2VY_ZgP}uE^pscykGYlB>&U;t&rl zV=!=Zg?g)`VD zDUeevA+jYf7ssnwdo4dmHJ$w~ZdV!V5_S*bDaQY+aMv;rQcyVFc?q+&HpgP6T^?Fc zW)OOvB$){o>}an(z-PJ+A|XPbu51i03k-yVOar0Qvfwafz8XP{5$Jes-Z6V4w|$jD zz$pZVF*3%Hvw6cF!`vhEx$0x|yf=k0w|SyVAL@~|C{@duGfsp@Uk@s30J?)X8sYpd zE)XTH!M^Z7f-pj66B|RMibQq)-w3)@qvy;S^XW=xAO_(Xc0N4>wTu3?Ov)c^mue5Zq1=qK- zM^M*gf64Y$Bxhx3XIIVZPks9ZNxBfVL{V{3$C{gdFwlR9a0`6Z^W3g@AT(NKT2r7Z zx3QaB`fE0G573Bql#R}-7$h@UIek<2(wbNdLX(k&n#iaB)dDn^QS@J*m-UfFYNJMe z$9VFhUpD#>#4(BPz{wf@Wk?=Iu__Tq^qp(%qkD?ww~9?yzouC{4^)4BLE*A0Q2jVo zkRtuFu&O#&MJ)JEQ|z`W%K&*YAfEX>jyzD(0-R;yr*BDsd;N~tYq?2)U!G09oQ7G< zl6|tHuqUO4lhFiFN=JbXK-T?uRy{fKC%6Zd2?<&DM?*H^`M-ppxgkKZe8Rd?*JDsR zNC7^r8CWI??IARELdP9u+O+ag&)vxS=fD>OvS!>Ps&&{|y^|8|b%%6mz5@=2500e< z6H>Vt4sA2NRjQUGkmebg8n;1j9$x38d}f;8!tefpzKeJ7-fh#ema9E`_RJ38H5AWN zKfaw+_C;W3UkV`r5220->l^Ey)lUIm!hx`B1BLwT)WzW=(w8VLQDOY4d^qiv6Yvj%sx1Ab((MV`&yQ^cGs-6fWh`=bBCG792fSba- zcs9=?K?~3xg`>fv6bm7JNMLR2v3pX`md|6G3H1Hr8U!L`GfxPSTJ7lPJI_ zfSXlx3Tfyp$eu_jxEsQ{{n;AxGvj?pgd`9q`*IyGJra14dUs_EqYmP;yI(ko5pMm8 zne`q+W`WWy`FWsUpSwll&>LIrk1dp{zM3+Hhggn@(Rr|3Tt_-apgb3=24XfG2O+nk z5YKVFQUa>R89uJ3@C3&cbZ%kYz|R71ZWmh5A_i64U03bRcFLsY0nq8B^7uPBLf+hx zOIA)pEhHA2$T>+51oGfY0D$d{eKesUya*E~`QocUZgs{`A<`K7Kh*srKYGn1f2G~1 zm!S9m+Dcn`DdBovc8=In6`z*l?y`ENvt z3dsKQ`f<0Azt2i-ZsX$OBHk&bAfY?QFrgqp35i-rf(@nl&D*3V%7$&~#H*i%1G!n{ zZAXFOPe;k)oIGN{erQ={EVV4TY+kwRxAuOU>*JJ?;xUv#-B>`OKy+%-*+LUc63GpT zZ3jCFvcdl4LP=&ZEg8^_X~tA)fS51*Rwa`Wz~n)2Z3JvXTo3?xs`-7xh(-)bADPV= zBZSpsvQw9Wq5z3aE1Jc|py=rZEi#haVYLz!(La`8RKJQP1?atQj`3*CwMS7yN>~J) zbS+w84@eeCI6xSRN6x17eNa$TyIV5- zI`}Yg@LMG!%k;blAa=g?xnp2{RBRq;-@Fm}YV&E@?&PJcLGx7UYvUL965Vg$;K|Xl^-jUKAJRF z#PM`&t;-gPdaHVAyDjV?bwa*)o^@PM=&m;m7xpdfLe`_yx?jsS2Gk%~u>31s1jO#$ zN7u1S{jyjT&M)8h`sBaflt&)XCY_wOQh`2S+ z);pxu5uW3nG%4h4u1^SW4(nt2b0dK>oZ_Xp)kUL(W1$AE-o~f%hqE&q!M)Jo`F`D1 zZ%=1SU}oq88&1J3P#;IF?-V^~0}b^zGzeAY^(4;(QwrMQA*j&>VU5z|Qy!_^EH&7xaF1L`?K2=4Itzj2x4Tb4UMdq^3c&b(z#%4oTBG_oLkC%6cnW8!x&;Nv$DCqYth(lYHV&~ z^6wvwL6qSy$Z($$bRQb(0;xr?CKT9{EFo*uCB^WNeue?`*$*3H#<$>`N*D$0xn*Kn zCg;=q9yd1gHRm%!V*o|Q{RD5<;KtrB0_EmAJ=)K6LQmtZDCiNDmeNthhYt|r;;0VYI1*j@%_uwxoTO6DO8@X0@ zRG0SK5Ji@#QG!GY@3X9Gt^zg7cuwfVNP+bLSny%*fae%BoB~rg{W}zTbs0`15TXHF z1?qBDNZcbGbDys758vud%WrXGL$@#^CiP86okjm%LUTOaPJHRCGwLH4H9)3PfSHfh zIqP$KRB3|fQ4K>2Q;okuo^J%Mf5!1S#Bm+E{`=l2e-4rPT5z;!$N%-X5~1kvEgYO^NZF0gqZXpWgS)QK z!{>0SO|37dokd5Bwstq4M91HEk?STfg55q9C2#}NH;MglvllsLwLo>w>;D^XJ~J7Y zD**-?b+{KH*k0UbYahL0SX_40*>ZN_=;xsdvT}IZ)#u+=w?zq^a#vEpe`V`2;ozt` 
z!m(#e1PkFey0cqG6RrPQJiFzqrwOl%y_5~iqK8e_8VU^4SCnCWak8c%gS@z%5#ym$ zVKjUESPE%W`NpF1Af^l4grM6KmHeSF?216&jQh`)!|00Y!z|=S`fZUeL{DJ=G5m6> z)y^Nlq}c3nvz;`?X_o&UCnG!SW+x|=`s&uU#XUY(56(&|&_Vq7Wh|5&R~8Jkp0vJY z{J0&Hfpkt}$Y%Ajn|J7a{Sa~?Yw%SY`4V4i&I2C#HSc}C-NwYY11yx`y6K8>w6GJh zZua&?BWEv(>OSVRaZB=M=C;&lV}FQ~kaxJY2Pp?z0KDCy;3hRG86g(1;;g~z@M)YX zklH_Bz`BIy*+vkR?Qjt+F=jg*>jxiBC5N!8#%@RLyJleO||1mnv-=s!TM>Vdki0jkt3g=LHi&VpUx@0HFY2Jj$_w?(ifx zrn&7okkd~^RpAymXpKEW^G*MmXQmc5rxVKpS$J#t#2Z=BfUIn5srbU3%BN4|pJCjQ z``n?sZ{cH<O=Tq}IQbxtU&xxb^hm9Y8duQS^zS;7U| z;kcKR>T>WwN@1LecDFgm&!(}lUkr;nhm*koDC>iSX75aEsXCMVpQ9_)Hxc)4h5XnH zG+D_TZWC($PEYMF8|gjfo z-3;TDhZ8%B#XCKoHz0%}*D5*PI)*3KMeK*3rF`HWN=#@qJ@qu>$~x2eH@bMy$lKTO zU)y&P$I-v>Ls9m$unFCyP7CZY2~l_(*W0V_v?OFY2kN3r<7wMZB+iV%>TqX`I>Fmm zgFgdVdmO8aTT2#{Ls#u#WT47({v(x0T}Iho3%u(3bLx zFf0D&*_W$C%Oc0$1N~t z_#Ke---Yj~)*-93q>WKG!cyj*j!_qx1ibYqey4TrR}L_}5AMx+!i|1Gw!&|#+v@;JW=xNMlt2`y2A|NZ<)ChW0oB?t8}hv9GdIY~8j$4H^p)4GGx)AcNj z2k!pG15Ye;s)@ngK|TpNWxK|v<4>@Mtde}{*#=-*a z5@edijZRhiEpcI+LmnZ=n$RWHmGRY)up3r9p~)3>Ir}Ed+|04O33DZbVUS38E&H_x zkOyyUUsuBj;d~=~^xFJkAF*DR7O!HLD-Mp)R(kZ61+a7_XI_Eo%yB}E8>2SI)$MYp z(k}@`fUUN&!*5iS$wU_k=_yZK(Ak$Jn zu7LBg9h7e<`!WS#wf zFtnrm_vqG<(QfM40t{_xKAItT-h&nT9fSYI5jJ+!VV11$a6&h**McE5Wp4|V8!2Z# zy0(&oT7@Tw22It?v&FNHJlJqWUS{tgCZuHakb`sDr(MfVxz}!87Mk!hi!xP;%8$jc zlgDB+)~AY*g$q#)^1oTe(2Buxk8fm^3*c<~#_7&Z6vKCWe;k+b{}qLaygyJh@(^y)tStZZbYt?F$Xmt@pk8~jwXs z77j%oq1v3Re}vf6&1_-rfplhb_EB|c(ImN;D{z$!0O_B-KOAVisOK&*7kPR=dH!d2 z;W7;awjyO#I1nzDisIFejHwZ!G5*d+!jH;zSc{{tl^@9(_s4l98?F~@w3z-t^~8-- z$h`dQrdQQw^6n-e`%rEJvVq(um!$&Si;Y}PTO4Np}Wa7X%GV&z2fz}$? zd>aHSNvOkf8R$3BWuc4LSXGyuGbHOYvj)Mmbpw$-j=W@*@GsfkC3t>e=$|WOsj>lA@B%J;%ZS(u?LWxXEZEH4yX^2BiMC5V5VV)C zFF023{rBcs{=z*bQg}(9-jID@zEd7lSSE!H)XPslKBc0hqjgVp_=k!)PLEl|h6iWK zqR;(PN`6 zZ4;I%7D$USD1TXMm+i;O#hqgCNGo-Q9Y2H8{#;f$ndk8Q{KVt5b^+bo3BMJq)4 zCBt4W%d&RDY=o)0DTk+SuVKur2CdNLOb$JTI%JK8$Hw?(#p{|ALR&a5%x0Nlmvy^+ z+-+1vtDo~MT(y4OFuzE99rvE)w<`!K!V=@{25TjjGbCKk5H;haSF(a24M76v&pk^(0jgtSp_QeHPG3<<)OV zw5*j^-(jvk1NHKH(A+zFzg4F$5Mu4^P`>~VDK#-Ba;l%2$~zlY{YeJe!nk-RZA?|&zi6)3M`o1F_!ulB0;E@8*|+h_k~;uLl`%jNtY8OnHJrY^ZWPJ zXZP{!O<740-nVZS1v3k(^e(RK1Fq{B9h&+^KOfm5=8uF*ujEjiLLRJBzf#q z!Gwux==Jq}gikp{9RgbQoYau`*{Kt_oWNpvL5B|lOy>ki8>U5rf!1^qV-p-;p|zA* zp}QMbL@cI<0Jw}I;&ZKk50y8oQd}HaT!S^gR40=V4th{^;GvD6b(7kS{0(%5?_!1U zNFVpzrxeNQb6&iw^q*zwJBka8UrcMu-WM!(hB2=mYD1=cr!AcHA z3~8w?B;<5tuW0$jLY3w2G~ z*Lnn(Mivf(9lJOorut9+A_K%MFZCZfm*95oAp#0ttJ)#PR!%-qZf<$&nFjs+C&)@? zDI7nD5j9z(1u3Enr@)g1QuDPf3)99XN2Wep4vhvke)p0PrG}6@K6v(x{xmQ(XpyKQXbC@1Wd-mgyfz&n)1*6R>+GcUe$LSmn#1u$nn}KDukhOngW`+ya zc9amM-&M;BsIq{!S1ZGvYAX@(7iG-ZR5MT8;-EKYeGc>du+-grggfB{5!gS=GmF@! 
zxs&?$52e{~7S){dn~6VDLoeF=tUR3qWKl-Ln+|nVQ;=qaPGPO3Wst+{<{kuBWP)cK z%9E#JLOo(?*-uM%Bok?w1Z)ctVJvTr&S0zRc?^D!X7HM$C5c6W(FM9NEmZ(!PJVO_ z+*RuDE|U_>FhgHlCbmqE#-FolFNiFn2?1m|iI6AF(_?Uo>&#W7<$&7S+-HE+Kp6rD zUw}AqK)ndH4=|Uh+Gn1g?7g2>%jL-hOQwh&p)D0v{BU4)izh&=Dzs>n&I~YLv>k1bX+Tf0kcSH;J;0r#NeP=(308H@YoNT|pNBuG= zubRX4T!+XKT-Hodk{Y8;VhdL)KS{o$=Sbg7 zP&hD<83@x3WZ5bN107&8Q4HZE11|53AB(hMkZ}PS2dA`EdaDwiH+mZVn(ar@Tol-G z^Yr)0$hwnTZ^;CZir(OaMlyw+64JULlwDfw5;8+3d%c2b)jyx6A3q*mG=m8%Tq0)p zzPr|d@JIBdOw&`1OvEm*0v)tu!T|3JCC&emoP2_cw+^SFx$h1WkCFQovXDV=8YDAN zMz37gncGBn`ImD}6 zpK@>?GRQZJX(nS(L36;MqE`VaGWfbrv8w_CvpohIUSQipv?dps5>$H?9h?`A$ZRcl zoN@AgxDtCBYr=@Y53_-yT+_aH?51yjD2BnRVfQ6#gK&V5(Lfz!w2AvzI10Qn9=aUk zdihc=gB|o~f-Q@v(=iGkerATfUt|s=7{z@#{*=J{S+mw>I6<>PtLPYKMMfK59jTus zjH)txp@i8Ev{A5!^uOrcuP*GtUw6 z%D3_W5#?!o>vHvNk|#mFHF<~j9%hnK6j$I-vX7}Ij1Rs14mj==zqvgvWnjZKV8JO< zN7fU-(6F_Y?s(-OH2D)y;up792 z6-GeLm_vRZ{^{wHTxKauh5VFPM9y>b`zkN^-JjlBRhl*nNZOw48^7co^2i0)9;1MM zrfnzuyBTRMZ!fAH&5RMp)hY56?(j}s66gvy4#5>gu<3_fT$W`970>AtneGsOjqubGk_sOB2viQ>`8bMhx z6y`;$P=W0LSotPbfYewn;)JwXQ*mHtt5XCDszh${PK7{9*&UBu=)QlF!qIS5cE@wC zytmt-Vu0OO3W!GPxM^iKR8>sy_QWYYcdezO`paGUvYeJ+0_rS}ChjeA2lDr7AGord z*sl6b2Za6ZoJ&$D56a2{aAo+H-)|y;D1Cf;jfuxIZ=^F~PqC6P<+2 zH2;oTuMbkY{(CjA2tqe^UWKeNzpr|4^$f=0r?rR&j&Gq(rYg~&xp)3in4npjsS3{{ zbJNDBRxu^G-Z2}tQUTMKhfiKQog&9MqlK4jyWmwbzWTK!0wdAm5a(z_8t9*|@^sy( z$Z5gmE*TANY-aWU*;wpo@nIig*1!?p))v*$r^uR&N;W4>+O6UWHM@+9NBQwm!#_6$ zE;#N+>?tZLTUBnnIM4Qd=UpyR#_nw>u{k=w^-^}LbZgb%k=IS~-PhP4wd0xJce6@6 z>22Tb^!@3tH(l;Byg*%Qn-DK%@sje>8yl;w>Wx1UJC%y+TOYW0?xM8bSS%&6@}J*5 zu5_6@j2MpK@zLXQcj(0Lc5J>BX#Z}fzrd{}n`I(koRNm|*gyp2vg6&p)ex_-eQTbr z&+jgkxV+bYz=l*!bCW%Z*`{Vqe8(ropC4HC+8V;^{@e>@I{b~^sw-gF?l5Em z{Y9?EVzA&NCWqWcel(Ak{kmbGHVHO*evfGP!qM#!)j0-iKM1?|gB1@vJ?A|rjeGZZ zfT^^7Ia*$5`*V_&{C3+#@2q}*)|c~q@OwZJGMwn&1C0@O=m44=ZAJf0p@Y4_izu;e zI<&Ava{s0eWVhD&{TPdodn=ig4BMns2s-XOy=QH;ZsAv7Y*lMyn2poC)Q`y|8U|0} z%gC?CJLcGh+osWQzAGW+-UZw8U2BhM82A5vAGWlMtC~Mrc919NLG|DG1jFF7ly1CH z8#p#WL$m*XZn4zYC_k?It9LPF0w3FWU)%M5u1y^Pcuxb-Cn6-)3X?MEI(x;>xh*B%{xRn8x?zwEn?o92F zsqTyhY&S80d8PXaQb^VV>~sTLvXVdF_{{_rNBz`udVzAot5B-0>ddvyPGE#0m#kOv{Sy6SwsM0CO z7Udj(6gYEEF~n9XNwJizy%5tj(VJ{hHLx)1T0Y*=K0RtYuac`vcy63qn~5_H(nU8- zY}_HbX0KS!uUY3dOy}a*u*O7syG6HCgA)r1RlVcyCrAoI6D#KU!0L@8059!$^p7d{JL3i#!Ea{8uN?hQ|AQm;~Mcnu~yg^6sS&@122RrSP2h@A&A@a%u z4#A!O`i5?#Mvlp6?}6YG><9gj0!K2`=bfk_Bqy@hDPEHC2Y1+p?;(OPoBx6cmcYvSWkF-rf?quq}LvhDS+oQ-Adq)At7FG@Q7Lgxs0 zIzItHV`ARk8YN<))B5s)gn3(Yln~}>Yn+hS*oSDAg|XHELV2BrB+A5deqHaJmmF^t z{%=Npy<>2qg+HWTX@E^_6Y%cIcbf0et8lgAH7MYXKpDN%&%Z05Wmqk!!;fal)XDM> zd(Np*aK^;BX-coqT`lVdj_Xs-4VLLoIRVbrJ#}l&(e`j588M3yqzptmzP4-gj1dP9;HgT- zA)}R9k?~y8W+hQm#$)T!R0GlLiqX%n7Y(7TFOI0cjz_J>u8pRnuOIcQK86&Ce7BzJ zs8fC2f;YZuRPMy7?Oradf{Lt%2$Aclx+A0Ld^l$O)-q-)B-1NpbA6+& zD~L3><e>7^;s)_5IuW%Q44JpP{k8M&tv2f+lUA=JsddAyH_U7j z=KvFXQp&OYoqWFi+jn|pH*X=cf9z;m2&hD%mHu)j;15+|1Dq$SOzZ?F<=`7a=5<+2 zg|`uo62L07MoFLYPh+TCd^ah8vv)_>4>(cdoJb)%Fy@ojtC|RpBJ8TkrS8WQo?Z(} zV4YHRDf_}ZaP<%8=}+1j8h;CF34r+ehBASX+XGP!5Rm>^5EEg_zA*nP#H6cHV^Z3p z_WNIxn--9#VY1rt^V3@bPTxNs6L+H4n5N6SlurG0yz`t+x|u zc@slyz!0mC5^*GBbNuphXb8T%QdR#a9JA*oO6j9TSNAln!B%Wzy^G92aD zKAURWUaZOWA&vXyB_Jn z5Ziv~zGADR@UDXp?GZEHe|cIbJJ~t&)4WH^c(QhjPJ4k_i{)UI8ziBWd?D4tTNvpz z*6TVpX_3^VsfK7WMMSe{)F`y)-KudV__Yqv9GDV&vd7En(9(2wh`aN2-H0=LNQLuM zy-A2PV~F$*%dQ+Fi96jH48?ao`M;p+4)kUBIM3!joaj2dxtFlXyh2vupv)DEF`0Ez z+C;w$Z6PnAFGiztWtq)JWOM*1L1Qvlg<>@bSAuw^nCIfIphmxD_ z^U98M(Yu=Ntmvqo`nDl5r(R=n__%drV2N#ELHyL2t@^_ygeOsu|YkheSER0)2d6E@E$zGI?spiilUJ* zRH9Zre4T&G{lWoGkV&0~?n%^xvyS&xTi9DW!2v$pJ*;I6tvN^+0oDp*kagnM)E=jS 
zx3#)&B3LKk2gSp0jlltz9de^^@JGi9Q$b5f*Wux%KM2(ullJGBcFpybpJm-ly@Y>n zkVCg7dRN64(@TvreG*AaEZFPPd#PgPt)X~erRc+$8^S`YRjJga>;E`4Mo8(YKDO$; zRAg!&aj^CA0d-%~OVY=~4%Qd=-fE)a#e9EGTN!pjH@=6I;*&jdOGmI4Iek`iol4pm=`(PIckx($NQFr+PBkLwc~1 zQ<6I4YfmTpym^@TyUTLxml8XQA6$Pfk$HJk<@wpFCI?c5z(*9iqRKFs#7?tlRTk^I zU>_~{b%pq_<=boAH!B=V&1gmObccO8$S*dj=P+D!#ogr;cY= ziuacQO|@2Q;8JzvrJ;oL53My*efS*DPvV~cL1;T_$Bf!4TT^^Aq=%sNVZ4#xd6mZs z;4H1;E8mnbQMnL(qP^4tA5=IL8vdheKpZscHwGAfP(Q5R^^HP^z@V;N8DI?5)`y(B@s3fAg zLFegxWrw4Z;BTE9WHokr*V^T#=CJ)+H_1N^%OGh^0R<61(JV?;EWP@^Me`rz%J5bu z$V$b*vZ|lw8OK*`7ERP9+N-x%00}o z^z+Xk`d})oJKuci1*8o(0)?@;Rw*a8K2j-HYV&28j=1Lw5CXD-p%4^N&9XTBz{g=? zIPpBZiQ}sTB@efsEv*2+>gAk^H~;xRBm#5h literal 0 HcmV?d00001 diff --git a/docs/_static/img/sb3_policy.png b/docs/_static/img/sb3_policy.png new file mode 100644 index 0000000000000000000000000000000000000000..d79389d40dfdc62ebcad18a03490891cccb1d5c9 GIT binary patch literal 180282 zcmeFZby!tx_brSCiU=x5NE%2=D4hl&(jqM_ozlGp1e6pJX%JA5F6oxoA}t~y-QBUt zz0X|wzQ61H^IYFKe;%Lf(uY`kuf6Vh&pGB8W8QC+6r?W@UM0lC!@D5!Na6_|-YF~| z-pRhR`0yRKmqH=%&uP>9^7rxZ3d7GI8JvNy8H^r1k;lVxzlDeQ@)aK50etJ_JRY7C z7arcCJ|3P>3?3eZT|%X@2>bxwP*z$34~P8su_hxDzC++BBQHrXbLt`qFN?GAMM1b2 zPe$VYQ`bMs$1bkV=A7maQJB%*9^v~>Pu|ox5mci>#*(En@_}7YlB1Az@I{dAHK@^$d|^|F89t}6O)(gZQ>%rd#Wm_ z{9aZ}LHWJLIw})=?(a>#V7$B+o-!X=S-wC0EO;owJnrVrn{Sho!M}Yd?t0*q-<|a5YGGg!GO)Al~L#im!;D9=! zQB_rSbaeb)K~9=v9!Ew_wyBQVQ5V_newZNS6_k9uY?>hK!lAY+{>LHxib7gWPRL-f zY41Sy_lm8#mP4h#@6}}eDDF_FrOzaiS3d1y!OJUc>Ri@;zG}z$^AY*k354V=Q3h}? z+#QcgkBE$aGg@x8URV)~xXzh#QP?t^1AiwkI+D9^GgGLMT1c$01} zkl3kR8XlAiE729lYZ)Vs5`!fWpdo&$+RPO)aIgM3H4zulDOJ@Mf;!BX_k)M-X_`}! zi0v_~tgIXzQHW<$S9>-Y!Mab_y^W5Re(>OAP^w++fk+){2E)V8?|`OtDRA!4;*qCb zYrOO~Y|CQL^$l}g{lpq{$7W_I-Q2K5s{%9X5i4xUQFlCIo0*?qw~S>D^3BT9;5pBi zz4BotJKbF5U_MP`tHN~MRfLC+@8`g;M;;!+xw*MtOTYdc{wXIdO<+?NZzmv8$lzNfaCl{VV9NZ%r3F!fD6=ndy(PhR#xv%;Qb5B zz82AS`nZVFK`Vho1xFq9+LlpFS?#AjE~1}5f4VOBpfw8gPXrRvr{?9Y7Wu?vWtov* zXyiG&G@t{-eK-;aQY6-JZ1HaYzVJx_rD8?1A$X9M3z41#jNEnQDGjM@MJ* zPet&LA3wCcN$GFzxD3#qK6M|~H?N!|PO{OWFm*|&;odrH84n-4=s=AJf4ccQ*xr@G z{Tn@K`kr6EBn%C&!^V?K7E6p`Q@8K0tlC>tuY2t6Er#1@#C0%SLamH? 
zvnjP2{MM5uz2Pdtf2^gGXFjZP<`-ic{Q&oahbC~PWQQ;fnk^kA- zt6BM+g}DGOuB@!Q)pRKI=a=nn&R`Q7-P=9za%ie{x;8WG@v~<%Sy^F}WG|{ls&UxE z;gVGja!$?m?S)=J$MtSk`qaPKr(AycUA=f*l#>9>`}gk)-1{s(uxmbe@q(L$g=O#9 zJB&u~TdDQXbxn5pnd!{PDd(+KM}ZeYiSVO?!^8YNSNWQ^8yEr`>+wuxLf9v9x3=cG z=7RN(uEU4_@#C=w9Z5$=M}m-3(8^F*vCEon?Lk!jX7@vqp8hd3rVi%?ufinp*0BUa z%CJN~CL)t4-TaA$#`igKoZC~$8H&22GrYXKq`NUsRXijqMv;uk~%uHp`oF!HKWu*W$EUZDJaksNrxvb`}0DLm*nL1hpwM5r$Gvaw~MkcRs=^csu?lKgSBoLgMno?H1BATPOyOEVO=_tghdC6|F z=^Qb=V0*w0wMMz!HFOy5!+uU&+&^?sfBWo_eD6&$u_R;J=tAceEwx-NNrTUre)LT<`yna^P3UI5 z-YiPk0A)k5VqQWj84Arj@+R#uK7AI9G;}yDNzdA=d6!4-?m~R=q4yZL7I~kl@|Loq z&0zlu*p1`4crabah;cFiX#Dd`3M_`e!UI(GZq5yfq-rm^FZF>dQWJP%SXq7`XO z=fV<~hw8#1e`#EF+oQ+%xs+A6;r3T?4J*#!`=Pd;o+8dwiAkeLmzJ!; zZ(R*ptW@Blte(UUhrApxEPR_35!P&Yzwg(8bLx{>E=%D5TVV}503W~Tc1_mI^Mq_utzgC=uW!}iN$8h_-dUYDIvy|8H&gfot*46VzhX)6Fl;6>D zhnlxv?W`Nm#YoFW!mp>BPht+ypKc2NiDnRFp~8E}AHYjMU776f?+=A+0&|Gu)2}1J zdi3=Zc3iw3tc;9|WD0CI1qEY&|5hut9+5IqnPLp*6?!z`C}c#XT{k9v-A+yQN_>YD zGud@A!W3pgsM$wD7+k0|y^YbIujUkS58~2n=PdAJQ^Q|WJkww&e~_Y-^G$z#D-~@G z!FtD-kn!&07(VN(ESW5hE>6WxL%)?hSR~r&qi^fAOofZnnQI8#9DoP4y@OF!*G4l9 ziBE*`kCs=Q$feqEhlHx@Hgg54fp03aMy{H@y}j#vL!j$?N}xuu$%VeYzF&R)++18- zi|-sCcF~6%v0n|iuc$}@sf##pVRbd^eU4sZiewonao}L3lVuKJN^LDR3!fxlb*|fs zW<^L+XW`g>!zi`kV4*j|+}1V&0Eow0O*EwIa3SLds!x}lg#!%6cSC6Ax#c5EOZkL^ zg=6FHMoqt;=B_&Wo4E|_s~CN7g+}PT`LuA?aUX)$;o)(RHM@pwW>D2_gMUjACM9f!aYDJ+_94KiWj^7F?)lGHSZO-}sD;KOl}=<76lqIBOo~a*mwxB| z`t@sg{k{MEk-oz^iaeXqIRr8Sr1%e8! zZs(IGAcGHn}1x%(W(f7 zWajSfF;MJ5GUQ?vZp@oDR3yB7`s4}U6b9yqrJkXowmhAxfnw9H0@lEJ-tfHZ+{Qo1 zW;@~o(Nhz98&lB<#YwTTt#N&p8l_fLunioYosnGVyf#R?6#S8yaA9GgJytYOHCwIF zuIB;a=6QIuXp>@C@0a4^8F_g&O-s_UquJTn0C8_JF){tlKJl?8Es;XcGa{GX>T_NS zaa|Xxw|F2ZRi6?c>ITG03TuQt4pvu1M@QcN@g2;Vi@59u_G?T`fk8n*m)16Na@dRR z6kqSJDT|(+H90sqkRlqF=o%{_I*=@zaukx0m2EUEZ1SGn+J`nHzQcKtHE|wuA9itU zY;0ja6(!}z+}zN-yu1PZgl5BQqN21_BVj`;=~Y!8O>@7Rnr0j$K70ts%G!3saE;e; zQD$bbnkpn4U{*&wcZ-u}dyML;XoQZX*4ewc0G>Y9X7#kZmH8)Re z?VH)y%=+chE2*mT8rn%IlaWJ7Q?K2;G8^5&#KQ7MpX(76ROp7wMQwzHg)%d9sV>b+ zm0>-(Q&(DdAK6d$tCZ`jT?UOLIzzd%CcE1nyhq?li&3l9Vcv9V~GU*v&V z<<3O2gL=h!<$ZKCDG`2a zSJ&Kmy}gqpw(c>I1c&MPb;Bv-dt-!cIdJ>QxK?ORjC?$K;su-*p8^rHzrQb1yp~8#A1!*ed>E+BSRl8eO=XyC&`9$nq@@78mBN^=y@RAG`k^sUVi`onKj*nHj5V zhrly8H@|lAY*QRv82L5@1;ti}IPMn4syZ+)k7LwhP4sH~)mTwWJ}4I=Yq2SrqvJc;$U8T*L1xF5ZW zq8$?t12c28Lod+0?_`_F$1dh;lvrGWPQ!Dvl?l@R9Te76Enl}IEnMUW_W009{x`A@ zCgx{zOJqoA;0~sY4b{(X1V?~q{jtF zf;6Ri*v06tU%w6)jgK`+7`VP18X1viCJVaIbpQGKbx!_p@xz1Ujd4E`!$OVu<>jWS zjkg~@w45RlsSkNa<%y{pGaf3nHtNq{s9dkt|9l_TCPnG>tEN{y^kRFuUYl>2_J2m# zO6cm+i6w`_<8sttVr2A(GNr7g_1MOS<8SU*S@~RBn*<&BYW0@FsQc1w=ao_3WnNlh z0^~jc0YjYi{``4o(Me3jJAfXyhd$cT!|}F7bIx&MCpq^<>%79J(BbH;v#EXwP{GQE zq!X8tlam#lo%w)Da&&Yaeuwe>*R39p6xHh~lle9bD85F0n)QF0n0k=PECWMxMH{u3E1{0sHN<(01_f^7af4 z%0Ll-JOPim7Q2(bRewxkhdpu|^;*-(%FbR`S~5C5*zv>;c1_t=?j_^a#WVBr!p=M; z5B4wL6Ttg$Tf1elIrxV0k-GpNZAw{Lw5Z4KdN1yP1;VDmq8J+a`K2Xl@;&L|gJjh1 z7T`wjAJ6oc(wa#rw2~g}Dxv__rUNyQ1QUs`5#TtW3REP=S;JqHm@M$SE;qOm3cOB zoEDar=4NK~6OIKYJ6D09*WomVG0`8a-7ymGj z_|n<<$3Evii%PUuO)eJhGGVA_Kd;N7l{MS zZER{MK8|sPJSxcif;?>;b`aIuHz)}dHzazpD_4{Oi^SeFweJ$e=ob&|MZ+yVRa3nK zi2N^L-0BoY0OQ#4stb#_iUJoI(iEcUJct4t8y5SrH34qr*I>1lfsi*4+Z#XS>%)+I zI6Q|CIf#-_VbnnDxsE09+?xo576gi&6cHY~=4Gn&!l4yc5xUElFKgJLPeOfM)pDT_ ztPtGBV0wQIlL_aOTizU^p?&@aUmB>Cqa)AR&RFW~vvxv>W^pYLNSZ~i=KwnNCS%`Y z#!$)PURXsp1cv}HB$xd}|LsqSrDmPC7rS5bhq#th|6m}PXOlcu0eUBX_(1+gYX~@N zT487XdHgPsEh51B6VRI>T1T#SS&`F{T4$m-h4v5Y`R%Blxbsrt)OgS*AVn!bz}q)X zI(*Zg)6)k{kD>|hZzRD^5tI|Ocrym~h!sD~_)11jUbLDA6d`fk)p#>&>(Dc`*=o?_ za7Q#s6K^%rAHM|xWuS74xXLfzYsC@>U8Fu38BJYSiBDyQ2jH+hpv-V*oy_{GdxZn^ 
za|3o5d_qD(c&QmQZr^AW!bU?e3^bD`FG_u{u&F`C=g`v90xIRtA1z{9p{DAJEsa`F zLBMjKD=UTgz98_>z~CBPeqduDv7Y^Y*L?k)u=Z$!MakyTR#&m-zI{p3R8R?>x=6-IbyJ!7gtpNw+h%b8jQuM{n;#piL!vgP?aa ztrxw=MHzD213w~c7hyyJXM9d8_M^wn2hs@GG4OFIJ$>5r^RsNB20N_27iI(sP_JoH7~IoqaUwKpNQB%a(K>)@@FD)p&XCx9OSeEcU@-q*_>54OXTYS6TkfNwM;# zf-31+qnqS5OO@ol$U@ZkT8_4xXLoI^%VVvMB7W3PluQx;Pw)6afcy*l6YqKu13~sOE9)jB z<3@M-qXjP=FCF$h;99B=N2`#{7)s6GlKw&jceEw)b>W)W!TJq&wo-1AZaJ~KPo7*x z@>FJK=C9ws-vJCMtKGSM$o;-mhadb2%KG)BQjT;@+4WkkJ2H;jkQC3=2^@)m#8_*_@iePUlDJj(> zd>`bd0VafT-8tYG5(FIrphaEc1ifLP%7v3eWE~F(Rp5;Ueuwm0lpwqvv;~>j*;BBX(N<5c zj@SYiqPDHQ&rCC^Sht%2?8;VDzPEoZR%~~9&@XRkzLIVz5Rm* zQcTS24wI>){ViC0;-&eqv9a)>m0LG%oQiJaUA_gn2AOIKL)}YYS`bVQH$d7pYx?~8 z`Y+RD!_H+vkk0@Z#zHlSr~4zJV@-Q}>Nw)g{t1^)#ieL@Yx&#IOBS}<^PHk3!haJS ze1k=hV~V~g>%5wgJSUW&$oEn+BYLMbGCKIWr&6ot?T{9yf-}p%Cu#E zO$*W{QH?=bPr?rOPS4Er{Q2`3SmW~)?9)jeYZDZ8duJN-xyX;Cg@55NC>+0bsg=Qm zum3RYeRl2A5Ttpw8ZM9Mi09w>k)4n1Rb}N%a5ZcI|1~UX?98~&A^q!IwD;mK7691I zOZjyQE-o%Pzv!=Tb)~goCC8{wt)77Gq^VN$y|p8yHYEudi2;TNBvWIP)Bfu!yN3GMwhxn9r(8bQ|rOP&J0!oEiG-N;~Y5JH5s4VT>C2>8$A4EK1Gk6pf8$Oy--~>qoa#GZiD7{b6Z8R7&g@I>QLL^T3r|P+D3k2 zX&3P3D?8yvSU6Z`_ve!58UpF2>l;qNqCUfT%-XLpLhZfVCDI5Qp5fAujGGx3DJY}? zmfP0u-&(VBW8#at2E_)tQy|=Kcie7dHD$O{&<`0GY5?RD4fYRh&Lef2lcd+PU(Q!5 z0Jr=4^VvWJ9jb#dJSN4&X&|KwlW4TdY!D3zUgq&!6aaIeWNre9px$0DO|ZaMR}))X zD~6p9$G)Cv`%vuI`UGG#EJe*@V^zjLV%nh7*y6bY$Au>#9SFOuJ_icP6?crW{X+lx zZ*Q`7W}3X zKIl>skYJ!>5WAKHl-4f&K)Cs{??+Po`29|a5KSf>agxv{K-X>MZbnKXe#AvfOMA3z zDsJ?6mrM+|Jm&^D1YUchuV1%$YQrx5hzJz-h2~zj$u~@{GkxmwYinqmGHIb^@jV8R zc@w}$a7RyE@Jug{jKbp<_z6~2Fm`zOS+y=FlVU>1dz)l|`?@xC3el0!+5h?T$56IW zC*O=>9W)I9RR|moFld_nEgf1)SJ5`}&7$NovAs}QV!vfzNo zb#!8nPF|k9@`+$@%<4V!<*2@Uh1yrvq&JQzK>m>}-edCX&z}mSQG0wx?wW(OsBm+oq zWMpLBzkl=1`k)*=plbnag$G?Nyunj-^=Te)Y#~vOYIo~8zZ}f5Wbd59zUlIa&-T(**Bws=I6bajeWXU@}ixs+5mBadaJWd?e7;zQ&HjK~9+Ed+1 zzoaSb(UU4!y+0dY7$Wl0;A(=HLmc3tyTF3fZgq{NHD+9{BY= z-+PA*>#%49m z=KvCqolwHlr%&znz8gRb71-ewo@7)tq5z-;fzk^Z$t??x_T&1`h3Yp*(dHHwK{+4a zM!gxEV-gmQhoaS)D2zrOt&8sg*D_FMOW(@UI$Seh1N;v*2w=dd`{8X6>&rbFN01g7 z38JyOO%DqTi}{6xiAECf>G_^Cgy^Z;+o~H+38b5ZB`6sCA`id z4~xfpSWkn{f?hEI4(PeD@r%`AL58AQH^+iU#S&`V^YpKu23if3hBgM0ki=_`{r=qo zX&w!vO~y8=Y4)mUMo z5jy&(Ehy`A3p(ge||BDrwb%5vp6Xe zd&CM5E{9roGrGU85&R*wZf`Oe2wPIJvw@A}1jAolz9O4UfTngmskv!wixg0r2K!A$ zrqFo_dJ2lbwwa)3!eyqvKJHkIZSj9w+M3+jhlXH#+mVJ?+Uieh6qJyM@Hs-lq^1bIEE8Lzzbt-s&W7E@iFq$)xB|q^r%hl%Dmpu(awr22Lu`b zUU{)SuUsiqg;23M55}ikBbSHDILY@U!Z|B(;JzpuDjc)|C)wCq-Q*KH1i!wdxHBU(KGAvS95ptD z4ANd6xHSXp(LTG@*Vm^Ah@s~PMF@en-^~3H7Zot2kPqg~*7Og<~7v{ax(xV3nzcO1JEVdmyaqc+rB!VLaO&izH|eE5eKR5!px?+6?OD zsw4-ouP3I>`gtqEFR!0RBLDT0lWgTdKQu~ude8_|-Ey)gFxLrwu%EwvH9=}L8u^~a z{}U0&>AlIpGrkTaC?^dCXb+uXGOGc?3abP8yD zNQa-J^-`;Z-!Xz7Uxx004y6Iqxfp&II@EqNs@2wei<+2Tti8VeUXGxKp`qcY`&u@1 z{=I~)KiYwZ-{_I1-VP&1G*892L%N zKok2t>7BgyeKEjw&p(+W8w6Mm3B<1E`Rc(nUDT9Qr+RWYuSE`UcIoLRzvObv$Hm8=JAJbHcz4KiK~`^a15FPi(k94oPsWt!v&F!i^(j654rB@1M>$E zlAyeU_pBYZQPWpK==*&92=*YJ&*J-mj7>}@@_l0p-RdqF3?d7{pCSK%3k%vc%?kS& ze;-1!1A+rE7O>%V#&D7hy?O+z0esjxn9aC7ep`VxvF)g6AAA-H2_pAxZP%O#X4QKs{t>0GsZ&1P z%ouKpEor=nhmK4t=+cWkT-Xg)HnG6Ok$+XV{EgM`=_J*@wThck(XxX$Xa zT?NKyJEpj}c>hPTr$4fi<>dl!FPXWy&2ypeUo+iel-gywoe1enBdszG9k#hC%L(>@ zSWyo?uuT@u&{kDd3F>(zK_XYu)D+BlU_4(R(qVD~WTb=yn$4}P*7o+BEG+izVs+sM zmF~neB3xFB4I11;ehBeEFTO_t_SQXw4D-hQSTkzaj!LSs8oYy@Kw(co-uVnBgej)5 zDd(zKZZCW=VBETRc0Drr=oN+c=C{|3^Qoa+xK&$RSeN&1F|zbYB1q~6?FPYQZgaG{ zZaY{0iCRo_DBpLnjFbUB29%&37s8v+5EFoP58=QfvWPKw`LzeE;MOX7D+;(uKCYROg0lySYoKeX<1o8nSv?{3#${h za7TkR9vFGrkblnWNvUA~AW9FTCn|gTsSdhiMNQ1`P!$zr593ejCWMCIAF2OA_3{{f 
z-tXy$4f|Jz{`z$t8uTLYYtD8hhxb{Wxjy*g9s^RW0mFTS<{t_xeA?DGq)Y+4@2KVB z2Sa7H;kRE<9N+!w@-l_`)wgTVOb6J6%-^=#C?sAvZ&$H(<$`{oZty*Zi`thv7962N zHk{+6Bm&$OAn!B}R>BPNV-_f{B2We2M|~)?=*u$bnDi8*hrE>_;O3j&atpKrLCBA$ zT}iaudPiD!(wOLmKVk69% zChRDCxdNm1^cHC7gq=g^kI*DQ=GC@?Dl_PcX&HDa1@8pR4Ii{9Gs^vm;R;o*D?9zn z931Jukb@l;Y1|k#sf3)DDIq#Qe*-HzkgbTIL80bGIsCyu;=^Y3RG6?5nSWR(ICn-n z05K}{egAPy{AhxNYMx$yswGq|o-Y(CB*{>;60-zsCNb=^5s{bcj=Lm*6!naf+^EuBzJtR9qYHZh#P^bD6yyYYG-jozm0&*oPF!?0^szXIoH4xi=o`q{m6VaWpC&3$Z7RgDj} z7ufY`6M#`*k2_O_mY0>4eXjpG3lYUYp#i7vb$%v_Mjnoe%P&)z_m0HCR|3g@s$eM+ zQXyzvG2puxR4IBPKvQ_)15lC9%L5Hy4VOB8W^8J@1@zJu7;G-m(|2L8D;`JN8RAh2 zhyUzp4*Qo`GJWp7a#s`wZ-HZ>?1f4q>q*lRt-zh8ZYR)-yO|rgz`JnNLw|f}Z0~^I z4BAv}Tz{DW$gUmPP5+!4hD5_ZpWqblo?GW!bH6)P=8n4c13S`gRNFHb_#kCT%YD@% z8I?x68Ysq3Nu;|=1A`84-lSW6%E$mU60_Mpm(&1h1E@6>RaM6Ob$?$pW*wOg3aKc( zDnZ$G{B_U@0gX`YxEVbNLG(lvvQ_hJ;CpF?Byzw#Ku9w`fr3r;)W00{WD*i`Ir`e zDZl^1%%O<)2j{OfF`8xJUtapp8z&6^B>dZb!C#Rv$X)#Vs_Et}nS#-Dn6HW5|S?zq+h?DVh?Z!mYbVfTo)Xw!`1(D%dhr+DLwTM zIKZ#npBwvBTT7>^n$Nn0QuV28oB;Rf+oTT)QYQGCv6?VIG>ZQJ+y>$;T4Mr_KM)!4 zKNrtF+i-jQ*#3vH3MSp$9tNT|Km`qGYLWnV7kb7%6tnR^k1SaT{Pj@y5ItcHh4X;f zqA50$W6t;u{*Lc4_wIY2z>GR@{&lULhJDumOYR@gL2kJp&4Ubt&Da;h01&ip1iSRF zk5}zC_En<}sMGI*eXHiJ0VNEf#6SX90v_A(%2igq*np-r)MltIA zXK-E3(o~goC!O%)!;&ZHLx%sZ)Y%tXkNyt>;QvR={J%^7yAl3(ciI2VCuTymMtgD6 zgiGF`+U5T1rF7;Wu516yKQR(F*Xvqsc<>U>o>-poeam)+S)H_=Imli$yjX+%%VJb8 zkiz$Sd3aR)EMF%h10@yM@K@lbW+y-Q$3~?xFKy|dyzYZ}9AILi#`4)Ya8LvLG`pg+ zglR7k!PXO#fd2X{Xt560`Gfi5t(&^i4h@P2rA8FNrEfyDPvif6Nl>R#IXQ!w z5J-59+~425FSv!gdE@KbxBxlg+2#>TkZ(?+O zhl6Hgf+_m1l>{7Hty!64Vqjj%acUDyb`~FKyJ*oceJZ5^N+J@m&eejudk^uT zh$zgh<7q3C0n#R}GSc8mWo2~^q|G*V)zAJC(NA~V(z;N~PUw0*ea(jY>*p}dCd?Oq zdv{>|LdKgnSw!@%T-@BD=lQ?HrKe;q`iCUSJl7+@Wo9iFSQ8KFz1J|=wz#toEu=lE zcg3Fcn`IO;S>6%0s<2ixR0( zmwwk=GAVkVQ;#0YRZeel>?E+QjP;3m=dc=)y}#ra6z!3l;qDMyJv$nG?KR5z$9cQz*1-VjY>0AKutiR=)o4K{N9# z9x@$>n#A2Q5EWVG4e-XCm=6epA*$tQ?jM$ysG6YLPt#>10|KS+K8d6Q zK_Emd&vkWr_;^UleDM%A$cb$A{MmTauKcpI2m8y~JrnV(d&E~RWN{ubyqi!aVrLM*>daFjN>CxvO4+mf`2<8;xpl)B>>?VgxI)ClpU zr`qZO1!dTgnqR{tN1~Mayi^lcW0tbb`qS*JpvHyrydOdHcujpz{fK7l1g@WQRS&9Z z|Be+*>||R_MnDoo4f~&&x}Ynn4KwzVzEf=p;@KTRZ3~st`qUEmOV1S4pTGTCZKoTM zXe8fk;PTA=;x(-cJdX8tUyX`|mm^sC)09+scZKD9rA}je)~Zud`|hWkx71ci85L@9 z5ZxmldjV}PaH2M4MrA{R$6EcIp5c;X#Sn#O{FMY=^-s|%M_oLwK`&8ZMemd!15moz zQa8VLLheXR{MIdnz`MIH{^H|kE+Uv$kd5Z}46G7(5;AXIhMmxKA)+291PTp^2%u6T zR%bBdGn2LI`#(t0czKb~f8Cw)^p60lb6IJ|)jiaYfz?l;E~Ivbp=$3bk_6@V^og zs>SaVonluD=DqC7mWIODQJ_#8@}EPO-qnyHqo)YZ3av$PWLQcl(GS@;+C6`N|AIVi z!e!pQDl;{)gQ|HfF)i1dq-@y>z5^?}${BYg**H?idLEW-E#K{eQ7$lTg?Wazxx7b^ zoz2_R*#1^7B7~84uR+GIP{;i!@EnGlEgDm{6pV`;6l(-{aS7Dzwb*<$Q709Q^t|CS zph_U~WRcZ|RGs=uYz$>eHtrcJ?qofiymk5T$EKMikBYI(tgBS*5}QX zp_@hE6t^I`qN-}7O4GAsL5U9}nCC*U-nXd@BI=8Oac23Y%X@lEy&*hrgFZyLI68tn z%Vl9MPQ)2qr>ib(s&SUP*5)@u6p^=%UD~OX=*4eqn8W;o4gSj9U`e(r(37Auz9j~U zK`|bAJ+aToChQ5&Bd)sfn;Z8|^OnpIgu<&Axqi%LSy?PXu2wOmW#42GBvwW*b8YOU zS?uiR58b42ZJy*;nVhk?x(y2Ct5=jaeQ-{9nv#b5Rr(iXO-Ah*%9^R^yE6vJQ1wSSr`w$eYm^t$iGDf!XJNehq~ z!E2G7eLlF^evt9fUl5k};nU^3$W&$k!UHs@V3P`K+hxeM3K@|;K0XGrmqx?DYz=1o zFL966)%#z6CgYS=?K>xKb}>=w&WA%+VH!+_BU_lbm>8X;G9~<3YrH}Vb0POtpTd;X zCxe_{hX`~xBzn|>!v}*PycluSsohv4o5@5_+QT&((q)?#1HSnIt-Kxxk zVJ2JGg&b|Lb=h~dc>BJgD&G@{Plw5T=urc}D5~z69PS*NIyd=b zt#ICG4NTnN+QStru{N2p#~C*rsv!$JDK#S2vnb(A&G=kS?%lh0j}#S6KcZej!wq@} zbh#|d%nTeHVY|D|fW!Z34^RqgylTKHt}qa<+{gu2%ykN{J;x-C0GAO6_Dm~2#ApjM z_~1JO?iScR8AV0((hm4+VBQ89q5}hP3^a%Ei*OhKG#TmU+dDfjjw8iz@m?>_6O|_m zOKUecIEcg14_#dNVXhHm87?_LpbDUfLC*kkCU{m2hX?~3(^gSn5X&^r+}_=VVRd@0ziAU!`nyfaJzh6jN41B(erIqBveht 
zkl|n>+!pY8a9V()lS@Q&Jo*$y{2ol=x@%;BI}Sb^xF-#})|QuV!c%DvksG$*hj~;; zy3*3@z|aaG@`~8 z=ia`)oy^>Vi_z&>ta$<;Sq-3!k?yeYhjfz_R`TAzC*o5>fz5wt`^xk{deD3K#$a_9+C-A+qv&E!LSylDQ z`BB7A5Ojr2=}MFlI;?PJn_yZxz2JSV6B?{xYf)N!4QK0YM03~D6xoUrSPAo8QY0py(w#{B&K1xzv zzT=gf2a5kVkEY!c;@KrxVbx977HeqX36HiQ-ymwEA4_$0Z__nv^o|7p3ZIh(q&T8g~iqDY{!Pl;oV=-lV8fgjp#F45&@cXxeeZ-z#d^KCGV|LX5Ah7m&0%?1m+ zPvHT%n*pjlz1QBQ{F>dX@*VO(FGWU0`NK?*T*oy*me{yIS790-X3RW~TG8;JK!o}h z6vg)C%a@OZnGsQJYKTLcoKp+fc?dt+<&iRqxlTg^cPWlbUdH1$cWL4z|sAS4Y~~IP2sjCC?atHbzn!jAMhE zG?nM%(gRrtoH&Cj#fY#DN^<$Lyp7uJv?r=jN7bG}RYJ&m!&ndlm@eVGh;c`uK^WUa zR9E)cRxr?mtUv?@i_F5|1EO=CiPSryHk9}?rauM?jR-h0g5fzp^kwE{-tw^ELxgsdqsXkTam`+?qC(2@(H0I~=#(YyKCDdqjlb zoPP3zf?ndNBdt<0WO$K5YjFfdMS;x(q5O0C7nssO%sUwh$%gf!^|UU-_XWsiOZxSU z7JmMnYCtHM+@Vs1?;&;Oad3tLA;{NYDYUKGAva~~eD^ly9*p3`r6W%S{IO6u!4wk| z%Xn7?jvkq@PsxgqdMO5JIZpv5oQ#aODpSx!wg#dgoZ`U=I*^i)!yRDhkdsir2o|g{QSzg%Jav)Vruy|GXtYBH%^* zl9@X(P-s->!p5db#QXLfBh|3|y_8%;`QlfF-7{t^^~o&rACsCe+!QG!k*mzkyQjoZAT zFx-FFw38-IJMLUyMe>9(HG3Flte|Y8-_5_ ze|f=&i|b;hr$%i8Yz?r)LD(@unginj{x5a9LfVL57VqB;1@v<6?hupSc4m%4|VAQ zm>~nXAP>S&!y_5_3D6%>9d%g%%s?@+cd!Rdf>Bsl*q(;emAEJ?iB|uL7c&Rue51Ku@PU(C2MDB;~gQeEgAmNaTZGq!i@tG$Ld^(sg-(2-TTz4^W z#D$xi8%X|$O&&HeSV#H6-`&V{VRvOj+Sr(}GEZ?}X~n+ck;3EI?bu#`R)Eo9hrtH{ zd;2{Bnshj%D{$og?k_9&0SwD;?ZceUw>bEJhpBv@p&aUGy2oRBmLxD!MxF4J8a>V+~gwE-s6H@qTX@-))d$IIwFR}aH> zg+G$x;;w=-R%b*9BKVH&stCB}zU1eF<6?5dT>ym@iKU8#@kREXS3VRb;qN zcsUa3_fsHdk-C;!WBm;`avf>l;`2dmEV1Nlnh7vKPBIFk7bkuu#-R>t!x5|Y+JpwX zPJ(+|hpVcCcQ|>G2!B5Vg=IpV*Lp8bMMgCnPEe2S;iJ~~#XqK}2h{EFjwt~N#I}_9 z<=;tPV6yLD0vj;&^#FI9;4Yo3;QSxJ&49xJ=z~J6r#JTM!geZjU>wos!_lPi^x4dW z=gyt`H(w5>WEk@UPy3^K`g_G@Faf~Upa?!;i{hVP*awl~+O-q%ec)<<>G!-P=;X*G z8>Tj(W4_4NBmemEw*@h(VtFEpdcw&c$o`4qztoQ)a0m>b% z9etGU&ZQq79Q+VWu8a`95a^E{0dE8Yg)(xFB$T7R-;=iGgo7#U1pXII=N*sr+rIHs zh>WsABAFpOk`$c z=QaE&>6a|jc4A9Q3+M}bTtJNx4DW&rAFdF{C&aG=(@_&|b`nw&hkFeE)F6?Bz2|&> zQ%EK&|6Re77b602sK6%$?&>jHX!F3uCXZUjsiv$<@cRb?U@XBXk1_!CBz6VM=2^cQgLy|rFO?u!^6Xfs;^u8$o+IF?G$G6DvJB)XJ>W7mekB9qP07Y zKx63Oqd5nE4|AM?nvX*tGua=M>#i{tK6JJ%?KBTH3%9%0`=fJV3oDX9otyGQRu;pq zr?e{gzPh@)ewWB#6ivY?37?5kLW$+8YpVl=&xEeZweM`sLKF1k>zE=Wp!N(?cg zmQz2YCWbNL9T!Cd5buUEjN@K+dg=dck~q%xI}D~7)%$6Ax;NG7jySqcsPYDm52s64;ZBW@1IBN0rf9Cl{?3qk5NO+i zotmx^Tx4-3CWP%>a?}SJI9DGrs@qV?6dXSbVvpQwYh)Rc3%k}ylJL|tJ`&4JT@7>A zeYsF(@W|Bc05H-gja7DU*CX_f()l|Dm!*5|UBsx_=e!p>n+$8D(*v4pxT6pimPdD!L zX%K2^_BI^4t%o50oS6#wUh?wbQ9Q1dYet5IgVHqWJ0N8Sg#$l>P~ zA4(9YpT|6t*3{I{9RDsE)F!J+rXejo#599U3WG|Rc9sMm%o~p12a_;3>4BH?wqjE8usYaOjbbo z6)rofi%2Bo1cNBnDeZ`6;;`Kgc#rHukAm_|p;@Z)0&b)Sqh~KfQGOETpDFRtc?W$! 
zMy8?p{`xdt2Li`q0=p5Ex(OlhsYMkDA&V-EzJeW1RPw;U*Pb=Z@0Ord*{e($UTEyf z{S)`S{b+W}kW$8!E$qs4o>XpV+um6xu{jaxlP)>KP((k?wc2$~{UJTcep1eD!DiBU zc(vBE_uE3QN$TE^Rt4=kY(h7$P_B<5lA=aN2Iw3{b#&Z&d`pQeiwb3xbB{+ljxidG zn!Yf+$-0hTX}g$X0a=;w$-|FQU+s}2@aUILI-<&nIaD#t%hPY&zaF)5ZThKHT<|yT zJ0-qnT*aX4@Y96&*bhsT=c9aU-7~3tG&Ok$+Za_$d$Fk7_AfiU(9jIuX@O<^F^}!W z*?^aa8Cm+w%yo~iH1LC#Vx~qoTAyRajwvNs0sZ1K%3Ms zwK0AN`P>xGBTER;Z8MTOU2cL~$nAVqMZ<@Dr_lIx*a0m(mrHz#Dpxu5( zk(5#;tJW_XjUT(!`-$vJ@ubr3jq&#?gqo^cF^38dgp8Wl z__%opE0iU5p*2)IvJ#(j8&!1nRd>KwUBj4KF~_ge0K=i5Moq7?^CGdR`?+}(Z|_{2 zwjN%6NzibUlHd_j!hg?})Z?MszjSTw#d7jmKUF8dF}eB@&>yov$&CGs-oG0U*xp(SB>lk&iq z(bBfkvdoRg;dTggGCqJQVRu3@s6Ett|IQPI=iEAv{`;)KsUA`9^SGoa%D=SZre@Dao$11P>u0XKQK6Lo$xsZ zT?efZXYS=RI7EI$qMyR+y2OM01yy`Ue`e^e2EY@W$rc}Qd+P}4V)M)+4|RZ1dY}sj zMQDc&tp)EY{6^%T(aRveb*b`g_j&_7QC5H4*1JAHQ}0Q>%Ru(=O53OMFJDwYKjH3d zrE5$44~$Fy8Xj!@kjz&kZTm?9!I_V~W6YDFUXKtvWQDQYTvmh_RLHXCPqjH_3x*#w zA2V%hq+~2_^Waeby+pKa6wu&5^r*1=Guh|bcWjjOg`GXTsDtCP<%DdmDKu|e9esSz z;=C;eQSE~7trDSmjkq1bQU%Shv{zP{;D#XYzE$&_M_!swj)T?sXW!uVU~<+`2cAc_ z=Hv?-=9R7U=^xc*9r+S+&U*Mm7d4;GAYH7 zl@jlGl?b-o;~Qtoiih48vlB&tA>@OxhGvQO>f)3kqdhuId(uwdE!nzsN>2Xaw?Ets zQ;U}`7?udjal|Wo+0rM3KAE&F$f`QNs7KDS@tr>dRMag$AN=?Y#~&jL(*?CWj>d3q zf`^H*h(N|z5kw=lqI_R;-*>*3`c*JzO2(DG)BR{N5MsP6P`-OtDukQt$0gi4e$^Mdb-w2resL>&`xDgOMQ{64gZCPTNqR=W+Gg~& zMrz&I=!4&)SMsH<49Tj-T)n^#Gax>mxph)xQIC?5>`&Z8q)8W~H9r&_d{KO?LGmRh zvdogkv45?faU)t4wYqel0zDm8RPs-DyojgKw$ur3HD% zS=PTpKPV(mF5%0Sp$D3pU~I9!$B}j&d1YZjAeeXu-P;o%Xz2Iwzo>S#n5L@s{nnKG zPSo{o{}VA8kKP;FH#kL7l=xHjFM3L{4LfONJ$+8&zLW-FXrq{dp?^ZBy=o|IjUCBu zY-#$Q5vBgOFDR$DgAe-UQq&*)E#RHbQIswf=DfJ+Pk3H4|NZ`_ibk`AcRVPb)L5Cy z?$n&44hI}+YrG?oNT%z!1_rwN{4;jC85aImZq=U=Y^R)idV8-XFf$k+_04O_hyQ(= zy)HMv@jD|P7Hk!-y~5d-^Iq-irbA9_VN>cmnIB^h$Ldx!);->=9rAV5?uE{BRnTcY zqR6wZcwCB#gjG!W$}dwYcA?5mzt)O6-Xq@7?wa074SbauLaPi>79Gql#&0lED7U~` zgM$SBT!7+@;wBcuB%Y)XR($c5)5?c=`HLbhF|}Ld`}c*p{jd`7?Xg2L)7h+_&tcOy zz6Y5|jy12e46=_1zPWg6BEL5HO77-oYl^5GifHt{*ygn{mHHUT?Z>aahjYv{e77 zpq`N&J@-aig`^GSe*dc>Qxp{|ZeI__E1bPO+kp=CGb4_Cq8*Z_vuGnGdy=tdd2dtf z+4w04Ac2a%`i)wzTDy9(fxi# zuAA}G$JYZo*^DY_!F+c}LF!X1`QW+E1C};vCPoS}9q>O3|WuXR@XDu+}U7SLX`~e#!#3&Aw5Uhe=(oo zZ5UPP?WYI#bV}n)A{3g3h9vqoB0di9QHXxay@cx>Nl=qX&{ZPHy)DPbu37c-hdA}) z#7Lo2hM#MMH^LMyCikZElhyZ+29ieu$8Kl8#ayDTrcu)k$7UsUBInz+BmWaQv$)DMH8>(F2YZ|-Amsk1%) zziRI$9QcpP0ABV6{D@?$$ETmj5?YFMUNA1iJT5*q+N2nNsQZ&l(ub>)MSSYGP|GU+ zw@T~CH6hoB{?2de!cdfoq|$?r2NVS5!tRU+GRB7y+#+!Ath+hJAKtEI%6jdI(WOe< zOPcTdwbZ5C$5rIm*yvx3`_-|%BH&NDVXTgI>w;)vgS@fN)r|^l(>wa|sE34lxo>;b z-Kp(7FINs-U&8)(I~gC2guJ|;>G7~#^z*}H2&(U~9l4zKf4Da8(mrG?IptW0K zc0j3%wu=!u4dBtvLAhYLI-mP7;Bu^YW4}sxNd#;QpDI3dvB5{LOR|Yy0vp}OzlPs@ zsdSv4sTMOaMW=p!H4s&?fB(;7;r)lM2Y81z-K2|4^_Sj=XSK~obX zOBs(B>nHA{y)7a@wz^R$P(y7%ytBJ+n$w{#KeZXymRv%a&yec-7Ki$6waQ6`>yk~L z6frLJHsdZI(yRToTZ|++iMw=6E}fB);k!4EiU1>nG>Y22E9i#-`t+5Py@3llqIT>3 z_K;@(7Zi*9o2CyP@Y0)~>*@?63z>!C*X0M_o(6ZfvDjL3>RgWF>3OWsJ=}kGU}(tx zUZ&D5G29&QgmHDA6z^%@idv9OmK1~c!OB_n5hgz=9?NVeB5O*E4mNjr_w-(z6+10M z?zVDrTf#*V?|~bmiaV)-iz29)cip1#J2ib#uJ}Ih-nwR&9+S-ls!rq`F62?C8r5)|RWNr~V0=hq_(i-p`uwzFMMGAJ0)lVwOz zcfP5oM+E2c<%`v8@{Rn28iSIO=3)m5Yq^F?{*2~I4VQ4Oggsv+iWc>IS>v9b7PeYd zG~Zn_$vzDU$kX9ExCdPv5LUg9$~u?TwSo2x!?0$yWF2{J4{7oBXv>adj4{qCF}xT* zuV&s=_dX%KX(^-zx1;Wr*0AK+}M>`HFzAAQiucZF&@o>3hO zx~gBkM4=Co(4d3@;t2EDf&}t$O9qXE!$tdn1YD&_|KY+-%`)-j4A=e0_{hNzDe?)X z-On1n&PBY_uRbR|XxzRvNH^|RRYR|qhtk8+Hx)upIUUQUr4S>Uv57j4Bld( zLYb^w=8mb8)a}3ddq_cRLe+y!Wu_xu@=+U&nnhfcwssf;6XBq7P0wXR`z!T-Lm12s zIihluSUl(Swo{5=7sZ2P=AX+~=~!$Uhp@71(yD-3L0mzn&n`Kbft-#N-$OfXcF^SP zWF&@!2rMV=zZ%T_V~JMAB~@~1B|-IO#L=Rz5J`0~k@%*Gt=*P^x_`9GKP 
zKV`aivoK!kW^_@ef`j^AKOaN;c_MxPMWtGE*LUx<idq&L*!sa5#Q)Ll&2>P}9BR6|7a-`=_rq$0x+Rt5be?~s7_MKHbEwkr1^~%O=H_tzoVh4ENj>a?d zcNslcui$;OGwQLVybo-OFKm5-DAVGPtoU2=oiin}bXfqj1EZqurQ<;CxJWe=snfJz z9bF%n1o1_C!GUSpyxi4EPLj(Z@+s;vKeu(tb7tY$m&UnFz!3Xe>&_Eh9d!pY^Js?_ zwb5`F=l5wJ$~O7eay%q2ocOonj#t`PB{IVtGA1^*@A1R~cj=0YoRYc>(7dbAh4v`i zkqOd~$8I_R&gbC-=8&dGk9_d36=-j|*2zrsZC=8yhPzVypBlT~rzI?3MA9HtlO07!UZ1*w@@j3FrO0qTPjf+Yc?$G$iELm)>3ma+^8&X#b9J@DV0_n+DyB2Wj69yT3L0%25AFxs}cgTo_!kI zqwrRGzc@8%7>hdg^ML`3k|-}%TAdoEu4At$q912r>-zkg!jq7kx+W^rSP-Gt=r1Ue z70;h|Up!Ljb1n3nAES%b(3L!_#%3h%Y>bSeu)6e-`yTmCN89HP8^1#=-~d3qh5X!a&o2&-2ed?RD_ec0x~Gz>QXO)_KtI zV`w}Co%Oy;T{gJ=C$oF=^S=3U31bPKU$0+ZZl_IT&arxxIjN}7l8`9#XGIM;x>NJh z;pjfJ>AJeIek0kZ?k@3pR{tZjc%)zN0a}Pb>!$U+c8jxw922OWM|usZ$_A-anz!F5 zK0CNK?DWJqRH1odLLM&QABVT7_3pA zJsT#0n)-_aa*of2cmnqh)3!4ecHemG!yO5^cr4eQRn6W|H9wl4{F&@G@7y#$DXS%3 zd@q@CUWI|4C9Oc3ozk)(dwJWzgH3q~XgK-5?z_W)RAKeauAsu53|%x<7{mf8SE5CN#0euFu1R1ke? z^^u7I)+N$8GE(qt;lqONo9|#j?k6cwA-(U{^YJ@P+=444w`2x8(YcQy9!+q=w8~U? zz*opoG9PK28SK^kfbHb0`}_n}bxJ(*6RlgY-F>i}oB1%?d0sj@HzT>yf4$G%nSE`y za-=Y;#)|S<>{frFkB~Sv6K{+7?oAwplN}YCQE}a`^oFeWVDXB+kll@ha>%+(7j7a< zKGtFsam)3O@crw`txK+m-;F50Eo9269a2^DHnr4ZQOFbD)kKuO=tz?MN3jxH$=Ylj z{-M%MVR~QC30ju*9y#;kUB`KUb>Y5@LrI!X%1t6n{Te@`9f0*W(w!-9K8{Da9!@k}`90!u*f6Q7Gmw=~by zytlAlIe88JE4d_Lq+?i|`yuNo4w@tYx$!ZYq(v)kZV#IlX)73X3RSAmW##52KTx?Q zbI+^yd(2b~WdJu1-gRaLh>)t(Bsx~)ec+cVta z;WPfs%4kXv6(gFGUX~V}gpwQc?R@RECKq^wXYfnB5}NAQ9-XMh7~vgRnhX7c4iZib zeu%(vJQobS>Sis;q{9imU-Wl-O1<%2px@#6io>wbhptqWKcl?UrA{S{OwaP(*1BRrO* z=B~lwZ=Yu&T^qz%thz7XqNPjI2?e(J7*rr$DPo*PmT%Cp$jd#1#0EvVTfQXh8he`P z^cDHgrxQMOax#6gWG?LB;0MRSV_xdl+R2L*KNGOJU{+()jNvzmek#M^y|I(EH>p`?sBEw^X2-3mI%A78BXvMMkcVG ztQF5}xP6e%IXfefhm-0du?hA4kj$npGiAd4G#op!R4aeQ9mmU!D1r7300_`$hukd* z?b?WFK48A~XrM`xT4W*z=@7I)6}S-G&}gv`rOTTXlduJkVRLLefBF5!Hxd-mnz99y zS8v}y7k8L#mry8~IOuam#K!s+8c-G_TwI?T39&LB^~f@)`GKG(3ubMzKvfb>X0Uv40>C zNc{EUF&5xG>^(;r^vC%-{~5Jy6PP$ZjE7gy+RZX<8=?6uKEPQg z{!*qA&raQXk;;(YhqJXj3~RZQvhr<4V2S0gx*sSK?z*0b@$nxMv0Y+5UCy!M<>m&S z19V-4q<#)^CC)?`$sO!d263?D82!C}x<+&iJ(NKWBNgT5gzwheq(a&@Mu}4S=g6cH zMjKEzkrb2c@SeGyjRxrz%Bm5D1-Brrqbw7At4`sb)gyY^Vd^GL>z~ zb*{hh5)Q8LKCyu`mXn){jLkFl&o4gvtDujM`ni?Pdr%s%N@-W6cxozo1z3>`xZI{g z6MoF2wpzP}A5r=IY*96SJKKv$bPl_E8q#HiCnNUs!<&2)D5oCJ;%N0h=5KF$f10I6 z=F<2GC&wy`RrH$4Lsc%ZiH7T^O|-7z5+Qz$86gixH5}ivh>?=Qco)Pv%Uh4m%)>!v z58S+@lla}fOLipS8Qi{Nn3>nBhRJVz#P+ild@_8ey{`m3X6^`1{0x%u%ECT{*<%(P zTVCcP+5bc+32j$}d_tsRYeR8Pqs`u;g9`(1Z9TiYnup6Dqx$HCr=vA(*_0Aq8q7pM zX9c2PP$c~)EpAub$9DBgmd=R^rJMb>`?u}=xij{^l>^WiS+8_2o?gb0De&T)As z0Amr;zvRnw7V=IJm*9)}I zXU|5;lZAOoz`qZ1V3Wk7yA$ikqZChxJW5hYM@p$`7&$}p#f-Yb(^!SrLs|cFxnjI9 zI0d+W@Rz5?pg^z%D%{zV&+A+B*XJ8}SxC!``LJ7YRj74N%#P}C!sqOz(c11NQ zAg@9YeL_eZVexxiwp}|Z{^KK4z4Z*LX`=|LFExwXuR(bR!gkxT>wpDr)_O$6u|wxr ztK_7ccsnsi-rSry&3%(^MZ7o-bnOVv9Y)4KL46Ixlec$+XsGXf%KHiAVk|ch_rnKM zqKPNFJY&D%PUle@s9&{s5O|r&d#ndZmI7G|2x~!ms9j}#2Z`|kjW75^V5YBN$RdMC ze_lK}#zI$%pVg{};36g7klIL5l%{Q8CFDn%oEAz{@xuTL&hJ_CA%5_N3KNfF&Mi?b zJsPU!UEd@}n`+6FEJXMjfaO*D(Lnx0;G`x0QjuY!NB?hq^q2@ne6?d^@= z3jqWSnxZ9VM05*aj_VM@d4r@jB=3x0C%SaBu?q{!Vv-|DKC!)Lj*{rL1d4Q=-z>vOEr>u z@n`G2Pgdtf1npiYhlwB$lGPF+vUv%_qbn2WG$61s9cLfoKwl)~L1HKU1Bt-wbW0+I^1Y-pRp-X| z+1OT*0tSudiVZ)4Uhb4>GdFo>1P6a2*6C6ilqs0EC>uEoT@`X-4kW&jQ(L%3(s@!S z{!ftL0*K@PYUYFX+01%Z1H{t^DGl^d+%<0ZRYW!}`aXCCx^B>3?-yYXxxmxn4k4%U zka1OK_%8Mr1>%zd3l^fA#Xi7BROSG0JY7jbHN%1gmlVjjAY^C14t0F4Y((k5jF#M- z$^ZoUgiwMF3{)V~0MIn<2Xk>cAd!v`J*2FA{0i_%9=1%sI* z=Y2}lFue==`PQcH>maXMyP`=C%>pRguSatu=bRYd+Kx9#Pm{bDl-G$1*Jhy zFfW|!O)S8_10focR{}nRTXXY2K!pFh+<^g{0UrSPhVhybN9-}`Ig0dxf^i5+3)!?G 
zMB}^NNSF8CxU7)`J@C=MQigbh#%yy-OQU0BW3|r-o0DPB@j4x6>70UN0kQ9Ha$z>P zOlU!PhKS;7kJcFABM0jgM5`c~v~C_e=+nWX3Lu+~qA(}mtD*vq`N`f`_m}MUcs>iF zbD)sr7reXmC%;HlM^hel;0XYxgEgQYi0j4Lcc5M@g%nIA-Ugxc0<{i6ee~i7ODb;b zHG7EeeW_ctGdj&ZYOT-~R0fzm%n1r2cCsy>UIkAc_}~yNJTUkWAW-#oRUD|?L3}2G zF6U4hH13x^s|K3^bOY5OKg33)e|%aH*|$6*x+WOj`bwstVxqOo?CXU}9>!wyy_q6z zNX$TnsSty}UQ}HVxOgR53rI1BRIP)Ruu+Vqac((2OOloIjGZn-<;cOsnJ1@yiUz>tlA_0qU&=kY}thVEPD->a(-ON4;!Xe)x69Q_szK2_KOme!V<*Y~y{ z6cc!jZt$9hA>`G!0XhHpR3nsW_p8~VKOhamB062m#QSy3$2U1Mzusoz6=` zuMyEg-wk93K%Wp(!jL{H97-pIZG3v1gz6GUEu=x75N+1pk43mrrp^>y5sEPRWALg~ zBMkV(2;)snU%%@+aTyPohajT0@z;xI!^d6bNF*x2=n-h^H7) z{#XDVh9I5p)cA?3)r%NjL*b0;F9GN7aIw(^@;P$E16S3c4DS%h zg%zD-hJ#sseX`)(xmV}pr6e>0-r}5v69Cs8phlA$EVQ|h}a-psZ|sERN13MY=rnI?HSO?e#A}8DZ2>%QGu0Y!Z_cuTmOqVNfL(BK}nnlye&mbFKuM}qY z8@4$R!U4bd-C_l%;6>p^9quPmIL4)1Be8|N}w9pZ{kALZJMRqjsM42V&s)x7dp+f9rnr>A#^11pr>7s8)rgqaw*wt^Bo6D_e@6Hsl&V1eCV@dG?a zT8?Oe@bb9@ag0!}CBINQ`1FTMU5O1=k%A+7VYOqJkDp%vipMySx7s4jxg(!V_J6TY zOuu=-#n}A`u`Isa6i01}gV~IwGYR_hX9;vI7hGg3RT~_zpvM*a( zDUhOfH5}p~-&pcPf^<#0EFDV9z`$VBTY%RMaNJ%SP1k@FO$=6tCga8#hyPjx;K4ig zL2YgX7!^ZJHoF77cwz(8$<9NxGh_*wFU)VEL{Bg3=SD~0xkF=XJ*(}+w{vpx+Jwi3 zsf%olZ=(WE$t3dz?l4^5y)iI6x+*H=zBf&34HS(yqgxOY;2qQPnrH5$Sat66jV>cO zoM4KBG7qB0z2>;5I?FCUj6k$CctMcBojd< zLS#qV>CzG6Eaz8(M+bbbh{bX1Kt|JA|w=0AlU1Bv-|G z)BS4p5J)Gd5qC&#&XEBg;^=(>u`1F}o=||)8$t`=k{K+;0Cr-x z(e>z(@51A|ESq|QzrGitl&4X>md>NpM5G7GO0~ARmm9c)q;3(99-YnGiCK_uuuanK zRo*ag|C$tHH~SR0riis`T8DrM0C8USRij|nJNHX5rG3e`*@O>?>jGmwWZ=Qa4|IOG z!BK)q4-zag?1QY#`Vk;8r09TQ(MXj(S2^VtNierbGXZ#>z^Vlg1t>Wn(FW+ZUK&Nn*eDLJ3sJ9LSF5jN2R zMHXvT-l4xxW*%nNHnt_{h8C#U;krfW~;xKl>vfXY;CLMPjl(YbBq4vW~M>QHExar<(ST+At6_1Vm#>-~htINf?xTx@LLpb5U z&fZL@l+3pUKy<-ZBqM`!Cg=~eN(5#;c=?vBmiWu~5s(5@=p*{Sn|m2#;n->hO;JP( zu${@sDI#N25vwSyWjOZ1Z35+ZT^-DiCkcm;(j2VMw<0e=kc3p0Ds+WdIXFJR9t`(a zV;&pn_ePY^!QlhmG*GSm!8i@)1=zLL)|7t@gCFac^~I!cOa73CsRycL^;7>oVtx9L z(hEgsc7D}zjx=qGGE0C}7_}t=IC@=9O0qBhyklzGCqDfTkOU?{xIT#_8HhMv0myVH zS8Xe-J%{_rK-16ohz|=J?%$_>xiI(*+{o1@(jH_x%>jhhe*?n+x)C%YZtuW*`|r@z z02dnqz~GK+v|BJdFgSQMw8_o9o$u>+FkZR=^CKNfOYb9~XzADe)MQ7T>OcyB$PxRc zJ|=LFBjqiUumfJswC&FuF|#7l7Bc+J}`kV zl`B>@`e))K?Q=>i&*(Q34cpQ0R`w4kQG!m#cILAjr~cHY;z3VhV*a6?|9)jB_4ML7 z=GW6a^*s4GK}M1~GB^q$Aw{0aE+?xYlC?wa=0$OE(D4%y*cVg;cNgiKp}a*;Ey)z4 z5y3p9xI-oV*%{Izk(hWSBV)fl6gm%%i~s0>@UQ*d3-Y%LvkJb3<=@o;1UEiVJCR$( z9TkHkF(LQAj1a>NfsTA|fFmqgaAnJWW=n+harB-iS~i3QNpXVQ1MsRYG+on&K26{7 za9dmzdKPMowgm_kDsgC#dHU>`s^)u(RCk=T?!ZjeUpLJS=&D|>0>1iMV1on)waHH2 zAe<7l`v-MVOPx|1X=%7ELT@M3b}@!5u4i~q{^7$5XpAR10{T=yqbwU< zT3Iy3_@2-RIU;R~&4FkYz~3vwfog}|5&-(_bTCV5dU5fU&K!sA%EYK?Dyd{;;}Blm zK~z}w=}&uM^b}LuPZ!OT_fuOYMBP#$w{uvzZ?%Z`O4re_w4n0h4kr|icjQE9<6gR# zSv|4k`=z_i>y((u))KOTBbv}L%^^#QB~5&~Mcj=|c{ zP9_#RJ_kq(5n2oApfnouz>y4SIcC^l`0c2{%9K-tCXO3Vx1`NXpi6zAj6$k)kv6F3e0lV>zuV1X- zfP=64VnP0k8Jx_XJ>s#M;{-ZE5>IZrQDxrr$R}q zYNKsM0VQ!fUxtQcz1E#drJ3)u>UIT7+(tRUwv@U{OBv#1_13tb-bF@A0FwE3Dp{d^ z#UtRrCZ+y)-9N=BCftSF=rf`iDC^rC!v3NSn z4_pGsTIRFpwx92yO7))C0S|ms`#&rwOQ39#`f#cM^Df&9 zEEt8l74?@#^pPDCI%QuVz!26>fS+cN695cgL(R0J2K3Z?tog5^XWdAq*R=C}#HX7i zXlH(8F#?WJ2o35R842w)zrM!@G`BiN9Xn+AzcQ^XKZ&^_-(^Y6wKKHxGOtK?3UMu$ zns<+Kz%n<4F)v+@$bm4}W7bPTuSXxd(W-KJWK-MngBTst`x|??=h65QBNWJ_2j9?ZA zp2Gpb0aE-xN*Wqd2zW&B%J5-v>QzcXI}-|_Mq^c?6->zHf>wGtZ(t||+mA(EZ|M>o zu|o2_ei>Z1oM3lc?;<5df|I8bynzkA4O4Oy!%+#dC632Rg=uWGI?n61U|WKyGEx_cl#dY7SalOsS$t)~<-B6=Pybqk_@Ub|5b+zByR&6e36wd3ijz3EX{y{f0yDOU!t!J`v45 z?z}B2zme70X`i!^iyCtBlW3k`b zISr8n`+!KWz2R?t<6-74S0|^fk8v;-edngCTk93P(&FtVI^3XnBCAe@UDY6#x0sUo zDa((76uZBNMRaDpSh%&To0;kEn=gI5=8&VN>TnghZ00H78U5E5MtmzH%= 
zh|i-wzjsXimi|icm7tc#h3nKsuk;3cWN4D~G#$p9-CzD*?PzOf=$y+GCzI491I|qJ z1UAfe@7-&ivtF*6b=)z78G)RVQZ^K6+8oV5-Y$q@&U*xG4met~YE?)kB!bsTrB%y; z>9W}!jDb{Igxdpbn&|H3l@-ODpfIg;9!Ncat(6huGc}FPsb3jCy1p^fB0CrhX@-zA zz&ybflKBsUzBXq`Rglbp%o0{uMQvRyL#~8!A}zDyLv6^+yfI4wIlvQG*90?DCU#1v z88FFrYZxY$-psoo@VMX@5~VQ*6~H zk*Wg#N!L0_UeVMI+}{D+Y2qX~8R)dbQy;SCLM_2f;d2Gu?wMv;j$;q{wY#gvhCENU zob^I>Q3>t-QN2_JBDJ_r~05Cwj06*l* z#TSV3-Tb$*-ul8OF#wV|X(L$r3Bl8hoXm#};?oA>hu47hmBc||PKg5uz74miRSZdK zLhz$m2p<72^{j*9z6U#gNG2~E37=iTrz@9m;0wOtgtw)^OK<_;czN&h5i|=WK6AJK zm@Nw*A1T!SJw5LRZFyC_$8F23^`#h~&VZc){4jIvV+L=v-xzn^92y#mj*K*#^9yW1 z3SeaY07C;0n8$gkzK|pW=ewH-AY2uRLO}c0Znk4a9N5s-VgYFZxM&RSbIfyL7`Q6a z9=VCmkV`{K3L)f90Pw=l&~RW!K}o5}ycFyjK~2V_wS}Dz{k@*(bv|Z{A~=qY^J+|U z={Hrj&qSy?pNyqu>}+B~FqYSX&SOZj60eY5Q%l|GBPs zuLUIzJn%ohJ<#d>+I!FjUR+8=^Jco6klWkWS3j7q1(mwX@-H5^Evu;)OuvRW38-Ux ztOZ{#0-h-zq9S8C^d$i+ca2jwJ+s;m5`2uxh9R2Q+4*J9cyCvk{s9HFP+??@6k$k4 z5!@1y((+e<4)BMRQ|{ljK5&HN1-Ujs-4DH&e*h`!8yyX6pUZ?nY(lB|=5#0*5|ja6 z--Y}x^=Hr8fz!~nIoIAhb6*Q?INkdBdVgT@w%;#)q7EZl%XLc&=pe#O7C}@`EjvFH zD|i1~o|ef92iUmxp5E@tc*9^~zUztiH`5IgWRU2!u)v{Hd9iBj?U?xjr0$&^kT9%+ zv62qX*?1|kTm5&4*5e?CHcrv0gW z3dJHqk4qLJC5yl7M8R1W3bp0=({l@5X<45?11SIyjxrMTeC*M5ryInd3JQ?xHW&Ih z3dSfv@X`6^NZu`W#4*m#B^yCw1_f30iv?S0X$j53lXZ{aVHCL~EzPWk!sYf*g<>n} z9HT2iGE3VB_eRFMd;>g`zkTn6Ve)c==INj;$Gfz!kpm7GmBMpLhA-;qfAj1TE)I3~ zr34R}{;#pYY=5#G@JPYiNv5&W{L+WJQn1SJ{S2zX(ZYLE=oDHr^_h~VE8VAIuR1W~Q)OjUYj^hoL|bOm)TEH9;KL<;mJYK)2jE|3U@;*Td9I+i5`fGuQ*hp- z2_WC^BnP(6AGgAJg9Mu37Qle2-s%#r-tJRC4Dt|S z0h2H!o*KqU$iz#s?oCwk2A%tuO!wpEN@c)70Ta;mt|5;S(g_g|uStNr3eZX$fOYuK zqXs@aWC8-c1>_3{$Nclr;z;{@9j>(}H)uUqcoGwnet)$h?d)Mbh9=5EQHe zmoli%;|x5gVDbSlHPB{wZTHH;6&VO8auDlf8(7se=ZvRGZYX|8 z|A>?UIX|7Y z;&^)~@Sq<30k>KZdazK3GsijO`e7mO-2gLa&<__)n0;0_FF|&7XBLEG0f!1+Ic61o zA_$c`5}*gXRj<>7h2z5^v`jxgC|wZg0y3yaqy&Iboxz>JU?E96pNjGqa>b939HQqQ z)}(Y^`&bB26^;xh7}I5D-hxMyg<5425Ta-C@8g&ofU}1yD}u#DZhh;gYwL?EE2a>h zdAy7|DH?{a)?7>=IzsF;$aUNuSE(Ptmixg%^M zOYpjO%h15e!iSEh2t-OIi?~F6vl&8?7-@&1$>&NTxc&%w8juMN-Fjp{f(A_h6z0Go z1Lh$7kC4!i;V)*FtHdUngwgKEXcGE-unQ>{{)1}Y3j`X+e}~Ywu3RdHUJbBXWib!L zD+|?Oj@W0WP~o<5tDil$)Mkit{BZCYYo6p2a|+nt&aV9x6&0{}kl0)wG$tTqS6v!Q z`Rn&h%-Yz)!0wj!YqlPK64N=sKeH+nG5UNhi!o=Xn?o*bMc~Inl`~hiiV4+^YS%u_nk&!(e zc^W--1ze!)?Cez4dX>(>P_4&9S&YQ&LeAc6V0Tf6PruYrX!U+5_T!NbeDa1sYev*7 z&z=pJ8!^D`As3>ySW?~mwlo($bK<-dhDsO83OL~2WU@kBq-lN~v`dUjC~N6$XvkE@KDCusOsPHfdR-y?FAzEG~n z)YL8z0Ts~rD8lc0S#_gQ;hRo@91%V>ZN#%{!@2_ajc@(@s}%k$JCDKUhaBj_Yn7B%@y5#UEIxo&u5P|w>x=0oUXbKnw}LNJ8aP1f>tu{ zQo}*@x4su07Z=ztb(1!N6lwxsUQQfzx$e7ShR_VO5z$eJ^#E!`ZaeUSC{vtVuthVa zIGVggCvkf`4O(({<$%%GpRV&HSY6c~ZHcHq_~2bB%9$UnIE|2sN)&0ypumZo`+ zo84((2jEhWzv>5c$T&BVw}s1~c(GsFWU9sBPFO$_NB2{$q=`BMw1{DH6$#Ky?kkK? zUc-EWo9x41Npctfn?oy2q-saU+Q#}|t_UU1eML%@9HNz@hLpfDloSBB1_-o83IG5} ziO{y#B6=V&)VBHgsmo)1y=`s93j1<6@Vn2~z52R!rihAh-_L5seU+z|zv8_Y9>1Y= zH$QuV;RnBBm>W&`_W{gPostsPA@gHWo^YXCEf|5B$I_|*r<_I|M_`OJEz3q=8*W_K zViKkmDV?K&6(No}=&pd84hBQU(9&wtpFD?*kOA%r4-*)yp|Bzr+kXYcMVR%5CfJoD ze6|_>ljR{iL^#HgfuG;O<&SnB7ob5SQoG@Rc#mTyPgGkRs>oulSF(ovA5-TYk7eJ-{ic>wLPC@+ zlv%dQ&Pqj!?94Jt%BqxEgk)tzp)(^SGLsdBblM}?tITBcd=A(BJg?{a>%OlmInU$x zjqmt;-e2vewe%t-HR5?1cwoHTxKVepdo{-&C>0P8N%1gK9g-;rbY0ogb1h{|4`DI! 
zxriG7;J4Va&yRLN0y2bPX^6ObQqs~mWR{ti0fPoR{8e&+&o1o)=((Z&gKC-VN}DDo zqRW#-@F@-5XK6jB%Vc{zm)4f;5bsiSeXmxokAIi47e%Ny8JfVdx{SU_e%E%xZE|(n z@79lA%Iu#faN|YjFiNWDPz3(;Sh}I{LhOE$6>!nQVq(~;J+~)T0&(f~k0C-L<^nU0 zE|i^I{h#y*hac9~)5x%Dr24HWt6rUWcR1-e(3$zigx3*T5kl1Snnwnhzuk=*v=3af zcaUu|?;G4WUB;T(XqZV_?6y9kC8#BGL(I3x+37mxHbdJrx0|;fzHE1)F+I`Hfy$}hR9pLXaS<`G7kS`SPEHsA z4O%S)5W5}J=c_h)VhAR3Zf8Kjn9S= z_v7Q^{fCXUwVP6G;4;N9`Nkae_t)Go8D8nCEQab15C0-&DiSFdMgz8rFa}#bTrKv1 zf(l)sYzkqTf!N*OeR&RtpCyjm_4dkBtH)oaJhwbwEVBw;U7Mm1qb^~b-Xka`;!CtCPYf;t zrT40BEwa>XHlL)ss`kw{Xfasa`d+{7i=fQM)^jcaEu^DI)f$v8Y0;bmbCfH9i}NQh zTTq`yMo+E5Z9ck0j0+I|B{(8M)muy!*{$jCbL#NDbj@P(b>DHhGB&~kOp`iu1aqY9Q=L$Zs2nL+?7i4&bV?4sY@s0pH%k2tH>ASqY*@&;2 zjPi`8gzPj*Ydja?c>--xLIDkuF6vprq<~L};~o@kKNKe#f|Yq2c2`fWi}f3WN8(kY zMo(PM&6z;o)t~f|^g-z`%SDxAv)i`vx7AIw@47%syLD@sK8UAAZ|{|DD0oDo0nz% zv%JBkdj8A7wXx^Vcu7K})~MNRVx0-~d7k7UNs!DF{6za?r?0;yokj5gvmWM->{j&k zi56eBBIxJL;rE4!AW;ptNERWTj#F<*B?*>6mFGQEt(WgEUkQ{or70>9oGeMt>z;J( zH(xlW^^DQ{hrjS zpO;%ynrFYX9?k1IwJs9n?AzzdFp(i2pU8OEF>z(zkWw=cX{j2A3LSCKFSkvdM6%Jrn;IoS5lqxdX=** zt^eZIvKRA@OZ@x<3q$0*caAfYMS?Y}yuJ!h3X~?bA*Cqq3;EBw_6A(Z7=g5V7s(=j z^TNn(xif;ash_$V`?$GNqS&U$OYSE)%Sr0-y>)!?E)Ag<)9*ZboF&ff|M`@S=QL1c+G}D}o{g0plX1RdJK0GZT>of}kH3&A9lLgXoG_Gj$6+9dK-5=$sJ$ zrsG-y7Q0VeI(k2r?n>QwQqM3K<$E2yrWM*p!CFP6lthE%5X+&Hy@E($VD$*FiYi-} z-%08UWNQlG<@I^?nY!@xfQxF)6b((!WYXoWZ2L>StvEAc61dS3|Kl_F!{cVj?3^y8 zTu_kp+X($ghqrUDtU|`7CQCA|R?R)wxI=kDi2QK5@nTk|?LnhE_ts%w^huN^>!7*O z^*#A*J`|DGvmVBUZ6fwU>K6;VQmtkpK1^@*KOl4Ztj@mRUx(GXWD*{%yD6OQ*rGTKKZwA zm4l9YLyl(eJlu^^xXa#b*cnAukfLE?jz5^V^nmo31$w%sOkv8sAtCajEKf7ROLSq^@`o?F_ z-efLG@6O_DT9>aiq4$fsLWMdh&fo9RU$)_)u1T4(7&E|Np223MIpcjxWUcsKf8{#^Oc00mHn~4txSz{zO3->L*#eQFTK6OdavO2Qrz%@!|Q8W zVz;GfZAujC{#IBza~0PqMrn-4Pn9RnOE3sL7V}SVeCygFALz*%uXb!!Q_3zcr}HFd z)!L$zyU%7<(t+DL%x_+km7{f3tX0I*e|Pqr>OLX$R#qxetXGPsOt@r&qLhXd6RSk4 zW_(fD%OnYJhOzC=ANCz$_~a{}I`p&u!IPXu!;P#{Vs>%TWf}FcJ+8YBWWLL5-ML&L zeZWs~VW7ou=JG93c<{pXr?+9C`WbEQGo{~GWN3(&C3S{C+TjzT_8%d=t|qH0$+biIp?@b>G9IioRPQgf+9zTv?Wu?xaGcK`c+({bCZlFX~z zai+T>X8gjjMoOF1$sxkVeltb`^IU5Zq~0|)D|vyKZ{+%Kna?M}pY`$Qn9m;iDRQx( zul?*(`KHz2(qik^7bEgdOVO|jiJVF)*leh}zc}gc@{K3*cU^sb(&6)2n8Y zN(7y?*HY5=pQOz9+Gy?Ss*>5p{H5t=kc6km8JG(`E1pvIyxqoaY-Awx#(HEyby2gF5j(q7NZFGkhFYh)@W5J)j zR#08e85pVBKg=5Lc_+l|-G4nz{l*foZ)VTF-=m+p+aH(Y?DMn{ei=)ZP<-UE z?zj9{zp+jPqo{q6`FS8s5OtS`)$mwo{MyE6DS3zQ{1=X& z#aC;#9}{}kbFZfV=eO_NM)ZsJ9JkKh__w1zsymD~bhYrB_sH&{N?K=6V$)Q z*(F5oF;nSfFP4qaU&{@Nzp z$g=SC{@~Z)sW0}Y#1}UDE3;!*c!9Q*2Ef7R-jBUqWK0ZyEjwIUL)KKO2#Zn0)$#*4 ztel$+X$pS1mzKxL?OHp||8 z)_>mfnoA|i_#aGip>Jif6-uss>DIK1=6L%j9%k&Xl88kH|b%^B-A#jpHa9 z76iJe;Gf8oDw)@_znLGb9BOeapP*AX-6W!@h-i!|IcpTR}aZ=+a{0@ zY?JnB+-PLgO!?FC@;?D%4wU3ituWslyt$96mJ9%$BlTD@1A$5OeyQZx+=$ z{O4E~e-tP8ufOJ6?|(Vev=qu~RIpyBI=LEajx+hyRJk)nhqXX&rdh>&fz>xWw=Vk1xe1^sO%VGm(SQJmZXL}dtd_SUh9QIZs!7R-p?Y6H#wAxzu0)5pzICa8+>18 z(+z$~(WV|d+rzC*r_J8Qxo}6QIVo>hqQ19lY7%lyOOhft`=4bZycE0zTjpjsgxf@* z)k2+Tcjs$tCwTpYX*X-H{-xh&h=mPqWzKgm?)i1WUwq&VP(8%}wIx568~7J36t& z?l)s)ydBccl=~WWiBSHvk4W+SVx8U6`RSY-|7tTQ>t=VIc$Of&C7rz7A%pD>w8NTy(eIyw>$KstHpPDnMg^x^Ig=K4#dG^lfYlFXd zxXbm<2j-?ez>{XO8nL7r@|AKQTeIb>u}EwEo~UKAdgM$|*OvAxlcFtsjIm8Y+uZ1< zbWNrd92bhejs9;QxUIsbH5Gq(Hu@xpcJk&VPadS&1P*()`Ev60*CjbGpS_?yOD9|L zKCI&lU$hs=n)1ED%aL-bsFI%*=PZh}l_zAj#zIJ*>%Y|X`rw*8o~4PCtG&5-53y*? 
z*5ji71AR4hrGx);bGE5DxippBxf&~@^C|Gb%dv7fkIzM2^BsDVxgx!GH&|QSG|b}n zNK?bJwC~UCv!z@4kM_>p)819i@LhN}4>3dij4u&Kvn#ObOo@bly;biz5~-`YBB)N& zbV3xVOw+UPJyRjFuDio*3^;EO?qrG;IpcNp{fjoavdo|nyS!6UbZ2#*m4t_U{k30v z#qP`0Nx>wON>8)+bncWBlFnGXg=Y${mEBo+Ahw%TJ&)&wF z5eczV@rksC7o_FXXSYapqz){VZm6`MUBNoy>psvMUA85h{D|}hb8?s5*XzA553$j< z(WKHj5#o=;Sgs5X_rD-{j zvI(-o!uEzw@nNbSLE*EE-lc?1c5x(`Xb7Rcx9wf~!tU%%Hl~h$mecFWxgD9WU-i~} z8tZdH?31jyW8dSGOBj|HO*AQ z1WCd91er}43cjxDWDyz~lg!rtFS;Rip9%RooYas)<)?SAys+VIFZb0GUL}#b#Ac*> z`c9tHe)2wH=h^2n>I?ouD!eZOt%mCG(oz1Wk)|{nbG3kk#*-Cx=%-Z zsF=6RR&Lx*77$Q3H3>MbxMhh{>)jRlKv+2X*#p+|s!2WkX~@C0SX`lqB3tnnJu-`3 zeNwt=AM3$)_S*zoXN$1Z+in4U_mU?mcH7zLicTob_zsM~ZrL(+4MT|jD{39yTJtdC zaG8KW=BoNes{o$$RfKRf=6ovSd)M~5i++TJ#K6n3+f0zBl-t+if zv72(StBLK29U|(45&3P&A}@w-&1!8UI!>lij zUPd=dc>xi(6FWivp0RZmmtXQ-hauWu{5jKqFYF3%xHd$U+vcR3{V%tMd@^lxso~}6 z2Zz!qagNxKxV|?in#G5^d=J_ez!jJ5<~99Som`HhcUUCH#*)3c7*9(>gRb$A;1 z{>1v=CzGQfebbIRxS7&sl;|`WMt;I=&R`kk^*-U?vy8j zTW2zh$JEsRhnGJKMj{stMlI+ZNlKyh?T)@&hhkH8uSZ`OdlUut(Kq6l-+I>{>28RY z1mih+%Qs0Q{TjFy%e%WQYaMxOHs?}zMTzDH*3cW&WuzQM!SUoq1+p@KB$l;=)LTQ1 zv&K6D|EgW5I^ZuMq3PM5TAV}EjfZR^n6PmTOJdn-+vG8Qo5%9=1uI?UZ2%@;qxOM|+oV`c(f^_hS+%{+=;I;@zu3Qd~S+E55B3 z$(9B`zmHvLo&Dr!?``+PQ7*;C^yPA;X7LktR^{F8X8hd~6CBJtFI#P4+chQqX=v2r z{4*P#gJ7LLr>8Hxz4>9Uabi4%82MHvWHtULZwI7U>?j6v~&vg@|N2$sJybLybCqEW2N$62wm9(Z6Paoj6=vB$)8 zwsBYHzc^anRHa4V(mr%-+=41&!@Hi^xXSlLLcSnnwLlx8_~Pf{Oe4#R?{l&tHN)`x z_Mg)1rN6(F`pE~mAV1u?EbmRBCAzzI;LPbmlA7Hj0ts3s8)aV>+ak`S6m9Iav10G+ zxW%vO>C!#KLTyu(#X(&;xtJ zJ)#q9J8K6%$LO7E8@%zV|3Gk_-icnpw}%Zq_uLOWv0S@*K8kVs&G*!^yG`U1n?LQ) zf7^b%boI~$pG(G_Z$EX5s3VNi%e5Vt&+LPnp)=d)Za?2b?UlQnfr9YrLyBZ>KA01B zCby^kRC8m@C9(+OK|-DZv!287_m|sD1bU;n)2r}pAxe3xfzdz+B zm{{}phfrSOT)Y!8EHsnJ9KM~zd-SLn#M2S=DoiaGf6Hd_qYHbt2yI(TkcBo^o^@Yc zoNSzam{16bg!@V0G%kdxdH#A$)~l=zZcq_G!l|G32XiuTI>jfDxM^0L!c{&}hly)oS) zskdWpmU`EG`YjoH=E(`)B7T6v=VRan9-aczN#2&Tp9#x6rh^>7u<|cQY|z z5RG5lc6d&O(>1)m+lqW(!GYNkqB6^<=bbQ8Xj0gnh^`%7zVG*v5mH5nEE%3aP9M5? zb9J9mDL4TJS{ho$`@)YIVvbst$-qwZ<_Z1W&5bq^a4G4QIg|O5_pUyOD+gZ?QX2tYgP1%KK0H9=HCNUkK?kR5i-nT1;4b9D!0Qf+SVh$<)`wt(c zOMO__cl7~4fQOE0dEa71qak1biv*YhqXGXgLIVTy0s#=vh?9xwylwLO)2`!%pCA_% zgQ^@8^gcn#0Iv`n0(s}%AAzf|;T>GN&FPoFe)_cma>ot7+ALyUgTDQt`2^NwJ=&Hq z{ihlfML`CfUyx52X`no<=&|1$P?dW64B2QQYk+dWyS2#mB;!fj@&U)Gg`qgEOPjC{ zSBHaEec>;5gmnO&`i9+yV2ptr9?u<;d?7GFAo}1wemwbYc0k5I&x&8a4v)2@4K3WL z-%s^O+_8TC4L>{UO`+jqE?cl_v)TdmDU&g4WE@tA`K6 z^B#xbg`?SvbJ_{Dy=m{plXI667r8F?Lk?i?D|@}Kv4%$KnU^)n_E6htco&`tIDun-TbdFVq( zXbd`ajrwUl-+SaRMDiU>XJ~BGhS8!QJ%iWMt1>kSohEUjr;l-y9*|O=sq-eHmk)iA zPP%S2#CsuM=k|-rMd?LDAs&w@au_(J9tu7|@oY`*m#_7G(8%ChoP(tTV_Gwc7ms2W z{E53^(UpITNy`2yDqBKA!maP`?AMf@rMZ!IgC?AYaLqmE*HeTB20eUe!-Bs^FH`L| zJxj^0s=W^UQWM>=%!N@934%74Z86f?pVUc?HdoN{onN$)$??){^&z#5GupBImdw~0fwLTI@D zHgXF65Kj4crL3fI_BgQ*AVK=x+4-V#2rK1k%vFe0aF^pCHm+?Kg^&D4YooFl(;%Fa z<$Y6TQ>URlt*K*$PYw(ps52o1qwI=LFAjvmkQhBW{g=+hyZd4Oq6F;Yme;M>p~Pfy zTid3eUi=N~11?IVrW$SZFKHBBVhkz7!+m$8YK$u3rW?kV#jq$3#OKDT69u0HIo zadJ~qE|5pUgzO;k7`=Ne=%IAgjZ8{7kfD`BA z$H)tYdQ~4BD+tht>}nN@Fr?l{qbzM~07vbaTLSv!B{T`zyq7Y;%W?kQdRkMne)mEr z5A)b;@~fn$ap9VnCXMs*7Y1-+wP2pEmpM5?=fo4tG!ODzHchIJ&~R`NjJXh;8_m z)zD{of@G0DKM%|?y0bTDp@e&8JPRdJe5H`=oZeDAaASxi>6|l@Wjz7_9ZdBqm}Kf^S$5&zzGoxKvH{PU7;%g;!TMA?%$MB? zs09!@Hfc|HDB*$h!3K6uyY{C5C8$bw;AZfvO4G5%%z3cHkYR%FfcJr}JXJE-PT_8A z6@3nb@`tkQXk$xLQ^ikrSHaz(kc_?@!}a+P-e|N1^3!2Ex`v-Nber0WfBQklM&K8A zF8sz>0!E*4C%}z-L|Qt=QsnHVO9x(l&x!m6L5kN7Qj$T3p$(y;#yNU;FK7+Wf3bM{ zJwgPBgug34eg95x7l=N@o&VBu?T?9Y)t+d zS@`k!<){LsO{?1s6CNy&OBZl;GG{m8WnD|#aCm(c@+7D6_C20)`GP9JomGVo4b%gbkmOV>*QQg$5IO*#)@? 
z+&kgThQs>F!R^m+<$VDjl}?r^;l4)&4^*~qVP^H*8t3c&w>G|Lnb z@6g87Il|lx=TPKy3->tl)m+CV(<%D%0trlDx<1)rWKYTl%VH=CIiE^wXE<_()OyUq?Peis<#PTpUC5ayw%9%ERH) zhQCDvR6-?@ElEcTg4R7O}B`NmjTe zg*{i@kk#YkNiBWL#^*Q5ZX@UlnN`ek+D%$RYEpTa^G^4~m?25D@3`0c{IZBMA$=&& zYL^Z;?hB|2;r~$HB`BmDO^H0z*!T!JS<}Ma{wc(6gnx&%VLQa<)$z*O+P|F(Zta)R ze{@xg2J!V|(%li3;i`!_9N}Kj;Lg1Qe(+-k7rhaWf8_~2J?=`?-FQlEG zXAjk7+4r0wkcNZx(VCiedteaSb>O%?fnt=iP#mCS_Uv^)=&OtY>hMbtFaS2Cz;jp5 z*O2vMIYl`jrd@tuB!F4j*M4^1fzbo1y-Y?5X9BQUd|ez?9RgDL;RBt5Rs_(J^)fSj zu-jcNaNgI_C$&1?Cv~Dv5YkRL#E&qFr0Ra$1WOsbEd+fJPN^I9cAK|SF-v`cfS3R- zf;;g-)-#qcxC7BMC+iXinI<4iH-KIeL(m91sMD`cM6eC3%3w~5LE`)O&sOhy9J^X5 zFLz-xmH`2pL%>4(Dg3?MRw{e$E0vsnOI*m~h*T%;J$OJKU~00^SCiJ(zFcH28gSsa z`9yJ%exb|Mhx%DKZRShoJWtu!@Vfr{T`I8qqB;kBeb>G|Hy)-yI7aX@VGU-b4z$N_ z06v3QY;&FybC5Ci)->_D70)hP24^6yTA7cN|oLw1Ht z#^?;x(?PW^kHP)e`$Q30_e=NyV1rZ>3*!)su;-%f6IEw}&K+|~aE>u4XwAE|4Dso| zdrHfzLqO@snzFU+IW|0eN7=a~eX@J90f3yGudH$qJ0x}pfHOUp-YG6$dA3x;+*q(P zAG|ya9WsvjM-!fFOXw*e+yI#s+b`3t%@Gl@f`S4*K3GE`!c)jHhGJchiD$13dHx$( z9-i=c2zZ-b_aYtE4>$L9kGsMHe-jjFw)br|9v0??X$J3>rgFGN3H8{@CyQ zQR`ek=bvLLTY`BnrS&g)PP)TG3L!T`&I?yo5C{lrgOSm_b7Bw6#y^>7PeIz;dsFvo zc1imhEs~d>9~G)nIU_s}YA+O&tVupUcKIwHT_4xl-~F(kMozU-0>Y&zLamT(1(#hY z)_G`Xj2v)DVPxKq`tA$fGM(50-@rdZhH}2>4YEDlKsfLTP%Cm=DrT_=w^)G3pFf|5 zAoPm+plBG||L?D6T7!bB; z=F>^LQh}AH?tWF;qAvC&%y#(x$WH>Tq6ju)jsICZBP1&FCvO(7F51L;EL!1-6F&bo z0+};1GVfO0Z}YXb_8NLpFQgnoE@xW=hsRaHMJK0c!^oWLv!6=?Ipt%R%lf7W-W%a{ z!r}tD#KMh(-A>4@sRN(J#nmIN=RSHYA^UA;d;1mS z5kx@%<`;a&o;~x#E2zKW9_TbwAB=2?-9-8%2M0$a-G=Wi9ZCS@iHeF63fzRxtk~|l z?~FD&nb)5V8hasj$K;}G?&r^EaE3uNL4Y^smt~$G*VAxN5v&Sg;i3!B0)+*Kt`Kf+ zECzyYggr02LYC!V4hS`TO3=T6nY@NO_#AgRc#(w39g0?;Iwq4o?1rdIgjG{#X zQ~N^lHdTbnKDYpglX-1*ZE0cwB*=N#gv>sBQTzv`Yb&^Y92v$nz?Sjt9FRm!PtS4{ zKlud(@C=tH2s*oG2B3BV)Q@6O3tn_aef}WMw;lis@bgtQw7k&4;D-(FEYJebLijoW z&P7{9NLUD*8^9efCr~%jacjNw91}cVnldX7arA=chSTeM>%04a3{oAEc$mOV+>L}6 z7a63@Rv+7~<9-*P0Qf~1V&I67?vie7*0tJ3A|V8MKprWdn9KI)y6Z7tCHo3H4^+4x z-@hL&UEBOcfYKy0f|8Zc1oWn3yfIaZzdw0|ojFMLmuEPK;g(>Bo zZ)c~zNOUD~b5KK^ru$VkB!#K7;cNuZm0;}PRl|PD1vmHD28@~Rw%9s@n-Yrscx(;S z75xJO2n^HO-%!tlSMRqFvo|NUrGA?FT(RqSS8g~`SyaS&-_{qi!fWt(*Ky)1Au?4I zbQx%$-MW3tG>Q`;;4ctCP$fm(JrZE9{o5(T9D28q60%@}LbVr%Ty@k&g7u0#;O4E{ zQ+s4rx4{`BhkyvCNK@3}ac(7d$i7DDZ6u6*5ad|6DoDGu@sVrI3;wv-!?c1@jBV~x zVEWtwY38Q$EdnEtV9}OFv`$c6JCr*t{!F1QrffMdaRr|EMT@D6kOW<90wL z7S%HnX}aUKrz#>k{DX88)T@{mJ9KEag6}onRp6(Xh`&{4c~}nhMH0zzc2pal185lu z zK0J@=(I=){yO*SKe1V>WG!dJ-qW|&an+wUly=Q6?y6QQ3x zioOUI_6AfSnFH5(vl?l;KI@+Wja^?NlOpw_;!+L(^T|ax^>Ph>y^JxsUQm%r;qAbUuAL<+wK` zH{;~x&A%F3l8JMNMB*&i$6jY=!Xbz8ohfx2fn&cXDsb!9r~76WTQ`6QhpSB-2V~X; z)81R0bqy)B94qeJx=a3wiPb0Wf!K?8m1xzT;>5-*1*OI?MQObDrwbB-MDlGyB7|PZ zdYGs*^X4NXm=v@*iyB!{$M7>shKAk4e*(AHpa+%Wd;_IW%SOJp3Eb$7ez5DVTg~{F z*d~$?%w2j;K-?Dzybyhnhhe7f6BLugFbWw7;#sF(C@I;b%Z-LA(aOEhM z2xfG`Y`7mcw)904-4g;$aQ_p(S3&TDQX?TFmI=YWqSyja<=01>)jquJjyXJ| z5$H-Xm`QsuwNxRZ4uwNCEdE7BS9dO~k9_oHzpNytf`ZldElFnP7drJ#D%(ZW)%r@s zTl|dHG`!y8j&sCn>|kz;AgySQh#wT}TZ=E~s=r|pZ%%l)=L))Czk4@%fAKe?_i5PG z(O-c*Ufq12h1IX#UW9>zW@q)||B4nbzRh=UEED+NcUS0=UZ->)zbUh7!faGk-^-65 z7w#9G@T2iU+NybHJjW`EXrKUIOu&F>-xb&{C0TLK)MkW7)E0M{GKe#JoUB{A7?a%C zc#+^@-P*6%Or0mwW!f(e@}~ukPTYrx8RBf$o(&ET@_5FCN}P;9?+$fHh=F~|j_?LE z3pH5Mlyt{k&m^QZ+Xc1FD%Ui)6T_DS3b+LOWP>lOl?mtqdq38k7Hz+_ury+RdfF_I zKeK-coL7O{0wdosLfPjZ1?pmmf|!TSV{o+RGLfK7XCV!#uZx z=jc(`&_I=KxV7TF!fsGDTdz*Tq$ZUCBoc_W45&RNSrG$1Pz%3sUjIRoRUe(Us=CVS z#3HY_wdEm^6Qa}Q`w4@4s3@!A`0@0h)i&LhmBk_Ux>DU1RPrh%l=9QPRI=U~;;QLCXV2i{CkjGU0qvho=v#jP-a3c5loRR zr!0&)N_hJgh2Lt_%p5*m#oze7qeF60n#4^*dpV86)zuZj`}V1S0rCkf5G?u1R40J8&qR#|FBg9n 
zH}msjS8IT^Xv?)@fxsM&N&;C>U5#Hy2aC%Lmx4J+4a=-a^L%d%%- zVFdGr0KWj*1MUIM0on@)wh=m@*Q+VT%!QnO!eg!g_^?nT0cC{oUS3{ExzIh};W_ql z^h{Gk#)$V=dAUW%?lK~HYkI45;o?Pk%Z$jFc8OMW@a!EocElIKvXx~ctMkBa(~{s3*;n(6GU2OGDQ!6b=lAiL1{;< z9l6k|w%S|2_5i)NVr0R*ms0x)m<<{;l$2DD?`FWJ2{`VnyodNOed@Cq)l?2vo9_Qu zF(}SqA}F$WZ&avel4|iY0vH_N?DoNHJa)W)$pO2maC({-NeFsH$(nJMNp0NRd`7E< z3opc7y+gS)rvWk$5PSo)X!Ezrh4Z}uNSY@{{%QJE^%2P~qzVY!Q20*FO-7;ZKftVU zb{i4ZU4C!B4?Bk7CK0b-I5QFTnCU-up3x|#5Tp;QL$^zId+MXf5p3D-!SaNNq?iTK zoD2Ccyy{Z^aNqT*f2y?g#cL)NE5N5=I11*mAi;zzASojw2(cwWwI`5704HF5qjM|F z99@c;I|LvYM}ZYN2^{XSfAt^WTK$)$j=g zLIu?%a8+I6{>h+_%ya+@Ac6xWa-hhfl{F_Zi zvEns_*R+K#O(^%&A%-KKSC4YU$!@n$w1rPxZ#CLo_aeijs_qcV$zY9~va;eq+_|<8 zx`x{AZgkA>-I4R4hynTP)hB?(;dzyCJf-eCgr4s;oHp+nt|vp zZ(ziZ%kBX<2!94X+gI6%GajCW74N{n6rg$5PPu-^Ibvy^F{!@A&Tu5XDzst+odrpR zY#^3rw{gB$8oeooIa+^j&b{*}yX%%%gjzgjTL>2X&pShhn$z^EE@((3D=F0hw!{_GEYYALr1}&@!kp0NGASPp^y~8~}oj(2KZCS97%$-Vug8=i=2A z6JS))gy_zSeUxPV>CXT`&j%TyWN|iwc++5mnORs6{1_bWXyg89Z}$gQV5xge2GJ<^ zVq`S@+i%%qTAd*Fh7yZM`Aa#S_&>9EBccBS{R1X?Vyp?va57q&2v~88f4sRs4NQv^ zrg6Aoe$wB$StkH1&r9)0<<5#TM^DkOEeCjVQv0!I7$bA#PvdWFYDUF{0ar6|s?>kA zC|%x46Rx|_RmYjmeZ`%a=7ei_w4hXg617TTb}cY)47WUiSQTLy01t$qC4?K?^+TDc z7@|Bgt_2Y*+AIiiR-W`Yqk#`8g+P{^ z6D!A}xt!^?b@!9q5#9tG9%(Ca)F5ljhS&p;7B!)5OTDiH2oftuW5pGjrs$ zx+NacDDtCvA(VEVfq?vEz>QEoDD&B~Kd7o2;RR@9 zz-#Dc-P}QxQW&#Z@mSL7*E=jIv)V#pSjdqLMjZb9PEraqKhD}_< zYUaE0-WREV=Isdsw8$PmfLAk&@`}u18U+=)=gF2H09DjGw@u4gdgu@S*&5CK-uRQ!b12b3SF#DoB zR@xP-fFY-_02>iKG zcy6g}c7d40g9;F_+>yQ`kRiy*z$qd~C91Rof6qeuQip&F|Gp;gz6l?lYI;({@a!Ra zCS`*boIz-;3|meZK{G}?cT09+Oy8eFe~4d$sbcGePSDg9cMGMEh}s!MHt%+JjYuR} z{U}NHZ0w`ny8ph}SEZ{h3$=Eha%dPQ8N(PD{^F0O)NyJ*2wW=qyEnGCcg&+T60)=z zW!S0+ewzLDW8mAPj+YoHI3=l|CFZ%hJpAtgC&D`LOyn@Y!%?9Ebo6tgZ{y>vuKZah z>ZL&P%-uvI50eqNcsq{vxBU2VChFu99QSWi;@#||1)xfH_F3^@@m`P1PUG_X@ugX> zZ$K*G4d#*IluaZJbz?mp-unwtObRKAGK4@e0+*@??FUej8JeB|_1(xaf^VHT&YPQ^ zZ|Ie6T#}q9EFz|YTrk|{ZT4391n`_$AV zA>JOH_j4Qeqall}p7}$Wbsy50q53#YW+hKgSyVSwVga}nyE(rXGr@04v}HOeIX-U_ zQMb0XhU()}-xNFVuOR&xOrd#Wm!ipRfgS2-h@FL9ykW8}upT=O!^3t2#r0c21Kt54N-t%xGkkPx%mLjC6cy@S z<4-2PoVepBJ2y8sGcbN!NqT(oTAk@u;|(z}G010}E~-l3!y)jby}9;-;B=ntmmclI zpzCc5u8ig>Uaj~0CBZ}$bd){9^pCz_npzU%R)SN$ zM`h}J=MSlm2ab;Wm(Tfk{DBhX=S11Md4o+ciIx-6j59rZap>BcnS70^@EUU7{62c? z+{s<@T=_wgZI!hYjN>L^rm{f6TEt5E_~{-H_Gi8$X~Dj)l@%TJ0zrL`Z?_@#!l?#kKnQm0<#S%r{l*Y zw+^;lhkm}9vZm-9pF&5>Z1=_~BLQ{ZM-`YWn%l{c+~RuT$fXoGRIj9i`0f`1R*t zZ$s$zI%?5PigY%r5W>+s!tBHs$GGsSt53O`HRJ`KHeemTpxtCWBATH6s@B|ecC?9T zg}c_<60Puy7hy&Lk*9QMrt6)w0~`DVAN8vrc>39dZSPfTc2j<#3jG;ieh$(a0Kn&` z1Guh$3LJJk6yxXtAf80?hZsax5pV$AItw)Nwmd+)pF#1`2+Q-1e|jfa ztWa-4`a#5hNYz@=smeFry^{$1=9V@R+Hk-Z0hPy8sKAat#Gwzq7vv|z^cbupTuq(F z-e#?KiaUV}xEoO^ZV3e2JA6L>?kWhcORrmUw`SHQk%W+=Kj*u==@V#|g!t3BKw8|( z5U6K#HJ~mhCXu{yIp3OpizP{8f)Jd!$0Vd|!uk!=!fU_uY|GYu*rk5cNEaDMa%x&!vYIgRYK2KtN1($h&HL z%&_Y?&oxyQ}f#fWP62a|Ws@0XH z%Mh$M4c5H`-9Mz48s$&jT#rLDL$yPwe-LNz$)7HS^c*dc9ZH{1-sIU!M;5_KFovK) zA9^i?H3QxHbb`-O!N@1xHn1_?Zo_2A0;=5n0^@6K$psgeBbME1H|Y!)`?L<wE~jdE7?cZgSwY z0&B@H{R9yo5e}i3gS+jpG+T^ta{VOH*3*cUP?-(0CJJ4L>EjCvUm%KgLdZ5)8(oEB z6TDt77mAo9Jsy|dKTScgt#x5RgH(5E3Z>vlf^!Weipqxh-5mcDOIr)|$*K?|mnG~! 
zUIo#vI&GB#YY+pq3D6S z0J<~FGsjmWQMqgHXjhp@B32Ee{o%0*6ak;&PeZ(kq6EGtk&+@Jfez=L-GmJE)DVt6 zdmc7@z;o(A;lf}vsJ)AUSBj-cIC-E-C)zJTvJ3R+Rgfkv1|1ya7w`w@w z>!c6f(8&=xWmF0AOu@7d35!&)HL%U1lVhhQbmR~Nk+C0N2ZSRQ{cM{{Ml`NxS5$#6 zG|Dk@WD(NwON6e%b{Mq8(MpB`f0JPnI{gwZj)ykF0932r=iX}J;ufnj!XkrImM zBX%(lfqZ#GLWWyvn{Aq1ZMB?)v1gcGgz2i#guy~Y93PlS$awNPj3fj%8r3FcRaHXJ zjf86dIungR+XAYBum|~u{>Qsp@BQh>%{J=UBqS_Mj15Ivvc;=ZYnF}ZBO>AhOgZ`e zdzxB8b_l?5Ik3RwE;%VqD=t&KM3jU)7QPw+G=ZMQJ=CDo$o*&?Lf(UnAL&V@h#J&M zAPl(WuUx>pLpz7c2UVVR158E81i`7}cso7OR>0LJ{%b6wgd#N|2whw^EQ#us2wnb8 zFatN9(sN~mcn&WVgH%8;4F5I32t@+e-%&{%;mhmRdo@}=T`m>OdANV_@{^@njPS<6 ze~6BF4!vi8FcQXPLz>skM|<=0;^D{%H~R!9ly zjoW0;Hu=vw*XD3y;n;)G1I1yYs4vC*f92!7Dj2wed1}YAS59Im)IFP~zx(g}?Rgh= zsf6$d2@CP?@EfBVRXv9AbmN#4%l0kM*(cUHne5#_FRCk?r4VmS4|`$%YFN*kW%*nm zJWXU>p-aJI^R)@wXR~&{T8>`7>fR8&H})&{UMIb(*}ZtO@k^T+3?LY60DlaLL^5;D z!=x-@`jb-5sgY6&S|O(sNF*bmHw$|*TegLxqL!YX&PYLB-dZUc=NIWJot_OTFsi1w z@J(gxQf*vTCb;s~g8K{PBO>}d)=#}NojV|6R?(utTO;)3s*cMgBk6IcHI{!n+y1hn zQwyBa-9ep1h=Xttpad=wt$68D_zOLUdUl;)j79J0`#@UyTEe?t_+ljt6A2qak)=3Y zW><-ywY>(QJCYWNN1X58`d@d*jm)>-q3>wn3`r=L8j*9*Rv#aA^B z919=wAUG(#J`qXQxH9l*(Q@-YdjG!$U?8jn{f-~sN&Y_Xr-#4p5R|vcWIqzARiyg! zI$^Xi#P?PR5*n%{`;DB{Rq(P2D!j6NO#H5-DEzch6z6&a4^+3--$Ig*(VI-uC}d7A z({4>JyHm1f{h#|*2t3LErO@2Uw8$}`{jINy_XV$x|JRG;_LZ3!V zwGczWA!6D@c!BWv7%&AgpkjRPB@v1C?ju~NA3Kj>^cwxnpA&M3g{QU$>-;ws1sS>K z18|1*kHJaMb8h}huq=MYgqm7)Y4iOJ&OV2t)vc@61KQCqF%|5PX`pMkcRTxx;I%h+ zD!$cc-%m5r(|=0Bz`Z}~4-dt4Kx8xlD2BKOBYyl7l)oQmzLT`@EB8P_sHq#hL3fcz z!^C^n*{(VD`9Sq=^JDq8Mdtsr%n}4H_83$f;Qx~}s)kK?nBi>X)Dbak<06tr0=ymVduU%akO;ckc<h-Y9Thb{Z)1~vL7BEMc++hAV7?566 zFB?=-g}|isiBaaiTO5g7e53#4E`Ukj5O~cVy3w!Fc>NXAleF9ee74E>$9-Qzi!VFx z4=amcIj5*;)4pEe;zb6APePo}0%T^*6`n%HKScjMZrMPS;rHLT!S3(Z#D3pgEG#eR z7%lATdo?VqpqXXV*29!$<3l4>ye+iGvAK;*6RkPeC$EHk?ZL-MF1J9;^gSZTJ~;a1 z*=F{F!LM}@yN(=t-SJ!1$CX9%#vfTnjCl-l#(1h> zFlBxvm_?~J$uaxP7E~RcH--(xg*PPN=r>Tu5j9rPN`G9G*e{t#-}CHqo_AU69MEprn;S^sl%^G6w zeMwQ%PV%zl>yb5Ydzg;rU3HPET;0oLU;XUS&HEJT`0brzIo5~qba(W>HovUA;d!N( zI>VJ$?KPeqarZDFJ2ix(#7b;ioH@&4XY|+1yn6HHZc!We)o&hO8uyL^Oi=sQw8c~P zUy7?Z8y6a9Tu0|0$_jw+Y3A0}t}@T80iI~+W$0k;TrwMvT&cAym&(K{U)?PZc*QHS zbtGrfW$CLzs)>i#@d~;(k0}lv`xR3(IUjsb+gNM!@QbO5M>n@fr6C~!Q%jVu^ z`%t~05@&E)YLk92S{%<0si`4{Rsev5THXO|iRe_<>t{_Q-iAWvvfB5Q=y6ec zNH0)aHR&*1=2)=_tr5E6eN6B+24VNvlbQ=!$dfo{@F(mc7`1XkN|WJA%k7H$Ln0XY z)(2&!1;$9&&fDJkx9N@&tsCN^$t*pLnVPGkE(=7+b6B_k+Lm|RblskSn&8=6OxtbV zNAF@eloM>(@+cLv^h~n$nFHIgo#l_YB2S6nXVmUXf>eR{cw269~$zLXa18pyx1 zN{wZiO?0XBfsWeGm(rOJ^-OP=?HQ3$;(TE3ZLfcemPh_X>WnU0rK;o{Ys$R-qD)QS zR}0G6%ZLHBVlf>dnPa#esHOS?YLXyqS_@uP_27Yj5RVBbau&G$MO-IC+XkQQpXq@# zQgn+d9h8Z0lX9Pu5Po)QgO9z(r@a-Ts%t9V7K)?=baqXed#hG*F>R&~+P*RSV{m!s z$^GK%BKOg>Br^X;jrEXd7 zH&;CbHmp^!zU`QtUH|GY<7VIRA@(*e%Kh(@iM^NM&g@@o5>>-8i}tqPPod@J#^heh zYc5Wo{Kr2$@TLja~_Dtdlf;? zlhcRQ)>0Su30cn{+`yY-`TI^?k=RymooW7cD`EPva_x6Q%n}#5tRqziifUUA?OZD9 zs-5hzDi#+pi2d?mytd;|@BcJF7}`U*Qx&W$!NAS1wJXXabhdcoXzJw64V+sm3;WuY z_Em{Dj(U#Eo;ovRd-TT*Mn|1S~w-=9MTX_o#)L*PgL%F*lo8Djsv*nZW2 zMasW_)w_fHf0Y$^fyqsIUE^#2_sSGj$my6i1SUU9cNW_Sl|K}Wj}CrZ_PmOJ_cr|h z{WzUF;R$spLhrI4GZv=i1=SqSb)36M_|}NTM0IF!JB>fZT?0G_vp=9)S7qn&P&due?FN+ZHU=ee;I&90;Yz+m{Ib$+$on+_DNiqTE z2t2$REA#i8=~5iEk1|myzp@V?5)dGh$!|~0hcK`SQbjm`VDgY(>f`wH=Zpa6qH~wt zJ%+mI3NFzdrxVE|Z#sF&Yn}fjxYfbIVXlXR8de`C?l|0XhyWXr5H8@*AW6M>xI=KN=jrByZ?y7Zbd0uyhJo8(aFs2>FH$B4VV=; zKxHQ9cEaItM0b=tZHna0F}Vw>bh0sZejOwg6&OZ`%DuP==pA?sUPV*qI9B#IUUMA? 
zx%8$>1MtBOshp0{zOt`SZcceoDO7PU%*C$!e$;y0G8IF@0f7Fy*c{7k9Fy-^Y@r$B z`1S)q>je43{2ry^@L51FgqKWmU!VbiP4VA#fK2*w5IM2!b#vu70@r^0H* ziRrgIn^tn zs`mYfTI`Lw;*Oggl&Rk~&Pl!waj#V>uYOxyBXr`va0J(-+_xs?Z?;Y?_We~qkjEx% z|9fgIL!f51AwhER)2GS#c!7N?UtF{Msm^bW1XtnN#h}Kh_+~D577t@%<8;dz!xQ0L z=p}RjDkFD}ho3(rF)^``;oQKai-hNr1a2ZC686;z0~1@fVO@A^B(Mg=3Iy&$K-(T68QsGT#R=Z-+rdzcL8i;ZiXVV=0O3_)ov zL>|@u)t>(bVFsv7ld)a!UHeQhcD@6gZ)Yb6;JEn33MlOvH!paJ39H$rTDw#$#5A=UL{nDibzIAZQ8qE)*JYsNwTT`{rg;d z1y|{YU%h&rU^E1?>+H*#zC@R~dm8yAP6 zvQHqH?c)hKudlC)nI*dnk6@-M5NQXB$E;A1op41(MS6e7$jlc|{qW|w*v{MnVU=Ms zoFkMYvr^U9S@cJplan*-%ASnYk0=jZ7ZVDIehBdKx^?U5=;&_OnsLG}0F}&r?)?f1 z3MAXaDpxk5$>(n4f)~s-h}OZ!xS+`UD6j_@Dy*+fO--r^QoBNH$fz&~b@$`*)|vOH*V~AOfHk-%_&iyqlm8ub^j=KYz*5}?mPV1l6 z1F2&dOJHgIGcBH!3R|6cw|ifFbwmEoYj<4uviGad6E$Yyt#M;W&@-1^__hyzkf?_U z8cy@^I=kzl`WF|Sq}ZH0JvR~;u`i<`p}*SPOBfiQSf5oMKG!!o>BhvwdMu-re&^1V zH_VH;lvw_~eSI-9uEKcnr|{?BzP$=k2$#VwXTrSqJ6vvF*|IK4JTN&-mOGAP`Ij<2 zpot*UrcIlUr8PtByJ_=g6RZ=}Xu;Jg;ZO}jlQ=OZJ^rV+P5?)%m+n+9Wq9Rbw!5z~ zyQdFUD3?kv>LN0*7*|^k>qFa9v@z9`MBo9jbHZD;CMf|>9S3hsOC$8Pe@ zhlcPj%NzgVgA&r?e5xv8^r8$s$);nRqk|V_P(N%|9k>hgd7kyKLJVrM{APw#-hPR- zhl7dX!?%(gYz>(8g2%4MxqO)|X>5)mu3oIy!02q>&Ec5j`Z3-qH z(QjH;Aai_fi;W7lHzA=1$Zo=ZQv;^&WA2qY!C%{c%`611*tqZx`}Usn*tj^%+Xx%R zlP6qaw(Y7O9?Mq*$t5flL(rVQ-bOUHlUH>^osWy`wQ{fn$5;GxCH;TtiFah*c ze*W{?=hJSoFm#Z;55@U;EiJL+>y_)XdFvRjMX0Bl!s0;`x96{yQebqfTpch$1G4K9* z{Zf0O{eJLk7}Nb({&Q$u|GS|Z!XDpmB)SbB0;Z#iQx#hqDw7Bhyg@fIAUL@H zfgguv)+k<;I6$Gq-;qo)6Si+lFD9l}5Jx2|-$zQQ6^r1^!~grQ3S?#&R{$(uX1d&a z1#g$+xMGVW^VUe|3~xd%FPJrTXyX?Dj_-$V^5;)8cIx*&>=h2PUC!sVwUrIMDE{ti zQCAVw^ph$sXv4DxPFKv4jSV z7Bcw_sg#UNJnY^$k6!BVUeLPmrMtKeR|J)a4H+OXoyDpMLY5}8)IM9_=*|W*wpYy?c28x6Ywe&+R2y)~+ay{+7%O2o1LAOARI~qVKBc-1|#kKOhC; znpm9GRpfskW!~m$Iu$KzAx1MvgJTWiRf(IwnRo&<1SKRSdcWs#rLRJHyzA-SIRf8%sW_GZn}-R@}9Gc6#Kaew&V9R_!%qJCOyFzR|oK@J!=?4VFE9^mh0oO;nV1er-1YDP4ChcgZX_mQ!q zuD#EEG!O)CE<&u1)(%Zz2E^J z!O5L9G~2gxb4%UxnOhffO1Y&-n3MBvbTre&xy3==V5JkDo^rHu6MLbSsx}fltAFm( z!pulJMt%DSTSAl$9n6_1z5{#ru7YG3%7_3I2l zi`5Iy3mPmZk#F+a2#nIabm`1X$!LH7O=T-pZMtRdl1Q*Cp{^`i%`13mtzb7JiDI(O zluA^edkX(i#q9#N~`J zn23Tf1oTTsb5O^qQg&NP^N14bIODAdZna)4R%09x>tej^zOfN~TY1R@BSgck5=}ip zTd8-CVncm?J~cc%{(AS$}{wxB4POedjQ6KgNCh7G=MEp*e>F+-ZTwvta zIR*C4pgQQvhvdz+)c2L>e@TWM6mP83Zd-Ac(Um|a`eUqk49BF<@bOt}nI(T0tf6*4 z&etZ&N{z-8H+VEy@7}%Z9~88omv?J|x?igmx!kywr~-kFyfOP~o}gZX@SORT*)n3B z-?*Lq5w_E?FgkAa=nseug4F*cdj5{)IuxHosr!`Y49ZDX*W#pGCXi?=WXSAlr zNu%ev_O0+v0mZX}F)mctgcUUsdM}d2COuXNQMVbyxGqEeQQuw~uf^gAMdB23VsD`? zHasXdh^fp>7b2Cuo5k2q6~npkA@RK6M@Owoyx0rRWgp0H3#u9IP?gnmzuVi}yW^n} zexYyjGj>EQ6m)6(xw)0$Yu=Tdj!xYVq|b$sfTXM3xwN$Y5U5%Gj0C z^udQ_=LhBlU*1|vvu%Sx+_^fu=pRf>O=z}kfrIVD_2y4|L{^gkO#}+yx&ULlXOL~t zHg=gv=ri?;!WYMjlb$_0h>wE9L{Q?#RQxGb<_FSyrKDmzv^w7}&r&qtsK6I4S?n$* z#6JrBz8g}?T#TYIs$@HS)gnslbz@`Zq&P`D+A(J? 
zw{w82?rWA|4VEh`Xp-Z3#-e*v)K$n;{QmWD=hFH$cYfGz>Jcb>iV_p$vIC`bmdsxz z(;gw`A^l#z!*lyI&!xN9aUdhRl45*qOy{VkA!NumX!i|ec`nG^>0 z;<$*GyZ$D2-S9^Fr8`!aBe_WE7skos$j1<2fR}}{iMMo1Tyh#6#&3!U3Lb;17n@6e z_pPyhvNTFt$2GOJ{gMxwJ+YX82uBb#AI8@n5;#(0l4G=4@IlOjTGLQjB>*&DE`MpY zAO1L{$$&Hko!9PrxFc$#WIkGHX7!-D1xU49Ko8Be@$$k91n%^^cfa2sD)Y`XR89$% zCh|j|;|!0ECb|+-flFT0L23$2RH&vuDVX0qaaZNEDyO(O<825g5Uw{^XP(v8H|c`y zZ2IpEQOJF1{(>DDCf@`ge+HuGrvV2L+m64#|Bt?W4hUsVV|#R_IEh16g>zzcXw3=e za!FQejELW@u_C{1EaIf9Kj)FkjGy7T(U6&3hdu5QVm8jFsS_)ClMxpV{*4?iVM4m5 zb6w+J$|_d_zTd<8D*N*=`=wpQ@jrk5Ac>4@O2nQ5X-tNez`nWl;v$tI71pB6_mzM; z-@`6oVq(ItJKyVoTa6^G2QjmLzAW=+t~W%>c|$h+rKw2)y%o?~6yt^=^xk)H_ODsH zwl!Ejs-r*erM3HkKG^my_WX7zf)MEV*SXAbsf6umb&|;f;apIL<1aa(S0vEj;lnNC zYuBtHNGVfP8akU2Go&}-$%aJWa{R*8*kfJ z0SU4F^astGzdIkB*S1}Cva*a<`l>(OU7M@w=qN%4UZeT`6ek|)HTPBCNH|t!YLXcc zhqBL|ndxaV&zY#J@F*g#$pqO|c{D_mqJVX^LL?$WS)h zHBQb)92M5`1_T^zmuz<*%`hYxN{F!xKOa87&}Yaa#vh5o@c8lL?qhwP;uqHgV7O{& zNs`UrWyOLZkqFpyyjIe@%)dNQ0o3I7=GL597Y}v=x|G@vb9X1LGA;DoS<9)*(;Krgw_>*=1w-E)|@8mA%Qk;w_HY-pOp)5Tz8da3_gd2aqv5yRNVh zqL#P7CI`fZWS!n!<1M%`jz(-_O4~?lPJvz)F5x-KS&b7OYtc)>ykJLmA0!lEZ^6B? zy5(2gwjhrS)Q@j{el+TaaqD(WvkM+&C1mg(nXJKA3a-D|_}HJm`;IJ5@F29Y!kCY})kWp|-(eNP$u4Te-?a)K-C3 zMsfYSqdfG{qrFW?BPfzf>P|tqN zWX=?VpMd%(3e+?*L;SxVGqNXEhG!9M0z?zI-h1pNuETK} zIcAgeda;*W<{w5O%b}sOlS)VrDwE%k@AW8&R(>fAd_byt!behS=;#oCbYGV<1s&;T?~2=9 zle5CNJ+)h~qxV7qrk6FGO70^h$4g_gkXZdiBZ@I|Ne_wUVam%eS?Zi{@a zy@iMle|??uK7!&BUNklE13|P7v ztf^Os#obuvbpfynfNr=bzS86IT8DRf=I=cHOR39NoLV#iFzI7lwFfcFT(`!@@OAEX z^-GYxnI0GWaMwe`qeLL@2R8uzKqLr3y#OUK77M^C|9}8Zz6EfAOMSFdO|@g#n{&Qr zq60r`XxR9;(vRr&;m;1LCbtGC{~$cgI$2tfb+-J?DB8TT`^&(L8@?@$4G;xQBhxyU zqZ$sK*VFr$?KNeMW||af7CJPogp}#IF&x`5_0r^^B4+tGYC~uzI zj1^`MQkKV@W29~Q;|i{&LL&b3Or!(yqr#4I2R-|6@bu}^b8~&62sxsg$~?B0ZfP5V z+SdWyVP*YPI@?|ML?r@Kq^qupIZ~HrccMQ>LYs<8na`O}*#)NgzCZKj)CDi>>>~VL ztk?^QIl8OZ8Kr&GRVSDe{=mbUh-%u(+8Q_EKAUtT+NTg!$>SHDb^*k8U*srLJAQl( zPe{_ECr`|L6u&3Ev_X_UWb+8~Lk008tDxJ&((;tkGCL}Bz+OzjO2S@=wI~zXSB7pT zb9w|kL>t_?@VSyt6So85Mwdk+R(y$F98F$@RnDKm!Ie+=76j|Ov-%D$EH)OG-%T~A zRMdKc%gZVzO>2bW>Ul4{xsSvP2pB=*E!AnP_~H@&RAK(-{Qv;5iiQMf$(PjIZ(R6} z97)0675Q~H8APsKzHkv+q=d`FUT|9b7Y@`3^G7v~zJ|CuefThC4V6cJpVZ-4Mg#Xd zxH3C+BX#XNHjfo8vSU}DYfhq`1P33^_55irw%{c9KQ!5;E3w1ETLj?%1Y{2Y5O|O9 zt*_T$P*%Qi;gx`a6GV;kL5zYm$Wp{?+%zprP9vwq-gvKDNoR6+E9ozeD zFlB&Tj@nOcOgfeYoR(~^c$9hi{=7%b_;d4{sJ#AWwxo0Vzk~W_Z?VtueL=ts z35q80Db6OM6fCt62SnZnlbzLiT6r+!Xu%yI1qfl2ziD7yZaBw*JW!zW%*-CH+NhY? z5u1!7iqs=WnP#yqsc2x625DSvlK*VV;QD3042ofW70u4pv-jYQL6t`o*+UNJ&gs1) z{}`1WpseWwnQU6m-&=~(Gw(i)>@}DLcy(UhCJ-PoY{Nw#BWS$Fxzm2*(fp%U4wXGc z)?6ybfyIL?(aLy=Tzdp{Icv=By3!zU=n%gA(t%RhQr=6i@1rKoO|jd^;W_u1Sug)u zyYS{MTQDO2Np?AHc1=))5kZy#+k+A@2#rEhP_{CYjXt0TR~=k=Pph^e+UuXW9sB73 z_pUMNKU6q0Su-SnSC-c94lu~(0kK6QEYstXDa`yT^lWTw*V=Q*6NP6Fd!o*DxhTi9 zW=YQNw_LuI$lm&E2mYq@`}hs1fS{lhbQM)kPTp`!cvC3DlHI$$mS;jEUZN+1`eJ@@ zWT??#{Dx$!UMp4)h@od%QB(FuBSU)PxKvbB2r%%wyEG1X@+y!Jdmc&#cGwgLuiwIG z*E>gJSey1(fm=QP1Ly)Qc%%9nYrq#4;2Ny14TF`%$v7Y&u;lxvxgYs$Q*F1)zW_V! 
zvH|gj!YiQO=6sFyT=YC+Qtr<@a}-98nkX4|QQxNsPHfW;YHn7v}Uq$QbB`z!>p&MuvaQ{6Q^wYdln7_wVqmpZWZV408a zk;rV~)=B4tG-f$CeT39-W zBO~b+pDY5XmyU*l_QTe%q>qf{F=Uq+Ar5f(y&d=pdFp&4m{9;4SoCvv8*}){0P&$W zP3Rkb(PV{V^I}FlUd=dPPq)B^&9o+j#NoPAZ&66heR8SdjoBL+MLTfRo>Eum28-MB z>H-uR9@Lu#cce?pHLz%w#syZQM2=cS%UX+RWwb^`{IKmd1=u2uDF;Xi$cSUKDPW1~ zQbHF&)+EA2KIDEvn@<8}0S+;Y9!bv18Wr&i0k>UIe~)hEZXC0hJ{3ErYi|J0rY&14 zhi;ItfUz;+5`)zN?L`b&4Dcg8CnssXzPNM-UN_7rC7UMlP*?$bTWpcg;4HzB#jqO;I`sabw`C*U1evHS|U-GYXPtx#s7 zYX(wpKk-8$(QCzZ>xQ-G(^L?A&00QR#!p6Z2ZaFj0>gBLvh7RHK;6(igi}k>u%D4CuU2`wvClQM|D@A2RrOEekwx7R#y@!vNn;Z@zZeej@Pwv*qw`*wyQLLeM-?Vva za&bl+Vj%+m?jp!D3X3zUs#`&){Rgb038mrtCyQN0jXY<&*u_y@f`y)HiVbb~Uhm0O z%Sh4H(s~c}tZ;Db7FwoZ6MjI}CHAgJMZOKRJcM1ExRMlsujnN-n(o7{kfn7?ZJin5 zYeUzcM^=^0{JnG`={XKdVyM1#OByj%fTb}XVGg(JTaM#KsNl1-&e)w21gwF0x|-3) zFY-g)rz&lA^VYDV@^$>RJJQjflgl2O&7iG+jTKh4bj8nJnFyCK!tiuWpk7j_<#>Re zDB%R*aAqi<+Mr z5V7w)Nvuln4yi-ar^H9kTiGkpHQ$WgqKVb19j13;%f0X(1h+&$Y z!#)JE;?%r%x!Bob-?;xVyF5q9{#Cma!N@0_*PA|lhQ=Udwqd@(^(p7f@4iOouDa`s z?X9#hB~=i%RTC!4%=SFAL$6|wVUDQg<;!Z&-Q_#wi;9VY{ZU8U1l&xF(=x>u43`Gk z*rhOHAS)Jl$(&x5%u->pfi z2XNZwwbA!3ZCFYc1v!e76<_Ap$Vgwlw&98FsRM5=VROhcuTO@S0(&(U4Z=P8QHo$r z(3kIG4wC@k+@F3o3z(a8S+jPHa*MBD`K3E@OA{y1YaHa|H3NB#4R-%NW3f0;8}EDU z$W$W(*O^_YbNvBnVw7<6&?Z`1j)x2;kfyGd$Nl0em?OJTDByl7U;SjBXh~=$= zsgL|h-{ec<$_>QMZ@A}3``}|_HTbq#9e@e};kD>r@3)OytZQAJ?psTx_Pog49H?i! z4-dlJ+}w`*=gX#gx(L@ z70!$U<$II7&QQhQ6uZw>9Wm z{0B8Mph+PyCrzfxh;ReoV2HnPY`g3%p$N(9cumjF9@1pF?rr}K;~YJ}cxPoF-m`Tx99?pHd@i-}MA{Pjo=SQ7 zMOI*6dkZB9(|sCD_hI2c1aUItwSRJKl&v8YrlDby+o$%N1gV4ui~3-H92M&M+$}8& z^onfY0+D$k=r&E{UMlM9<^!rpieR{3!j-jt2W7 z)&>Yi=4QR9&I1hzX8_j`MU4~A;n))!Y3&hH|K^tantY^XBuG2cv+Y9ATdf8C@`{kZ zhJV`L^4o|_5wqz;#^zi9Dks~Lp1j0M_jo~LR9FPhcSyLE8k`P1Lj-FiTpPejm3e{v z+~=MhDr084;Er}OFq}Mjl5pA4_2x(=M?i5J-WjQ)JCf)$Fn&UEEG3-FK6!2(@@I_} zUJa2ke53#ZGxNEri6nqx(;>MOuxK0Dm4vd&zAL9LQHU-9r8KuF7lTUpui@cd)%rb1 zq^LsnGYP2HWWdl6GExiJZ2UpFJJlvdzg!aGl_PNPK&tuC=cb6QXk3}p1>;hPy}#v; zr*=5QnZ0!SmzLrpaoB~0-L5byd|GL{6qkh#1yI}lQ@_RI8;>Xl(9ID1@;cqW?fiyh zJDx?`(&gRwjOVQd4E=#7!()8h&27P-cP^X@6j!|;hsTG8%hD<;>idrJJp{ZJ9v*(` z)Tv{TLOG_I`-~J&_&jPt*hr{6&irO4<2E~__(6Nc3Wg0(7APvE|8D1YjP70T%7p-u zOvpi5*wQlhVp-SP2$O;GY?h?}O)7`5OAbck{8>RHaXQWV#@#;=tV4{h*te0i^|sM- zK}4==@=#ix2DEKWADE9<9tUMX>h7OU%#xFnHwkZqtn5>x=U)*lmybzFiy|AmdrD%; z4yYvK8IfO^40WHV*a#ZnOKX>}&q?T2kl|-l__q2z!u=dkYaka>9ULx96KrjulbrTf z%XdQ{1#I^*VhR@SE$py=J;+iAA&n=7i7bS#+nzVz?BqnGvjp6^I2ROs;m(gB@R0-$ z0g(gcD)f2{lb_z~+OuN&2)pE_vL(X69GVQO#0|ZaCFzw@9mJ)ASb~Wl!=<~(F~ZsA z_JZY2GRas%2%~Z+nHL)**Rn|eldeZLB(9Po3FKR2}4YP4uss#-EkL;Li zlX}q9t`h!&fvQvkY4)?sbTeeNtH#LqKKaSD)h%i98ulOC2bT?%FJEW7K6)K;g66KS zEH^&=hWy6eOO+jVPp_ASPnLbT<;vH9F75?~r)`?|Mq1%ABbl8%s)&@41A_0bCW&2+ zu2YoUrRPQAigDC$b@_ifGuyr8t&^6zcYsJDBk`O@z8XUnQK$*r7VT2cjYlqx^~rqr zVo5wJbV(#not0Jfs>+bX_5{LIv#j>g85n#J+UMqQ^C)daD~M}sru>&aOn3)lEfwp!VV^@45k!GRRB~k2l5@o*z+^^0% z*i!Kp0_!XL27JF^vXD4uV6st;juO|^U7KvKEM0M4ZzzWy8$w+U>aQA^KOgo-tELLf zlp-=*j`}y7`W|h${CRns84JBlUgYcJ_SkgO@!$DzdcseJdFzI0w`ImI){2X0vyTA` zgwz8SGs|J)=g9}t7gwH!L~sRwKmc9xf%TzlYukTZ0U%Am;9BGWT9w2hMyoHMRdD|MP6B zr`JEQa?(XF`0u^vTu%;e29gcVlqAg)VI+2BgWR95Gbb-Pd>Oi7wf*^#it+kg?7JHX zE)ft=0WJHj*|AF>{|M+W1C0K6jqYAzLTFT6J&`4KI#WDI2Th?)Q8Fnn4RjXEp8yE_ z$6@P7UR<7fYnz{h%blT>#mOT*nCqF=YW0ANrl8KF^>`_0NJc}q2M}ipTD2Txzcnp{ zPTqF{ckDO81MDHeYB8EcKWa0hC*zHZ znG>1}H}sN-`E1zZ0>k>ynknCgQn!ReP)sNa;7$#%ITP-qp0@Vw*h@pF4s=WyRxm*5 z3ILxV!Z6^VM8M;D<&mDRL(l*BU0DJzU9C7khieOg7isrhrgM~OwrneYvg&V;862No zRk=vSMj3>RFH^BNlC=LV&muHdXH+jDEbJHvC#eMWYb%$PUrA*hURpL_fm|D6q5DdG zjKY6mq-h=!WDbTR7=(nth_hP)yP_sAO>K|sUjA=^`?MEX^SG5J0?MT4;P9%ub~3=r 
z{(m>Z{RQe8!1Jz4lQpnCl)R{YX!Ep6jgiGW&Y0Eao*V#tE)`Dn-B4Ks1qwiCz+30z z=NAzkj>qJklgbwuc2VA|GRfb0^F|64gFhnQZTe)v-mMIAPy;^QSKYUiy+6I0(w{fS zvUP*j3;kN^Vk5Q2W42q_$mj>7@QRyvB2Y5`dI)Ndk}-(9LI@IKvz?52!B-~?kdlm^ zL3!)o=olmB^qnt{hA(`P=HKm|bJsLd{mPWMGSG|%2QyK^8%rKZ57Gx7$3h1`nGCoW zI-hPq`xi+(Xp}IE#8^1?8<0?3sqzbJ^fcY~yd|ot3pW^$2t+!`Gp&9-P(#71p`xa8 zfS31o$?|$!TH5%$%Dtj+S2SR(If9vd`~LQzb|<4 zV(iWR?7Me;2qTTU7p&n&jVLem{azzI8e@YFZ$NW0ZQTI%Z3D0ZWLcSi5UxVrg~R1{ zlKdD(fS-Ym$?5nVb!9S&i$*sRbZ^^F3XMv*1oRzH28|#b%m~b`g`c;ywoQ5V?AJZI`pUi%fmT04wc(DT_a7Sm-3#QiH1+yqL7)5eiJ+0X zJ$ZFSRA4JGnYLf!=HfD|a>Zi6RnBbECDFaEQ-cpsPgRb=pMMvyCDJ)S34u$&4wcIb z#^{CH`MQxAdXSWbOZMp0fNO$IYg^ibFMROehw%G#qdOnh9;(H!=<~PjG93Ty{KI_0 zswn((V4dVwYNurUcC1Jg8MJb9tDumDyneRRgOtoCL zP@)BKsa9IGnJ~W8F+5)R`T4`g?dI}i3Ti23qy7c$QG4Tn_Zz`YTytkS!4M z*EX_iOMe193cwtoDbU>aT`aM>Jl<17D=_6G*FnNrXeRZ ze(A|=0_>u>8@O70EE!qS&@{p#g`F1L!^{fpH0`s-Vh%{26l~AdoQS*v!v=BZU5CH|FW(~K8EDrZ#XP)TwlTN98Z}W zPf<$Q2l5Q34cd`S+qNAzD7Z;?KQb3j0`R~$1{(*mN+9OtnaC@IzAs3e-z{4zyVT~j z%!qmq5}7aOnn5o?g0>;M_bG5KV6_mz8(UibinssB5?6!aQ5DbwwGI{mYD3}>01f8T z8j%a@#wea*OLD$`z9JqAH^`KtS67?KROMwez7g@vLHrJ$X(iYz5bQ+iV-WRNQqlLh zQ48kgo}1ZF80$7#S^cERvgB6|t`7JrNJAm;D1VZ62OKW?PMMjO^B{F6r>Jw1NOmu% zQ?s*s8Jp$dBF+DoA5wnw%^1Ow23tbd4>~l@2M-_a1^JU6$FVYtER2kCTkHhDvXs*> zLjH#W;_2n`K1_vPf|Ou`_v(VW;L3hL1DAz8yOVzLTi@8-_ov4AGYh`{nFicnmb`^m95mSXPPNG zh{z_wK4`^|!9sKa$gpfU5~%zsMk&R%giSsE#Z+TVIM)sa2A^89cUJiZTG4m3G9XOV zf4TWK4w+!3_n{n(kX3*9^a18bSOn^z1IPI250Wff!?}boj%@65v2LN!K6JJ`hYtrr zs|%3of92bY-MW!9TeosS_l&ZL_%{$chzf&dC-+3N*ZlKFVJX357TP=zinO%0UH-y+ zLu@5i!>yFvyW$uGoKguPKf;WN{}T5DO64lnYf5RYj~|^yNEa0hsUISqP^&^Eyj>)< zaj%RFE3j)Me}Qo^2Z@hWY|?)62_-^Z!E?23$>at+ zBiek|AlrdF3)V-d1rg+q%y~P5ef^=1q@dK0Sy>1AGF|YCRX!Q-5M|&QKzPUEt`~t0 zDHYH39zhIEd>PmSK~)?O6=i^MfVU3oXR4gKSpJ(*P7Mz-pl+yw4mDcZf`8m=Le6hH zhlzWIi|2$l2T5={e|{&4Erv2;MAAbCmy-zB2qJo_)l!taDWic4w4tszMjv-8ml}|% z*pH1L<9LF&VNb--dsx%&j#mmYZ5!q+dz^FUi4PajcIU>a^R|Fz9ToQd#{AFv`u+~e zikNT2W>*-cuq1&L>0)XaA^YGn+IR3EVzGk|_YSTM$xD+x${d7_)aOqsJfou06GvI% zCWGVpCZBb0p`oEcv4fA^IO64z0LbRC3zh#Ks9mB`87-hRKsX7u@d+Mauq<&%k!o&g zx>I5FBe8hA=ZN9_W@uZ%v=JHiRh2`~rs(i#!J%~3#-_}wf(+$GNMDLlFh1ehU?sndi8y0q{quQuR_|r=Vv& z4jJH0KM$YT>Iqk~;g;tz^T9lXnQ<%ac4kiq!Q+Ftx=)4%b6arIz!Nj|EhV(+%XdPV z-f6P@3G{LhQcA_N*2xM#j)~tKNz1D9oLG*i!=aowhqC}oJTmpVWV?tO1(jE)&8%zV zcw?3wdWp%;-n3wa0|<4R<_9a=RK-RP0%bUD#cS~L-uGj7_kVaqJ3K7x$Ii)7HbOAB zj%TMnd9p6FCdI}bZKR_Qbs|TmIEEnJhc7oN%y$lc_z`xEEkE=%Ea^-DSN0 z*B$-BE}^^V01$!Y8-zE2Zuu|BHj0 zCnqO~sT)$qckkZ$hXioL0}ysL>5Mzb26%Vjz=GH7+10GaMbAl884iE!KV%PJM@#_~xoOk6peXPbTXOSHe&D(XRFi)tK2b3L;^ul4B_hv?3tyZ!i#Q z1wDi&M;O93n@jdA2(tkc8*+DYy^rs`9WbrANui6wsen2UZwR;%CN-iAhZplmE=)^A zErvc72Gl#CW62Ou2#^AK>mb~;hapB1qqX4TIu%YdL_P0(mnJsK6BiIho4md@8Qi?I zek$gu<8Hh{FY?K_xVg_sJP(6t3=o}vQ0RWS25pBU2PA!gCFawjIY-+1m#)@&)MS_q zvQL658apED{6?1Y@lb28pFPUaqW1L4q3r!x;uKEo;XLJ&Tx6seC+8Xjbz)8m)K2gs zq6n-c8mWr8H7iSl@_8Tuu+X5RH)`mV)ZyJtWJ?&#bf?RlM}`xd&Hxe9lBSDDNeLDI zW${C0=rb1MwaaUXn7d^8ZwDlc>D~JPw(uI{M#Dl2(K=F|Wfp(u5e*+;vYAqZhidm9 zK*s`aF<$i^2m+c+>tLVBEOQE?KY1 zB~#l$yyDcvtE1};&eRRUIh<^vP&%rtr(*Jxn?V2J6;Ht^ioKCP%&I#2_}nJ#V#AOlu}19Ei8G zGS7>IJp-*mjpm&7CMer$dBn%p7t0LaX`SCXPy@4JNCnUKTt4!5oRXbLVK7?r*1RCt zuCnpdCvx^ez?kkx#&*otAT$9+4dUSd3jhkaZjPpJPn>ilHXw&zpZCGU(-Q1o_)UTx zV^C2_`PQ19Tg+}+kcohNGY+VF&Yg_5k$|Ak6KTym7#V9|B=K&tyih7n3Z~~-J$Mfw z@fdFPM|bNiHVv$&H^gG$SF@ZiiyRpD>+jev4PXj-0e#OiOM;RD| z_h!My#CEuA7aR(=G-t1rFV2DBoP#+dwEdbKPbO@lgwi09)rgP;aRcDqC|?i35zw(s zO*d;e&R%vVT+bPS(vwI^#DGKWCp8+Le|DBlb&Y>+qtrmLq^vIu87iJaRJz|@UEPTo zU{2_5(_2Te7Rd?lTc{nAB+mm-cP?9IOVvmUsq0TJdu*WgcQ+%WGL-Tt;Zt!KfWU^k 
zw2`Do%=K6d+U)3xA*oD=+B?OI&Wq0Qhn-*yP4EUjz5p1ZY%^KICp%yUhRBp`GHbBY z{TY=FZ4JTS0D7bAac&E0b05y@gBL%@VBRb{gzSr%nQ}S&8~i?Z7in z`YL=0T9innkqrKke-ri%&1XWMY>R?|=cG_mhq1mA>!b#oXlYPQ5X5uq;0nBbrIXId_e*6t#FUNI%zlgjrBzK_Q zBAR9>HhgB|>5PdvFPZ%ZYK8mb?o*MDr0=@Fln(n6jAv1r^l>p#n7t_2ItL>FT?3AH z+^Zx|k}Q1BA9-;Pa$+BtDpp)<&uZd^{E-yZ5ua%lWSIO?;jq%GDa__gs20HfS57%l z{~ri`e>rzH^&T*Mgb|S4-rtDy%FK-V+?d(n21*?K&i7K|JA<02>A2gmel@m27j%+Gc+>xGkT<+^3c8_DkTun!qB)??~T>@9H^a~VE(r}6F;_F zm>=A&eEJB`ONkK^vc>G1X93C&4jgix>F-O9Qn(_B{6ai?7%XsC0AGiBN~z6#6Ro@o zr#G-W3$V{G$G%7)EyJ%UL0?-6HYDu)PPK=^sG;L8UGQ*-3oB%3u5M#`8l^R28=Y_G;S z1HH;lS=o(-k-;FlkSlMu@+-OvY&V0w56>lHkEOsdZgf4^-I1kH$SP>NoBWXqkp5GyTR}Jf%Ncsk8a#M0hc003p z-jH0yC_Q<&L5{LmUXBNX9M)s-E+-@qAGp=t8NJB-2F*YbmHs99f}DpL;-)LaZkdIq1J($QPm3K|y?OcV z@Af~p8GgI}W=p26^WdHw!RWOxfR_%qSmL-s-&tgSi)>(pUHSgi#eHBvZ!|jC6uoAH zkcb3MA=0Xxu+lKj;#(h`X9y!llLUlyU0;Xbpz(SwNrAp2W)Bjw9+2YxT%`5%n*{2L zgVO4E*@s+QWiaRhT|j+`}457|_&$R?&%-H2YV+yTMDgIHi~Ln|5a^7w=qr=6oSxsZui5jbOPvNQE$d-&pk3glOd9zw6 z;9oK3xo~6>^(?s24`+|H3^yt`3O2)C=w3n^N`LNLKVW;qQHsn-@h4( zW-P1$wp8X4=$s~hF6Ks4AI?i@hxcN_oaG+=e3Li7Q`e7DwUHq&fI?Ah$ZcFlktnZs z>BJm?s86!+8$^Wi-pV$uDy`_4tW)e@?9((hzV}oIfnaabh3|G|#|lw%!y#wJG7$|p zrBF20^(kLh3Vswm^W+rSMTb!UURpFi;KYl#w#>73GCnIp%j4xTBZ8LLpvk~2`~e>m z;`)_z4JUcj6LV9dIv0m?AbGer_#|hnLL}@@G?@^poU?Z7O2ero=el%qL0AJb7)`NWuh6VXoNep9-JqIgSU zSCPsPH}>W7DVk9Yd1L4~b|E|w2Y2A@jXSXxt>OI#I5z_HCC$a+SU^gW2n5m-?b+&N z(ePPk<@JN8?mJ6&f}%m!i8dmg3itK(OkGGw&J5P z%UW~~)w^SfjR)xwDQGK@@GIlcE9OLBaP^r;-dcxHdkk!JuP>3Y6z?;i=b4CMU=$7Y;xg)-O|Bjn8%QfoD^@CB|5-p zLV{)xCk2cEYCQjA18Tn*W*to|M4ST%MAp-pp6f7Jf~#Dcd&GQXK><~^^*oJa(is~Y z)4zI-k5J*BeTCnHJ4cyz7y~}h>dt8eRi`PSMeh&~G=7(CwfPjKy#}U9u=ye7* zzUR1pFxxBFUp<(qYue(=xFjsi33Q4~RvDWF-VkNejvYig*dJSG9ag>uH1Jgd`y;jb8>Jz^4) zh|-;SsW8w-O;G+0NfA(Ug%dX<9}FGqr{DaJgYLP0#T3v7nzNCZ9~KMm+!g=LVeDJnV*j2YlmkDq3bk)>NdoW5r=Q zrJ%H|tox!8q?(Fnou4|!=R;G3aUAt+!#9p^b1(TE)0j6h|KdbnIYyLQ@oKZqV7b=| zn9n!Tm>Zl{n<(v#|E5}SX2JjSbFS1s+YTH|O%nUHQ3^5v z_;di^;$J`^P1YzfS5d}UlKsbBEHvs_;+kamLuG}YJMvL5*+K;`=rbr`dtwK!tV34{ zYaJBtB+L=DwmIKTa&KhS(Fl6;m9bgxgiB`y>90T1s3RBH7L-l;`P-e`=+Odvt*-Q@ zAB9shGspdC8<}mkWjb)GZW&YEw4%N`1#crIorVzZ#l+$H%WWAPUpay}^?Wj7&zG4M zmmJ#&!%DErkr~GGEKP?^BnSXAI&OOO0#R0K%?e^-Yqyorx0ieGlh|eQg|d7?@`I-J z&pqx{$NHdf37?ldUPXsFY9)r&m!+>rj8fcpQJ{I9N9_2r!kN%pOL-opP} z(O>(^AB4?lJ^q)J^P=n(VW+2X`@&ZbtDST-Wk6A{uwQl=`owi#dpLe21MqKr*~i0j z{jY#oGGTe33|f(UKYhMOT}(Q0ZTTAp-#4^7$gkysMgvLT3X+-Oo>{}bIClf=A#7pb za*%h82ENoXZ~G_Kf9{3vrN56!1)fX-s8XM))Iazk@mL5owW$W(lhO>nTtW*~U z(**WiPT`qYzS;4CD4Ib@dS+&|^rfLr;s6J_=9(`#S9ow;TYkF}aFU=TY^GSXaz(;+ z=lSJZ7uVGG7+lI`oz^m4a2_HDlI$(b_%C9z?-;9Fq)=MJzLZ10 z*N;!CMW)RD%V5US6L}^ZOk2;e{Cja{j_0P-Ga~DY|Gj#FwecbUhotA!(uzwWqp56D z?Ncl)EIVD)PrF!KxY#ILUADo0SY)JRr1wb4?vYk7kd{-HmsOUNmXLzbM@rhSIs3o8 ez>#WgZ|m_tzu-Wp`D(m?<(Rtekv|TZ`~3$1P0@D% literal 0 HcmV?d00001 diff --git a/docs/guide/callbacks.rst b/docs/guide/callbacks.rst index 6588f90fb..f5d9d02f0 100644 --- a/docs/guide/callbacks.rst +++ b/docs/guide/callbacks.rst @@ -185,6 +185,11 @@ It will save the best model if ``best_model_save_path`` folder is specified and You can pass a child callback via the ``callback_on_new_best`` argument. It will be triggered each time there is a new best model. +.. warning:: + + You need to make sure that ``eval_env`` is wrapped the same way as the training environment, for instance using the ``VecTransposeImage`` wrapper if you have a channel-last image as input. + The ``EvalCallback`` class outputs a warning if it is not the case. + .. 
 .. code-block:: python
diff --git a/docs/guide/custom_env.rst b/docs/guide/custom_env.rst
index f598e7055..2b1e4b988 100644
--- a/docs/guide/custom_env.rst
+++ b/docs/guide/custom_env.rst
@@ -13,6 +13,12 @@ That is to say, your environment must implement the following methods (and inher
 channel-first or channel-last.
 
 
+.. note::
+
+   Although SB3 supports both channel-last and channel-first images as input, we recommend using the channel-first convention when possible.
+   Under the hood, when a channel-last image is passed, SB3 uses a ``VecTransposeImage`` wrapper to re-order the channels.
+
+
 .. code-block:: python
 
@@ -29,9 +35,9 @@ That is to say, your environment must implement the following methods (and inher
         # They must be gym.spaces objects
         # Example when using discrete actions:
         self.action_space = spaces.Discrete(N_DISCRETE_ACTIONS)
-        # Example for using image as input (can be channel-first or channel-last):
+        # Example for using image as input (channel-first; channel-last also works):
         self.observation_space = spaces.Box(low=0, high=255,
-                                            shape=(HEIGHT, WIDTH, N_CHANNELS), dtype=np.uint8)
+                                            shape=(N_CHANNELS, HEIGHT, WIDTH), dtype=np.uint8)
 
       def step(self, action):
           ...
diff --git a/docs/guide/custom_policy.rst b/docs/guide/custom_policy.rst
index f8aecfeaf..f17c10ab3 100644
--- a/docs/guide/custom_policy.rst
+++ b/docs/guide/custom_policy.rst
@@ -3,8 +3,8 @@
 Custom Policy Network
 =====================
 
-Stable Baselines3 provides policy networks for images (CnnPolicies)
-and other type of input features (MlpPolicies).
+Stable Baselines3 provides policy networks for images (CnnPolicies),
+other types of input features (MlpPolicies) and multiple different inputs (MultiInputPolicies).
 
 .. warning::
@@ -13,9 +13,49 @@ and other type of input features (MlpPolicies).
     which handles bounds more correctly.
 
 
+SB3 Policy
+^^^^^^^^^^
 
-Custom Policy Architecture
-^^^^^^^^^^^^^^^^^^^^^^^^^^
+SB3 networks are separated into two main parts (see figure below):
+
+- A features extractor (usually shared between actor and critic when applicable, to save computation)
+  whose role is to extract features (i.e. convert to a feature vector) from high-dimensional observations, for instance, a CNN that extracts features from images.
+  This is the ``features_extractor_class`` parameter. You can change the default parameters of that features extractor
+  by passing a ``features_extractor_kwargs`` parameter.
+
+- A (fully-connected) network that maps the features to actions/value. Its architecture is controlled by the ``net_arch`` parameter.
+
+
+.. note::
+
+   All observations are first pre-processed (e.g. images are normalized, discrete obs are converted to one-hot vectors, ...) before being fed to the features extractor.
+   In the case of vector observations, the features extractor is just a ``Flatten`` layer.
+
+
+.. image:: ../_static/img/net_arch.png
+
+
+SB3 policies are usually composed of several networks (actor/critic networks + target networks when applicable) together
+with the associated optimizers.
+
+Each of these networks has a features extractor followed by a fully-connected network.
+
+.. note::
+
+   When we refer to "policy" in Stable-Baselines3, this is usually an abuse of language compared to RL terminology.
+   In SB3, "policy" refers to the class that handles all the networks useful for training,
+   so not only the network used to predict actions (the "learned controller").
+
+
+
+.. image:: ../_static/img/sb3_policy.png
+
+
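+.. note::
+
+   As a minimal sketch of how these two parts are configured in practice
+   (the layer sizes below are illustrative, not tuned values):
+
+   .. code-block:: python
+
+       from stable_baselines3 import PPO
+
+       # Keep the default features extractor, but use separate 64-unit
+       # policy and value heads for the fully-connected network
+       policy_kwargs = dict(net_arch=[dict(pi=[64, 64], vf=[64, 64])])
+       model = PPO("MlpPolicy", "CartPole-v1", policy_kwargs=policy_kwargs)
+       model.learn(1000)
+
+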
+.. .. figure:: https://cdn-images-1.medium.com/max/960/1*h4WTQNVIsvMXJTCpXm_TAw.gif
+
+
+Custom Network Architecture
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
 One way of customising the policy network architecture is to pass arguments when creating the model,
 using ``policy_kwargs`` parameter:
@@ -109,6 +149,70 @@ that derives from ``BaseFeaturesExtractor`` and then pass it to the model when t
     model.learn(1000)
 
 
+Multiple Inputs and Dictionary Observations
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Stable Baselines3 supports handling of multiple inputs by using a ``Dict`` Gym space. This can be done using
+``MultiInputPolicy``, which by default uses the ``CombinedExtractor`` features extractor to turn multiple
+inputs into a single vector, handled by the ``net_arch`` network.
+
+By default, ``CombinedExtractor`` processes multiple inputs as follows:
+
+1. If input is an image (automatically detected, see ``common.preprocessing.is_image_space``), process image with Nature Atari CNN network and
+   output a latent vector of size ``64``.
+2. If input is not an image, flatten it (no layers).
+3. Concatenate all previous vectors into one long vector and pass it to the policy network.
+
+You can also define custom features extractors, as in the single-input case above. The following example assumes the environment has two keys in the
+observation space dictionary: "image" is a (1,H,W) image, and "vector" is a (D,)-dimensional vector. We process "image" with a simple
+downsampling and "vector" with a single linear layer.
+
+.. code-block:: python
+
+    import gym
+    import torch as th
+    from torch import nn
+
+    from stable_baselines3.common.torch_layers import BaseFeaturesExtractor
+
+    class CustomCombinedExtractor(BaseFeaturesExtractor):
+        def __init__(self, observation_space: gym.spaces.Dict):
+            # We do not know features-dim here before going over all the items,
+            # so put something dummy for now. PyTorch requires calling
+            # nn.Module.__init__ before adding modules
+            super(CustomCombinedExtractor, self).__init__(observation_space, features_dim=1)
+
+            extractors = {}
+
+            total_concat_size = 0
+            # We need to know size of the output of this extractor,
+            # so go over all the spaces and compute output feature sizes
+            for key, subspace in observation_space.spaces.items():
+                if key == "image":
+                    # We will just downsample one channel of the image by 4x4 and flatten.
+                    # Assume the image is single-channel (subspace.shape[0] == 1)
+                    extractors[key] = nn.Sequential(nn.MaxPool2d(4), nn.Flatten())
+                    total_concat_size += (subspace.shape[1] // 4) * (subspace.shape[2] // 4)
+                elif key == "vector":
+                    # Run through a simple MLP
+                    extractors[key] = nn.Linear(subspace.shape[0], 16)
+                    total_concat_size += 16
+
+            self.extractors = nn.ModuleDict(extractors)
+
+            # Update the features dim manually
+            self._features_dim = total_concat_size
+
+        def forward(self, observations) -> th.Tensor:
+            encoded_tensor_list = []
+
+            # self.extractors contain nn.Modules that do all the processing.
+            for key, extractor in self.extractors.items():
+                encoded_tensor_list.append(extractor(observations[key]))
+            # Return a (B, self._features_dim) PyTorch tensor, where B is batch dimension.
+            return th.cat(encoded_tensor_list, dim=1)
+
+
 On-Policy Algorithms
 ^^^^^^^^^^^^^^^^^^^^
diff --git a/docs/guide/developer.rst b/docs/guide/developer.rst
index d930594d4..e69e51055 100644
--- a/docs/guide/developer.rst
+++ b/docs/guide/developer.rst
@@ -31,6 +31,9 @@ Each algorithm has two main methods:
 - ``.train()`` which updates the parameters using samples from the buffer
 
+
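+.. note::
+
+   A hedged sketch of how these two methods alternate, seen from user code
+   (the algorithm, environment and timestep counts are arbitrary choices):
+
+   .. code-block:: python
+
+       from stable_baselines3 import A2C
+
+       model = A2C("MlpPolicy", "CartPole-v1")
+       # learn() internally alternates collect_rollouts() (filling the buffer)
+       # and train() (gradient updates on samples from the buffer)
+       for _ in range(3):
+           model.learn(total_timesteps=500, reset_num_timesteps=False)
+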
+.. image:: ../_static/img/sb3_loop.png
+
+
 Where to start?
 ===============
diff --git a/docs/guide/examples.rst b/docs/guide/examples.rst
index 3e30c03d8..35df576d4 100644
--- a/docs/guide/examples.rst
+++ b/docs/guide/examples.rst
@@ -396,7 +396,7 @@ The parking env is a goal-conditioned continuous control task, in which the vehi
   import highway_env
   import numpy as np
 
-  from stable_baselines3 import HER, SAC, DDPG, TD3
+  from stable_baselines3 import HerReplayBuffer, SAC, DDPG, TD3
   from stable_baselines3.common.noise import NormalActionNoise
 
   env = gym.make("parking-v0")
@@ -405,21 +405,23 @@ The parking env is a goal-conditioned continuous control task, in which the vehi
   n_sampled_goal = 4
 
   # SAC hyperparams:
-  model = HER(
-      "MlpPolicy",
+  model = SAC(
+      "MultiInputPolicy",
       env,
-      SAC,
-      n_sampled_goal=n_sampled_goal,
-      goal_selection_strategy="future",
-      # IMPORTANT: because the env is not wrapped with a TimeLimit wrapper
-      # we have to manually specify the max number of steps per episode
-      max_episode_length=100,
+      replay_buffer_class=HerReplayBuffer,
+      replay_buffer_kwargs=dict(
+          n_sampled_goal=n_sampled_goal,
+          goal_selection_strategy="future",
+          # IMPORTANT: because the env is not wrapped with a TimeLimit wrapper
+          # we have to manually specify the max number of steps per episode
+          max_episode_length=100,
+          online_sampling=True,
+      ),
       verbose=1,
       buffer_size=int(1e6),
       learning_rate=1e-3,
       gamma=0.95,
       batch_size=256,
-      online_sampling=True,
       policy_kwargs=dict(net_arch=[256, 256, 256]),
   )
@@ -429,7 +431,7 @@ The parking env is a goal-conditioned continuous control task, in which the vehi
   # Load saved model
   # Because it needs access to `env.compute_reward()`
   # HER must be loaded with the env
-  model = HER.load("her_sac_highway", env=env)
+  model = SAC.load("her_sac_highway", env=env)
 
   obs = env.reset()
@@ -663,6 +665,32 @@ A2C policy gradient updates on the model.
     print(f"Best fitness: {top_candidates[0][1]:.2f}")
 
 
+SB3 and ProcgenEnv
+------------------
+
+Some environments like `Procgen `_ already produce a vectorized
+environment (see discussion in `issue #314 `_). In order to use it with SB3, you must wrap it in a ``VecMonitor`` wrapper, which will also allow
+you to keep track of the agent's progress.
+
+.. code-block:: python
+
+  from procgen import ProcgenEnv
+
+  from stable_baselines3 import PPO
+  from stable_baselines3.common.vec_env import VecExtractDictObs, VecMonitor
+
+  # ProcgenEnv is already vectorized
+  venv = ProcgenEnv(num_envs=2, env_name='starpilot')
+
+  # To use only part of the observation:
+  # venv = VecExtractDictObs(venv, "rgb")
+
+  # Wrap with a VecMonitor to collect stats and avoid errors
+  venv = VecMonitor(venv=venv)
+
+  model = PPO("MultiInputPolicy", venv, verbose=1)
+  model.learn(10000)
+
+
 Record a Video
 --------------
diff --git a/docs/guide/install.rst b/docs/guide/install.rst
index 9632777f5..926836e96 100644
--- a/docs/guide/install.rst
+++ b/docs/guide/install.rst
@@ -29,6 +29,10 @@ To install Stable Baselines3 with pip, execute:
     pip install stable-baselines3[extra]
 
+.. note::
+    Some shells such as Zsh require quotation marks around brackets, i.e. ``pip install 'stable-baselines3[extra]'`` `More information `_.
+
+
 This includes optional dependencies like Tensorboard, OpenCV or ``atari-py`` to train on atari games. If you do not need those, you can use:
 
 .. code-block:: bash
diff --git a/docs/guide/migration.rst b/docs/guide/migration.rst
index fa2358443..9b25f9572 100644
--- a/docs/guide/migration.rst
+++ b/docs/guide/migration.rst
@@ -98,7 +98,7 @@ Base-class (all algorithms)
 Policies
 ^^^^^^^^
 
-- ``cnn_extractor`` -> ``feature_extractor``, as ``feature_extractor`` in now used with ``MlpPolicy`` too
+- ``cnn_extractor`` -> ``features_extractor``, as ``features_extractor`` is now used with ``MlpPolicy`` too
 
 A2C
 ^^^
diff --git a/docs/guide/rl_zoo.rst b/docs/guide/rl_zoo.rst
index c592978c9..9b255733e 100644
--- a/docs/guide/rl_zoo.rst
+++ b/docs/guide/rl_zoo.rst
@@ -4,9 +4,11 @@
 RL Baselines3 Zoo
 ==================
 
-`RL Baselines3 Zoo `_. is a collection of pre-trained Reinforcement Learning agents using
-Stable-Baselines3.
-It also provides basic scripts for training, evaluating agents, tuning hyperparameters and recording videos.
+`RL Baselines3 Zoo `_ is a training framework for Reinforcement Learning (RL).
+
+It provides scripts for training, evaluating agents, tuning hyperparameters, plotting results and recording videos.
+
+In addition, it includes a collection of tuned hyperparameters for common environments and RL algorithms, and agents trained with those settings.
 
 Goals of this repository:
diff --git a/docs/guide/vec_envs.rst b/docs/guide/vec_envs.rst
index f7be23332..7958fe0e3 100644
--- a/docs/guide/vec_envs.rst
+++ b/docs/guide/vec_envs.rst
@@ -27,14 +27,22 @@ SubprocVecEnv ✔️ ✔️ ✔️ ✔️ ✔️
   When using vectorized environments, the environments are automatically reset at the end of each episode.
   Thus, the observation returned for the i-th environment when ``done[i]`` is true will in fact be the first observation of the next episode, not the last observation of the episode that has just terminated.
-  You can access the "real" final observation of the terminated episode—that is, the one that accompanied the ``done`` event provided by the underlying environment—using the ``terminal_observation`` keys in the info dicts returned by the vecenv.
+  You can access the "real" final observation of the terminated episode—that is, the one that accompanied the ``done`` event provided by the underlying environment—using the ``terminal_observation`` keys in the info dicts returned by the ``VecEnv``.
+
+.. warning::
 
-  When using ``SubprocVecEnv``, users must wrap the code in an ``if __name__ == "__main__":`` if using the ``forkserver`` or ``spawn`` start method (default on Windows).
-  On Linux, the default start method is ``fork`` which is not thread safe and can create deadlocks.
+  When defining a custom ``VecEnv`` (for instance, using gym3 ``ProcgenEnv``), you should provide ``terminal_observation`` keys in the info dicts returned by the ``VecEnv``
+  (cf. note above).
+
+
+.. warning::
+
+  When using ``SubprocVecEnv``, users must wrap the code in an ``if __name__ == "__main__":`` if using the ``forkserver`` or ``spawn`` start method (default on Windows).
+  On Linux, the default start method is ``fork`` which is not thread safe and can create deadlocks.
+
+  For more information, see Python's `multiprocessing guidelines `_.
 
-  For more information, see Python's `multiprocessing guidelines `_.
 
 VecEnv
 ------
@@ -101,3 +109,15 @@ VecTransposeImage
 
 .. autoclass:: VecTransposeImage
   :members:
+
+VecMonitor
+~~~~~~~~~~~~~~~~~
+
+.. autoclass:: VecMonitor
+  :members:
+
+VecExtractDictObs
+~~~~~~~~~~~~~~~~~
+
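+.. note::
+
+   A minimal usage sketch (``BitFlippingEnv`` is just a convenient Dict-observation
+   environment shipped with SB3; any ``VecEnv`` with a ``Dict`` observation space works):
+
+   .. code-block:: python
+
+       from stable_baselines3.common.envs import BitFlippingEnv
+       from stable_baselines3.common.vec_env import DummyVecEnv, VecExtractDictObs
+
+       venv = DummyVecEnv([lambda: BitFlippingEnv(n_bits=4)])
+       # Keep only the "observation" key of the Dict observation
+       venv = VecExtractDictObs(venv, "observation")
+       print(venv.observation_space)  # now a flat (non-Dict) space
+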
+.. autoclass:: VecExtractDictObs
+  :members:
diff --git a/docs/index.rst b/docs/index.rst
index cdfae8707..d55a35c89 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -12,9 +12,9 @@ It is the next major version of `Stable Baselines = 1.1.0:
+  model = SAC("MultiInputPolicy", env, replay_buffer_class=HerReplayBuffer, replay_buffer_kwargs=her_kwargs)
 
 New Features:
 ^^^^^^^^^^^^^
-- Added support for ``custom_objects`` when loading models
 - Added support for single-level ``Dict`` observation space (@JadenTravnik)
 - Added ``DictRolloutBuffer`` and ``DictReplayBuffer`` to support dictionary observations (@JadenTravnik)
 - Added ``StackedObservations`` and ``StackedDictObservations`` that are used within ``VecFrameStack``
 - Added simple 4x4 room Dict test environments
+- ``HerReplayBuffer`` now supports ``VecNormalize`` when ``online_sampling=False``
+- Added `VecMonitor `_ and
+  `VecExtractDictObs `_ wrappers
+  to handle gym3-style vectorized environments (@vwxyzjn)
+- Ignored the terminal observation if it is not provided by the environment,
+  as with gym3-style vectorized environments (@vwxyzjn)
+- Added support for image observation when using ``HER``
+- Added ``replay_buffer_class`` and ``replay_buffer_kwargs`` arguments to off-policy algorithms
+
+Bug Fixes:
+^^^^^^^^^^
+- Fixed potential issue when calling off-policy algorithms with default arguments multiple times (the size of the replay buffer would be the same)
+- Fixed loading of ``ent_coef`` for ``SAC`` and ``TQC``, it was not optimized anymore (thanks @Atlis)
+- Fixed saving of ``A2C`` and ``PPO`` policy when using gSDE (thanks @liusida)
+
+Deprecations:
+^^^^^^^^^^^^^
+
+Others:
+^^^^^^^
+- Added ``flake8-bugbear`` to tests dependencies to find likely bugs
+- Updated ``env_checker`` to reflect support of dict observation spaces
+- Added Code of Conduct
+- Added tests for GAE and lambda return computation
+
+Documentation:
+^^^^^^^^^^^^^^
+- Added gym pybullet drones project (@JacopoPan)
+- Added link to SuperSuit in projects (@justinkterry)
+- Fixed DQN example (thanks @ltbd78)
+- Clarified channel-first/channel-last recommendation
+- Updated sphinx environment installation instructions (@tom-doerr)
+- Clarified pip installation in Zsh (@tom-doerr)
+- Clarified return computation for on-policy algorithms (TD(lambda) estimate was used)
+- Added example for using ``ProcgenEnv``
+
+
+Release 1.0 (2021-03-15)
+------------------------
+
+**First Major Version**
+
 Breaking Changes:
 ^^^^^^^^^^^^^^^^^
 - Removed ``stable_baselines3.common.cmd_util`` (already deprecated), please use ``env_util`` instead
 
 .. warning::
 
   A refactoring of the ``HER`` algorithm is planned together with support for dictionary observations
   (see `PR #243 `_ and `#351 `_).
   This will be a backward incompatible change (model trained with previous version of ``HER`` won't work with the new version).
+
+
+New Features:
+^^^^^^^^^^^^^
+- Added support for ``custom_objects`` when loading models
+
 
 Bug Fixes:
 ^^^^^^^^^^
@@ -33,7 +103,10 @@ Documentation:
 
 Others:
 ^^^^^^^
-- Updated ``env_checker`` to reflect support of dict observation spaces
+- Updated RL-Zoo to reflect the fact that it is more than a collection of trained agents
+- Added images to illustrate the training loop and custom policies (created with https://excalidraw.com/)
+- Updated the custom policy section
+
 
 Pre-Release 0.11.1 (2021-02-27)
 -------------------------------
@@ -608,5 +681,5 @@ And all the contributors:
 @flodorner @KuKuXia @NeoExtended @PartiallyTyped @mmcenta @richardwu @kinalmehta @rolandgvc @tkelestemur @mloo3
 @tirafesi @blurLake @koulakis @joeljosephjin @shwang @rk37 @andyshih12 @RaphaelWag @xicocaio
 @diditforlulz273 @liorcohen5 @ManifoldFR @mloo3 @SwamyDev @wmmc88 @megan-klaiber @thisray
-@tfederico @hn2 @LucasAlegre @AptX395 @zampanteymedio @decodyng @ardabbour @lorenz-h @mschweizer @lorepieri8
-@ShangqunYu @PierreExeter @JadenTravnik
+@tfederico @hn2 @LucasAlegre @AptX395 @zampanteymedio @JadenTravnik @decodyng @ardabbour @lorenz-h @mschweizer @lorepieri8 @vwxyzjn
+@ShangqunYu @PierreExeter @JacopoPan @ltbd78 @tom-doerr @Atlis @liusida
diff --git a/docs/misc/projects.rst b/docs/misc/projects.rst
index f0620f0da..c71a8b3ab 100644
--- a/docs/misc/projects.rst
+++ b/docs/misc/projects.rst
@@ -7,15 +7,7 @@ This is a list of projects using stable-baselines3.
 Please tell us, if you want your project to appear on this page ;)
 
-.. RL Racing Robot
-.. --------------------------
-.. Implementation of reinforcement learning approach to make a donkey car learn to race.
-.. Uses SAC on autoencoder features
-..
-.. | Author: Antonin Raffin (@araffin)
-.. | Github repo: https://github.com/araffin/RL-Racing-Robot
-
-rl_reach
+RL Reach
 --------
 A platform for running reproducible reinforcement learning experiments for customisable robotic reaching tasks. This self-contained and straightforward toolbox allows its users to quickly investigate and identify optimal training configurations.
 
@@ -56,4 +48,29 @@ A simple interface to instantiate RL environments with SUMO for Traffic Signal C
 - Easy customisation: state and reward definitions are easily modifiable
 
 | Author: Lucas Alegre
-| Github: https://github.com/LucasAlegre/sumo-rl
\ No newline at end of file
+| Github: https://github.com/LucasAlegre/sumo-rl
+
+gym-pybullet-drones
+-------------------
+PyBullet Gym environments for single and multi-agent reinforcement learning of quadcopter control.
+
+- Physics-based simulation for the development and test of quadcopter control.
+- Compatibility with ``gym.Env``, RLlib's MultiAgentEnv.
+- Learning and testing script templates for stable-baselines3 and RLlib.
+
+| Author: Jacopo Panerati
+| Github: https://github.com/utiasDSL/gym-pybullet-drones/
+| Paper: https://arxiv.org/abs/2103.02142
+
+SuperSuit
+---------
+
+SuperSuit contains easy-to-use wrappers for Gym (and multi-agent PettingZoo) environments to do all forms of common preprocessing (frame stacking, converting graphical observations to greyscale, max-and-skip for Atari, etc.). It also notably includes:
+
+- Wrappers that apply lambda functions to observations, actions, or rewards with a single line of code.
+- All wrappers can be used natively on vector environments; wrappers exist to convert Gym environments to vectorized environments and concatenate multiple vector environments together.
+- A wrapper is included that allows for using regular single agent RL libraries (e.g. 
stable baselines) to learn simple multi-agent PettingZoo environments, explained in this tutorial: + +| Author: Justin Terry +| GitHub: https://github.com/PettingZoo-Team/SuperSuit +| Tutorial on multi-agent support in stable baselines: https://towardsdatascience.com/multi-agent-deep-reinforcement-learning-in-15-lines-of-code-using-pettingzoo-e0b963c0820b diff --git a/docs/modules/her.rst b/docs/modules/her.rst index 6c5cd9350..047809ae0 100644 --- a/docs/modules/her.rst +++ b/docs/modules/her.rst @@ -13,6 +13,12 @@ HER uses the fact that even if a desired goal was not achieved, other goal may h It creates "virtual" transitions by relabeling transitions (changing the desired goal) from past episodes. +.. warning:: + + Starting from Stable Baselines3 v1.1.0, ``HER`` is no longer a separate algorithm + but a replay buffer class ``HerReplayBuffer`` that must be passed to an off-policy algorithm + when using ``MultiInputPolicy`` (to have Dict observation support). + .. warning:: @@ -27,11 +33,6 @@ It creates "virtual" transitions by relabeling transitions (changing the desired Otherwise, you can directly pass ``max_episode_length`` to the model constructor -.. warning:: - - ``HER`` supports ``VecNormalize`` wrapper but only when ``online_sampling=True`` - - .. warning:: Because it needs access to ``env.compute_reward()`` @@ -59,11 +60,10 @@ Example .. code-block:: python - from stable_baselines3 import HER, DDPG, DQN, SAC, TD3 + from stable_baselines3 import HerReplayBuffer, DDPG, DQN, SAC, TD3 from stable_baselines3.her.goal_selection_strategy import GoalSelectionStrategy from stable_baselines3.common.envs import BitFlippingEnv from stable_baselines3.common.vec_env import DummyVecEnv - from stable_baselines3.common.vec_env.obs_dict_wrapper import ObsDictWrapper model_class = DQN # works also with SAC, DDPG and TD3 N_BITS = 15 @@ -79,15 +79,27 @@ Example max_episode_length = N_BITS # Initialize the model - model = HER('MlpPolicy', env, model_class, n_sampled_goal=4, goal_selection_strategy=goal_selection_strategy, online_sampling=online_sampling, - verbose=1, max_episode_length=max_episode_length) + model = model_class( + "MultiInputPolicy", + env, + replay_buffer_class=HerReplayBuffer, + # Parameters for HER + replay_buffer_kwargs=dict( + n_sampled_goal=4, + goal_selection_strategy=goal_selection_strategy, + online_sampling=online_sampling, + max_episode_length=max_episode_length, + ), + verbose=1, + ) + # Train the model model.learn(1000) model.save("./her_bit_env") # Because it needs access to `env.compute_reward()` # HER must be loaded with the env - model = HER.load('./her_bit_env', env=env) + model = model_class.load('./her_bit_env', env=env) obs = env.reset() for _ in range(100): @@ -123,43 +135,31 @@ Run the benchmark: .. code-block:: bash - python train.py --algo her --env parking-v0 --eval-episodes 10 --eval-freq 10000 + python train.py --algo tqc --env parking-v0 --eval-episodes 10 --eval-freq 10000 Plot the results: .. code-block:: bash - python scripts/all_plots.py -a her -e parking-v0 -f logs/ --no-million + python scripts/all_plots.py -a tqc -e parking-v0 -f logs/ --no-million Parameters ---------- -.. autoclass:: HER - :members: - -Goal Selection Strategies -------------------------- +HER Replay Buffer +----------------- -.. autoclass:: GoalSelectionStrategy +.. autoclass:: HerReplayBuffer :members: :inherited-members: - :undoc-members: -Obs Dict Wrapper ----------------- +Goal Selection Strategies +------------------------- -.. autoclass:: ObsDictWrapper +.. 
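note::
+
+   A quick sketch of selecting a strategy (values illustrative; the string form
+   is handled case-insensitively by ``HerReplayBuffer``):
+
+   .. code-block:: python
+
+       from stable_baselines3.her.goal_selection_strategy import GoalSelectionStrategy
+
+       # Either the enum...
+       replay_buffer_kwargs = dict(goal_selection_strategy=GoalSelectionStrategy.FUTURE)
+       # ...or its name as a string
+       replay_buffer_kwargs = dict(goal_selection_strategy="future")
+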
+.. autoclass:: GoalSelectionStrategy
    :members:
    :inherited-members:
    :undoc-members:
-
-
-HER Replay Buffer
------------------
-
-.. autoclass:: HerReplayBuffer
-  :members:
-  :inherited-members:
diff --git a/setup.py b/setup.py
index 0ef4e9ba1..a68aa1861 100644
--- a/setup.py
+++ b/setup.py
@@ -20,8 +20,8 @@
 Repository:
 https://github.com/DLR-RM/stable-baselines3
 
-Medium article:
-https://medium.com/@araffin/df87c4b2fc82
+Blog post:
+https://araffin.github.io/post/sb3/
 
 Documentation:
 https://stable-baselines3.readthedocs.io/en/master/
@@ -94,6 +94,8 @@
         "pytype",
         # Lint code
         "flake8>=3.8",
+        # Find likely bugs
+        "flake8-bugbear",
         # Sort imports
         "isort>=5.0",
         # Reformat
diff --git a/stable_baselines3/__init__.py b/stable_baselines3/__init__.py
index bcac479de..acca18ace 100644
--- a/stable_baselines3/__init__.py
+++ b/stable_baselines3/__init__.py
@@ -3,7 +3,7 @@
 from stable_baselines3.a2c import A2C
 from stable_baselines3.ddpg import DDPG
 from stable_baselines3.dqn import DQN
-from stable_baselines3.her import HER
+from stable_baselines3.her.her_replay_buffer import HerReplayBuffer
 from stable_baselines3.ppo import PPO
 from stable_baselines3.sac import SAC
 from stable_baselines3.td3 import TD3
@@ -12,3 +12,10 @@
 version_file = os.path.join(os.path.dirname(__file__), "version.txt")
 with open(version_file, "r") as file_handler:
     __version__ = file_handler.read().strip()
+
+
+def HER(*args, **kwargs):
+    raise ImportError(
+        "Since Stable-Baselines3 v1.1.0, `HER` is now a replay buffer class `HerReplayBuffer`.\n "
+        "Please check the documentation for more information: https://stable-baselines3.readthedocs.io/"
+    )
diff --git a/stable_baselines3/common/base_class.py b/stable_baselines3/common/base_class.py
index b272c5a38..bbb630fd3 100644
--- a/stable_baselines3/common/base_class.py
+++ b/stable_baselines3/common/base_class.py
@@ -35,7 +35,6 @@
     is_vecenv_wrapped,
     unwrap_vec_normalize,
 )
-from stable_baselines3.common.vec_env.obs_dict_wrapper import ObsDictWrapper
 
 
 def maybe_make_env(env: Union[GymEnv, str, None], verbose: int) -> Optional[GymEnv]:
@@ -130,7 +129,7 @@ def __init__(
         self.tensorboard_log = tensorboard_log
         self.lr_schedule = None  # type: Optional[Schedule]
         self._last_obs = None  # type: Optional[np.ndarray]
-        self._last_dones = None  # type: Optional[np.ndarray]
+        self._last_episode_starts = None  # type: Optional[np.ndarray]
         # When using VecNormalize:
         self._last_original_obs = None  # type: Optional[np.ndarray]
         self._episode_num = 0
@@ -221,13 +220,6 @@ def _wrap_env(env: GymEnv, verbose: int = 0, monitor_wrapper: bool = True) -> Ve
             print("Wrapping the env in a VecTransposeImage.")
             env = VecTransposeImage(env)
 
-        # check if wrapper for dict support is needed when using HER
-        # TODO(antonin): remove this with the new version of HER
-        if isinstance(env.observation_space, gym.spaces.Dict) and set(env.observation_space.spaces.keys()) == set(
-            ["observation", "desired_goal", "achieved_goal"]
-        ):
-            env = ObsDictWrapper(env)
-
         return env
 
     @abstractmethod
@@ -287,7 +279,16 @@ def _excluded_save_params(self) -> List[str]:
         :return: List of parameters that should be excluded from being saved with pickle.
""" - return ["policy", "device", "env", "eval_env", "replay_buffer", "rollout_buffer", "_vec_normalize_env"] + return [ + "policy", + "device", + "env", + "eval_env", + "replay_buffer", + "rollout_buffer", + "_vec_normalize_env", + "_episode_storage", + ] def _get_torch_save_params(self) -> Tuple[List[str], List[str]]: """ @@ -389,7 +390,7 @@ def _setup_learn( # Avoid resetting the environment when calling ``.learn()`` consecutive times if reset_num_timesteps or self._last_obs is None: self._last_obs = self.env.reset() - self._last_dones = np.zeros((self.env.num_envs,), dtype=bool) + self._last_episode_starts = np.ones((self.env.num_envs,), dtype=bool) # Retrieve unnormalized observation for saving into the buffer if self._vec_normalize_env is not None: self._last_original_obs = self._vec_normalize_env.get_original_obs() @@ -662,7 +663,9 @@ def load( # put other pytorch variables back in place if pytorch_variables is not None: for name in pytorch_variables: - recursive_setattr(model, name, pytorch_variables[name]) + # Set the data attribute directly to avoid issue when using optimizers + # See https://github.com/DLR-RM/stable-baselines3/issues/391 + recursive_setattr(model, name + ".data", pytorch_variables[name].data) # Sample gSDE exploration matrix, so it uses the right device # see issue #44 diff --git a/stable_baselines3/common/buffers.py b/stable_baselines3/common/buffers.py index 4a25c2992..253787d64 100644 --- a/stable_baselines3/common/buffers.py +++ b/stable_baselines3/common/buffers.py @@ -1,6 +1,6 @@ import warnings from abc import ABC, abstractmethod -from typing import Dict, Generator, Optional, Union +from typing import Any, Dict, Generator, List, Optional, Union import numpy as np import torch as th @@ -164,6 +164,9 @@ class ReplayBuffer(BaseBuffer): at a cost of more complexity. See https://github.com/DLR-RM/stable-baselines3/issues/37#issuecomment-637501195 and https://github.com/DLR-RM/stable-baselines3/pull/28#issuecomment-637559274 + :param handle_timeout_termination: Handle timeout termination (due to timelimit) + separately and treat the task as infinite horizon task. 
+        https://github.com/DLR-RM/stable-baselines3/issues/284
     """
 
     def __init__(
@@ -174,6 +177,7 @@ def __init__(
         device: Union[th.device, str] = "cpu",
         n_envs: int = 1,
         optimize_memory_usage: bool = False,
+        handle_timeout_termination: bool = True,
     ):
         super(ReplayBuffer, self).__init__(buffer_size, observation_space, action_space, device, n_envs=n_envs)
@@ -197,6 +201,10 @@ def __init__(
         self.rewards = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)
         self.dones = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)
+        # Handle timeout termination properly if needed
+        # see https://github.com/DLR-RM/stable-baselines3/issues/284
+        self.handle_timeout_termination = handle_timeout_termination
+        self.timeouts = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)
 
         if psutil is not None:
             total_memory_usage = self.observations.nbytes + self.actions.nbytes + self.rewards.nbytes + self.dones.nbytes
@@ -213,7 +221,15 @@ def __init__(
                 f"replay buffer {total_memory_usage:.2f}GB > {mem_available:.2f}GB"
             )
 
-    def add(self, obs: np.ndarray, next_obs: np.ndarray, action: np.ndarray, reward: np.ndarray, done: np.ndarray) -> None:
+    def add(
+        self,
+        obs: np.ndarray,
+        next_obs: np.ndarray,
+        action: np.ndarray,
+        reward: np.ndarray,
+        done: np.ndarray,
+        infos: List[Dict[str, Any]],
+    ) -> None:
         # Copy to avoid modification by reference
         self.observations[self.pos] = np.array(obs).copy()
@@ -226,6 +242,9 @@ def add(self, obs: np.ndarray, next_obs: np.ndarray, action: np.ndarray, reward:
         self.rewards[self.pos] = np.array(reward).copy()
         self.dones[self.pos] = np.array(done).copy()
 
+        if self.handle_timeout_termination:
+            self.timeouts[self.pos] = np.array([info.get("TimeLimit.truncated", False) for info in infos])
+
         self.pos += 1
         if self.pos == self.buffer_size:
             self.full = True
@@ -264,7 +283,9 @@ def _get_samples(self, batch_inds: np.ndarray, env: Optional[VecNormalize] = Non
             self._normalize_obs(self.observations[batch_inds, 0, :], env),
             self.actions[batch_inds, 0, :],
             next_obs,
-            self.dones[batch_inds],
+            # Only use dones that are not due to timeouts
+            # deactivated by default (timeouts is initialized as an array of False)
+            self.dones[batch_inds] * (1 - self.timeouts[batch_inds]),
             self._normalize_reward(self.rewards[batch_inds], env),
         )
         return ReplayBufferSamples(*tuple(map(self.to_torch, data)))
@@ -308,7 +329,7 @@ def __init__(
         self.gae_lambda = gae_lambda
         self.gamma = gamma
         self.observations, self.actions, self.rewards, self.advantages = None, None, None, None
-        self.returns, self.dones, self.values, self.log_probs = None, None, None, None
+        self.returns, self.episode_starts, self.values, self.log_probs = None, None, None, None
         self.generator_ready = False
         self.reset()
@@ -318,7 +339,7 @@ def reset(self) -> None:
         self.actions = np.zeros((self.buffer_size, self.n_envs, self.action_dim), dtype=np.float32)
         self.rewards = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)
         self.returns = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)
-        self.dones = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)
+        self.episode_starts = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)
         self.values = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)
         self.log_probs = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)
         self.advantages = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)
@@ -327,20 +348,25 @@
 
     def compute_returns_and_advantage(self, last_values: th.Tensor, dones: np.ndarray) -> None:
         """
-        Post-processing step: compute the returns (sum of discounted rewards)
-        and GAE advantage.
-        Adapted from Stable-Baselines PPO2.
+        Post-processing step: compute the lambda-return (TD(lambda) estimate)
+        and GAE(lambda) advantage.
 
         Uses Generalized Advantage Estimation (https://arxiv.org/abs/1506.02438)
         to compute the advantage. To obtain vanilla advantage (A(s) = R - V(S))
         where R is the discounted reward with value bootstrap,
         set ``gae_lambda=1.0`` during initialization.
 
-        :param last_values:
-        :param dones:
+        The TD(lambda) estimator also has two special cases:
+        - TD(1) is the Monte-Carlo estimate (sum of discounted rewards)
+        - TD(0) is the one-step estimate with bootstrapping (r_t + gamma * v(s_{t+1}))
+
+        In short, with done_t marking an episode end at step t, the recursion
+        implemented below is (a sketch mirroring the code):
+
+        - delta_t = r_t + gamma * v(s_{t+1}) * (1 - done_t) - v(s_t)
+        - A_t = delta_t + gamma * gae_lambda * (1 - done_t) * A_{t+1}
+        - R_t = A_t + v(s_t)  (the TD(lambda) return)
+
+        For more information, see discussion in https://github.com/DLR-RM/stable-baselines3/pull/375.
+
+        :param last_values: state value estimation for the last step (one for each env)
+        :param dones: if the last step was a terminal step (one bool for each env).
         """
-        # convert to numpy
+        # Convert to numpy
        last_values = last_values.clone().cpu().numpy().flatten()
 
        last_gae_lam = 0
@@ -349,11 +375,13 @@ def compute_returns_and_advantage(self, last_values: th.Tensor, dones: np.ndarra
                 next_non_terminal = 1.0 - dones
                 next_values = last_values
             else:
-                next_non_terminal = 1.0 - self.dones[step + 1]
+                next_non_terminal = 1.0 - self.episode_starts[step + 1]
                 next_values = self.values[step + 1]
             delta = self.rewards[step] + self.gamma * next_values * next_non_terminal - self.values[step]
             last_gae_lam = delta + self.gamma * self.gae_lambda * next_non_terminal * last_gae_lam
             self.advantages[step] = last_gae_lam
+        # TD(lambda) estimator, see Github PR #375 or "Telescoping in TD(lambda)"
+        # in David Silver Lecture 4: https://www.youtube.com/watch?v=PnHCvfgC_ZA
         self.returns = self.advantages + self.values
 
     def add(
@@ -361,7 +389,7 @@ def add(
         obs: np.ndarray,
         action: np.ndarray,
         reward: np.ndarray,
-        done: np.ndarray,
+        episode_start: np.ndarray,
         value: th.Tensor,
         log_prob: th.Tensor,
     ) -> None:
@@ -369,7 +397,7 @@ def add(
         :param obs: Observation
         :param action: Action
         :param reward:
-        :param done: End of episode signal.
+        :param episode_start: Start of episode signal.
         :param value: estimated value of the current state
             following the current policy.
         :param log_prob: log probability of the action
@@ -387,7 +415,7 @@ def add(
         self.observations[self.pos] = np.array(obs).copy()
         self.actions[self.pos] = np.array(action).copy()
         self.rewards[self.pos] = np.array(reward).copy()
-        self.dones[self.pos] = np.array(done).copy()
+        self.episode_starts[self.pos] = np.array(episode_start).copy()
         self.values[self.pos] = value.clone().cpu().numpy().flatten()
         self.log_probs[self.pos] = log_prob.clone().cpu().numpy()
         self.pos += 1
@@ -446,6 +474,9 @@ class DictReplayBuffer(ReplayBuffer):
     :param n_envs: Number of parallel environments
     :param optimize_memory_usage: Enable a memory efficient variant
         Disabled for now (see https://github.com/DLR-RM/stable-baselines3/pull/243#discussion_r531535702)
+    :param handle_timeout_termination: Handle timeout termination (due to timelimit)
+        separately and treat the task as an infinite-horizon task.
+        https://github.com/DLR-RM/stable-baselines3/issues/284
     """
 
     def __init__(
         self,
         buffer_size: int,
@@ -456,6 +487,7 @@ def __init__(
         device: Union[th.device, str] = "cpu",
         n_envs: int = 1,
         optimize_memory_usage: bool = False,
+        handle_timeout_termination: bool = True,
     ):
         super(ReplayBuffer, self).__init__(buffer_size, observation_space, action_space, device, n_envs=n_envs)
@@ -483,15 +515,20 @@ def __init__(
         self.rewards = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)
         self.dones = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)
 
+        # Handle timeout termination properly if needed
+        # see https://github.com/DLR-RM/stable-baselines3/issues/284
+        self.handle_timeout_termination = handle_timeout_termination
+        self.timeouts = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)
+
         if psutil is not None:
             obs_nbytes = 0
-            for key, obs in self.observations.items():
+            for _, obs in self.observations.items():
                 obs_nbytes += obs.nbytes
 
             total_memory_usage = obs_nbytes + self.actions.nbytes + self.rewards.nbytes + self.dones.nbytes
             if self.next_observations is not None:
                 next_obs_nbytes = 0
-                for key, obs in self.observations.items():
+                for _, obs in self.next_observations.items():
                     next_obs_nbytes += obs.nbytes
                 total_memory_usage += next_obs_nbytes
 
@@ -511,9 +548,9 @@ def add(
         action: np.ndarray,
         reward: np.ndarray,
         done: np.ndarray,
+        infos: List[Dict[str, Any]],
     ) -> None:
         # Copy to avoid modification by reference
-
         for key in self.observations.keys():
             self.observations[key][self.pos] = np.array(obs[key]).copy()
@@ -524,6 +561,9 @@ def add(
         self.rewards[self.pos] = np.array(reward).copy()
         self.dones[self.pos] = np.array(done).copy()
 
+        if self.handle_timeout_termination:
+            self.timeouts[self.pos] = np.array([info.get("TimeLimit.truncated", False) for info in infos])
+
         self.pos += 1
         if self.pos == self.buffer_size:
             self.full = True
@@ -542,19 +582,21 @@ def sample(self, batch_size: int, env: Optional[VecNormalize] = None) -> DictRep
 
     def _get_samples(self, batch_inds: np.ndarray, env: Optional[VecNormalize] = None) -> DictReplayBufferSamples:
 
-        next_obs = {
-            key: self.to_torch(self._normalize_obs(obs[batch_inds, 0, :], env)) for key, obs in self.next_observations.items()
-        }
+        # Normalize if needed and remove extra dimension (we are using only one env for now)
+        obs_ = self._normalize_obs({key: obs[batch_inds, 0, :] for key, obs in self.observations.items()}, env)
+        next_obs_ = self._normalize_obs({key: obs[batch_inds, 0, :] for key, obs in self.next_observations.items()}, env)
 
-        normalized_obs = {
-            key: self.to_torch(self._normalize_obs(obs[batch_inds, 0, :], env)) for key, obs in self.observations.items()
-        }
+        # Convert to torch tensor
+        observations = {key: self.to_torch(obs) for key, obs in obs_.items()}
+        next_observations = {key: self.to_torch(obs) for key, obs in next_obs_.items()}
 
         return DictReplayBufferSamples(
-            observations=normalized_obs,
+            observations=observations,
             actions=self.to_torch(self.actions[batch_inds]),
-            next_observations=next_obs,
-            dones=self.to_torch(self.dones[batch_inds]),
+            next_observations=next_observations,
+            # Only use dones that are not due to timeouts
+            # deactivated by default (timeouts is initialized as an array of False)
+            dones=self.to_torch(self.dones[batch_inds] * (1 - self.timeouts[batch_inds])),
             rewards=self.to_torch(self._normalize_reward(self.rewards[batch_inds], env)),
         )
 
@@ -602,7 +644,7 @@ def __init__(
         self.gae_lambda = gae_lambda
         self.gamma = gamma
         self.observations, self.actions, self.rewards, self.advantages = None, None, None, None
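+        # Note: `episode_starts` (below) replaces the former `dones` array:
+        # entry t is 1.0 when step t begins a new episode, i.e. the previous
+        # step ended an episode (a one-step shift of the old `dones`)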
-        self.returns, self.dones, self.values, self.log_probs = None, None, None, None
+        self.returns, self.episode_starts, self.values, self.log_probs = None, None, None, None
         self.generator_ready = False
         self.reset()
@@ -614,7 +656,7 @@ def reset(self) -> None:
         self.actions = np.zeros((self.buffer_size, self.n_envs, self.action_dim), dtype=np.float32)
         self.rewards = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)
         self.returns = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)
-        self.dones = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)
+        self.episode_starts = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)
         self.values = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)
         self.log_probs = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)
         self.advantages = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)
@@ -626,7 +668,7 @@ def add(
         obs: Dict[str, np.ndarray],
         action: np.ndarray,
         reward: np.ndarray,
-        done: np.ndarray,
+        episode_start: np.ndarray,
         value: th.Tensor,
         log_prob: th.Tensor,
     ) -> None:
@@ -634,7 +676,7 @@ def add(
         :param obs: Observation
         :param action: Action
         :param reward:
-        :param done: End of episode signal.
+        :param episode_start: Start of episode signal.
         :param value: estimated value of the current state
             following the current policy.
         :param log_prob: log probability of the action
@@ -654,7 +696,7 @@ def add(
         self.actions[self.pos] = np.array(action).copy()
         self.rewards[self.pos] = np.array(reward).copy()
-        self.dones[self.pos] = np.array(done).copy()
+        self.episode_starts[self.pos] = np.array(episode_start).copy()
         self.values[self.pos] = value.clone().cpu().numpy().flatten()
         self.log_probs[self.pos] = log_prob.clone().cpu().numpy()
         self.pos += 1
diff --git a/stable_baselines3/common/env_checker.py b/stable_baselines3/common/env_checker.py
index 27326e45c..090bb796b 100644
--- a/stable_baselines3/common/env_checker.py
+++ b/stable_baselines3/common/env_checker.py
@@ -5,6 +5,7 @@
 import numpy as np
 from gym import spaces
 
+from stable_baselines3.common.preprocessing import is_image_space_channels_first
 from stable_baselines3.common.vec_env import DummyVecEnv, VecCheckNan
 
@@ -37,7 +38,12 @@ def _check_image_input(observation_space: spaces.Box, key: str = "") -> None:
             "you may encounter issue if the values are not in that range."
         )
 
-    if observation_space.shape[0] < 36 or observation_space.shape[1] < 36:
+    non_channel_idx = 0
+    # Check only if width/height of the image is big enough
+    if is_image_space_channels_first(observation_space):
+        non_channel_idx = -1
+
+    if observation_space.shape[non_channel_idx] < 36 or observation_space.shape[1] < 36:
         warnings.warn(
             "The minimal resolution for an image is 36x36 for the default `CnnPolicy`. "
             "You might need to use a custom feature extractor "
diff --git a/stable_baselines3/common/envs/bit_flipping_env.py b/stable_baselines3/common/envs/bit_flipping_env.py
index 62f07100f..f5c2fb4d3 100644
--- a/stable_baselines3/common/envs/bit_flipping_env.py
+++ b/stable_baselines3/common/envs/bit_flipping_env.py
@@ -20,15 +20,25 @@ class BitFlippingEnv(GoalEnv):
         by default, it uses the discrete one
     :param max_steps: Max number of steps, by default, equal to n_bits
     :param discrete_obs_space: Whether to use the discrete observation
-        version or not, by default, it uses the MultiBinary one
+        version or not, by default, it uses the ``MultiBinary`` one
+    :param image_obs_space: Use image as input instead of the ``MultiBinary`` one.
+    :param channel_first: Whether to use channel-first or channel-last images.
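+
+    Example (a sketch, values illustrative)::
+
+        env = BitFlippingEnv(n_bits=4, image_obs_space=True)
+        obs = env.reset()  # Dict with "observation", "achieved_goal", "desired_goal"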
""" spec = EnvSpec("BitFlippingEnv-v0") def __init__( - self, n_bits: int = 10, continuous: bool = False, max_steps: Optional[int] = None, discrete_obs_space: bool = False + self, + n_bits: int = 10, + continuous: bool = False, + max_steps: Optional[int] = None, + discrete_obs_space: bool = False, + image_obs_space: bool = False, + channel_first: bool = True, ): super(BitFlippingEnv, self).__init__() + # Shape of the observation when using image space + self.image_shape = (1, 36, 36) if channel_first else (36, 36, 1) # The achieved goal is determined by the current state # here, it is a special where they are equal if discrete_obs_space: @@ -36,9 +46,35 @@ def __init__( # representation of the observation self.observation_space = spaces.Dict( { - "observation": spaces.Discrete(2 ** n_bits - 1), - "achieved_goal": spaces.Discrete(2 ** n_bits - 1), - "desired_goal": spaces.Discrete(2 ** n_bits - 1), + "observation": spaces.Discrete(2 ** n_bits), + "achieved_goal": spaces.Discrete(2 ** n_bits), + "desired_goal": spaces.Discrete(2 ** n_bits), + } + ) + elif image_obs_space: + # When using image as input, + # one image contains the bits 0 -> 0, 1 -> 255 + # and the rest is filled with zeros + self.observation_space = spaces.Dict( + { + "observation": spaces.Box( + low=0, + high=255, + shape=self.image_shape, + dtype=np.uint8, + ), + "achieved_goal": spaces.Box( + low=0, + high=255, + shape=self.image_shape, + dtype=np.uint8, + ), + "desired_goal": spaces.Box( + low=0, + high=255, + shape=self.image_shape, + dtype=np.uint8, + ), } ) else: @@ -58,6 +94,7 @@ def __init__( self.action_space = spaces.Discrete(n_bits) self.continuous = continuous self.discrete_obs_space = discrete_obs_space + self.image_obs_space = image_obs_space self.state = None self.desired_goal = np.ones((n_bits,)) if max_steps is None: @@ -79,13 +116,38 @@ def convert_if_needed(self, state: np.ndarray) -> Union[int, np.ndarray]: # The internal state is the binary representation of the # observed one return int(sum([state[i] * 2 ** i for i in range(len(state))])) + + if self.image_obs_space: + size = np.prod(self.image_shape) + image = np.concatenate((state * 255, np.zeros(size - len(state), dtype=np.uint8))) + return image.reshape(self.image_shape).astype(np.uint8) + return state + + def convert_to_bit_vector(self, state: Union[int, np.ndarray], batch_size: int) -> np.ndarray: + """ + Convert to bit vector if needed. + + :param state: + :param batch_size: + :return: + """ + # Convert back to bit vector + if isinstance(state, int): + state = np.array(state).reshape(batch_size, -1) + # Convert to binary representation + state = (((state[:, :] & (1 << np.arange(len(self.state))))) > 0).astype(int) + elif self.image_obs_space: + state = state.reshape(batch_size, -1)[:, : len(self.state)] / 255 + else: + state = np.array(state).reshape(batch_size, -1) + return state def _get_obs(self) -> Dict[str, Union[int, np.ndarray]]: """ Helper to create the observation. - :return: + :return: The current observation. 
""" return OrderedDict( [ @@ -117,8 +179,19 @@ def step(self, action: Union[np.ndarray, int]) -> GymStepReturn: def compute_reward( self, achieved_goal: Union[int, np.ndarray], desired_goal: Union[int, np.ndarray], _info: Optional[Dict[str, Any]] ) -> np.float32: + # As we are using a vectorized version, we need to keep track of the `batch_size` + if isinstance(achieved_goal, int): + batch_size = 1 + elif self.image_obs_space: + batch_size = achieved_goal.shape[0] if len(achieved_goal.shape) > 3 else 1 + else: + batch_size = achieved_goal.shape[0] if len(achieved_goal.shape) > 1 else 1 + + desired_goal = self.convert_to_bit_vector(desired_goal, batch_size) + achieved_goal = self.convert_to_bit_vector(achieved_goal, batch_size) + # Deceptive reward: it is positive only when the goal is achieved - # vectorized version + # Here we are using a vectorized version distance = np.linalg.norm(achieved_goal - desired_goal, axis=-1) return -(distance > 0).astype(np.float32) diff --git a/stable_baselines3/common/evaluation.py b/stable_baselines3/common/evaluation.py index a35200066..6d1febdd9 100644 --- a/stable_baselines3/common/evaluation.py +++ b/stable_baselines3/common/evaluation.py @@ -5,7 +5,7 @@ import numpy as np from stable_baselines3.common import base_class -from stable_baselines3.common.vec_env import VecEnv +from stable_baselines3.common.vec_env import VecEnv, VecMonitor, is_vecenv_wrapped def evaluate_policy( @@ -57,7 +57,7 @@ def evaluate_policy( if isinstance(env, VecEnv): assert env.num_envs == 1, "You must pass only one environment when using this function" - is_monitor_wrapped = env.env_is_wrapped(Monitor)[0] + is_monitor_wrapped = is_vecenv_wrapped(env, VecMonitor) or env.env_is_wrapped(Monitor)[0] else: is_monitor_wrapped = is_wrapped(env, Monitor) diff --git a/stable_baselines3/common/monitor.py b/stable_baselines3/common/monitor.py index bb50fe40b..74e2b9c0a 100644 --- a/stable_baselines3/common/monitor.py +++ b/stable_baselines3/common/monitor.py @@ -1,11 +1,11 @@ -__all__ = ["Monitor", "get_monitor_files", "load_results"] +__all__ = ["Monitor", "ResultsWriter", "get_monitor_files", "load_results"] import csv import json import os import time from glob import glob -from typing import List, Optional, Tuple, Union +from typing import Dict, List, Optional, Tuple, Union import gym import numpy as np @@ -38,27 +38,20 @@ def __init__( ): super(Monitor, self).__init__(env=env) self.t_start = time.time() - if filename is None: - self.file_handler = None - self.logger = None + if filename is not None: + self.results_writer = ResultsWriter( + filename, + header={"t_start": self.t_start, "env_id": env.spec and env.spec.id}, + extra_keys=reset_keywords + info_keywords, + ) else: - if not filename.endswith(Monitor.EXT): - if os.path.isdir(filename): - filename = os.path.join(filename, Monitor.EXT) - else: - filename = filename + "." 
+ Monitor.EXT
-            self.file_handler = open(filename, "wt")
-            self.file_handler.write("#%s\n" % json.dumps({"t_start": self.t_start, "env_id": env.spec and env.spec.id}))
-            self.logger = csv.DictWriter(self.file_handler, fieldnames=("r", "l", "t") + reset_keywords + info_keywords)
-            self.logger.writeheader()
-            self.file_handler.flush()
-
+            self.results_writer = None
         self.reset_keywords = reset_keywords
         self.info_keywords = info_keywords
         self.allow_early_resets = allow_early_resets
         self.rewards = None
         self.needs_reset = True
-        self.episode_rewards = []
+        self.episode_returns = []
         self.episode_lengths = []
         self.episode_times = []
         self.total_steps = 0
@@ -81,7 +74,7 @@ def reset(self, **kwargs) -> GymObs:
         for key in self.reset_keywords:
             value = kwargs.get(key)
             if value is None:
-                raise ValueError("Expected you to pass kwarg {} into reset".format(key))
+                raise ValueError(f"Expected you to pass keyword argument {key} into reset")
             self.current_reset_info[key] = value
         return self.env.reset(**kwargs)
@@ -103,13 +96,12 @@ def step(self, action: Union[np.ndarray, int]) -> GymStepReturn:
             ep_info = {"r": round(ep_rew, 6), "l": ep_len, "t": round(time.time() - self.t_start, 6)}
             for key in self.info_keywords:
                 ep_info[key] = info[key]
-            self.episode_rewards.append(ep_rew)
+            self.episode_returns.append(ep_rew)
             self.episode_lengths.append(ep_len)
             self.episode_times.append(time.time() - self.t_start)
             ep_info.update(self.current_reset_info)
-            if self.logger:
-                self.logger.writerow(ep_info)
-                self.file_handler.flush()
+            if self.results_writer:
+                self.results_writer.write_row(ep_info)
             info["episode"] = ep_info
         self.total_steps += 1
         return observation, reward, done, info
@@ -119,8 +111,8 @@ def close(self) -> None:
         Closes the environment
         """
         super(Monitor, self).close()
-        if self.file_handler is not None:
-            self.file_handler.close()
+        if self.results_writer is not None:
+            self.results_writer.close()
 
     def get_total_steps(self) -> int:
         """
@@ -136,7 +128,7 @@ def get_episode_rewards(self) -> List[float]:
 
         :return:
         """
-        return self.episode_rewards
+        return self.episode_returns
 
     def get_episode_lengths(self) -> List[int]:
         """
@@ -163,6 +155,52 @@ class LoadMonitorResultsError(Exception):
     pass
 
 
+class ResultsWriter:
+    """
+    A result writer that saves the data from the `Monitor` class
+
+    :param filename: the location to save a log file, can be None for no log
+    :param header: the header dictionary object of the saved csv
+    :param extra_keys: the extra information to log, typically composed of
+        ``reset_keywords`` and ``info_keywords``
+    """
+
+    def __init__(
+        self,
+        filename: str = "",
+        header: Dict[str, Union[float, str]] = None,
+        extra_keys: Tuple[str, ...] = (),
+    ):
+        if header is None:
+            header = {}
+        if not filename.endswith(Monitor.EXT):
+            if os.path.isdir(filename):
+                filename = os.path.join(filename, Monitor.EXT)
+            else:
+                filename = filename + "." + Monitor.EXT
+        self.file_handler = open(filename, "wt")
+        self.file_handler.write("#%s\n" % json.dumps(header))
+        self.logger = csv.DictWriter(self.file_handler, fieldnames=("r", "l", "t") + extra_keys)
+        self.logger.writeheader()
+        self.file_handler.flush()
+
+    def write_row(self, epinfo: Dict[str, Union[float, int]]) -> None:
+        """
+        Write a row of monitor data to the csv log file
+
+        :param epinfo: the information on episodic return, length, and time
+        """
+        if self.logger:
+            self.logger.writerow(epinfo)
+            self.file_handler.flush()
+
+    def close(self) -> None:
+        """
+        Close the file handler
+        """
+        self.file_handler.close()
+
+
 def get_monitor_files(path: str) -> List[str]:
     """
     get all the monitor files in the given path
diff --git a/stable_baselines3/common/off_policy_algorithm.py b/stable_baselines3/common/off_policy_algorithm.py
index 98d15d679..46e6d56ff 100644
--- a/stable_baselines3/common/off_policy_algorithm.py
+++ b/stable_baselines3/common/off_policy_algorithm.py
@@ -18,6 +18,7 @@
 from stable_baselines3.common.type_aliases import GymEnv, MaybeCallback, RolloutReturn, Schedule, TrainFreq, TrainFrequencyUnit
 from stable_baselines3.common.utils import safe_mean, should_collect_more_steps
 from stable_baselines3.common.vec_env import VecEnv
+from stable_baselines3.her.her_replay_buffer import HerReplayBuffer


 class OffPolicyAlgorithm(BaseAlgorithm):
@@ -42,6 +43,9 @@ class OffPolicyAlgorithm(BaseAlgorithm):
         during the rollout.
     :param action_noise: the action noise type (None by default), this can help
         for hard exploration problem. Cf common.noise for the different action noise type.
+    :param replay_buffer_class: Replay buffer class to use (for instance ``HerReplayBuffer``).
+        If ``None``, it will be automatically selected.
+    :param replay_buffer_kwargs: Keyword arguments to pass to the replay buffer on creation.
     :param optimize_memory_usage: Enable a memory efficient variant of the replay buffer
         at a cost of more complexity.
See https://github.com/DLR-RM/stable-baselines3/issues/37#issuecomment-637501195 @@ -76,7 +80,7 @@ def __init__( env: Union[GymEnv, str], policy_base: Type[BasePolicy], learning_rate: Union[float, Schedule], - buffer_size: int = int(1e6), + buffer_size: int = 1000000, # 1e6 learning_starts: int = 100, batch_size: int = 256, tau: float = 0.005, @@ -84,6 +88,8 @@ def __init__( train_freq: Union[int, Tuple[int, str]] = (1, "step"), gradient_steps: int = 1, action_noise: Optional[ActionNoise] = None, + replay_buffer_class: Optional[ReplayBuffer] = None, + replay_buffer_kwargs: Optional[Dict[str, Any]] = None, optimize_memory_usage: bool = False, policy_kwargs: Dict[str, Any] = None, tensorboard_log: Optional[str] = None, @@ -126,6 +132,11 @@ def __init__( self.gradient_steps = gradient_steps self.action_noise = action_noise self.optimize_memory_usage = optimize_memory_usage + self.replay_buffer_class = replay_buffer_class + if replay_buffer_kwargs is None: + replay_buffer_kwargs = {} + self.replay_buffer_kwargs = replay_buffer_kwargs + self._episode_storage = None # Remove terminations (dones) that are due to time limit # see https://github.com/hill-a/stable-baselines/issues/863 @@ -169,15 +180,45 @@ def _setup_model(self) -> None: self.set_random_seed(self.seed) # Use DictReplayBuffer if needed - buffer_cls = DictReplayBuffer if isinstance(self.observation_space, gym.spaces.Dict) else ReplayBuffer + if self.replay_buffer_class is None: + if isinstance(self.observation_space, gym.spaces.Dict): + self.replay_buffer_class = DictReplayBuffer + else: + self.replay_buffer_class = ReplayBuffer + + elif self.replay_buffer_class == HerReplayBuffer: + assert self.env is not None, "You must pass an environment when using `HerReplayBuffer`" + + # If using offline sampling, we need a classic replay buffer too + if self.replay_buffer_kwargs.get("online_sampling", True): + replay_buffer = None + else: + replay_buffer = DictReplayBuffer( + self.buffer_size, + self.observation_space, + self.action_space, + self.device, + optimize_memory_usage=self.optimize_memory_usage, + ) + + self.replay_buffer = HerReplayBuffer( + self.env, + self.buffer_size, + self.device, + replay_buffer=replay_buffer, + **self.replay_buffer_kwargs, + ) + + if self.replay_buffer is None: + self.replay_buffer = self.replay_buffer_class( + self.buffer_size, + self.observation_space, + self.action_space, + self.device, + optimize_memory_usage=self.optimize_memory_usage, + **self.replay_buffer_kwargs, + ) - self.replay_buffer = buffer_cls( - self.buffer_size, - self.observation_space, - self.action_space, - self.device, - optimize_memory_usage=self.optimize_memory_usage, - ) self.policy = self.policy_class( # pytype:disable=not-instantiable self.observation_space, self.action_space, @@ -199,15 +240,35 @@ def save_replay_buffer(self, path: Union[str, pathlib.Path, io.BufferedIOBase]) assert self.replay_buffer is not None, "The replay buffer is not defined" save_to_pkl(path, self.replay_buffer, self.verbose) - def load_replay_buffer(self, path: Union[str, pathlib.Path, io.BufferedIOBase]) -> None: + def load_replay_buffer( + self, + path: Union[str, pathlib.Path, io.BufferedIOBase], + truncate_last_traj: bool = True, + ) -> None: """ Load a replay buffer from a pickle file. :param path: Path to the pickled replay buffer. + :param truncate_last_traj: When using ``HerReplayBuffer`` with online sampling: + If set to ``True``, we assume that the last trajectory in the replay buffer was finished + (and truncate it). 
+ If set to ``False``, we assume that we continue the same trajectory (same episode). """ self.replay_buffer = load_from_pkl(path, self.verbose) assert isinstance(self.replay_buffer, ReplayBuffer), "The replay buffer must inherit from ReplayBuffer class" + # Backward compatibility with SB3 < 2.1.0 replay buffer + # Keep old behavior: do not handle timeout termination separately + if not hasattr(self.replay_buffer, "handle_timeout_termination"): # pragma: no cover + self.replay_buffer.handle_timeout_termination = False + self.replay_buffer.timeouts = np.zeros_like(self.replay_buffer.dones) + + if isinstance(self.replay_buffer, HerReplayBuffer): + assert self.env is not None, "You must pass an environment at load time when using `HerReplayBuffer`" + self.replay_buffer.set_env(self.get_env()) + if truncate_last_traj: + self.replay_buffer.truncate_last_trajectory() + def _setup_learn( self, total_timesteps: int, @@ -225,11 +286,19 @@ def _setup_learn( # Prevent continuity issue by truncating trajectory # when using memory efficient replay buffer # see https://github.com/DLR-RM/stable-baselines3/issues/46 + + # Special case when using HerReplayBuffer, + # the classic replay buffer is inside it when using offline sampling + if isinstance(self.replay_buffer, HerReplayBuffer): + replay_buffer = self.replay_buffer.replay_buffer + else: + replay_buffer = self.replay_buffer + truncate_last_traj = ( self.optimize_memory_usage and reset_num_timesteps - and self.replay_buffer is not None - and (self.replay_buffer.full or self.replay_buffer.pos > 0) + and replay_buffer is not None + and (replay_buffer.full or replay_buffer.pos > 0) ) if truncate_last_traj: @@ -240,8 +309,8 @@ def _setup_learn( "to avoid that issue." ) # Go to the previous index - pos = (self.replay_buffer.pos - 1) % self.replay_buffer.buffer_size - self.replay_buffer.dones[pos] = True + pos = (replay_buffer.pos - 1) % replay_buffer.buffer_size + replay_buffer.dones[pos] = True return super()._setup_learn( total_timesteps, @@ -405,7 +474,7 @@ def _store_transition( :param reward: reward for the current transition :param done: Termination signal :param infos: List of additional information about the transition. - It contains the terminal observations. + It may contain the terminal observations and information about timeout. 
""" # Store only the unnormalized version if self._vec_normalize_env is not None: @@ -425,7 +494,14 @@ def _store_transition( else: next_obs = new_obs_ - replay_buffer.add(self._last_original_obs, next_obs, buffer_action, reward_, done) + replay_buffer.add( + self._last_original_obs, + next_obs, + buffer_action, + reward_, + done, + infos, + ) self._last_obs = new_obs # Save the unnormalized observation diff --git a/stable_baselines3/common/on_policy_algorithm.py b/stable_baselines3/common/on_policy_algorithm.py index ea91e6e0b..bbb6473e9 100644 --- a/stable_baselines3/common/on_policy_algorithm.py +++ b/stable_baselines3/common/on_policy_algorithm.py @@ -186,9 +186,9 @@ def collect_rollouts( if isinstance(self.action_space, gym.spaces.Discrete): # Reshape in case of discrete action actions = actions.reshape(-1, 1) - rollout_buffer.add(self._last_obs, actions, rewards, self._last_dones, values, log_probs) + rollout_buffer.add(self._last_obs, actions, rewards, self._last_episode_starts, values, log_probs) self._last_obs = new_obs - self._last_dones = dones + self._last_episode_starts = dones with th.no_grad(): # Compute value for the last timestep diff --git a/stable_baselines3/common/policies.py b/stable_baselines3/common/policies.py index c8dd196c4..8b6f3e649 100644 --- a/stable_baselines3/common/policies.py +++ b/stable_baselines3/common/policies.py @@ -31,7 +31,6 @@ ) from stable_baselines3.common.type_aliases import Schedule from stable_baselines3.common.utils import get_device, is_vectorized_observation, obs_as_tensor -from stable_baselines3.common.vec_env.obs_dict_wrapper import ObsDictWrapper class BaseModel(nn.Module, ABC): @@ -114,7 +113,7 @@ def _update_features_extractor( return net_kwargs def make_features_extractor(self) -> BaseFeaturesExtractor: - """ Helper method to create a features extractor.""" + """Helper method to create a features extractor.""" return self.features_extractor_class(self.observation_space, **self.features_extractor_kwargs) def extract_features(self, obs: th.Tensor) -> th.Tensor: @@ -213,7 +212,7 @@ def __init__(self, *args, squash_output: bool = False, **kwargs): @staticmethod def _dummy_schedule(progress_remaining: float) -> float: - """ (float) Useful for pickling policy.""" + """(float) Useful for pickling policy.""" del progress_remaining return 0.0 @@ -268,12 +267,9 @@ def predict( # state = self.initial_state # if mask is None: # mask = [False for _ in range(self.n_envs)] - # Need to check the observation if its a ObsDictWrapper - # Special Case for GoalEnv (using HER normally) - if isinstance(observation, dict) and set(observation.keys()) == set(["observation", "desired_goal", "achieved_goal"]): - observation = ObsDictWrapper.convert_dict(observation) - elif isinstance(observation, dict): + vectorized_env = False + if isinstance(observation, dict): # need to copy the dict as the dict in VecFrameStack will become a torch tensor observation = copy.deepcopy(observation) for key, obs in observation.items(): @@ -282,6 +278,7 @@ def predict( obs_ = maybe_transpose(obs, obs_space) else: obs_ = np.array(obs) + vectorized_env = vectorized_env or is_vectorized_observation(obs_, obs_space) # Add batch dimension if needed observation[key] = obs_.reshape((-1,) + self.observation_space[key].shape) @@ -289,12 +286,13 @@ def predict( # Handle the different cases for images # as PyTorch use channel first format observation = maybe_transpose(observation, self.observation_space) + else: observation = np.array(observation) - vectorized_env = 
is_vectorized_observation(observation, self.observation_space) - if not isinstance(observation, dict): + # Dict obs need to be handled separately + vectorized_env = is_vectorized_observation(observation, self.observation_space) # Add batch dimension if needed observation = observation.reshape((-1,) + self.observation_space.shape) @@ -463,7 +461,7 @@ def _get_constructor_parameters(self) -> Dict[str, Any]: log_std_init=self.log_std_init, squash_output=default_none_kwargs["squash_output"], full_std=default_none_kwargs["full_std"], - sde_net_arch=default_none_kwargs["sde_net_arch"], + sde_net_arch=self.sde_net_arch, use_expln=default_none_kwargs["use_expln"], lr_schedule=self._dummy_schedule, # dummy lr schedule, not needed for loading policy alone ortho_init=self.ortho_init, diff --git a/stable_baselines3/common/results_plotter.py b/stable_baselines3/common/results_plotter.py index 7e5b3cd82..92f67ac34 100644 --- a/stable_baselines3/common/results_plotter.py +++ b/stable_baselines3/common/results_plotter.py @@ -84,7 +84,7 @@ def plot_curves( plt.figure(title, figsize=figsize) max_x = max(xy[0][-1] for xy in xy_list) min_x = 0 - for (i, (x, y)) in enumerate(xy_list): + for (_, (x, y)) in enumerate(xy_list): plt.scatter(x, y, s=2) # Do not plot the smoothed curve at all if the timeseries is shorter than window size. if x.shape[0] >= EPISODES_WINDOW: diff --git a/stable_baselines3/common/torch_layers.py b/stable_baselines3/common/torch_layers.py index 8d97e487a..0a308a0e3 100644 --- a/stable_baselines3/common/torch_layers.py +++ b/stable_baselines3/common/torch_layers.py @@ -175,7 +175,7 @@ def __init__( last_layer_dim_shared = feature_dim # Iterate through the shared layers and build the shared parts of the network - for idx, layer in enumerate(net_arch): + for layer in net_arch: if isinstance(layer, int): # Check that this is a shared layer # TODO: give layer a meaningful name shared_net.append(nn.Linear(last_layer_dim_shared, layer)) # add linear of size layer @@ -196,7 +196,7 @@ def __init__( last_layer_dim_vf = last_layer_dim_shared # Build the non-shared part of the network - for idx, (pi_layer_size, vf_layer_size) in enumerate(zip_longest(policy_only_layers, value_only_layers)): + for pi_layer_size, vf_layer_size in zip_longest(policy_only_layers, value_only_layers): if pi_layer_size is not None: assert isinstance(pi_layer_size, int), "Error: net_arch[-1]['pi'] must only contain integers." 
policy_net.append(nn.Linear(last_layer_dim_pi, pi_layer_size)) diff --git a/stable_baselines3/common/vec_env/__init__.py b/stable_baselines3/common/vec_env/__init__.py index 8e5c97768..37ebc364d 100644 --- a/stable_baselines3/common/vec_env/__init__.py +++ b/stable_baselines3/common/vec_env/__init__.py @@ -8,7 +8,9 @@ from stable_baselines3.common.vec_env.stacked_observations import StackedDictObservations, StackedObservations from stable_baselines3.common.vec_env.subproc_vec_env import SubprocVecEnv from stable_baselines3.common.vec_env.vec_check_nan import VecCheckNan +from stable_baselines3.common.vec_env.vec_extract_dict_obs import VecExtractDictObs from stable_baselines3.common.vec_env.vec_frame_stack import VecFrameStack +from stable_baselines3.common.vec_env.vec_monitor import VecMonitor from stable_baselines3.common.vec_env.vec_normalize import VecNormalize from stable_baselines3.common.vec_env.vec_transpose import VecTransposeImage from stable_baselines3.common.vec_env.vec_video_recorder import VecVideoRecorder diff --git a/stable_baselines3/common/vec_env/obs_dict_wrapper.py b/stable_baselines3/common/vec_env/obs_dict_wrapper.py deleted file mode 100644 index d07ad2402..000000000 --- a/stable_baselines3/common/vec_env/obs_dict_wrapper.py +++ /dev/null @@ -1,68 +0,0 @@ -from typing import Dict - -import numpy as np -from gym import spaces - -from stable_baselines3.common.vec_env import VecEnv, VecEnvWrapper - - -class ObsDictWrapper(VecEnvWrapper): - """ - Wrapper for a VecEnv which overrides the observation space for Hindsight Experience Replay to support dict observations. - - :param env: The vectorized environment to wrap. - """ - - def __init__(self, venv: VecEnv): - super(ObsDictWrapper, self).__init__(venv, venv.observation_space, venv.action_space) - - self.venv = venv - - self.spaces = list(venv.observation_space.spaces.values()) - - # get dimensions of observation and goal - if isinstance(self.spaces[0], spaces.Discrete): - self.obs_dim = 1 - self.goal_dim = 1 - else: - self.obs_dim = venv.observation_space.spaces["observation"].shape[0] - self.goal_dim = venv.observation_space.spaces["achieved_goal"].shape[0] - - # new observation space with concatenated observation and (desired) goal - # for the different types of spaces - if isinstance(self.spaces[0], spaces.Box): - low_values = np.concatenate( - [venv.observation_space.spaces["observation"].low, venv.observation_space.spaces["desired_goal"].low] - ) - high_values = np.concatenate( - [venv.observation_space.spaces["observation"].high, venv.observation_space.spaces["desired_goal"].high] - ) - self.observation_space = spaces.Box(low_values, high_values, dtype=np.float32) - elif isinstance(self.spaces[0], spaces.MultiBinary): - total_dim = self.obs_dim + self.goal_dim - self.observation_space = spaces.MultiBinary(total_dim) - elif isinstance(self.spaces[0], spaces.Discrete): - dimensions = [venv.observation_space.spaces["observation"].n, venv.observation_space.spaces["desired_goal"].n] - self.observation_space = spaces.MultiDiscrete(dimensions) - else: - raise NotImplementedError(f"{type(self.spaces[0])} space is not supported") - - def reset(self): - return self.venv.reset() - - def step_wait(self): - return self.venv.step_wait() - - @staticmethod - def convert_dict( - observation_dict: Dict[str, np.ndarray], observation_key: str = "observation", goal_key: str = "desired_goal" - ) -> np.ndarray: - """ - Concatenate observation and (desired) goal of observation dict. 
-
-        :param observation_dict: Dictionary with observation.
-        :param observation_key: Key of observation in dictionary.
-        :param goal_key: Key of (desired) goal in dictionary.
-        :return: Concatenated observation.
-        """
-        return np.concatenate([observation_dict[observation_key], observation_dict[goal_key]], axis=-1)
diff --git a/stable_baselines3/common/vec_env/vec_extract_dict_obs.py b/stable_baselines3/common/vec_env/vec_extract_dict_obs.py
new file mode 100644
index 000000000..8582b7a30
--- /dev/null
+++ b/stable_baselines3/common/vec_env/vec_extract_dict_obs.py
@@ -0,0 +1,24 @@
+import numpy as np
+
+from stable_baselines3.common.vec_env.base_vec_env import VecEnv, VecEnvStepReturn, VecEnvWrapper
+
+
+class VecExtractDictObs(VecEnvWrapper):
+    """
+    A vectorized wrapper for extracting dictionary observations.
+
+    :param venv: The vectorized environment
+    :param key: The key of the dictionary observation
+    """
+
+    def __init__(self, venv: VecEnv, key: str):
+        self.key = key
+        super().__init__(venv=venv, observation_space=venv.observation_space.spaces[self.key])
+
+    def reset(self) -> np.ndarray:
+        obs = self.venv.reset()
+        return obs[self.key]
+
+    def step_wait(self) -> VecEnvStepReturn:
+        obs, reward, done, info = self.venv.step_wait()
+        return obs[self.key], reward, done, info
diff --git a/stable_baselines3/common/vec_env/vec_monitor.py b/stable_baselines3/common/vec_env/vec_monitor.py
new file mode 100644
index 000000000..61e0748ff
--- /dev/null
+++ b/stable_baselines3/common/vec_env/vec_monitor.py
@@ -0,0 +1,98 @@
+import time
+import warnings
+from typing import Optional, Tuple
+
+import numpy as np
+
+from stable_baselines3.common.vec_env.base_vec_env import VecEnv, VecEnvObs, VecEnvStepReturn, VecEnvWrapper
+
+
+class VecMonitor(VecEnvWrapper):
+    """
+    A vectorized monitor wrapper for *vectorized* Gym environments,
+    used to record the episode reward, length, time and other data.
+
+    Some environments like `openai/procgen <https://github.com/openai/procgen>`_
+    or `gym3 <https://github.com/openai/gym3>`_ directly initialize the
+    vectorized environments, without giving us a chance to use the ``Monitor``
+    wrapper. So this class simply does the job of the ``Monitor`` wrapper on
+    a vectorized level.
+
+    :param venv: The vectorized environment
+    :param filename: the location to save a log file, can be None for no log
+    :param info_keywords: extra information to log, from the information return of env.step()
+    """
+
+    def __init__(
+        self,
+        venv: VecEnv,
+        filename: Optional[str] = None,
+        info_keywords: Tuple[str, ...] = (),
+    ):
+        # Avoid circular import
+        from stable_baselines3.common.monitor import Monitor, ResultsWriter
+
+        # This check is not valid for special `VecEnv`
+        # like the ones created by Procgen, that do not completely follow
+        # the `VecEnv` interface
+        try:
+            is_wrapped_with_monitor = venv.env_is_wrapped(Monitor)[0]
+        except AttributeError:
+            is_wrapped_with_monitor = False
+
+        if is_wrapped_with_monitor:
+            warnings.warn(
+                "The environment is already wrapped with a `Monitor` wrapper "
+                "but you are wrapping it with a `VecMonitor` wrapper, the `Monitor` statistics will be "
+                "overwritten by the `VecMonitor` ones.",
+                UserWarning,
+            )
+
+        VecEnvWrapper.__init__(self, venv)
+        self.episode_returns = None
+        self.episode_lengths = None
+        self.episode_count = 0
+        self.t_start = time.time()
+
+        env_id = None
+        if hasattr(venv, "spec") and venv.spec is not None:
+            env_id = venv.spec.id
+
+        if filename:
+            self.results_writer = ResultsWriter(
+                filename, header={"t_start": self.t_start, "env_id": env_id}, extra_keys=info_keywords
+            )
+        else:
+            self.results_writer = None
+        self.info_keywords = info_keywords
+
+    def reset(self) -> VecEnvObs:
+        obs = self.venv.reset()
+        self.episode_returns = np.zeros(self.num_envs, dtype=np.float32)
+        self.episode_lengths = np.zeros(self.num_envs, dtype=np.int32)
+        return obs
+
+    def step_wait(self) -> VecEnvStepReturn:
+        obs, rewards, dones, infos = self.venv.step_wait()
+        self.episode_returns += rewards
+        self.episode_lengths += 1
+        new_infos = list(infos[:])
+        for i in range(len(dones)):
+            if dones[i]:
+                info = infos[i].copy()
+                episode_return = self.episode_returns[i]
+                episode_length = self.episode_lengths[i]
+                episode_info = {"r": episode_return, "l": episode_length, "t": round(time.time() - self.t_start, 6)}
+                info["episode"] = episode_info
+                self.episode_count += 1
+                self.episode_returns[i] = 0
+                self.episode_lengths[i] = 0
+                if self.results_writer:
+                    self.results_writer.write_row(episode_info)
+                new_infos[i] = info
+        return obs, rewards, dones, new_infos
+
+    def close(self) -> None:
+        if self.results_writer:
+            self.results_writer.close()
+        return self.venv.close()
diff --git a/stable_baselines3/common/vec_env/vec_normalize.py b/stable_baselines3/common/vec_env/vec_normalize.py
index 55ed2c54e..f1feeeef3 100644
--- a/stable_baselines3/common/vec_env/vec_normalize.py
+++ b/stable_baselines3/common/vec_env/vec_normalize.py
@@ -131,7 +131,8 @@ def step_wait(self) -> VecEnvStepReturn:
         for idx, done in enumerate(dones):
             if not done:
                 continue
-            infos[idx]["terminal_observation"] = self.normalize_obs(infos[idx]["terminal_observation"])
+            if "terminal_observation" in infos[idx]:
+                infos[idx]["terminal_observation"] = self.normalize_obs(infos[idx]["terminal_observation"])
         self.ret[dones] = 0
         return obs, rewards, dones, infos
diff --git a/stable_baselines3/common/vec_env/vec_transpose.py b/stable_baselines3/common/vec_env/vec_transpose.py
index 501c4e422..d4b2e4ef4 100644
--- a/stable_baselines3/common/vec_env/vec_transpose.py
+++ b/stable_baselines3/common/vec_env/vec_transpose.py
@@ -81,7 +81,8 @@ def step_wait(self) -> VecEnvStepReturn:
         for idx, done in enumerate(dones):
             if not done:
                 continue
-            infos[idx]["terminal_observation"] = self.transpose_observations(infos[idx]["terminal_observation"])
+            if "terminal_observation" in infos[idx]:
+                infos[idx]["terminal_observation"] = self.transpose_observations(infos[idx]["terminal_observation"])

         return self.transpose_observations(observations), rewards, dones, infos
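The two wrappers above are meant to be combined: ``VecExtractDictObs`` pulls a single array out of a dict-observation ``VecEnv``, and ``VecMonitor`` then records episode statistics at the vectorized level. A minimal usage sketch (illustrative only, not part of the patch), assuming the third-party ``procgen`` package and its ``"rgb"`` observation key:

    from procgen import ProcgenEnv

    from stable_baselines3 import PPO
    from stable_baselines3.common.vec_env import VecExtractDictObs, VecMonitor

    # Procgen builds the vectorized env itself, so a per-env `Monitor` cannot be inserted
    venv = ProcgenEnv(num_envs=2, env_name="starpilot")
    # Keep only the image array from the dict observation
    venv = VecExtractDictObs(venv, "rgb")
    # Record episode return/length at the VecEnv level instead
    venv = VecMonitor(venv=venv)

    model = PPO("CnnPolicy", venv, verbose=1)
    model.learn(10_000)

diff --git a/stable_baselines3/ddpg/ddpg.py 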
b/stable_baselines3/ddpg/ddpg.py index e7a1f75dc..a7de09e39 100644 --- a/stable_baselines3/ddpg/ddpg.py +++ b/stable_baselines3/ddpg/ddpg.py @@ -2,6 +2,7 @@ import torch as th +from stable_baselines3.common.buffers import ReplayBuffer from stable_baselines3.common.noise import ActionNoise from stable_baselines3.common.off_policy_algorithm import OffPolicyAlgorithm from stable_baselines3.common.type_aliases import GymEnv, MaybeCallback, Schedule @@ -36,6 +37,9 @@ class DDPG(TD3): during the rollout. :param action_noise: the action noise type (None by default), this can help for hard exploration problem. Cf common.noise for the different action noise type. + :param replay_buffer_class: Replay buffer class to use (for instance ``HerReplayBuffer``). + If ``None``, it will be automatically selected. + :param replay_buffer_kwargs: Keyword arguments to pass to the replay buffer on creation. :param optimize_memory_usage: Enable a memory efficient variant of the replay buffer at a cost of more complexity. See https://github.com/DLR-RM/stable-baselines3/issues/37#issuecomment-637501195 @@ -54,7 +58,7 @@ def __init__( policy: Union[str, Type[TD3Policy]], env: Union[GymEnv, str], learning_rate: Union[float, Schedule] = 1e-3, - buffer_size: int = int(1e6), + buffer_size: int = 1000000, # 1e6 learning_starts: int = 100, batch_size: int = 100, tau: float = 0.005, @@ -62,6 +66,8 @@ def __init__( train_freq: Union[int, Tuple[int, str]] = (1, "episode"), gradient_steps: int = -1, action_noise: Optional[ActionNoise] = None, + replay_buffer_class: Optional[ReplayBuffer] = None, + replay_buffer_kwargs: Optional[Dict[str, Any]] = None, optimize_memory_usage: bool = False, tensorboard_log: Optional[str] = None, create_eval_env: bool = False, @@ -84,6 +90,8 @@ def __init__( train_freq=train_freq, gradient_steps=gradient_steps, action_noise=action_noise, + replay_buffer_class=replay_buffer_class, + replay_buffer_kwargs=replay_buffer_kwargs, policy_kwargs=policy_kwargs, tensorboard_log=tensorboard_log, verbose=verbose, diff --git a/stable_baselines3/dqn/dqn.py b/stable_baselines3/dqn/dqn.py index f7ea13307..615c5f061 100644 --- a/stable_baselines3/dqn/dqn.py +++ b/stable_baselines3/dqn/dqn.py @@ -6,6 +6,7 @@ from torch.nn import functional as F from stable_baselines3.common import logger +from stable_baselines3.common.buffers import ReplayBuffer from stable_baselines3.common.off_policy_algorithm import OffPolicyAlgorithm from stable_baselines3.common.preprocessing import maybe_transpose from stable_baselines3.common.type_aliases import GymEnv, MaybeCallback, Schedule @@ -35,6 +36,9 @@ class DQN(OffPolicyAlgorithm): :param gradient_steps: How many gradient steps to do after each rollout (see ``train_freq``) Set to ``-1`` means to do as many gradient steps as steps done in the environment during the rollout. + :param replay_buffer_class: Replay buffer class to use (for instance ``HerReplayBuffer``). + If ``None``, it will be automatically selected. + :param replay_buffer_kwargs: Keyword arguments to pass to the replay buffer on creation. :param optimize_memory_usage: Enable a memory efficient variant of the replay buffer at a cost of more complexity. 
See https://github.com/DLR-RM/stable-baselines3/issues/37#issuecomment-637501195 @@ -60,13 +64,15 @@ def __init__( policy: Union[str, Type[DQNPolicy]], env: Union[GymEnv, str], learning_rate: Union[float, Schedule] = 1e-4, - buffer_size: int = 1000000, + buffer_size: int = 1000000, # 1e6 learning_starts: int = 50000, batch_size: Optional[int] = 32, tau: float = 1.0, gamma: float = 0.99, train_freq: Union[int, Tuple[int, str]] = 4, gradient_steps: int = 1, + replay_buffer_class: Optional[ReplayBuffer] = None, + replay_buffer_kwargs: Optional[Dict[str, Any]] = None, optimize_memory_usage: bool = False, target_update_interval: int = 10000, exploration_fraction: float = 0.1, @@ -95,6 +101,8 @@ def __init__( train_freq, gradient_steps, action_noise=None, # No action noise + replay_buffer_class=replay_buffer_class, + replay_buffer_kwargs=replay_buffer_kwargs, policy_kwargs=policy_kwargs, tensorboard_log=tensorboard_log, verbose=verbose, @@ -149,7 +157,7 @@ def train(self, gradient_steps: int, batch_size: int = 100) -> None: self._update_learning_rate(self.policy.optimizer) losses = [] - for gradient_step in range(gradient_steps): + for _ in range(gradient_steps): # Sample replay buffer replay_data = self.replay_buffer.sample(batch_size, env=self._vec_normalize_env) diff --git a/stable_baselines3/her/__init__.py b/stable_baselines3/her/__init__.py index 24f347305..1f58921b4 100644 --- a/stable_baselines3/her/__init__.py +++ b/stable_baselines3/her/__init__.py @@ -1,4 +1,2 @@ -from stable_baselines3.common.vec_env.obs_dict_wrapper import ObsDictWrapper from stable_baselines3.her.goal_selection_strategy import GoalSelectionStrategy -from stable_baselines3.her.her import HER from stable_baselines3.her.her_replay_buffer import HerReplayBuffer diff --git a/stable_baselines3/her/her.py b/stable_baselines3/her/her.py deleted file mode 100644 index 43984ded3..000000000 --- a/stable_baselines3/her/her.py +++ /dev/null @@ -1,582 +0,0 @@ -import io -import pathlib -import warnings -from typing import Any, Dict, Iterable, List, Optional, Tuple, Type, Union - -import numpy as np -import torch as th - -from stable_baselines3.common.base_class import BaseAlgorithm -from stable_baselines3.common.callbacks import BaseCallback -from stable_baselines3.common.noise import ActionNoise -from stable_baselines3.common.off_policy_algorithm import OffPolicyAlgorithm -from stable_baselines3.common.policies import BasePolicy -from stable_baselines3.common.save_util import load_from_zip_file, recursive_setattr -from stable_baselines3.common.type_aliases import GymEnv, MaybeCallback, RolloutReturn, TrainFreq -from stable_baselines3.common.utils import check_for_correct_spaces, should_collect_more_steps -from stable_baselines3.common.vec_env import VecEnv -from stable_baselines3.common.vec_env.obs_dict_wrapper import ObsDictWrapper -from stable_baselines3.her.goal_selection_strategy import KEY_TO_GOAL_STRATEGY, GoalSelectionStrategy -from stable_baselines3.her.her_replay_buffer import HerReplayBuffer - - -def get_time_limit(env: VecEnv, current_max_episode_length: Optional[int]) -> int: - """ - Get time limit from environment. - - :param env: Environment from which we want to get the time limit. - :param current_max_episode_length: Current value for max_episode_length. 
- :return: max episode length - """ - # try to get the attribute from environment - if current_max_episode_length is None: - try: - current_max_episode_length = env.get_attr("spec")[0].max_episode_steps - # Raise the error because the attribute is present but is None - if current_max_episode_length is None: - raise AttributeError - # if not available check if a valid value was passed as an argument - except AttributeError: - raise ValueError( - "The max episode length could not be inferred.\n" - "You must specify a `max_episode_steps` when registering the environment,\n" - "use a `gym.wrappers.TimeLimit` wrapper " - "or pass `max_episode_length` to the model constructor" - ) - return current_max_episode_length - - -# TODO: rewrite HER class as soon as dict obs are supported -class HER(BaseAlgorithm): - """ - Hindsight Experience Replay (HER) - Paper: https://arxiv.org/abs/1707.01495 - - .. warning:: - - For performance reasons, the maximum number of steps per episodes must be specified. - In most cases, it will be inferred if you specify ``max_episode_steps`` when registering the environment - or if you use a ``gym.wrappers.TimeLimit`` (and ``env.spec`` is not None). - Otherwise, you can directly pass ``max_episode_length`` to the model constructor - - - For additional offline algorithm specific arguments please have a look at the corresponding documentation. - - :param policy: The policy model to use. - :param env: The environment to learn from (if registered in Gym, can be str) - :param model_class: Off policy model which will be used with hindsight experience replay. (SAC, TD3, DDPG, DQN) - :param n_sampled_goal: Number of sampled goals for replay. (offline sampling) - :param goal_selection_strategy: Strategy for sampling goals for replay. - One of ['episode', 'final', 'future', 'random'] - :param online_sampling: Sample HER transitions online. - :param learning_rate: learning rate for the optimizer, - it can be a function of the current progress remaining (from 1 to 0) - :param max_episode_length: The maximum length of an episode. If not specified, - it will be automatically inferred if the environment uses a ``gym.wrappers.TimeLimit`` wrapper. 
- """ - - def __init__( - self, - policy: Union[str, Type[BasePolicy]], - env: Union[GymEnv, str], - model_class: Type[OffPolicyAlgorithm], - n_sampled_goal: int = 4, - goal_selection_strategy: Union[GoalSelectionStrategy, str] = "future", - online_sampling: bool = False, - max_episode_length: Optional[int] = None, - *args, - **kwargs, - ): - - # we will use the policy and learning rate from the model - super(HER, self).__init__(policy=BasePolicy, env=env, policy_base=BasePolicy, learning_rate=0.0) - del self.policy, self.learning_rate - - if self.get_vec_normalize_env() is not None: - assert online_sampling, "You must pass `online_sampling=True` if you want to use `VecNormalize` with `HER`" - - _init_setup_model = kwargs.get("_init_setup_model", True) - if "_init_setup_model" in kwargs: - del kwargs["_init_setup_model"] - # model initialization - self.model_class = model_class - self.model = model_class( - policy=policy, - env=self.env, - _init_setup_model=False, # pytype: disable=wrong-keyword-args - *args, - **kwargs, # pytype: disable=wrong-keyword-args - ) - - # Make HER use self.model.action_noise - del self.action_noise - self.verbose = self.model.verbose - self.tensorboard_log = self.model.tensorboard_log - - # convert goal_selection_strategy into GoalSelectionStrategy if string - if isinstance(goal_selection_strategy, str): - self.goal_selection_strategy = KEY_TO_GOAL_STRATEGY[goal_selection_strategy.lower()] - else: - self.goal_selection_strategy = goal_selection_strategy - - # check if goal_selection_strategy is valid - assert isinstance( - self.goal_selection_strategy, GoalSelectionStrategy - ), f"Invalid goal selection strategy, please use one of {list(GoalSelectionStrategy)}" - - self.n_sampled_goal = n_sampled_goal - # if we sample her transitions online use custom replay buffer - self.online_sampling = online_sampling - # compute ratio between HER replays and regular replays in percent for online HER sampling - self.her_ratio = 1 - (1.0 / (self.n_sampled_goal + 1)) - # maximum steps in episode - self.max_episode_length = get_time_limit(self.env, max_episode_length) - # storage for transitions of current episode for offline sampling - # for online sampling, it replaces the "classic" replay buffer completely - her_buffer_size = self.buffer_size if online_sampling else self.max_episode_length - - assert self.env is not None, "Because it needs access to `env.compute_reward()` HER you must provide the env." 
- - self._episode_storage = HerReplayBuffer( - self.env, - her_buffer_size, - self.max_episode_length, - self.goal_selection_strategy, - self.env.observation_space, - self.env.action_space, - self.device, - self.n_envs, - self.her_ratio, # pytype: disable=wrong-arg-types - ) - - # counter for steps in episode - self.episode_steps = 0 - - if _init_setup_model: - self._setup_model() - - def _setup_model(self) -> None: - self.model._setup_model() - # assign episode storage to replay buffer when using online HER sampling - if self.online_sampling: - self.model.replay_buffer = self._episode_storage - - def predict( - self, - observation: np.ndarray, - state: Optional[np.ndarray] = None, - mask: Optional[np.ndarray] = None, - deterministic: bool = False, - ) -> Tuple[np.ndarray, Optional[np.ndarray]]: - - return self.model.predict(observation, state, mask, deterministic) - - def learn( - self, - total_timesteps: int, - callback: MaybeCallback = None, - log_interval: int = 4, - eval_env: Optional[GymEnv] = None, - eval_freq: int = -1, - n_eval_episodes: int = 5, - tb_log_name: str = "HER", - eval_log_path: Optional[str] = None, - reset_num_timesteps: bool = True, - ) -> BaseAlgorithm: - - total_timesteps, callback = self._setup_learn( - total_timesteps, eval_env, callback, eval_freq, n_eval_episodes, eval_log_path, reset_num_timesteps, tb_log_name - ) - self.model.start_time = self.start_time - self.model.ep_info_buffer = self.ep_info_buffer - self.model.ep_success_buffer = self.ep_success_buffer - self.model.num_timesteps = self.num_timesteps - self.model._episode_num = self._episode_num - self.model._last_obs = self._last_obs - self.model._total_timesteps = self._total_timesteps - - callback.on_training_start(locals(), globals()) - - while self.num_timesteps < total_timesteps: - rollout = self.collect_rollouts( - self.env, - train_freq=self.train_freq, - action_noise=self.action_noise, - callback=callback, - learning_starts=self.learning_starts, - log_interval=log_interval, - ) - - if rollout.continue_training is False: - break - - if self.num_timesteps > 0 and self.num_timesteps > self.learning_starts and self.replay_buffer.size() > 0: - # If no `gradient_steps` is specified, - # do as many gradients steps as steps performed during the rollout - gradient_steps = self.gradient_steps if self.gradient_steps > 0 else rollout.episode_timesteps - self.train(batch_size=self.batch_size, gradient_steps=gradient_steps) - - callback.on_training_end() - - return self - - def collect_rollouts( - self, - env: VecEnv, - callback: BaseCallback, - train_freq: TrainFreq, - action_noise: Optional[ActionNoise] = None, - learning_starts: int = 0, - log_interval: Optional[int] = None, - ) -> RolloutReturn: - """ - Collect experiences and store them into a ReplayBuffer. - - :param env: The training environment - :param callback: Callback that will be called at each step - (and at the beginning and end of the rollout) - :param train_freq: How much experience to collect - by doing rollouts of current policy. - Either ``TrainFreq(, TrainFrequencyUnit.STEP)`` - or ``TrainFreq(, TrainFrequencyUnit.EPISODE)`` - with ```` being an integer greater than 0. - :param action_noise: Action noise that will be used for exploration - Required for deterministic policy (e.g. TD3). This can also be used - in addition to the stochastic policy for SAC. - :param learning_starts: Number of steps before learning for the warm-up phase. 
- :param log_interval: Log data every ``log_interval`` episodes - :return: - """ - - episode_rewards, total_timesteps = [], [] - num_collected_steps, num_collected_episodes = 0, 0 - - assert isinstance(env, VecEnv), "You must pass a VecEnv" - assert env.num_envs == 1, "OffPolicyAlgorithm only support single environment" - assert train_freq.frequency > 0, "Should at least collect one step or episode." - - if self.model.use_sde: - self.actor.reset_noise() - - callback.on_rollout_start() - continue_training = True - - while should_collect_more_steps(train_freq, num_collected_steps, num_collected_episodes): - done = False - episode_reward, episode_timesteps = 0.0, 0 - - while not done: - # concatenate observation and (desired) goal - observation = self._last_obs - self._last_obs = ObsDictWrapper.convert_dict(observation) - - if ( - self.model.use_sde - and self.model.sde_sample_freq > 0 - and num_collected_steps % self.model.sde_sample_freq == 0 - ): - # Sample a new noise matrix - self.actor.reset_noise() - - # Select action randomly or according to policy - self.model._last_obs = self._last_obs - action, buffer_action = self._sample_action(learning_starts, action_noise) - - # Perform action - new_obs, reward, done, infos = env.step(action) - - self.num_timesteps += 1 - self.model.num_timesteps = self.num_timesteps - episode_timesteps += 1 - num_collected_steps += 1 - - # Only stop training if return value is False, not when it is None. - if callback.on_step() is False: - return RolloutReturn(0.0, num_collected_steps, num_collected_episodes, continue_training=False) - - episode_reward += reward - - # Retrieve reward and episode length if using Monitor wrapper - self._update_info_buffer(infos, done) - self.model.ep_info_buffer = self.ep_info_buffer - self.model.ep_success_buffer = self.ep_success_buffer - - # == Store transition in the replay buffer and/or in the episode storage == - - if self._vec_normalize_env is not None: - # Store only the unnormalized version - new_obs_ = self._vec_normalize_env.get_original_obs() - reward_ = self._vec_normalize_env.get_original_reward() - else: - # Avoid changing the original ones - self._last_original_obs, new_obs_, reward_ = observation, new_obs, reward - self.model._last_original_obs = self._last_original_obs - - # As the VecEnv resets automatically, new_obs is already the - # first observation of the next episode - if done and infos[0].get("terminal_observation") is not None: - next_obs = infos[0]["terminal_observation"] - # VecNormalize normalizes the terminal observation - if self._vec_normalize_env is not None: - next_obs = self._vec_normalize_env.unnormalize_obs(next_obs) - else: - next_obs = new_obs_ - - if self.online_sampling: - self.replay_buffer.add(self._last_original_obs, next_obs, buffer_action, reward_, done, infos) - else: - # concatenate observation with (desired) goal - flattened_obs = ObsDictWrapper.convert_dict(self._last_original_obs) - flattened_next_obs = ObsDictWrapper.convert_dict(next_obs) - # add to replay buffer - self.replay_buffer.add(flattened_obs, flattened_next_obs, buffer_action, reward_, done) - # add current transition to episode storage - self._episode_storage.add(self._last_original_obs, next_obs, buffer_action, reward_, done, infos) - - self._last_obs = new_obs - self.model._last_obs = self._last_obs - - # Save the unnormalized new observation - if self._vec_normalize_env is not None: - self._last_original_obs = new_obs_ - self.model._last_original_obs = self._last_original_obs - - 
self.model._update_current_progress_remaining(self.num_timesteps, self._total_timesteps) - - # For DQN, check if the target network should be updated - # and update the exploration schedule - # For SAC/TD3, the update is done as the same time as the gradient update - # see https://github.com/hill-a/stable-baselines/issues/900 - self.model._on_step() - - self.episode_steps += 1 - - if not should_collect_more_steps(train_freq, num_collected_steps, num_collected_episodes): - break - - if done or self.episode_steps >= self.max_episode_length: - if self.online_sampling: - self.replay_buffer.store_episode() - else: - self._episode_storage.store_episode() - # sample virtual transitions and store them in replay buffer - self._sample_her_transitions() - # clear storage for current episode - self._episode_storage.reset() - - num_collected_episodes += 1 - self._episode_num += 1 - self.model._episode_num = self._episode_num - episode_rewards.append(episode_reward) - total_timesteps.append(episode_timesteps) - - if action_noise is not None: - action_noise.reset() - - # Log training infos - if log_interval is not None and self._episode_num % log_interval == 0: - self._dump_logs() - - self.episode_steps = 0 - - mean_reward = np.mean(episode_rewards) if num_collected_episodes > 0 else 0.0 - - callback.on_rollout_end() - - return RolloutReturn(mean_reward, num_collected_steps, num_collected_episodes, continue_training) - - def _sample_her_transitions(self) -> None: - """ - Sample additional goals and store new transitions in replay buffer - when using offline sampling. - """ - - # Sample goals and get new observations - # maybe_vec_env=None as we should store unnormalized transitions, - # they will be normalized at sampling time - observations, next_observations, actions, rewards = self._episode_storage.sample_offline( - n_sampled_goal=self.n_sampled_goal - ) - - # store data in replay buffer - dones = np.zeros((len(observations)), dtype=bool) - self.replay_buffer.extend(observations, next_observations, actions, rewards, dones) - - def __getattr__(self, item: str) -> Any: - """ - Find attribute from model class if this class does not have it. - """ - if hasattr(self.model, item): - return getattr(self.model, item) - else: - raise AttributeError(f"{self} has no attribute {item}") - - def _get_torch_save_params(self) -> Tuple[List[str], List[str]]: - return self.model._get_torch_save_params() - - def save( - self, - path: Union[str, pathlib.Path, io.BufferedIOBase], - exclude: Optional[Iterable[str]] = None, - include: Optional[Iterable[str]] = None, - ) -> None: - """ - Save all the attributes of the object and the model parameters in a zip-file. 
- - :param path: path to the file where the rl agent should be saved - :param exclude: name of parameters that should be excluded in addition to the default one - :param include: name of parameters that might be excluded but should be included anyway - """ - - # add HER parameters to model - self.model.n_sampled_goal = self.n_sampled_goal - self.model.goal_selection_strategy = self.goal_selection_strategy - self.model.online_sampling = self.online_sampling - self.model.model_class = self.model_class - self.model.max_episode_length = self.max_episode_length - - self.model.save(path, exclude, include) - - @classmethod - def load( - cls, - path: Union[str, pathlib.Path, io.BufferedIOBase], - env: Optional[GymEnv] = None, - device: Union[th.device, str] = "auto", - custom_objects: Optional[Dict[str, Any]] = None, - **kwargs, - ) -> "BaseAlgorithm": - """ - Load the model from a zip-file - - :param path: path to the file (or a file-like) where to - load the agent from - :param env: the new environment to run the loaded model on - (can be None if you only need prediction from a trained model) has priority over any saved environment - :param device: Device on which the code should run. - :param custom_objects: Dictionary of objects to replace - upon loading. If a variable is present in this dictionary as a - key, it will not be deserialized and the corresponding item - will be used instead. Similar to custom_objects in - ``keras.models.load_model``. Useful when you have an object in - file that can not be deserialized. - :param kwargs: extra arguments to change the model when loading - """ - data, params, pytorch_variables = load_from_zip_file(path, device=device, custom_objects=custom_objects) - - # Remove stored device information and replace with ours - if "policy_kwargs" in data: - if "device" in data["policy_kwargs"]: - del data["policy_kwargs"]["device"] - - if "policy_kwargs" in kwargs and kwargs["policy_kwargs"] != data["policy_kwargs"]: - raise ValueError( - f"The specified policy kwargs do not equal the stored policy kwargs." - f"Stored kwargs: {data['policy_kwargs']}, specified kwargs: {kwargs['policy_kwargs']}" - ) - - # check if observation space and action space are part of the saved parameters - if "observation_space" not in data or "action_space" not in data: - raise KeyError("The observation_space and action_space were not given, can't verify new environments") - - # check if given env is valid - if env is not None: - # Wrap first if needed - env = cls._wrap_env(env, data["verbose"]) - # Check if given env is valid - check_for_correct_spaces(env, data["observation_space"], data["action_space"]) - else: - # Use stored env, if one exists. 
If not, continue as is (can be used for predict) - if "env" in data: - env = data["env"] - - if "use_sde" in data and data["use_sde"]: - kwargs["use_sde"] = True - - # Keys that cannot be changed - for key in {"model_class", "online_sampling", "max_episode_length"}: - if key in kwargs: - del kwargs[key] - - # Keys that can be changed - for key in {"n_sampled_goal", "goal_selection_strategy"}: - if key in kwargs: - data[key] = kwargs[key] # pytype: disable=unsupported-operands - del kwargs[key] - - # noinspection PyArgumentList - her_model = cls( - policy=data["policy_class"], - env=env, - model_class=data["model_class"], - n_sampled_goal=data["n_sampled_goal"], - goal_selection_strategy=data["goal_selection_strategy"], - online_sampling=data["online_sampling"], - max_episode_length=data["max_episode_length"], - policy_kwargs=data["policy_kwargs"], - _init_setup_model=False, # pytype: disable=not-instantiable,wrong-keyword-args - **kwargs, - ) - - # load parameters - her_model.model.__dict__.update(data) - her_model.model.__dict__.update(kwargs) - her_model._setup_model() - - her_model._total_timesteps = her_model.model._total_timesteps - her_model.num_timesteps = her_model.model.num_timesteps - her_model._episode_num = her_model.model._episode_num - - # put state_dicts back in place - her_model.model.set_parameters(params, exact_match=True, device=device) - - # put other pytorch variables back in place - if pytorch_variables is not None: - for name in pytorch_variables: - recursive_setattr(her_model.model, name, pytorch_variables[name]) - - # Sample gSDE exploration matrix, so it uses the right device - # see issue #44 - if her_model.model.use_sde: - her_model.model.policy.reset_noise() # pytype: disable=attribute-error - return her_model - - def load_replay_buffer( - self, path: Union[str, pathlib.Path, io.BufferedIOBase], truncate_last_trajectory: bool = True - ) -> None: - """ - Load a replay buffer from a pickle file and set environment for replay buffer (only online sampling). - - :param path: Path to the pickled replay buffer. - :param truncate_last_trajectory: Only for online sampling. - If set to ``True`` we assume that the last trajectory in the replay buffer was finished. - If it is set to ``False`` we assume that we continue the same trajectory (same episode). - """ - self.model.load_replay_buffer(path=path) - - if self.online_sampling: - # set environment - self.replay_buffer.set_env(self.env) - # If we are at the start of an episode, no need to truncate - current_idx = self.replay_buffer.current_idx - - # truncate interrupted episode - if truncate_last_trajectory and current_idx > 0: - warnings.warn( - "The last trajectory in the replay buffer will be truncated.\n" - "If you are in the same episode as when the replay buffer was saved,\n" - "you should use `truncate_last_trajectory=False` to avoid that issue." 
- ) - # get current episode and transition index - pos = self.replay_buffer.pos - # set episode length for current episode - self.replay_buffer.episode_lengths[pos] = current_idx - # set done = True for current episode - # current_idx was already incremented - self.replay_buffer.buffer["done"][pos][current_idx - 1] = np.array([True], dtype=np.float32) - # reset current transition index - self.replay_buffer.current_idx = 0 - # increment episode counter - self.replay_buffer.pos = (self.replay_buffer.pos + 1) % self.replay_buffer.max_episode_stored - # update "full" indicator - self.replay_buffer.full = self.replay_buffer.full or self.replay_buffer.pos == 0 diff --git a/stable_baselines3/her/her_replay_buffer.py b/stable_baselines3/her/her_replay_buffer.py index edca50ff9..ea5d35e09 100644 --- a/stable_baselines3/her/her_replay_buffer.py +++ b/stable_baselines3/her/her_replay_buffer.py @@ -1,75 +1,148 @@ +import warnings from collections import deque from typing import Any, Dict, List, Optional, Tuple, Union import numpy as np import torch as th -from gym import spaces -from stable_baselines3.common.buffers import ReplayBuffer -from stable_baselines3.common.type_aliases import ReplayBufferSamples, RolloutBufferSamples -from stable_baselines3.common.vec_env import VecNormalize -from stable_baselines3.common.vec_env.obs_dict_wrapper import ObsDictWrapper -from stable_baselines3.her.goal_selection_strategy import GoalSelectionStrategy +from stable_baselines3.common.buffers import DictReplayBuffer +from stable_baselines3.common.preprocessing import get_obs_shape +from stable_baselines3.common.type_aliases import DictReplayBufferSamples +from stable_baselines3.common.vec_env import VecEnv, VecNormalize +from stable_baselines3.her.goal_selection_strategy import KEY_TO_GOAL_STRATEGY, GoalSelectionStrategy -class HerReplayBuffer(ReplayBuffer): +def get_time_limit(env: VecEnv, current_max_episode_length: Optional[int]) -> int: """ + Get time limit from environment. + + :param env: Environment from which we want to get the time limit. + :param current_max_episode_length: Current value for max_episode_length. + :return: max episode length + """ + # try to get the attribute from environment + if current_max_episode_length is None: + try: + current_max_episode_length = env.get_attr("spec")[0].max_episode_steps + # Raise the error because the attribute is present but is None + if current_max_episode_length is None: + raise AttributeError + # if not available check if a valid value was passed as an argument + except AttributeError: + raise ValueError( + "The max episode length could not be inferred.\n" + "You must specify a `max_episode_steps` when registering the environment,\n" + "use a `gym.wrappers.TimeLimit` wrapper " + "or pass `max_episode_length` to the model constructor" + ) + return current_max_episode_length + + +class HerReplayBuffer(DictReplayBuffer): + """ + Hindsight Experience Replay (HER) buffer. + Paper: https://arxiv.org/abs/1707.01495 + + .. warning:: + + For performance reasons, the maximum number of steps per episodes must be specified. + In most cases, it will be inferred if you specify ``max_episode_steps`` when registering the environment + or if you use a ``gym.wrappers.TimeLimit`` (and ``env.spec`` is not None). + Otherwise, you can directly pass ``max_episode_length`` to the replay buffer constructor. + + Replay buffer for sampling HER (Hindsight Experience Replay) transitions. 
In the online sampling case, these new transitions will not be saved in the replay buffer and will only be created at sampling time. :param env: The training environment :param buffer_size: The size of the buffer measured in transitions. - :param max_episode_length: The length of an episode. (time horizon) + :param max_episode_length: The maximum length of an episode. If not specified, + it will be automatically inferred if the environment uses a ``gym.wrappers.TimeLimit`` wrapper. :param goal_selection_strategy: Strategy for sampling goals for replay. One of ['episode', 'final', 'future'] - :param observation_space: Observation space - :param action_space: Action space :param device: PyTorch device - :param n_envs: Number of parallel environments - :her_ratio: The ratio between HER transitions and regular transitions in percent - (between 0 and 1, for online sampling) - The default value ``her_ratio=0.8`` corresponds to 4 virtual transitions - for one real transition (4 / (4 + 1) = 0.8) + :param n_sampled_goal: Number of virtual transitions to create per real transition, + by sampling new goals. + :param handle_timeout_termination: Handle timeout termination (due to timelimit) + separately and treat the task as infinite horizon task. + https://github.com/DLR-RM/stable-baselines3/issues/284 """ def __init__( self, - env: ObsDictWrapper, + env: VecEnv, buffer_size: int, - max_episode_length: int, - goal_selection_strategy: GoalSelectionStrategy, - observation_space: spaces.Space, - action_space: spaces.Space, device: Union[th.device, str] = "cpu", - n_envs: int = 1, - her_ratio: float = 0.8, + replay_buffer: Optional[DictReplayBuffer] = None, + max_episode_length: Optional[int] = None, + n_sampled_goal: int = 4, + goal_selection_strategy: Union[GoalSelectionStrategy, str] = "future", + online_sampling: bool = True, + handle_timeout_termination: bool = True, ): - super(HerReplayBuffer, self).__init__(buffer_size, observation_space, action_space, device, n_envs) + super(HerReplayBuffer, self).__init__(buffer_size, env.observation_space, env.action_space, device, env.num_envs) + + # convert goal_selection_strategy into GoalSelectionStrategy if string + if isinstance(goal_selection_strategy, str): + self.goal_selection_strategy = KEY_TO_GOAL_STRATEGY[goal_selection_strategy.lower()] + else: + self.goal_selection_strategy = goal_selection_strategy + + # check if goal_selection_strategy is valid + assert isinstance( + self.goal_selection_strategy, GoalSelectionStrategy + ), f"Invalid goal selection strategy, please use one of {list(GoalSelectionStrategy)}" + + self.n_sampled_goal = n_sampled_goal + # if we sample her transitions online use custom replay buffer + self.online_sampling = online_sampling + # compute ratio between HER replays and regular replays in percent for online HER sampling + self.her_ratio = 1 - (1.0 / (self.n_sampled_goal + 1)) + # maximum steps in episode + self.max_episode_length = get_time_limit(env, max_episode_length) + # storage for transitions of current episode for offline sampling + # for online sampling, it replaces the "classic" replay buffer completely + her_buffer_size = buffer_size if online_sampling else self.max_episode_length self.env = env - self.buffer_size = buffer_size - self.max_episode_length = max_episode_length + self.buffer_size = her_buffer_size + + if online_sampling: + replay_buffer = None + self.replay_buffer = replay_buffer + self.online_sampling = online_sampling + + # Handle timeouts termination properly if needed + # see 
https://github.com/DLR-RM/stable-baselines3/issues/284 + self.handle_timeout_termination = handle_timeout_termination # buffer with episodes # number of episodes which can be stored until buffer size is reached self.max_episode_stored = self.buffer_size // self.max_episode_length self.current_idx = 0 + # Counter to prevent overflow + self.episode_steps = 0 + + # Get shape of observation and goal (usually the same) + self.obs_shape = get_obs_shape(self.env.observation_space.spaces["observation"]) + self.goal_shape = get_obs_shape(self.env.observation_space.spaces["achieved_goal"]) # input dimensions for buffer initialization input_shape = { - "observation": (self.env.num_envs, self.env.obs_dim), - "achieved_goal": (self.env.num_envs, self.env.goal_dim), - "desired_goal": (self.env.num_envs, self.env.goal_dim), + "observation": (self.env.num_envs,) + self.obs_shape, + "achieved_goal": (self.env.num_envs,) + self.goal_shape, + "desired_goal": (self.env.num_envs,) + self.goal_shape, "action": (self.action_dim,), "reward": (1,), - "next_obs": (self.env.num_envs, self.env.obs_dim), - "next_achieved_goal": (self.env.num_envs, self.env.goal_dim), - "next_desired_goal": (self.env.num_envs, self.env.goal_dim), + "next_obs": (self.env.num_envs,) + self.obs_shape, + "next_achieved_goal": (self.env.num_envs,) + self.goal_shape, + "next_desired_goal": (self.env.num_envs,) + self.goal_shape, "done": (1,), } - self.buffer = { + self._observation_keys = ["observation", "achieved_goal", "desired_goal"] + self._buffer = { key: np.zeros((self.max_episode_stored, self.max_episode_length, *dim), dtype=np.float32) for key, dim in input_shape.items() } @@ -78,15 +151,13 @@ def __init__( # episode length storage, needed for episodes which has less steps than the maximum length self.episode_lengths = np.zeros(self.max_episode_stored, dtype=np.int64) - self.goal_selection_strategy = goal_selection_strategy - # percentage of her indices - self.her_ratio = her_ratio - def __getstate__(self) -> Dict[str, Any]: """ Gets state for pickling. - Excludes self.env, as in general Env's may not be pickleable.""" + Excludes self.env, as in general Env's may not be pickleable. + Note: when using offline sampling, this will also save the offline replay buffer. + """ state = self.__dict__.copy() # these attributes are not pickleable del state["env"] @@ -104,7 +175,7 @@ def __setstate__(self, state: Dict[str, Any]) -> None: assert "env" not in state self.env = None - def set_env(self, env: ObsDictWrapper) -> None: + def set_env(self, env: VecEnv) -> None: """ Sets the environment. @@ -115,9 +186,7 @@ def set_env(self, env: ObsDictWrapper) -> None: self.env = env - def _get_samples( - self, batch_inds: np.ndarray, env: Optional[VecNormalize] = None - ) -> Union[ReplayBufferSamples, RolloutBufferSamples]: + def _get_samples(self, batch_inds: np.ndarray, env: Optional[VecNormalize] = None) -> DictReplayBufferSamples: """ Abstract method from base class. """ @@ -127,7 +196,7 @@ def sample( self, batch_size: int, env: Optional[VecNormalize], - ) -> Union[ReplayBufferSamples, Tuple[np.ndarray, ...]]: + ) -> DictReplayBufferSamples: """ Sample function for online sampling of HER transition, this replaces the "regular" replay buffer ``sample()`` @@ -138,12 +207,14 @@ def sample( to normalize the observations/rewards when sampling :return: Samples. 
""" - return self._sample_transitions(batch_size, maybe_vec_env=env, online_sampling=True) + if self.replay_buffer is not None: + return self.replay_buffer.sample(batch_size, env) + return self._sample_transitions(batch_size, maybe_vec_env=env, online_sampling=True) # pytype: disable=bad-return-type - def sample_offline( + def _sample_offline( self, n_sampled_goal: Optional[int] = None, - ) -> Union[ReplayBufferSamples, Tuple[np.ndarray, ...]]: + ) -> Tuple[Dict[str, np.ndarray], Dict[str, np.ndarray], np.ndarray, np.ndarray]: """ Sample function for offline sampling of HER transition, in that case, only one episode is used and transitions @@ -152,9 +223,13 @@ def sample_offline( :param n_sampled_goal: Number of sampled goals for replay :return: at most(n_sampled_goal * episode_length) HER transitions. """ - # env=None as we should store unnormalized transitions, they will be normalized at sampling time + # `maybe_vec_env=None` as we should store unnormalized transitions, + # they will be normalized at sampling time return self._sample_transitions( - batch_size=None, maybe_vec_env=None, online_sampling=False, n_sampled_goal=n_sampled_goal + batch_size=None, + maybe_vec_env=None, + online_sampling=False, + n_sampled_goal=n_sampled_goal, ) def sample_goals( @@ -191,7 +266,7 @@ def sample_goals( else: raise ValueError(f"Strategy {self.goal_selection_strategy} for sampling goals not supported!") - return self.buffer["achieved_goal"][her_episode_indices, transitions_indices] + return self._buffer["achieved_goal"][her_episode_indices, transitions_indices] def _sample_transitions( self, @@ -199,7 +274,7 @@ def _sample_transitions( maybe_vec_env: Optional[VecNormalize], online_sampling: bool, n_sampled_goal: Optional[int] = None, - ) -> Union[ReplayBufferSamples, Tuple[np.ndarray, ...]]: + ) -> Union[DictReplayBufferSamples, Tuple[Dict[str, np.ndarray], Dict[str, np.ndarray], np.ndarray, np.ndarray]]: """ :param batch_size: Number of element to sample (only used for online sampling) :param env: associated gym VecEnv to normalize the observations/rewards @@ -248,7 +323,7 @@ def _sample_transitions( if her_indices.size == 0: # Episode of one timestep, not enough for using the "future" strategy # no virtual transitions are created in that case - return np.zeros(0), np.zeros(0), np.zeros(0), np.zeros(0) + return {}, {}, np.zeros(0), np.zeros(0) else: # Repeat every transition index n_sampled_goals times # to sample n_sampled_goal per timestep in the episode (only one is stored). 
@@ -258,7 +333,7 @@ def _sample_transitions( her_indices = np.arange(len(episode_indices)) # get selected transitions - transitions = {key: self.buffer[key][episode_indices, transitions_indices].copy() for key in self.buffer.keys()} + transitions = {key: self._buffer[key][episode_indices, transitions_indices].copy() for key in self._buffer.keys()} # sample new desired goals and relabel the transitions new_goals = self.sample_goals(episode_indices, her_indices, transitions_indices) @@ -288,21 +363,29 @@ def _sample_transitions( ) # concatenate observation with (desired) goal - observations = ObsDictWrapper.convert_dict(self._normalize_obs(transitions, maybe_vec_env)) - # HACK to make normalize obs work with the next observation - transitions["observation"] = transitions["next_obs"] - next_observations = ObsDictWrapper.convert_dict(self._normalize_obs(transitions, maybe_vec_env)) + observations = self._normalize_obs(transitions, maybe_vec_env) + + # HACK to make normalize obs and `add()` work with the next observation + next_observations = { + "observation": transitions["next_obs"], + "achieved_goal": transitions["next_achieved_goal"], + # The desired goal for the next observation must be the same as the previous one + "desired_goal": transitions["desired_goal"], + } + next_observations = self._normalize_obs(next_observations, maybe_vec_env) if online_sampling: - data = ( - observations[:, 0], - transitions["action"], - next_observations[:, 0], - transitions["done"], - self._normalize_reward(transitions["reward"], maybe_vec_env), - ) + next_obs = {key: self.to_torch(next_observations[key][:, 0, :]) for key in self._observation_keys} + + normalized_obs = {key: self.to_torch(observations[key][:, 0, :]) for key in self._observation_keys} - return ReplayBufferSamples(*tuple(map(self.to_torch, data))) + return DictReplayBufferSamples( + observations=normalized_obs, + actions=self.to_torch(transitions["action"]), + next_observations=next_obs, + dones=self.to_torch(transitions["done"]), + rewards=self.to_torch(self._normalize_reward(transitions["reward"], maybe_vec_env)), + ) else: return observations, next_observations, transitions["action"], transitions["reward"] @@ -313,28 +396,58 @@ def add( action: np.ndarray, reward: np.ndarray, done: np.ndarray, - infos: List[dict], + infos: List[Dict[str, Any]], ) -> None: if self.current_idx == 0 and self.full: # Clear info buffer self.info_buffer[self.pos] = deque(maxlen=self.max_episode_length) - self.buffer["observation"][self.pos][self.current_idx] = obs["observation"] - self.buffer["achieved_goal"][self.pos][self.current_idx] = obs["achieved_goal"] - self.buffer["desired_goal"][self.pos][self.current_idx] = obs["desired_goal"] - self.buffer["action"][self.pos][self.current_idx] = action - self.buffer["done"][self.pos][self.current_idx] = done - self.buffer["reward"][self.pos][self.current_idx] = reward - self.buffer["next_obs"][self.pos][self.current_idx] = next_obs["observation"] - self.buffer["next_achieved_goal"][self.pos][self.current_idx] = next_obs["achieved_goal"] - self.buffer["next_desired_goal"][self.pos][self.current_idx] = next_obs["desired_goal"] + # Remove termination signals due to timeout + if self.handle_timeout_termination: + done_ = done * (1 - np.array([info.get("TimeLimit.truncated", False) for info in infos])) + else: + done_ = done + + self._buffer["observation"][self.pos][self.current_idx] = obs["observation"] + self._buffer["achieved_goal"][self.pos][self.current_idx] = obs["achieved_goal"] + 
self._buffer["desired_goal"][self.pos][self.current_idx] = obs["desired_goal"] + self._buffer["action"][self.pos][self.current_idx] = action + self._buffer["done"][self.pos][self.current_idx] = done_ + self._buffer["reward"][self.pos][self.current_idx] = reward + self._buffer["next_obs"][self.pos][self.current_idx] = next_obs["observation"] + self._buffer["next_achieved_goal"][self.pos][self.current_idx] = next_obs["achieved_goal"] + self._buffer["next_desired_goal"][self.pos][self.current_idx] = next_obs["desired_goal"] + + # When doing offline sampling + # Add real transition to normal replay buffer + if self.replay_buffer is not None: + self.replay_buffer.add( + obs, + next_obs, + action, + reward, + done, + infos, + ) self.info_buffer[self.pos].append(infos) # update current pointer self.current_idx += 1 + self.episode_steps += 1 + + if done or self.episode_steps >= self.max_episode_length: + self.store_episode() + if not self.online_sampling: + # sample virtual transitions and store them in replay buffer + self._sample_her_transitions() + # clear storage for current episode + self.reset() + + self.episode_steps = 0 + def store_episode(self) -> None: """ Increment episode counter @@ -354,6 +467,28 @@ def store_episode(self) -> None: # reset transition pointer self.current_idx = 0 + def _sample_her_transitions(self) -> None: + """ + Sample additional goals and store new transitions in replay buffer + when using offline sampling. + """ + + # Sample goals to create virtual transitions for the last episode. + observations, next_observations, actions, rewards = self._sample_offline(n_sampled_goal=self.n_sampled_goal) + + # Store virtual transitions in the replay buffer, if available + if len(observations) > 0: + for i in range(len(observations["observation"])): + self.replay_buffer.add( + {key: obs[i] for key, obs in observations.items()}, + {key: next_obs[i] for key, next_obs in next_observations.items()}, + actions[i], + rewards[i], + # We consider the transition as non-terminal + done=[False], + infos=[{}], + ) + @property def n_episodes_stored(self) -> int: if self.full: @@ -374,3 +509,34 @@ def reset(self) -> None: self.current_idx = 0 self.full = False self.episode_lengths = np.zeros(self.max_episode_stored, dtype=np.int64) + + def truncate_last_trajectory(self) -> None: + """ + Only for online sampling, called when loading the replay buffer. + If called, we assume that the last trajectory in the replay buffer was finished + (and truncate it). + If not called, we assume that we continue the same trajectory (same episode). + """ + # If we are at the start of an episode, no need to truncate + current_idx = self.current_idx + + # truncate interrupted episode + if current_idx > 0: + warnings.warn( + "The last trajectory in the replay buffer will be truncated.\n" + "If you are in the same episode as when the replay buffer was saved,\n" + "you should use `truncate_last_trajectory=False` to avoid that issue." 
+ ) + # get current episode and transition index + pos = self.pos + # set episode length for current episode + self.episode_lengths[pos] = current_idx + # set done = True for current episode + # current_idx was already incremented + self._buffer["done"][pos][current_idx - 1] = np.array([True], dtype=np.float32) + # reset current transition index + self.current_idx = 0 + # increment episode counter + self.pos = (self.pos + 1) % self.max_episode_stored + # update "full" indicator + self.full = self.full or self.pos == 0 diff --git a/stable_baselines3/sac/sac.py b/stable_baselines3/sac/sac.py index 63ed10f86..a22095575 100644 --- a/stable_baselines3/sac/sac.py +++ b/stable_baselines3/sac/sac.py @@ -6,6 +6,7 @@ from torch.nn import functional as F from stable_baselines3.common import logger +from stable_baselines3.common.buffers import ReplayBuffer from stable_baselines3.common.noise import ActionNoise from stable_baselines3.common.off_policy_algorithm import OffPolicyAlgorithm from stable_baselines3.common.type_aliases import GymEnv, MaybeCallback, Schedule @@ -44,6 +45,9 @@ class SAC(OffPolicyAlgorithm): during the rollout. :param action_noise: the action noise type (None by default), this can help for hard exploration problem. Cf common.noise for the different action noise type. + :param replay_buffer_class: Replay buffer class to use (for instance ``HerReplayBuffer``). + If ``None``, it will be automatically selected. + :param replay_buffer_kwargs: Keyword arguments to pass to the replay buffer on creation. :param optimize_memory_usage: Enable a memory efficient variant of the replay buffer at a cost of more complexity. See https://github.com/DLR-RM/stable-baselines3/issues/37#issuecomment-637501195 @@ -74,7 +78,7 @@ def __init__( policy: Union[str, Type[SACPolicy]], env: Union[GymEnv, str], learning_rate: Union[float, Schedule] = 3e-4, - buffer_size: int = int(1e6), + buffer_size: int = 1000000, # 1e6 learning_starts: int = 100, batch_size: int = 256, tau: float = 0.005, @@ -82,6 +86,8 @@ def __init__( train_freq: Union[int, Tuple[int, str]] = 1, gradient_steps: int = 1, action_noise: Optional[ActionNoise] = None, + replay_buffer_class: Optional[ReplayBuffer] = None, + replay_buffer_kwargs: Optional[Dict[str, Any]] = None, optimize_memory_usage: bool = False, ent_coef: Union[str, float] = "auto", target_update_interval: int = 1, @@ -111,6 +117,8 @@ def __init__( train_freq, gradient_steps, action_noise, + replay_buffer_class=replay_buffer_class, + replay_buffer_kwargs=replay_buffer_kwargs, policy_kwargs=policy_kwargs, tensorboard_log=tensorboard_log, verbose=verbose, diff --git a/stable_baselines3/td3/td3.py b/stable_baselines3/td3/td3.py index b552e60f0..2b165c0f5 100644 --- a/stable_baselines3/td3/td3.py +++ b/stable_baselines3/td3/td3.py @@ -6,6 +6,7 @@ from torch.nn import functional as F from stable_baselines3.common import logger +from stable_baselines3.common.buffers import ReplayBuffer from stable_baselines3.common.noise import ActionNoise from stable_baselines3.common.off_policy_algorithm import OffPolicyAlgorithm from stable_baselines3.common.type_aliases import GymEnv, MaybeCallback, Schedule @@ -39,6 +40,9 @@ class TD3(OffPolicyAlgorithm): during the rollout. :param action_noise: the action noise type (None by default), this can help for hard exploration problem. Cf common.noise for the different action noise type. + :param replay_buffer_class: Replay buffer class to use (for instance ``HerReplayBuffer``). + If ``None``, it will be automatically selected. 
+ :param replay_buffer_kwargs: Keyword arguments to pass to the replay buffer on creation. :param optimize_memory_usage: Enable a memory efficient variant of the replay buffer at a cost of more complexity. See https://github.com/DLR-RM/stable-baselines3/issues/37#issuecomment-637501195 @@ -62,7 +66,7 @@ def __init__( policy: Union[str, Type[TD3Policy]], env: Union[GymEnv, str], learning_rate: Union[float, Schedule] = 1e-3, - buffer_size: int = int(1e6), + buffer_size: int = 1000000, # 1e6 learning_starts: int = 100, batch_size: int = 100, tau: float = 0.005, @@ -70,6 +74,8 @@ def __init__( train_freq: Union[int, Tuple[int, str]] = (1, "episode"), gradient_steps: int = -1, action_noise: Optional[ActionNoise] = None, + replay_buffer_class: Optional[ReplayBuffer] = None, + replay_buffer_kwargs: Optional[Dict[str, Any]] = None, optimize_memory_usage: bool = False, policy_delay: int = 2, target_policy_noise: float = 0.2, @@ -96,6 +102,8 @@ def __init__( train_freq, gradient_steps, action_noise=action_noise, + replay_buffer_class=replay_buffer_class, + replay_buffer_kwargs=replay_buffer_kwargs, policy_kwargs=policy_kwargs, tensorboard_log=tensorboard_log, verbose=verbose, @@ -131,7 +139,7 @@ def train(self, gradient_steps: int, batch_size: int = 100) -> None: actor_losses, critic_losses = [], [] - for gradient_step in range(gradient_steps): + for _ in range(gradient_steps): self._n_updates += 1 # Sample replay buffer diff --git a/stable_baselines3/version.txt b/stable_baselines3/version.txt index db805d35e..1406d2fc7 100644 --- a/stable_baselines3/version.txt +++ b/stable_baselines3/version.txt @@ -1 +1 @@ -1.0rc2 +1.1.0a6 diff --git a/tests/test_callbacks.py b/tests/test_callbacks.py index c5d453a12..48a6f34bd 100644 --- a/tests/test_callbacks.py +++ b/tests/test_callbacks.py @@ -5,7 +5,7 @@ import numpy as np import pytest -from stable_baselines3 import A2C, DDPG, DQN, HER, PPO, SAC, TD3 +from stable_baselines3 import A2C, DDPG, DQN, PPO, SAC, TD3, HerReplayBuffer from stable_baselines3.common.callbacks import ( CallbackList, CheckpointCallback, @@ -17,7 +17,6 @@ from stable_baselines3.common.env_util import make_vec_env from stable_baselines3.common.envs import BitFlippingEnv from stable_baselines3.common.vec_env import DummyVecEnv -from stable_baselines3.common.vec_env.obs_dict_wrapper import ObsDictWrapper @pytest.mark.parametrize("model_class", [A2C, PPO, SAC, TD3, DQN, DDPG]) @@ -108,12 +107,19 @@ def test_eval_success_logging(tmp_path): env = BitFlippingEnv(n_bits=n_bits) eval_env = DummyVecEnv([lambda: BitFlippingEnv(n_bits=n_bits)]) eval_callback = EvalCallback( - ObsDictWrapper(eval_env), + eval_env, eval_freq=250, log_path=tmp_path, warn=False, ) - model = HER("MlpPolicy", env, DQN, learning_starts=100, seed=0, max_episode_length=n_bits) + model = DQN( + "MultiInputPolicy", + env, + replay_buffer_class=HerReplayBuffer, + learning_starts=100, + seed=0, + replay_buffer_kwargs=dict(max_episode_length=n_bits), + ) model.learn(500, callback=eval_callback) assert len(eval_callback._is_success_buffer) > 0 # More than 50% success rate diff --git a/tests/test_dict_env.py b/tests/test_dict_env.py index 4e946d137..b165180d5 100644 --- a/tests/test_dict_env.py +++ b/tests/test_dict_env.py @@ -5,9 +5,9 @@ from stable_baselines3 import A2C, DDPG, DQN, PPO, SAC, TD3 from stable_baselines3.common.env_util import make_vec_env -from stable_baselines3.common.envs import SimpleMultiObsEnv +from stable_baselines3.common.envs import BitFlippingEnv, SimpleMultiObsEnv from 
stable_baselines3.common.evaluation import evaluate_policy -from stable_baselines3.common.vec_env import DummyVecEnv, SubprocVecEnv, VecFrameStack +from stable_baselines3.common.vec_env import DummyVecEnv, SubprocVecEnv, VecFrameStack, VecNormalize class DummyDictEnv(gym.Env): @@ -79,6 +79,14 @@ def render(self, mode="human"): pass +@pytest.mark.parametrize("model_class", [PPO, A2C]) +def test_goal_env(model_class): + env = BitFlippingEnv(n_bits=4) + # check that goal env works for PPO/A2C that cannot use HER replay buffer + model = model_class("MultiInputPolicy", env, n_steps=64).learn(250) + evaluate_policy(model, model.get_env()) + + @pytest.mark.parametrize("model_class", [PPO, A2C, DQN, DDPG, SAC, TD3]) def test_consistency(model_class): """ @@ -245,6 +253,46 @@ def test_dict_vec_framestack(model_class, channel_last): evaluate_policy(model, env, n_eval_episodes=5, warn=False) +@pytest.mark.parametrize("model_class", [PPO, A2C, DQN, DDPG, SAC, TD3]) +def test_vec_normalize(model_class): + """ + Additional tests for PPO/A2C/SAC/DDPG/TD3/DQN to check observation space support + for GoalEnv and VecNormalize using MultiInputPolicy. + """ + env = DummyVecEnv([lambda: BitFlippingEnv(n_bits=4, continuous=not (model_class == DQN))]) + env = VecNormalize(env) + + kwargs = {} + n_steps = 256 + + if model_class in {A2C, PPO}: + kwargs = dict( + n_steps=128, + policy_kwargs=dict( + net_arch=[32], + ), + ) + else: + # Avoid memory error when using replay buffer + # Reduce the size of the features and make learning faster + kwargs = dict( + buffer_size=250, + policy_kwargs=dict( + net_arch=[32], + ), + train_freq=8, + gradient_steps=1, + ) + if model_class == DQN: + kwargs["learning_starts"] = 0 + + model = model_class("MultiInputPolicy", env, gamma=0.5, seed=1, **kwargs) + + model.learn(total_timesteps=n_steps) + + evaluate_policy(model, env, n_eval_episodes=5, warn=False) + + def test_dict_nested(): """ Make sure we throw an appropiate error with nested Dict observation spaces diff --git a/tests/test_envs.py b/tests/test_envs.py index d8de91ac4..645c17e3f 100644 --- a/tests/test_envs.py +++ b/tests/test_envs.py @@ -56,6 +56,25 @@ def test_custom_envs(env_class): assert len(record) == 0 +@pytest.mark.parametrize( + "kwargs", + [ + dict(continuous=True), + dict(discrete_obs_space=True), + dict(image_obs_space=True, channel_first=True), + dict(image_obs_space=True, channel_first=False), + ], +) +def test_bit_flipping(kwargs): + # Additional tests for BitFlippingEnv + env = BitFlippingEnv(**kwargs) + with pytest.warns(None) as record: + check_env(env) + + # No warnings for custom envs + assert len(record) == 0 + + def test_high_dimension_action_space(): """ Test for continuous action space diff --git a/tests/test_gae.py b/tests/test_gae.py new file mode 100644 index 000000000..7f095c05c --- /dev/null +++ b/tests/test_gae.py @@ -0,0 +1,114 @@ +import gym +import numpy as np +import pytest +import torch as th + +from stable_baselines3 import A2C, PPO +from stable_baselines3.common.callbacks import BaseCallback +from stable_baselines3.common.policies import ActorCriticPolicy + + +class CustomEnv(gym.Env): + def __init__(self, max_steps=8): + super(CustomEnv, self).__init__() + self.observation_space = gym.spaces.Box(low=-1, high=1, shape=(2,), dtype=np.float32) + self.action_space = gym.spaces.Box(low=-1, high=1, shape=(2,), dtype=np.float32) + self.max_steps = max_steps + self.n_steps = 0 + + def seed(self, seed): + self.observation_space.seed(seed) + + def reset(self): + self.n_steps = 0 + return 
self.observation_space.sample() + + def step(self, action): + self.n_steps += 1 + + done = False + reward = 0.0 + if self.n_steps >= self.max_steps: + reward = 1.0 + done = True + + return self.observation_space.sample(), reward, done, {} + + +class CheckGAECallback(BaseCallback): + def __init__(self): + super(CheckGAECallback, self).__init__(verbose=0) + + def _on_rollout_end(self): + buffer = self.model.rollout_buffer + rollout_size = buffer.size() + + max_steps = self.training_env.envs[0].max_steps + gamma = self.model.gamma + gae_lambda = self.model.gae_lambda + value = self.model.policy.constant_value + # We know in advance that the agent will get a single + # reward at the very last timestep of the episode, + # so we can pre-compute the lambda-return and advantage + deltas = np.zeros((rollout_size,)) + advantages = np.zeros((rollout_size,)) + # Reward should be 1.0 on final timestep of episode + rewards = np.zeros((rollout_size,)) + rewards[max_steps - 1 :: max_steps] = 1.0 + # Note that these are episode starts (+1 timestep from done) + episode_starts = np.zeros((rollout_size,)) + episode_starts[::max_steps] = 1.0 + + # Final step is always terminal (next would episode_start = 1) + deltas[-1] = rewards[-1] - value + advantages[-1] = deltas[-1] + for n in reversed(range(rollout_size - 1)): + # Values are constants + episode_start_mask = 1.0 - episode_starts[n + 1] + deltas[n] = rewards[n] + gamma * value * episode_start_mask - value + advantages[n] = deltas[n] + gamma * gae_lambda * advantages[n + 1] * episode_start_mask + + # TD(lambda) estimate, see Github PR #375 + lambda_returns = advantages + value + + assert np.allclose(buffer.advantages.flatten(), advantages) + assert np.allclose(buffer.returns.flatten(), lambda_returns) + + def _on_step(self): + return True + + +class CustomPolicy(ActorCriticPolicy): + """Custom Policy with a constant value function""" + + def __init__(self, *args, **kwargs): + super(CustomPolicy, self).__init__(*args, **kwargs) + self.constant_value = 0.0 + + def forward(self, obs, deterministic=False): + actions, values, log_prob = super().forward(obs, deterministic) + # Overwrite values with ones + values = th.ones_like(values) * self.constant_value + return actions, values, log_prob + + +@pytest.mark.parametrize("model_class", [A2C, PPO]) +@pytest.mark.parametrize("gae_lambda", [1.0, 0.9]) +@pytest.mark.parametrize("gamma", [1.0, 0.99]) +@pytest.mark.parametrize("num_episodes", [1, 3]) +def test_gae_computation(model_class, gae_lambda, gamma, num_episodes): + env = CustomEnv(max_steps=64) + rollout_size = 64 * num_episodes + model = model_class( + CustomPolicy, + env, + seed=1, + gamma=gamma, + n_steps=rollout_size, + gae_lambda=gae_lambda, + ) + model.learn(rollout_size, callback=CheckGAECallback()) + + # Change constant value so advantage != returns + model.policy.constant_value = 1.0 + model.learn(rollout_size, callback=CheckGAECallback()) diff --git a/tests/test_her.py b/tests/test_her.py index 715b28054..ef887e70f 100644 --- a/tests/test_her.py +++ b/tests/test_her.py @@ -8,38 +8,57 @@ import pytest import torch as th -from stable_baselines3 import DDPG, DQN, HER, SAC, TD3 +from stable_baselines3 import DDPG, DQN, SAC, TD3, HerReplayBuffer from stable_baselines3.common.envs import BitFlippingEnv +from stable_baselines3.common.evaluation import evaluate_policy +from stable_baselines3.common.monitor import Monitor from stable_baselines3.common.noise import NormalActionNoise from stable_baselines3.common.vec_env import DummyVecEnv -from 
stable_baselines3.common.vec_env.obs_dict_wrapper import ObsDictWrapper from stable_baselines3.her.goal_selection_strategy import GoalSelectionStrategy -from stable_baselines3.her.her import get_time_limit +from stable_baselines3.her.her_replay_buffer import get_time_limit + + +def test_import_error(): + with pytest.raises(ImportError) as excinfo: + from stable_baselines3 import HER + + HER("MlpPolicy") + assert "documentation" in str(excinfo.value) @pytest.mark.parametrize("model_class", [SAC, TD3, DDPG, DQN]) @pytest.mark.parametrize("online_sampling", [True, False]) -def test_her(model_class, online_sampling): +@pytest.mark.parametrize("image_obs_space", [True, False]) +def test_her(model_class, online_sampling, image_obs_space): """ Test Hindsight Experience Replay. """ n_bits = 4 - env = BitFlippingEnv(n_bits=n_bits, continuous=not (model_class == DQN)) + env = BitFlippingEnv( + n_bits=n_bits, + continuous=not (model_class == DQN), + image_obs_space=image_obs_space, + ) - model = HER( - "MlpPolicy", + model = model_class( + "MultiInputPolicy", env, - model_class, - goal_selection_strategy="future", - online_sampling=online_sampling, - gradient_steps=1, + replay_buffer_class=HerReplayBuffer, + replay_buffer_kwargs=dict( + n_sampled_goal=2, + goal_selection_strategy="future", + online_sampling=online_sampling, + max_episode_length=n_bits, + ), train_freq=4, - max_episode_length=n_bits, + gradient_steps=1, policy_kwargs=dict(net_arch=[64]), learning_starts=100, + buffer_size=int(2e4), ) - model.learn(total_timesteps=300) + model.learn(total_timesteps=150) + evaluate_policy(model, Monitor(env)) @pytest.mark.parametrize( @@ -62,21 +81,25 @@ def test_goal_selection_strategy(goal_selection_strategy, online_sampling): normal_action_noise = NormalActionNoise(np.zeros(1), 0.1 * np.ones(1)) - model = HER( - "MlpPolicy", + model = SAC( + "MultiInputPolicy", env, - SAC, - goal_selection_strategy=goal_selection_strategy, - online_sampling=online_sampling, - gradient_steps=1, + replay_buffer_class=HerReplayBuffer, + replay_buffer_kwargs=dict( + goal_selection_strategy=goal_selection_strategy, + online_sampling=online_sampling, + max_episode_length=10, + n_sampled_goal=2, + ), train_freq=4, - max_episode_length=10, + gradient_steps=1, policy_kwargs=dict(net_arch=[64]), learning_starts=100, + buffer_size=int(1e5), action_noise=normal_action_noise, ) assert model.action_noise is not None - model.learn(total_timesteps=300) + model.learn(total_timesteps=150) @pytest.mark.parametrize("model_class", [SAC, TD3, DDPG, DQN]) @@ -95,37 +118,39 @@ def test_save_load(tmp_path, model_class, use_sde, online_sampling): kwargs = dict(use_sde=True) if use_sde else {} # create model - model = HER( - "MlpPolicy", + model = model_class( + "MultiInputPolicy", env, - model_class, - n_sampled_goal=5, - goal_selection_strategy="future", - online_sampling=online_sampling, + replay_buffer_class=HerReplayBuffer, + replay_buffer_kwargs=dict( + n_sampled_goal=2, + goal_selection_strategy="future", + online_sampling=online_sampling, + max_episode_length=n_bits, + ), verbose=0, tau=0.05, batch_size=128, learning_rate=0.001, policy_kwargs=dict(net_arch=[64]), - buffer_size=int(1e6), + buffer_size=int(1e5), gamma=0.98, gradient_steps=1, train_freq=4, learning_starts=100, - max_episode_length=n_bits, **kwargs ) - model.learn(total_timesteps=300) + model.learn(total_timesteps=150) - env.reset() + obs = env.reset() - observations_list = [] + observations = {key: [] for key in obs.keys()} for _ in range(10): obs = 
env.step(env.action_space.sample())[0] - observation = ObsDictWrapper.convert_dict(obs) - observations_list.append(observation) - observations = np.array(observations_list) + for key in obs.keys(): + observations[key].append(obs[key]) + observations = {key: np.array(obs) for key, obs in observations.items()} # Get dictionary of current parameters params = deepcopy(model.policy.state_dict()) @@ -153,14 +178,14 @@ def test_save_load(tmp_path, model_class, use_sde, online_sampling): # test custom_objects # Load with custom objects custom_objects = dict(learning_rate=2e-5, dummy=1.0) - model_ = HER.load(str(tmp_path / "test_save.zip"), env=env, custom_objects=custom_objects, verbose=2) + model_ = model_class.load(str(tmp_path / "test_save.zip"), env=env, custom_objects=custom_objects, verbose=2) assert model_.verbose == 2 # Check that the custom object was taken into account assert model_.learning_rate == custom_objects["learning_rate"] # Check that only parameters that are here already are replaced assert not hasattr(model_, "dummy") - model = HER.load(str(tmp_path / "test_save.zip"), env=env) + model = model_class.load(str(tmp_path / "test_save.zip"), env=env) # check if params are still the same after load new_params = model.policy.state_dict() @@ -174,18 +199,19 @@ def test_save_load(tmp_path, model_class, use_sde, online_sampling): assert np.allclose(selected_actions, new_selected_actions, 1e-4) # check if learn still works - model.learn(total_timesteps=300) + model.learn(total_timesteps=150) # Test that the change of parameters works - model = HER.load(str(tmp_path / "test_save.zip"), env=env, verbose=3, learning_rate=2.0) - assert model.model.learning_rate == 2.0 + model = model_class.load(str(tmp_path / "test_save.zip"), env=env, verbose=3, learning_rate=2.0) + assert model.learning_rate == 2.0 assert model.verbose == 3 # clear file from os os.remove(tmp_path / "test_save.zip") -@pytest.mark.parametrize("online_sampling, truncate_last_trajectory", [(False, False), (True, True), (True, False)]) +@pytest.mark.parametrize("online_sampling", [False, True]) +@pytest.mark.parametrize("truncate_last_trajectory", [False, True]) def test_save_load_replay_buffer(tmp_path, recwarn, online_sampling, truncate_last_trajectory): """ Test if 'save_replay_buffer' and 'load_replay_buffer' works correctly @@ -194,26 +220,32 @@ def test_save_load_replay_buffer(tmp_path, recwarn, online_sampling, truncate_la warnings.filterwarnings(action="ignore", category=DeprecationWarning) warnings.filterwarnings(action="ignore", category=UserWarning, module="gym") - path = pathlib.Path(tmp_path / "logs/replay_buffer.pkl") + path = pathlib.Path(tmp_path / "replay_buffer.pkl") path.parent.mkdir(exist_ok=True, parents=True) # to not raise a warning env = BitFlippingEnv(n_bits=4, continuous=True) - model = HER( - "MlpPolicy", + model = SAC( + "MultiInputPolicy", env, - SAC, - goal_selection_strategy="future", - online_sampling=online_sampling, + replay_buffer_class=HerReplayBuffer, + replay_buffer_kwargs=dict( + n_sampled_goal=2, + goal_selection_strategy="future", + online_sampling=online_sampling, + max_episode_length=4, + ), gradient_steps=1, train_freq=4, - max_episode_length=4, buffer_size=int(2e4), policy_kwargs=dict(net_arch=[64]), - seed=0, + seed=1, ) model.learn(200) - old_replay_buffer = deepcopy(model.replay_buffer) + if online_sampling: + old_replay_buffer = deepcopy(model.replay_buffer) + else: + old_replay_buffer = deepcopy(model.replay_buffer.replay_buffer) model.save_replay_buffer(path) - del 
model.model.replay_buffer + del model.replay_buffer with pytest.raises(AttributeError): model.replay_buffer @@ -221,7 +253,7 @@ def test_save_load_replay_buffer(tmp_path, recwarn, online_sampling, truncate_la # Check that there is no warning assert len(recwarn) == 0 - model.load_replay_buffer(path, truncate_last_trajectory) + model.load_replay_buffer(path, truncate_last_traj=truncate_last_trajectory) if truncate_last_trajectory: assert len(recwarn) == 1 @@ -233,31 +265,33 @@ def test_save_load_replay_buffer(tmp_path, recwarn, online_sampling, truncate_la if online_sampling: n_episodes_stored = model.replay_buffer.n_episodes_stored assert np.allclose( - old_replay_buffer.buffer["observation"][:n_episodes_stored], - model.replay_buffer.buffer["observation"][:n_episodes_stored], + old_replay_buffer._buffer["observation"][:n_episodes_stored], + model.replay_buffer._buffer["observation"][:n_episodes_stored], ) assert np.allclose( - old_replay_buffer.buffer["next_obs"][:n_episodes_stored], - model.replay_buffer.buffer["next_obs"][:n_episodes_stored], + old_replay_buffer._buffer["next_obs"][:n_episodes_stored], + model.replay_buffer._buffer["next_obs"][:n_episodes_stored], ) assert np.allclose( - old_replay_buffer.buffer["action"][:n_episodes_stored], - model.replay_buffer.buffer["action"][:n_episodes_stored], + old_replay_buffer._buffer["action"][:n_episodes_stored], + model.replay_buffer._buffer["action"][:n_episodes_stored], ) assert np.allclose( - old_replay_buffer.buffer["reward"][:n_episodes_stored], - model.replay_buffer.buffer["reward"][:n_episodes_stored], + old_replay_buffer._buffer["reward"][:n_episodes_stored], + model.replay_buffer._buffer["reward"][:n_episodes_stored], ) # we might change the last done of the last trajectory so we don't compare it assert np.allclose( - old_replay_buffer.buffer["done"][: n_episodes_stored - 1], - model.replay_buffer.buffer["done"][: n_episodes_stored - 1], + old_replay_buffer._buffer["done"][: n_episodes_stored - 1], + model.replay_buffer._buffer["done"][: n_episodes_stored - 1], ) else: - assert np.allclose(old_replay_buffer.observations, model.replay_buffer.observations) - assert np.allclose(old_replay_buffer.actions, model.replay_buffer.actions) - assert np.allclose(old_replay_buffer.rewards, model.replay_buffer.rewards) - assert np.allclose(old_replay_buffer.dones, model.replay_buffer.dones) + replay_buffer = model.replay_buffer.replay_buffer + assert np.allclose(old_replay_buffer.observations["observation"], replay_buffer.observations["observation"]) + assert np.allclose(old_replay_buffer.observations["desired_goal"], replay_buffer.observations["desired_goal"]) + assert np.allclose(old_replay_buffer.actions, replay_buffer.actions) + assert np.allclose(old_replay_buffer.rewards, replay_buffer.rewards) + assert np.allclose(old_replay_buffer.dones, replay_buffer.dones) # test if continuing training works properly reset_num_timesteps = False if truncate_last_trajectory is False else True @@ -273,15 +307,18 @@ def test_full_replay_buffer(): env = BitFlippingEnv(n_bits=n_bits, continuous=True) # use small buffer size to get the buffer full - model = HER( - "MlpPolicy", + model = SAC( + "MultiInputPolicy", env, - SAC, - goal_selection_strategy="future", - online_sampling=True, + replay_buffer_class=HerReplayBuffer, + replay_buffer_kwargs=dict( + n_sampled_goal=2, + goal_selection_strategy="future", + online_sampling=True, + max_episode_length=n_bits, + ), gradient_steps=1, train_freq=4, - max_episode_length=n_bits, policy_kwargs=dict(net_arch=[64]), 
learning_starts=1, buffer_size=20, @@ -315,15 +352,15 @@ def test_get_max_episode_length(): get_time_limit(vec_env, current_max_episode_length=None) # Initialize HER and specify max_episode_length, should not raise an issue - HER("MlpPolicy", dict_env, DQN, max_episode_length=5) + DQN("MultiInputPolicy", dict_env, replay_buffer_class=HerReplayBuffer, replay_buffer_kwargs=dict(max_episode_length=5)) with pytest.raises(ValueError): - HER("MlpPolicy", dict_env, DQN) + DQN("MultiInputPolicy", dict_env, replay_buffer_class=HerReplayBuffer) # Wrapped in a timelimit, should be fine # Note: it requires env.spec to be defined env = DummyVecEnv([lambda: gym.wrappers.TimeLimit(BitFlippingEnv(), 10)]) - HER("MlpPolicy", env, DQN) + DQN("MultiInputPolicy", env, replay_buffer_class=HerReplayBuffer, replay_buffer_kwargs=dict(max_episode_length=5)) @pytest.mark.parametrize("online_sampling", [False, True]) @@ -335,22 +372,25 @@ def test_performance_her(online_sampling, n_bits): """ env = BitFlippingEnv(n_bits=n_bits, continuous=False) - model = HER( - "MlpPolicy", + model = DQN( + "MultiInputPolicy", env, - DQN, - n_sampled_goal=5, - goal_selection_strategy="future", - online_sampling=online_sampling, + replay_buffer_class=HerReplayBuffer, + replay_buffer_kwargs=dict( + n_sampled_goal=5, + goal_selection_strategy="future", + online_sampling=online_sampling, + max_episode_length=n_bits, + ), verbose=1, learning_rate=5e-4, - max_episode_length=n_bits, train_freq=1, learning_starts=100, exploration_final_eps=0.02, target_update_interval=500, seed=0, batch_size=32, + buffer_size=int(1e5), ) model.learn(total_timesteps=5000, log_interval=50) diff --git a/tests/test_save_load.py b/tests/test_save_load.py index dcb775c61..5e4e9e705 100644 --- a/tests/test_save_load.py +++ b/tests/test_save_load.py @@ -233,6 +233,24 @@ def test_exclude_include_saved_params(tmp_path, model_class): os.remove(tmp_path / "test_save.zip") +def test_save_load_pytorch_var(tmp_path): + model = SAC("MlpPolicy", "Pendulum-v0", seed=3, policy_kwargs=dict(net_arch=[64], n_critics=1)) + model.learn(200) + save_path = str(tmp_path / "sac_pendulum") + model.save(save_path) + env = model.get_env() + ent_coef_before = model.log_ent_coef + + del model + + model = SAC.load(save_path, env=env) + assert th.allclose(ent_coef_before, model.log_ent_coef) + model.learn(200) + ent_coef_after = model.log_ent_coef + # Check that the entropy coefficient is still optimized + assert not th.allclose(ent_coef_before, ent_coef_after) + + @pytest.mark.parametrize("model_class", [A2C, TD3]) def test_save_load_env_cnn(tmp_path, model_class): """ @@ -271,6 +289,8 @@ def test_save_load_replay_buffer(tmp_path, model_class): assert np.allclose(old_replay_buffer.actions, model.replay_buffer.actions) assert np.allclose(old_replay_buffer.rewards, model.replay_buffer.rewards) assert np.allclose(old_replay_buffer.dones, model.replay_buffer.dones) + assert np.allclose(old_replay_buffer.timeouts, model.replay_buffer.timeouts) + infos = [[{"TimeLimit.truncated": truncated}] for truncated in old_replay_buffer.timeouts] # test extending replay buffer model.replay_buffer.extend( @@ -279,6 +299,7 @@ def test_save_load_replay_buffer(tmp_path, model_class): old_replay_buffer.actions, old_replay_buffer.rewards, old_replay_buffer.dones, + infos, ) @@ -323,7 +344,8 @@ def test_warn_buffer(recwarn, model_class, optimize_memory_usage): @pytest.mark.parametrize("model_class", MODEL_LIST) @pytest.mark.parametrize("policy_str", ["MlpPolicy", "CnnPolicy"]) -def test_save_load_policy(tmp_path, 
model_class, policy_str): +@pytest.mark.parametrize("use_sde", [False, True]) +def test_save_load_policy(tmp_path, model_class, policy_str, use_sde): """ Test saving and loading policy only. @@ -331,6 +353,11 @@ def test_save_load_policy(tmp_path, model_class, policy_str): :param policy_str: (str) Name of the policy. """ kwargs = dict(policy_kwargs=dict(net_arch=[16])) + + # gSDE is only applicable for A2C, PPO and SAC + if use_sde and model_class not in [A2C, PPO, SAC]: + pytest.skip() + if policy_str == "MlpPolicy": env = select_env(model_class) else: @@ -342,6 +369,9 @@ def test_save_load_policy(tmp_path, model_class, policy_str): ) env = FakeImageEnv(screen_height=40, screen_width=40, n_channels=2, discrete=model_class == DQN) + if use_sde: + kwargs["use_sde"] = True + env = DummyVecEnv([lambda: env]) # create model diff --git a/tests/test_vec_extract_dict_obs.py b/tests/test_vec_extract_dict_obs.py new file mode 100644 index 000000000..15074425e --- /dev/null +++ b/tests/test_vec_extract_dict_obs.py @@ -0,0 +1,52 @@ +import numpy as np +from gym import spaces + +from stable_baselines3 import PPO +from stable_baselines3.common.vec_env import VecExtractDictObs, VecMonitor + + +class DictObsVecEnv: + """Custom Environment that produces observation in a dictionary like the procgen env""" + + metadata = {"render.modes": ["human"]} + + def __init__(self): + self.num_envs = 4 + self.action_space = spaces.Discrete(2) + self.observation_space = spaces.Dict({"rgb": spaces.Box(low=0.0, high=255.0, shape=(86, 86), dtype=np.float32)}) + + def step_async(self, actions): + self.actions = actions + + def step_wait(self): + return ( + {"rgb": np.zeros((self.num_envs, 86, 86))}, + np.zeros((self.num_envs,)), + np.zeros((self.num_envs,), dtype=bool), + [{} for _ in range(self.num_envs)], + ) + + def reset(self): + return {"rgb": np.zeros((self.num_envs, 86, 86))} + + def render(self, mode="human", close=False): + pass + + +def test_extract_dict_obs(): + """Test VecExtractDictObs""" + + env = DictObsVecEnv() + env = VecExtractDictObs(env, "rgb") + assert env.reset().shape == (4, 86, 86) + + +def test_vec_with_ppo(): + """ + Test the `VecExtractDictObs` with PPO + """ + env = DictObsVecEnv() + env = VecExtractDictObs(env, "rgb") + monitor_env = VecMonitor(env) + model = PPO("MlpPolicy", monitor_env, verbose=1, n_steps=64, device="cpu") + model.learn(total_timesteps=250) diff --git a/tests/test_vec_monitor.py b/tests/test_vec_monitor.py new file mode 100644 index 000000000..585e6906c --- /dev/null +++ b/tests/test_vec_monitor.py @@ -0,0 +1,120 @@ +import json +import os +import uuid + +import gym +import pandas +import pytest + +from stable_baselines3 import PPO +from stable_baselines3.common.evaluation import evaluate_policy +from stable_baselines3.common.monitor import Monitor, get_monitor_files, load_results +from stable_baselines3.common.vec_env import DummyVecEnv, VecMonitor, VecNormalize + + +def test_vec_monitor(tmp_path): + """ + Test the `VecMonitor` wrapper + """ + env = DummyVecEnv([lambda: gym.make("CartPole-v1")]) + env.seed(0) + monitor_file = os.path.join(str(tmp_path), f"stable_baselines-test-{uuid.uuid4()}.monitor.csv") + monitor_env = VecMonitor(env, monitor_file) + monitor_env.reset() + total_steps = 1000 + ep_len, ep_reward = 0, 0 + for _ in range(total_steps): + _, rewards, dones, infos = monitor_env.step([monitor_env.action_space.sample()]) + ep_len += 1 + ep_reward += rewards[0] + if dones[0]: + assert ep_reward == infos[0]["episode"]["r"] + assert ep_len == infos[0]["episode"]["l"] 
+ ep_len, ep_reward = 0, 0 + + monitor_env.close() + + with open(monitor_file, "rt") as file_handler: + first_line = file_handler.readline() + assert first_line.startswith("#") + metadata = json.loads(first_line[1:]) + assert set(metadata.keys()) == {"t_start", "env_id"}, "Incorrect keys in monitor metadata" + + last_logline = pandas.read_csv(file_handler, index_col=None) + assert set(last_logline.keys()) == {"l", "t", "r"}, "Incorrect keys in monitor logline" + os.remove(monitor_file) + + +def test_vec_monitor_load_results(tmp_path): + """ + test load_results on log files produced by the monitor wrapper + """ + tmp_path = str(tmp_path) + env1 = DummyVecEnv([lambda: gym.make("CartPole-v1")]) + env1.seed(0) + monitor_file1 = os.path.join(str(tmp_path), f"stable_baselines-test-{uuid.uuid4()}.monitor.csv") + monitor_env1 = VecMonitor(env1, monitor_file1) + + monitor_files = get_monitor_files(tmp_path) + assert len(monitor_files) == 1 + assert monitor_file1 in monitor_files + + monitor_env1.reset() + episode_count1 = 0 + for _ in range(1000): + _, _, dones, _ = monitor_env1.step([monitor_env1.action_space.sample()]) + if dones[0]: + episode_count1 += 1 + monitor_env1.reset() + + results_size1 = len(load_results(os.path.join(tmp_path)).index) + assert results_size1 == episode_count1 + + env2 = DummyVecEnv([lambda: gym.make("CartPole-v1")]) + env2.seed(0) + monitor_file2 = os.path.join(str(tmp_path), f"stable_baselines-test-{uuid.uuid4()}.monitor.csv") + monitor_env2 = VecMonitor(env2, monitor_file2) + monitor_files = get_monitor_files(tmp_path) + assert len(monitor_files) == 2 + assert monitor_file1 in monitor_files + assert monitor_file2 in monitor_files + + monitor_env2.reset() + episode_count2 = 0 + for _ in range(1000): + _, _, dones, _ = monitor_env2.step([monitor_env2.action_space.sample()]) + if dones[0]: + episode_count2 += 1 + monitor_env2.reset() + + results_size2 = len(load_results(os.path.join(tmp_path)).index) + + assert results_size2 == (results_size1 + episode_count2) + + os.remove(monitor_file1) + os.remove(monitor_file2) + + +def test_vec_monitor_ppo(recwarn): + """ + Test the `VecMonitor` with PPO + """ + env = DummyVecEnv([lambda: gym.make("CartPole-v1")]) + env.seed(0) + monitor_env = VecMonitor(env) + model = PPO("MlpPolicy", monitor_env, verbose=1, n_steps=64, device="cpu") + model.learn(total_timesteps=250) + + # No warnings because using `VecMonitor` + evaluate_policy(model, monitor_env) + assert len(recwarn) == 0 + + +def test_vec_monitor_warn(): + env = DummyVecEnv([lambda: Monitor(gym.make("CartPole-v1"))]) + # We should warn the user when the env is already wrapped with a Monitor wrapper + with pytest.warns(UserWarning): + VecMonitor(env) + + with pytest.warns(UserWarning): + VecMonitor(VecNormalize(env)) diff --git a/tests/test_vec_normalize.py b/tests/test_vec_normalize.py index 46c0c4450..63d4dbfba 100644 --- a/tests/test_vec_normalize.py +++ b/tests/test_vec_normalize.py @@ -3,7 +3,7 @@ import pytest from gym import spaces -from stable_baselines3 import HER, SAC, TD3 +from stable_baselines3 import SAC, TD3, HerReplayBuffer from stable_baselines3.common.monitor import Monitor from stable_baselines3.common.running_mean_std import RunningMeanStd from stable_baselines3.common.vec_env import ( @@ -217,18 +217,39 @@ def test_normalize_external(): assert np.all(norm_rewards < 1) -@pytest.mark.parametrize("model_class", [SAC, TD3, HER]) -def test_offpolicy_normalization(model_class): - make_env_ = make_dict_env if model_class == HER else make_env 
+@pytest.mark.parametrize("model_class", [SAC, TD3, HerReplayBuffer]) +@pytest.mark.parametrize("online_sampling", [False, True]) +def test_offpolicy_normalization(model_class, online_sampling): + + if online_sampling and model_class != HerReplayBuffer: + pytest.skip() + + make_env_ = make_dict_env if model_class == HerReplayBuffer else make_env env = DummyVecEnv([make_env_]) env = VecNormalize(env, norm_obs=True, norm_reward=True, clip_obs=10.0, clip_reward=10.0) eval_env = DummyVecEnv([make_env_]) eval_env = VecNormalize(eval_env, training=False, norm_obs=True, norm_reward=False, clip_obs=10.0, clip_reward=10.0) - kwargs = dict(model_class=SAC, max_episode_length=200, online_sampling=True) if model_class == HER else {} - model = model_class("MlpPolicy", env, verbose=1, learning_starts=100, policy_kwargs=dict(net_arch=[64]), **kwargs) - model.learn(total_timesteps=500, eval_env=eval_env, eval_freq=250) + if model_class == HerReplayBuffer: + model = SAC( + "MultiInputPolicy", + env, + verbose=1, + learning_starts=100, + policy_kwargs=dict(net_arch=[64]), + replay_buffer_kwargs=dict( + max_episode_length=100, + online_sampling=online_sampling, + n_sampled_goal=2, + ), + replay_buffer_class=HerReplayBuffer, + seed=2, + ) + else: + model = model_class("MlpPolicy", env, verbose=1, learning_starts=100, policy_kwargs=dict(net_arch=[64])) + + model.learn(total_timesteps=150, eval_env=eval_env, eval_freq=75) # Check getter assert isinstance(model.get_vec_normalize_env(), VecNormalize) From 89607cface4194f938fb9ef31b60e79a4548590a Mon Sep 17 00:00:00 2001 From: Antonin Raffin Date: Mon, 3 May 2021 17:19:11 +0200 Subject: [PATCH 61/70] Update doc and minor fixes --- docs/guide/algos.rst | 9 +++++++++ docs/guide/custom_policy.rst | 10 +++++----- docs/misc/changelog.rst | 7 ------- docs/modules/sac.rst | 1 + stable_baselines3/common/base_class.py | 6 +++--- stable_baselines3/common/utils.py | 7 ++++--- stable_baselines3/dqn/dqn.py | 1 + stable_baselines3/sac/policies.py | 13 ++++++++----- 8 files changed, 31 insertions(+), 23 deletions(-) diff --git a/docs/guide/algos.rst b/docs/guide/algos.rst index 7988a6700..30c9f358a 100644 --- a/docs/guide/algos.rst +++ b/docs/guide/algos.rst @@ -42,6 +42,15 @@ Actions ``gym.spaces``: See `Issue #339 `_ for more info. +.. note:: + + When using off-policy algorithms, `Time Limits `_ (aka timeouts) are handled + properly (cf. `issue #284 `_). + You can revert to SB3 < 2.1.0 behavior by passing ``handle_timeout_termination=False`` + via the ``replay_buffer_kwargs`` argument. + + + Reproducibility --------------- diff --git a/docs/guide/custom_policy.rst b/docs/guide/custom_policy.rst index f17c10ab3..3da932b33 100644 --- a/docs/guide/custom_policy.rst +++ b/docs/guide/custom_policy.rst @@ -158,17 +158,17 @@ inputs into a single vector, handled by the ``net_arch`` network. By default, ``CombinedExtractor`` processes multiple inputs as follows: -1. If input is an image (automatically detected, see ``common.preprocessing.is_image_space``), process image with Nature Atari CNN network and - output a latent vector of size ``64``. +1. If input is an image (automatically detected, see ``common.preprocessing.is_image_space``), process image with Nature Atari CNN network and + output a latent vector of size ``256``. 2. If input is not an image, flatten it (no layers). 3. Concatenate all previous vectors into one long vector and pass it to policy. -Much like above, you can define custom feature extractors as above. 
The following example assumes the environment has two keys in the -observation space dictionary: "image" is a (1,H,W) image, and "vector" is a (D,) dimensional vector. We process "image" with a simple +Much like above, you can define custom feature extractors. The following example assumes the environment has two keys in the +observation space dictionary: "image" is a (1,H,W) image (channel first), and "vector" is a (D,) dimensional vector. We process "image" with a simple downsampling and "vector" with a single linear layer. .. code-block:: python - + import gym import torch as th from torch import nn diff --git a/docs/misc/changelog.rst b/docs/misc/changelog.rst index f2204c261..907683868 100644 --- a/docs/misc/changelog.rst +++ b/docs/misc/changelog.rst @@ -26,13 +26,6 @@ Breaking Changes: model = SAC("MultiInputPolicy", env, replay_buffer_class=HerReplayBuffer, replay_buffer_kwargs=her_kwargs) -.. warning:: - - A refactoring of the ``HER`` algorithm is planned together with support for dictionary observations - (see `PR #243 `_ and `#351 `_) - This will be a backward incompatible change (model trained with previous version of ``HER`` won't work with the new version). - - New Features: ^^^^^^^^^^^^^ - Added support for single-level ``Dict`` observation space (@JadenTravnik) diff --git a/docs/modules/sac.rst b/docs/modules/sac.rst index 1e77ef40d..818d8c240 100644 --- a/docs/modules/sac.rst +++ b/docs/modules/sac.rst @@ -19,6 +19,7 @@ A key feature of SAC, and a major difference with common RL algorithms, is that MlpPolicy CnnPolicy + MultiInputPolicy Notes diff --git a/stable_baselines3/common/base_class.py b/stable_baselines3/common/base_class.py index bbb630fd3..4d0d6b0f2 100644 --- a/stable_baselines3/common/base_class.py +++ b/stable_baselines3/common/base_class.py @@ -128,10 +128,10 @@ def __init__( self.learning_rate = learning_rate self.tensorboard_log = tensorboard_log self.lr_schedule = None # type: Optional[Schedule] - self._last_obs = None # type: Optional[np.ndarray] + self._last_obs = None # type: Optional[Union[np.ndarray, Dict[str, np.ndarray]]] self._last_episode_starts = None # type: Optional[np.ndarray] # When using VecNormalize: - self._last_original_obs = None # type: Optional[np.ndarray] + self._last_original_obs = None # type: Optional[Union[np.ndarray, Dict[str, np.ndarray]]] self._episode_num = 0 # Used for gSDE only self.use_sde = use_sde @@ -389,7 +389,7 @@ def _setup_learn( # Avoid resetting the environment when calling ``.learn()`` consecutive times if reset_num_timesteps or self._last_obs is None: - self._last_obs = self.env.reset() + self._last_obs = self.env.reset() # pytype: disable=annotation-type-mismatch self._last_episode_starts = np.ones((self.env.num_envs,), dtype=bool) # Retrieve unnormalized observation for saving into the buffer if self._vec_normalize_env is not None: diff --git a/stable_baselines3/common/utils.py b/stable_baselines3/common/utils.py index 4594de156..9b1a6f32c 100644 --- a/stable_baselines3/common/utils.py +++ b/stable_baselines3/common/utils.py @@ -411,10 +411,11 @@ def obs_as_tensor( obs: Union[np.ndarray, Dict[Union[str, int], np.ndarray]], device: th.device ) -> Union[th.Tensor, TensorDict]: """ - Moves the observeration to the given device + Moves the observation to the given device. + :param obs: - :param device: - :return: + :param device: PyTorch device + :return: PyTorch tensor of the observation on a desired device. 
""" if isinstance(obs, np.ndarray): return th.as_tensor(obs).to(device) diff --git a/stable_baselines3/dqn/dqn.py b/stable_baselines3/dqn/dqn.py index 615c5f061..6dffaa721 100644 --- a/stable_baselines3/dqn/dqn.py +++ b/stable_baselines3/dqn/dqn.py @@ -171,6 +171,7 @@ def train(self, gradient_steps: int, batch_size: int = 100) -> None: # 1-step TD target target_q_values = replay_data.rewards + (1 - replay_data.dones) * self.gamma * next_q_values + # Get current Q-values estimates current_q_values = self.q_net(replay_data.observations) # Retrieve the q-values for the actions from the replay buffer diff --git a/stable_baselines3/sac/policies.py b/stable_baselines3/sac/policies.py index af12478cd..86d218517 100644 --- a/stable_baselines3/sac/policies.py +++ b/stable_baselines3/sac/policies.py @@ -94,10 +94,9 @@ def __init__( latent_sde_dim = last_layer_dim # Separate features extractor for gSDE if sde_net_arch is not None: - ( - self.sde_features_extractor, - latent_sde_dim, - ) = create_sde_features_extractor(features_dim, sde_net_arch, activation_fn) + self.sde_features_extractor, latent_sde_dim = create_sde_features_extractor( + features_dim, sde_net_arch, activation_fn + ) self.action_dist = StateDependentNoiseDistribution( action_dim, full_std=full_std, use_expln=use_expln, learn_features=True, squash_output=True @@ -284,7 +283,11 @@ def __init__( self.actor_kwargs.update(sde_kwargs) self.critic_kwargs = self.net_args.copy() self.critic_kwargs.update( - {"n_critics": n_critics, "net_arch": critic_arch, "share_features_extractor": share_features_extractor} + { + "n_critics": n_critics, + "net_arch": critic_arch, + "share_features_extractor": share_features_extractor, + } ) self.actor, self.actor_target = None, None From 495cf5d8cc86e7c8e5a23af4a778b78d1bc4d3d6 Mon Sep 17 00:00:00 2001 From: Antonin Raffin Date: Mon, 3 May 2021 17:23:52 +0200 Subject: [PATCH 62/70] Update doc --- docs/guide/algos.rst | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/docs/guide/algos.rst b/docs/guide/algos.rst index 30c9f358a..78ccb9a89 100644 --- a/docs/guide/algos.rst +++ b/docs/guide/algos.rst @@ -19,8 +19,9 @@ TD3 ✔️ ❌ ❌ ❌ .. note:: - ``Tuple`` spaces are not supported by any environment however ``Dict`` spaces of ``Box`` spaces are. - ``Dict`` spaces of containing other kinds of spaces (e.g., ``Discrete``) have not yet been explored. + ``Tuple`` observation spaces are not supported by any environment + however single-level ``Dict`` spaces are (cf. :ref:`Examples `). 
+ Actions ``gym.spaces``: From 0ea5c61c9a54aa29749bdbddad06638e2501c80e Mon Sep 17 00:00:00 2001 From: Jaden Travnik <57504230+J-Travnik@users.noreply.github.com> Date: Fri, 7 May 2021 23:29:44 -0400 Subject: [PATCH 63/70] Added note about MultiInputPolicy in error of NatureCNN --- stable_baselines3/common/torch_layers.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stable_baselines3/common/torch_layers.py b/stable_baselines3/common/torch_layers.py index 6b73ae92d..16088f3c4 100644 --- a/stable_baselines3/common/torch_layers.py +++ b/stable_baselines3/common/torch_layers.py @@ -67,7 +67,7 @@ def __init__(self, observation_space: gym.spaces.Box, features_dim: int = 512): assert is_image_space(observation_space), ( "You should use NatureCNN " f"only with images not with {observation_space}\n" - "(you are probably using `CnnPolicy` instead of `MlpPolicy`)\n" + "(you are probably using `CnnPolicy` instead of `MlpPolicy` or `MultiInputPolicy`)\n" "If you are using a custom environment,\n" "please check it using our env checker:\n" "https://stable-baselines3.readthedocs.io/en/master/common/env_checker.html" From f8351ab27e804ee9a35a797d08c56db9da98c5aa Mon Sep 17 00:00:00 2001 From: Antonin Raffin Date: Mon, 10 May 2021 11:31:53 +0200 Subject: [PATCH 64/70] Merge branch 'master' into feat/dict_observations --- docs/misc/changelog.rst | 3 ++- docs/modules/ddpg.rst | 2 ++ stable_baselines3/common/on_policy_algorithm.py | 6 ++++-- 3 files changed, 8 insertions(+), 3 deletions(-) diff --git a/docs/misc/changelog.rst b/docs/misc/changelog.rst index 907683868..07d3df8ca 100644 --- a/docs/misc/changelog.rst +++ b/docs/misc/changelog.rst @@ -38,6 +38,7 @@ New Features: to handle gym3-style vectorized environments (@vwxyzjn) - Ignored the terminal observation if the it is not provided by the environment such as the gym3-style vectorized environments. (@vwxyzjn) +- Add policy_base as input to the OnPolicyAlgorithm for more flexibility (@09tangriro) - Added support for image observation when using ``HER`` - Added ``replay_buffer_class`` and ``replay_buffer_kwargs`` arguments to off-policy algorithms @@ -683,4 +684,4 @@ And all the contributors: @tirafesi @blurLake @koulakis @joeljosephjin @shwang @rk37 @andyshih12 @RaphaelWag @xicocaio @diditforlulz273 @liorcohen5 @ManifoldFR @mloo3 @SwamyDev @wmmc88 @megan-klaiber @thisray @tfederico @hn2 @LucasAlegre @AptX395 @zampanteymedio @JadenTravnik @decodyng @ardabbour @lorenz-h @mschweizer @lorepieri8 @vwxyzjn -@ShangqunYu @PierreExeter @JacopoPan @ltbd78 @tom-doerr @Atlis @liusida +@ShangqunYu @PierreExeter @JacopoPan @ltbd78 @tom-doerr @Atlis @liusida @09tangriro diff --git a/docs/modules/ddpg.rst b/docs/modules/ddpg.rst index 6bdfd45b3..40bef6507 100644 --- a/docs/modules/ddpg.rst +++ b/docs/modules/ddpg.rst @@ -169,6 +169,8 @@ DDPG Policies .. autoclass:: CnnPolicy :members: + :noindex: .. 
autoclass:: MultiInputPolicy :members: + :noindex: diff --git a/stable_baselines3/common/on_policy_algorithm.py b/stable_baselines3/common/on_policy_algorithm.py index bbb6473e9..924788dbd 100644 --- a/stable_baselines3/common/on_policy_algorithm.py +++ b/stable_baselines3/common/on_policy_algorithm.py @@ -9,7 +9,7 @@ from stable_baselines3.common.base_class import BaseAlgorithm from stable_baselines3.common.buffers import DictRolloutBuffer, RolloutBuffer from stable_baselines3.common.callbacks import BaseCallback -from stable_baselines3.common.policies import ActorCriticPolicy +from stable_baselines3.common.policies import ActorCriticPolicy, BasePolicy from stable_baselines3.common.type_aliases import GymEnv, MaybeCallback, Schedule from stable_baselines3.common.utils import obs_as_tensor, safe_mean from stable_baselines3.common.vec_env import VecEnv @@ -35,6 +35,7 @@ class OnPolicyAlgorithm(BaseAlgorithm): instead of action noise exploration (default: False) :param sde_sample_freq: Sample a new noise matrix every n steps when using gSDE Default: -1 (only sample at the beginning of the rollout) + :param policy_base: The base policy used by this method :param tensorboard_log: the log location for tensorboard (if None, no logging) :param create_eval_env: Whether to create a second environment that will be used for evaluating the agent periodically. (Only available when passing string for the environment) @@ -62,6 +63,7 @@ def __init__( max_grad_norm: float, use_sde: bool, sde_sample_freq: int, + policy_base: Type[BasePolicy] = ActorCriticPolicy, tensorboard_log: Optional[str] = None, create_eval_env: bool = False, monitor_wrapper: bool = True, @@ -76,7 +78,7 @@ def __init__( super(OnPolicyAlgorithm, self).__init__( policy=policy, env=env, - policy_base=ActorCriticPolicy, + policy_base=policy_base, learning_rate=learning_rate, policy_kwargs=policy_kwargs, verbose=verbose, From 94cb760a386404945f58c1d4a757b44840ea2802 Mon Sep 17 00:00:00 2001 From: "Anssi \"Miffyli\" Kanervisto" Date: Tue, 11 May 2021 01:17:03 +0300 Subject: [PATCH 65/70] Address comments --- stable_baselines3/common/env_checker.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stable_baselines3/common/env_checker.py b/stable_baselines3/common/env_checker.py index 090bb796b..6bd097dae 100644 --- a/stable_baselines3/common/env_checker.py +++ b/stable_baselines3/common/env_checker.py @@ -119,7 +119,7 @@ def _check_box_obs(observation_space: spaces.Box, key: str = "") -> None: Check that the observation space is correctly formatted when dealing with a ``Box()`` space. 
From 94cb760a386404945f58c1d4a757b44840ea2802 Mon Sep 17 00:00:00 2001
From: "Anssi \"Miffyli\" Kanervisto"
Date: Tue, 11 May 2021 01:17:03 +0300
Subject: [PATCH 65/70] Address comments

---
 stable_baselines3/common/env_checker.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/stable_baselines3/common/env_checker.py b/stable_baselines3/common/env_checker.py
index 090bb796b..6bd097dae 100644
--- a/stable_baselines3/common/env_checker.py
+++ b/stable_baselines3/common/env_checker.py
@@ -119,7 +119,7 @@ def _check_box_obs(observation_space: spaces.Box, key: str = "") -> None:
     Check that the observation space is correctly formatted
     when dealing with a ``Box()`` space. In particular, it checks:
     - that the dimensions are big enough when it is an image, and that the type matches
-    - that the observation has an expected shape (warn the use if not)
+    - that the observation has an expected shape (warn the user if not)
     """
     # If image, check the low and high values, the type and the number of channels
     # and the shape (minimal value)

From 5d56c34be7aab5b482a14b67924e4521346186c0 Mon Sep 17 00:00:00 2001
From: "Anssi \"Miffyli\" Kanervisto"
Date: Tue, 11 May 2021 01:19:10 +0300
Subject: [PATCH 66/70] Naming clarifications

---
 stable_baselines3/common/vec_env/vec_transpose.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/stable_baselines3/common/vec_env/vec_transpose.py b/stable_baselines3/common/vec_env/vec_transpose.py
index d4b2e4ef4..37c15f6bc 100644
--- a/stable_baselines3/common/vec_env/vec_transpose.py
+++ b/stable_baselines3/common/vec_env/vec_transpose.py
@@ -42,8 +42,8 @@ def transpose_space(observation_space: spaces.Box) -> spaces.Box:
         :return:
         """
         assert is_image_space(observation_space), "The observation space must be an image"
-        width, height, channels = observation_space.shape
-        new_shape = (channels, width, height)
+        height, width, channels = observation_space.shape
+        new_shape = (channels, height, width)
         return spaces.Box(low=0, high=255, shape=new_shape, dtype=observation_space.dtype)

     @staticmethod
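Only the variable names change in that last hunk: gym image spaces are ``(height, width, channels)`` while PyTorch expects ``(channels, height, width)``, so the code was already correct but read as if width came first. A two-line check of the intended reordering (plain numpy, no SB3 involved):

    import numpy as np

    obs = np.zeros((60, 80, 3), dtype=np.uint8)  # (H, W, C): height=60, width=80
    chw = np.transpose(obs, (2, 0, 1))           # -> (C, H, W)
    assert chw.shape == (3, 60, 80)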
From c30916e9a43c85c72dc991ef9b35f5bf0778e3db Mon Sep 17 00:00:00 2001
From: "Anssi \"Miffyli\" Kanervisto"
Date: Tue, 11 May 2021 01:29:52 +0300
Subject: [PATCH 67/70] Actually saving the file would be nice

---
 docs/modules/ddpg.rst | 3 ---
 1 file changed, 3 deletions(-)

diff --git a/docs/modules/ddpg.rst b/docs/modules/ddpg.rst
index 844d0eda2..40bef6507 100644
--- a/docs/modules/ddpg.rst
+++ b/docs/modules/ddpg.rst
@@ -170,10 +170,7 @@ DDPG Policies
 .. autoclass:: CnnPolicy
   :members:
   :noindex:
-<<<<<<< HEAD

 .. autoclass:: MultiInputPolicy
   :members:
   :noindex:
-=======
->>>>>>> master

From 0acea9736f9de9cc75abbb7bba1f2dd0ba67404e Mon Sep 17 00:00:00 2001
From: Antonin Raffin
Date: Tue, 11 May 2021 11:28:00 +0200
Subject: [PATCH 68/70] Fix edge case when doing online sampling with HER

---
 setup.cfg                                  |  7 ++---
 stable_baselines3/her/her_replay_buffer.py | 31 ++++++++++++----------
 tests/test_her.py                          |  1 +
 3 files changed, 20 insertions(+), 19 deletions(-)

diff --git a/setup.cfg b/setup.cfg
index 1f412536d..7bfd321e3 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -7,11 +7,8 @@ license_file = LICENSE
 env =
     PYTHONHASHSEED=0
 filterwarnings =
-    # Tensorboard/Tensorflow warnings
-    ignore:inspect.getargspec:DeprecationWarning:tensorflow
-    ignore:builtin type EagerTensor has no __module__ attribute:DeprecationWarning
-    ignore:The binary mode of fromstring is deprecated:DeprecationWarning
-    ignore::FutureWarning:tensorflow
+    # Tensorboard warnings
+    ignore::DeprecationWarning:tensorboard
     # Gym warnings
     ignore:Parameters to load are deprecated.:DeprecationWarning
     ignore:the imp module is deprecated in favour of importlib:PendingDeprecationWarning
diff --git a/stable_baselines3/her/her_replay_buffer.py b/stable_baselines3/her/her_replay_buffer.py
index ea5d35e09..6a790d691 100644
--- a/stable_baselines3/her/her_replay_buffer.py
+++ b/stable_baselines3/her/her_replay_buffer.py
@@ -347,20 +347,23 @@ def _sample_transitions(
             ]
         )

-        # Vectorized computation of the new reward
-        transitions["reward"][her_indices, 0] = self.env.env_method(
-            "compute_reward",
-            # the new state depends on the previous state and action
-            # s_{t+1} = f(s_t, a_t)
-            # so the next_achieved_goal depends also on the previous state and action
-            # because we are in a GoalEnv:
-            # r_t = reward(s_t, a_t) = reward(next_achieved_goal, desired_goal)
-            # therefore we have to use "next_achieved_goal" and not "achieved_goal"
-            transitions["next_achieved_goal"][her_indices, 0],
-            # here we use the new desired goal
-            transitions["desired_goal"][her_indices, 0],
-            transitions["info"][her_indices, 0],
-        )
+        # Edge case: episode of one timestep with the future strategy
+        # no virtual transition can be created
+        if len(her_indices) > 0:
+            # Vectorized computation of the new reward
+            transitions["reward"][her_indices, 0] = self.env.env_method(
+                "compute_reward",
+                # the new state depends on the previous state and action
+                # s_{t+1} = f(s_t, a_t)
+                # so the next_achieved_goal depends also on the previous state and action
+                # because we are in a GoalEnv:
+                # r_t = reward(s_t, a_t) = reward(next_achieved_goal, desired_goal)
+                # therefore we have to use "next_achieved_goal" and not "achieved_goal"
+                transitions["next_achieved_goal"][her_indices, 0],
+                # here we use the new desired goal
+                transitions["desired_goal"][her_indices, 0],
+                transitions["info"][her_indices, 0],
+            )

         # concatenate observation with (desired) goal
         observations = self._normalize_obs(transitions, maybe_vec_env)
diff --git a/tests/test_her.py b/tests/test_her.py
index ef887e70f..0f6d75f6f 100644
--- a/tests/test_her.py
+++ b/tests/test_her.py
@@ -323,6 +323,7 @@ def test_full_replay_buffer():
         learning_starts=1,
         buffer_size=20,
         verbose=1,
+        seed=757,
    )

     model.learn(total_timesteps=100)
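The ``len(her_indices) > 0`` guard matters because, with the "future" goal-sampling strategy, the virtual goal for step ``t`` is drawn from steps after ``t`` in the same episode; an episode of length one has no such step, so ``her_indices`` comes back empty and the ``compute_reward`` call must be skipped. A toy illustration of why the indices are empty (illustrative numbers only, not SB3 internals):

    import numpy as np

    episode_length = 1
    # With the "future" strategy, step t can only be relabeled with a goal
    # achieved at some later step t+1 .. episode_length-1.
    steps_with_a_future = np.arange(episode_length - 1)

    her_indices = steps_with_a_future
    assert len(her_indices) == 0  # nothing to relabel -> skip compute_reward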
From d6a59f9d3b4e2d2c5fb5ff23642bfeccc367ff9b Mon Sep 17 00:00:00 2001
From: Antonin Raffin
Date: Tue, 11 May 2021 11:35:50 +0200
Subject: [PATCH 69/70] Cleanup

---
 docs/misc/changelog.rst | 4 ----
 1 file changed, 4 deletions(-)

diff --git a/docs/misc/changelog.rst b/docs/misc/changelog.rst
index bf8c02e02..e4cab0f06 100644
--- a/docs/misc/changelog.rst
+++ b/docs/misc/changelog.rst
@@ -685,9 +685,5 @@ And all the contributors: @tirafesi @blurLake @koulakis @joeljosephjin @shwang
 @rk37 @andyshih12 @RaphaelWag @xicocaio @diditforlulz273 @liorcohen5 @ManifoldFR @mloo3 @SwamyDev @wmmc88 @megan-klaiber @thisray
-<<<<<<< HEAD
 @tfederico @hn2 @LucasAlegre @AptX395 @zampanteymedio @JadenTravnik @decodyng @ardabbour @lorenz-h @mschweizer @lorepieri8 @vwxyzjn
-=======
-@tfederico @hn2 @LucasAlegre @AptX395 @zampanteymedio @decodyng @ardabbour @lorenz-h @mschweizer @lorepieri8 @vwxyzjn
->>>>>>> master
 @ShangqunYu @PierreExeter @JacopoPan @ltbd78 @tom-doerr @Atlis @liusida @09tangriro
From ce848fb69e73a181aa7f8116f1be0e1e692cd4a5 Mon Sep 17 00:00:00 2001
From: Antonin Raffin
Date: Tue, 11 May 2021 11:48:34 +0200
Subject: [PATCH 70/70] Add sanity check

---
 stable_baselines3/common/base_class.py            |  2 ++
 stable_baselines3/common/vec_env/vec_transpose.py | 11 ++++++++---
 2 files changed, 10 insertions(+), 3 deletions(-)

diff --git a/stable_baselines3/common/base_class.py b/stable_baselines3/common/base_class.py
index 4d0d6b0f2..7a97980c8 100644
--- a/stable_baselines3/common/base_class.py
+++ b/stable_baselines3/common/base_class.py
@@ -206,6 +206,8 @@ def _wrap_env(env: GymEnv, verbose: int = 0, monitor_wrapper: bool = True) -> Ve
         wrap_with_vectranspose = False
         if isinstance(env.observation_space, gym.spaces.Dict):
             # If even one of the keys is an image space in need of transpose, apply transpose
+            # If the image spaces are not consistent (for instance one is channel first,
+            # the other channel last), VecTransposeImage will throw an error
             for space in env.observation_space.spaces.values():
                 wrap_with_vectranspose = wrap_with_vectranspose or (
                     is_image_space(space) and not is_image_space_channels_first(space)
diff --git a/stable_baselines3/common/vec_env/vec_transpose.py b/stable_baselines3/common/vec_env/vec_transpose.py
index 37c15f6bc..399fb310e 100644
--- a/stable_baselines3/common/vec_env/vec_transpose.py
+++ b/stable_baselines3/common/vec_env/vec_transpose.py
@@ -4,7 +4,7 @@
 import numpy as np
 from gym import spaces

-from stable_baselines3.common.preprocessing import is_image_space
+from stable_baselines3.common.preprocessing import is_image_space, is_image_space_channels_first
 from stable_baselines3.common.vec_env.base_vec_env import VecEnv, VecEnvStepReturn, VecEnvWrapper
@@ -28,20 +28,25 @@ def __init__(self, venv: VecEnv):
                 if is_image_space(space):
                     # Keep track of which keys should be transposed later
                     self.image_space_keys.append(key)
-                    observation_space.spaces[key] = self.transpose_space(space)
+                    observation_space.spaces[key] = self.transpose_space(space, key)
         else:
             observation_space = self.transpose_space(venv.observation_space)
         super(VecTransposeImage, self).__init__(venv, observation_space=observation_space)

     @staticmethod
-    def transpose_space(observation_space: spaces.Box) -> spaces.Box:
+    def transpose_space(observation_space: spaces.Box, key: str = "") -> spaces.Box:
         """
         Transpose an observation space (re-order channels).

         :param observation_space:
+        :param key: In case of dictionary space, the key of the observation space.
         :return:
         """
+        # Sanity checks
         assert is_image_space(observation_space), "The observation space must be an image"
+        assert not is_image_space_channels_first(
+            observation_space
+        ), f"The observation space {key} must follow the channel last convention"
         height, width, channels = observation_space.shape
         new_shape = (channels, height, width)
         return spaces.Box(low=0, high=255, shape=new_shape, dtype=observation_space.dtype)
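The new assertion refuses channel-first inputs because transposing an already channel-first image would scramble it; combined with the ``base_class.py`` comment above, a mixed ``Dict`` space is the failure mode to watch for. A small probe (hypothetical spaces, using the public ``preprocessing`` helpers this patch relies on) showing what such an inconsistent space looks like:

    import numpy as np
    from gym import spaces

    from stable_baselines3.common.preprocessing import (
        is_image_space,
        is_image_space_channels_first,
    )

    mixed = spaces.Dict(
        {
            # Channel-last: VecTransposeImage can handle this key.
            "cam": spaces.Box(low=0, high=255, shape=(64, 64, 3), dtype=np.uint8),
            # Channel-first: would trigger the new sanity check above.
            "depth": spaces.Box(low=0, high=255, shape=(1, 64, 64), dtype=np.uint8),
        }
    )

    for key, space in mixed.spaces.items():
        if is_image_space(space):
            print(key, "channels_first =", is_image_space_channels_first(space))
    # cam   channels_first = False -> _wrap_env decides to apply VecTransposeImage
    # depth channels_first = True  -> transpose_space would raise the new assert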