From 1fec8d40bdca315848b6a69f186597dded63aa7a Mon Sep 17 00:00:00 2001 From: Harshul-18 Date: Fri, 9 Jan 2026 14:34:48 +0100 Subject: [PATCH 01/44] DONE: Added DDPG, PPO in multi-agent environment in /reinforcement_learning module. --- .../algorithms/base_algorithm.py | 1 + .../algorithms/maddpg.py | 614 ++++++++++++++++++ .../algorithms/mappo.py | 411 ++++++++++++ .../algorithms/matd3.py | 2 +- .../reinforcement_learning/learning_role.py | 6 + .../reinforcement_learning/learning_utils.py | 4 +- .../neural_network_architecture.py | 377 +++++++++-- .../reinforcement_learning/rollout_buffer.py | 261 ++++++++ 8 files changed, 1629 insertions(+), 47 deletions(-) create mode 100644 assume/reinforcement_learning/algorithms/maddpg.py create mode 100644 assume/reinforcement_learning/algorithms/mappo.py create mode 100644 assume/reinforcement_learning/rollout_buffer.py diff --git a/assume/reinforcement_learning/algorithms/base_algorithm.py b/assume/reinforcement_learning/algorithms/base_algorithm.py index 44c0f492f..9ab5b258b 100644 --- a/assume/reinforcement_learning/algorithms/base_algorithm.py +++ b/assume/reinforcement_learning/algorithms/base_algorithm.py @@ -89,3 +89,4 @@ def load_params(self, directory: str) -> None: """ Load learning params - abstract method to be implemented by the Learning Algorithm """ + pass diff --git a/assume/reinforcement_learning/algorithms/maddpg.py b/assume/reinforcement_learning/algorithms/maddpg.py new file mode 100644 index 000000000..a49ac7c83 --- /dev/null +++ b/assume/reinforcement_learning/algorithms/maddpg.py @@ -0,0 +1,614 @@ +# SPDX-FileCopyrightText: ASSUME Developers +# +# SPDX-License-Identifier: AGPL-3.0-or-later + +""" +MADDPG - Multi-Agent Deep Deterministic Policy Gradient + +This module implements the DDPG algorithm for multi-agent settings (MADDPG). 
+ +DDPG vs TD3 Comparison: +----------------------- +| Feature | DDPG (this) | TD3 | +|-------------------|-----------------|------------------| +| Critics | 1 (single) | 2 (twin) | +| Policy Updates | Every step | Delayed (1:2) | +| Target Noise | No | Yes (smoothing) | +| Overestimation | Can occur | Reduced | +| Complexity | Simpler | More complex | + +MADDPG extends DDPG to multi-agent settings using: +- Centralized Training: Critic sees all agents' observations and actions +- Decentralized Execution: Each actor only uses its own observation +""" + +import json +import logging +import os + +import torch as th +from torch.nn import functional as F +from torch.optim import AdamW + +from assume.reinforcement_learning.algorithms.base_algorithm import RLAlgorithm +from assume.reinforcement_learning.learning_utils import ( + polyak_update, + transfer_weights, +) +from assume.reinforcement_learning.neural_network_architecture import CriticDDPG + +logger = logging.getLogger(__name__) + + +class DDPG(RLAlgorithm): + """ + Deep Deterministic Policy Gradient (DDPG) Algorithm. + + Extended to multi-agent settings (MADDPG) for electricity market simulations. 
+ + Key Features: + - Single critic network (vs twin critics in TD3) + - Updates actor every step (no policy delay) + - No target action smoothing noise + - Centralized training with decentralized execution + """ + + def __init__(self, learning_role): + """Initialize DDPG algorithm.""" + super().__init__(learning_role) + + # Gradient step counter + self.n_updates = 0 + + # Gradient clipping threshold + self.grad_clip_norm = 1.0 + + # ========================================================================= + # CHECKPOINT SAVING METHODS + # ========================================================================= + + def save_params(self, directory: str) -> None: + """Save all actor and critic network parameters to disk.""" + self.save_critic_params(directory=f"{directory}/critics") + self.save_actor_params(directory=f"{directory}/actors") + + def save_critic_params(self, directory: str) -> None: + """Save critic network parameters for all agents.""" + os.makedirs(directory, exist_ok=True) + + for u_id, strategy in self.learning_role.rl_strats.items(): + obj = { + "critic": strategy.critic.state_dict(), + "critic_target": strategy.target_critic.state_dict(), + "critic_optimizer": strategy.critic.optimizer.state_dict(), + } + path = f"{directory}/critic_{u_id}.pt" + th.save(obj, path) + + # Save unit ID order for weight transfer + u_id_list = [str(u) for u in self.learning_role.rl_strats.keys()] + mapping = {"u_id_order": u_id_list} + map_path = os.path.join(directory, "u_id_order.json") + with open(map_path, "w") as f: + json.dump(mapping, f, indent=2) + + def save_actor_params(self, directory: str) -> None: + """Save actor network parameters for all agents.""" + os.makedirs(directory, exist_ok=True) + + for u_id, strategy in self.learning_role.rl_strats.items(): + obj = { + "actor": strategy.actor.state_dict(), + "actor_target": strategy.actor_target.state_dict(), + "actor_optimizer": strategy.actor.optimizer.state_dict(), + } + path = 
f"{directory}/actor_{u_id}.pt" + th.save(obj, path) + + # ========================================================================= + # CHECKPOINT LOADING METHODS + # ========================================================================= + + def load_params(self, directory: str) -> None: + """Load all actor and critic parameters from disk.""" + self.load_critic_params(directory) + self.load_actor_params(directory) + + def load_critic_params(self, directory: str) -> None: + """Load critic parameters with support for agent count changes.""" + logger.info("Loading critic parameters...") + + if not os.path.exists(directory): + logger.warning( + "Specified directory does not exist. Using randomly initialized critics." + ) + return + + # Load saved unit ID order + map_path = os.path.join(directory, "critics", "u_id_order.json") + if os.path.exists(map_path): + with open(map_path) as f: + loaded_id_order = json.load(f).get("u_id_order", []) + else: + logger.warning("No u_id_order.json: assuming same order as current.") + loaded_id_order = [str(u) for u in self.learning_role.rl_strats.keys()] + + new_id_order = [str(u) for u in self.learning_role.rl_strats.keys()] + direct_load = loaded_id_order == new_id_order + + if direct_load: + logger.info("Agents order unchanged. Loading critic weights directly.") + else: + logger.info( + f"Agents mismatch: n_old={len(loaded_id_order)}, " + f"n_new={len(new_id_order)}. Transferring weights." 
+            )
+
+        for u_id, strategy in self.learning_role.rl_strats.items():
+            critic_path = os.path.join(directory, "critics", f"critic_{u_id}.pt")
+            if not os.path.exists(critic_path):
+                logger.warning(f"No saved critic for {u_id}; skipping.")
+                continue
+
+            try:
+                critic_params = th.load(critic_path, weights_only=True)
+
+                missing_keys = [
+                    key
+                    for key in ("critic", "critic_target", "critic_optimizer")
+                    if key not in critic_params
+                ]
+                if missing_keys:
+                    for key in missing_keys:
+                        logger.warning(f"Missing {key} in critic params for {u_id}.")
+                    continue
+
+                if direct_load:
+                    strategy.critic.load_state_dict(critic_params["critic"])
+                    strategy.target_critic.load_state_dict(critic_params["critic_target"])
+                    strategy.critic.optimizer.load_state_dict(critic_params["critic_optimizer"])
+                else:
+                    # Weight transfer for agent count changes
+                    critic_weights = transfer_weights(
+                        model=strategy.critic,
+                        loaded_state=critic_params["critic"],
+                        loaded_id_order=loaded_id_order,
+                        new_id_order=new_id_order,
+                        obs_base=strategy.obs_dim,
+                        act_dim=strategy.act_dim,
+                        unique_obs=strategy.unique_obs_dim,
+                    )
+                    target_critic_weights = transfer_weights(
+                        model=strategy.target_critic,
+                        loaded_state=critic_params["critic_target"],
+                        loaded_id_order=loaded_id_order,
+                        new_id_order=new_id_order,
+                        obs_base=strategy.obs_dim,
+                        act_dim=strategy.act_dim,
+                        unique_obs=strategy.unique_obs_dim,
+                    )
+
+                    if critic_weights is None or target_critic_weights is None:
+                        logger.warning(f"Weights transfer failed for {u_id}.")
+                        continue
+
+                    strategy.critic.load_state_dict(critic_weights)
+                    strategy.target_critic.load_state_dict(target_critic_weights)
+
+            except Exception as e:
+                logger.warning(f"Failed to load critic for {u_id}: {e}")
+
+    def load_actor_params(self, directory: str) -> None:
+        """Load actor network parameters from disk."""
+        logger.info("Loading actor parameters...")
+
+        if not os.path.exists(directory):
+            logger.warning(
+                "Specified directory for actors does not exist! "
+                "Starting with randomly initialized values!"
+ ) + return + + for u_id, strategy in self.learning_role.rl_strats.items(): + try: + actor_params = self.load_obj( + directory=f"{directory}/actors/actor_{str(u_id)}.pt" + ) + + strategy.actor.load_state_dict(actor_params["actor"]) + strategy.actor_target.load_state_dict(actor_params["actor_target"]) + strategy.actor.optimizer.load_state_dict(actor_params["actor_optimizer"]) + strategy.actor.loaded = True + + except Exception: + logger.warning(f"No actor values loaded for agent {u_id}") + + # ========================================================================= + # NETWORK INITIALIZATION + # ========================================================================= + + def initialize_policy(self, actors_and_critics: dict = None) -> None: + """ + Initialize actor and critic networks for all agents. + + Args: + actors_and_critics: Optional pre-existing networks to assign + """ + if actors_and_critics is None: + self.check_strategy_dimensions() + self.create_actors() + self.create_critics() + else: + for u_id, strategy in self.learning_role.rl_strats.items(): + strategy.actor = actors_and_critics["actors"][u_id] + strategy.actor_target = actors_and_critics["actor_targets"][u_id] + strategy.critic = actors_and_critics["critics"][u_id] + strategy.target_critic = actors_and_critics["target_critics"][u_id] + + self.obs_dim = actors_and_critics["obs_dim"] + self.act_dim = actors_and_critics["act_dim"] + self.unique_obs_dim = actors_and_critics["unique_obs_dim"] + + def check_strategy_dimensions(self) -> None: + """Validate that all agents have consistent dimensions.""" + obs_dim_list = [] + act_dim_list = [] + unique_obs_dim_list = [] + num_timeseries_obs_dim_list = [] + + for strategy in self.learning_role.rl_strats.values(): + obs_dim_list.append(strategy.obs_dim) + act_dim_list.append(strategy.act_dim) + unique_obs_dim_list.append(strategy.unique_obs_dim) + num_timeseries_obs_dim_list.append(strategy.num_timeseries_obs_dim) + + if len(set(obs_dim_list)) > 1: + raise 
ValueError( + f"All observation dimensions must be the same. " + f"Got: {obs_dim_list}" + ) + else: + self.obs_dim = obs_dim_list[0] + + if len(set(act_dim_list)) > 1: + raise ValueError( + f"All action dimensions must be the same. " + f"Got: {act_dim_list}" + ) + else: + self.act_dim = act_dim_list[0] + + if len(set(unique_obs_dim_list)) > 1: + raise ValueError( + f"All unique_obs_dim values must be the same. " + f"Got: {unique_obs_dim_list}" + ) + else: + self.unique_obs_dim = unique_obs_dim_list[0] + + if len(set(num_timeseries_obs_dim_list)) > 1: + raise ValueError( + f"All num_timeseries_obs_dim values must be the same. " + f"Got: {num_timeseries_obs_dim_list}" + ) + else: + self.num_timeseries_obs_dim = num_timeseries_obs_dim_list[0] + + def create_actors(self) -> None: + """Create actor (policy) networks for all agents.""" + for strategy in self.learning_role.rl_strats.values(): + # Create main actor network + strategy.actor = self.actor_architecture_class( + obs_dim=self.obs_dim, + act_dim=self.act_dim, + float_type=self.float_type, + unique_obs_dim=self.unique_obs_dim, + num_timeseries_obs_dim=self.num_timeseries_obs_dim, + ).to(self.device) + + # Create target actor network + strategy.actor_target = self.actor_architecture_class( + obs_dim=self.obs_dim, + act_dim=self.act_dim, + float_type=self.float_type, + unique_obs_dim=self.unique_obs_dim, + num_timeseries_obs_dim=self.num_timeseries_obs_dim, + ).to(self.device) + + # Initialize target with same weights + strategy.actor_target.load_state_dict(strategy.actor.state_dict()) + strategy.actor_target.train(mode=False) + + # Create optimizer + strategy.actor.optimizer = AdamW( + strategy.actor.parameters(), + lr=self.learning_role.calc_lr_from_progress(1), + ) + + strategy.actor.loaded = False + + def create_critics(self) -> None: + """ + Create critic (Q-function) networks for all agents. + + Key difference from TD3: Uses single critic instead of twin critics. 
+ """ + n_agents = len(self.learning_role.rl_strats) + + for strategy in self.learning_role.rl_strats.values(): + # Create main critic (single Q-network, not twin) + strategy.critic = CriticDDPG( + n_agents=n_agents, + obs_dim=self.obs_dim, + act_dim=self.act_dim, + unique_obs_dim=self.unique_obs_dim, + float_type=self.float_type, + ).to(self.device) + + # Create target critic + strategy.target_critic = CriticDDPG( + n_agents=n_agents, + obs_dim=self.obs_dim, + act_dim=self.act_dim, + unique_obs_dim=self.unique_obs_dim, + float_type=self.float_type, + ).to(self.device) + + # Initialize target with same weights + strategy.target_critic.load_state_dict(strategy.critic.state_dict()) + strategy.target_critic.train(mode=False) + + # Create optimizer + strategy.critic.optimizer = AdamW( + strategy.critic.parameters(), + lr=self.learning_role.calc_lr_from_progress(1), + ) + + def extract_policy(self) -> dict: + """Extract all actor and critic networks into a dictionary.""" + actors = {} + actor_targets = {} + critics = {} + target_critics = {} + + for u_id, strategy in self.learning_role.rl_strats.items(): + actors[u_id] = strategy.actor + actor_targets[u_id] = strategy.actor_target + critics[u_id] = strategy.critic + target_critics[u_id] = strategy.target_critic + + return { + "actors": actors, + "actor_targets": actor_targets, + "critics": critics, + "target_critics": target_critics, + "obs_dim": self.obs_dim, + "act_dim": self.act_dim, + "unique_obs_dim": self.unique_obs_dim, + } + + # ========================================================================= + # CORE TRAINING: POLICY UPDATE + # ========================================================================= + + def update_policy(self) -> None: + """ + Update actor and critic networks using the DDPG algorithm. + + Key differences from TD3: + 1. Uses single critic (no twin Q-learning) + 2. Updates actor every step (no policy delay) + 3. 
No target action smoothing noise + """ + logger.debug("Updating Policy (MADDPG/DDPG)") + + strategies = list(self.learning_role.rl_strats.values()) + n_rl_agents = len(strategies) + + # Initialize metrics storage + unit_params = [ + { + u_id: { + "actor_loss": None, + "actor_total_grad_norm": None, + "actor_max_grad_norm": None, + "critic_loss": None, + "critic_total_grad_norm": None, + "critic_max_grad_norm": None, + } + for u_id in self.learning_role.rl_strats.keys() + } + for _ in range(self.learning_config.gradient_steps) + ] + + # Update noise and learning rate schedules + progress_remaining = self.learning_role.get_progress_remaining() + updated_noise_decay = self.learning_role.calc_noise_from_progress(progress_remaining) + learning_rate = self.learning_role.calc_lr_from_progress(progress_remaining) + + for strategy in strategies: + self.update_learning_rate( + [strategy.critic.optimizer, strategy.actor.optimizer], + learning_rate=learning_rate, + ) + strategy.action_noise.update_noise_decay(updated_noise_decay) + + # Main gradient step loop + for step in range(self.learning_config.gradient_steps): + self.n_updates += 1 + + # Sample from replay buffer + transitions = self.learning_role.buffer.sample( + self.learning_config.batch_size + ) + + states, actions, next_states, rewards = ( + transitions.observations, + transitions.actions, + transitions.next_observations, + transitions.rewards, + ) + + # Compute target actions (no smoothing noise in DDPG) + with th.no_grad(): + next_actions = th.stack([ + strategy.actor_target(next_states[:, i, :]).clamp(-1, 1) + for i, strategy in enumerate(strategies) + ]) + next_actions = next_actions.transpose(0, 1).contiguous() + next_actions = next_actions.view(-1, n_rl_agents * self.act_dim) + + all_actions = actions.view(self.learning_config.batch_size, -1) + + # Precompute observation slices + unique_obs_from_others = states[ + :, :, self.obs_dim - self.unique_obs_dim : + ].reshape(self.learning_config.batch_size, 
n_rl_agents, -1) + + next_unique_obs_from_others = next_states[ + :, :, self.obs_dim - self.unique_obs_dim : + ].reshape(self.learning_config.batch_size, n_rl_agents, -1) + + # ================================================================= + # CRITIC UPDATE + # ================================================================= + for strategy in strategies: + strategy.critic.optimizer.zero_grad(set_to_none=True) + + total_critic_loss = 0.0 + + for i, strategy in enumerate(strategies): + critic = strategy.critic + critic_target = strategy.target_critic + + # Build centralized observation + other_unique_obs = th.cat( + (unique_obs_from_others[:, :i], unique_obs_from_others[:, i + 1 :]), + dim=1, + ) + other_next_unique_obs = th.cat( + (next_unique_obs_from_others[:, :i], next_unique_obs_from_others[:, i + 1 :]), + dim=1, + ) + + all_states = th.cat( + ( + states[:, i, :].reshape(self.learning_config.batch_size, -1), + other_unique_obs.reshape(self.learning_config.batch_size, -1), + ), + dim=1, + ) + all_next_states = th.cat( + ( + next_states[:, i, :].reshape(self.learning_config.batch_size, -1), + other_next_unique_obs.reshape(self.learning_config.batch_size, -1), + ), + dim=1, + ) + + # Compute target Q-value (single critic, no min) + with th.no_grad(): + next_q_value = critic_target(all_next_states, next_actions) + target_Q_value = ( + rewards[:, i].unsqueeze(1) + + self.learning_config.gamma * next_q_value + ) + + # Compute current Q-value + current_Q_value = critic(all_states, all_actions) + + # MSE loss (single critic) + critic_loss = F.mse_loss(current_Q_value, target_Q_value) + + unit_params[step][strategy.unit_id]["critic_loss"] = critic_loss.item() + total_critic_loss += critic_loss + + # Backward pass for critics + total_critic_loss.backward() + + for strategy in strategies: + parameters = list(strategy.critic.parameters()) + max_grad_norm = max(p.grad.norm() for p in parameters) + total_norm = th.nn.utils.clip_grad_norm_( + parameters, 
max_norm=self.grad_clip_norm + ) + strategy.critic.optimizer.step() + + unit_params[step][strategy.unit_id]["critic_total_grad_norm"] = total_norm + unit_params[step][strategy.unit_id]["critic_max_grad_norm"] = max_grad_norm + + # ================================================================= + # ACTOR UPDATE (every step, no delay in DDPG) + # ================================================================= + for strategy in strategies: + strategy.actor.optimizer.zero_grad(set_to_none=True) + + total_actor_loss = 0.0 + + for i, strategy in enumerate(strategies): + actor = strategy.actor + critic = strategy.critic + + state_i = states[:, i, :] + action_i = actor(state_i) + + other_unique_obs = th.cat( + (unique_obs_from_others[:, :i], unique_obs_from_others[:, i + 1 :]), + dim=1, + ) + all_states_i = th.cat( + ( + state_i.reshape(self.learning_config.batch_size, -1), + other_unique_obs.reshape(self.learning_config.batch_size, -1), + ), + dim=1, + ) + + all_actions_clone = actions.clone().detach() + all_actions_clone[:, i, :] = action_i + all_actions_clone = all_actions_clone.view( + self.learning_config.batch_size, -1 + ) + + # Actor loss: maximize Q-value + actor_loss = -critic(all_states_i, all_actions_clone).mean() + + unit_params[step][strategy.unit_id]["actor_loss"] = actor_loss.item() + total_actor_loss += actor_loss + + # Backward pass for actors + total_actor_loss.backward() + + for strategy in strategies: + parameters = list(strategy.actor.parameters()) + max_grad_norm = max(p.grad.norm() for p in parameters) + total_norm = th.nn.utils.clip_grad_norm_( + parameters, max_norm=self.grad_clip_norm + ) + strategy.actor.optimizer.step() + + unit_params[step][strategy.unit_id]["actor_total_grad_norm"] = total_norm + unit_params[step][strategy.unit_id]["actor_max_grad_norm"] = max_grad_norm + + # ================================================================= + # TARGET NETWORK UPDATES (Polyak averaging) + # 
================================================================= + all_critic_params = [] + all_target_critic_params = [] + all_actor_params = [] + all_target_actor_params = [] + + for strategy in strategies: + all_critic_params.extend(strategy.critic.parameters()) + all_target_critic_params.extend(strategy.target_critic.parameters()) + all_actor_params.extend(strategy.actor.parameters()) + all_target_actor_params.extend(strategy.actor_target.parameters()) + + polyak_update( + all_critic_params, + all_target_critic_params, + self.learning_config.tau, + ) + polyak_update( + all_actor_params, + all_target_actor_params, + self.learning_config.tau, + ) + + # Log metrics + self.learning_role.write_rl_grad_params_to_output(learning_rate, unit_params) diff --git a/assume/reinforcement_learning/algorithms/mappo.py b/assume/reinforcement_learning/algorithms/mappo.py new file mode 100644 index 000000000..b3f43c5d4 --- /dev/null +++ b/assume/reinforcement_learning/algorithms/mappo.py @@ -0,0 +1,411 @@ +# SPDX-FileCopyrightText: ASSUME Developers +# +# SPDX-License-Identifier: AGPL-3.0-or-later +import json +import logging +import os + +import numpy as np +import torch as th +from torch.nn import functional as F +from torch.optim import AdamW + +from assume.reinforcement_learning.algorithms.base_algorithm import RLAlgorithm +from assume.reinforcement_learning.learning_utils import polyak_update +from assume.reinforcement_learning.neural_network_architecture import ( + ActorPPO, + CriticPPO +) +from assume.reinforcement_learning.rollout_buffer import RolloutBuffer + +logger = logging.getLogger(__name__) + +class PPO(RLAlgorithm): + """ + Proximal Policy Optimization (PPO) Algorithm. 
+ """ + + def __init__( + self, + learning_role, + clip_range = 0.2, # Clipping parameter + clip_range_vf = None, + n_epochs = 10, # Number of epochs per update + entropy_coef = 0.01, # Entropy bonus coefficient + vf_coef = 0.5, # Value function loss coefficient + max_grad_norm = 0.5, # Gradient clipping + ): + """Initialize PPO algorithm.""" + super().__init__(learning_role) + + self.clip_range = clip_range + self.clip_range_vf = clip_range_vf + self.n_epochs = n_epochs + self.entropy_coef = entropy_coef + self.vf_coef = vf_coef + self.max_grad_norm = max_grad_norm + + # Update counter + self.n_updates = 0 + + def save_params(self, directory: str) -> None: + """Save all actor and critic network parameters to disk.""" + self.save_critic_params(directory=f"{directory}/critics") + self.save_actor_params(directory=f"{directory}/actors") + + def save_critic_params(self, directory: str) -> None: + """Save value network parameters for all agents.""" + os.makedirs(directory, exist_ok=True) + + for u_id, strategy in self.learning_role.rl_strats.items(): + obj = { + "critic": strategy.critic.state_dict(), + "critic_optimizer": strategy.critic.optimizer.state_dict(), + } + path = f"{directory}/critic_{u_id}.pt" + th.save(obj, path) + + # Save unit ID order + u_id_list = [str(u) for u in self.learning_role.rl_strats.keys()] + mapping = {"u_id_order": u_id_list} + map_path = os.path.join(directory, "u_id_order.json") + with open(map_path, "w") as f: + json.dump(mapping, f, indent=2) + + def save_actor_params(self, directory: str) -> None: + """Save actor network parameters for all agents.""" + os.makedirs(directory, exist_ok=True) + + for u_id, strategy in self.learning_role.rl_strats.items(): + obj = { + "actor": strategy.actor.state_dict(), + "actor_optimizer": strategy.actor.optimizer.state_dict(), + } + path = f"{directory}/actor_{u_id}.pt" + th.save(obj, path) + + def load_params(self, directory: str) -> None: + """Load all actor and critic parameters from disk.""" + 
self.load_critic_params(directory) + self.load_actor_params(directory) + + def load_critic_params(self, directory: str) -> None: + """Load critic parameters.""" + logger.info("Loading PPO critic parameters...") + + if not os.path.exists(directory): + logger.warning( + "Specified directory does not exist. Using randomly initialized critics." + ) + return + + for u_id, strategy in self.learning_role.rl_strats.items(): + critic_path = os.path.join(directory, "critics", f"critic_{u_id}.pt") + if not os.path.exists(critic_path): + logger.warning(f"No saved critic for {u_id}; skipping.") + continue + + try: + critic_params = th.load(critic_path, weights_only=True) + strategy.critic.load_state_dict(critic_params["critic"]) + strategy.critic.optimizer.load_state_dict(critic_params["critic_optimizer"]) + except Exception as e: + logger.warning(f"Failed to load critic for {u_id}: {e}") + + def load_actor_params(self, directory: str) -> None: + """Load actor network parameters from disk.""" + logger.info("Loading PPO actor parameters...") + + if not os.path.exists(directory): + logger.warning( + "Specified directory for actors does not exist! " + "Starting with randomly initialized values!" + ) + return + + for u_id, strategy in self.learning_role.rl_strats.items(): + try: + actor_params = self.load_obj( + directory=f"{directory}/actors/actor_{str(u_id)}.pt" + ) + + strategy.actor.load_state_dict(actor_params["actor"]) + strategy.actor.optimizer.load_state_dict(actor_params["actor_optimizer"]) + strategy.actor.loaded = True + + except Exception: + logger.warning(f"No actor values loaded for agent {u_id}") + + def initialize_policy(self, actors_and_critics: dict = None) -> None: + """ + Initialize actor and critic networks for all agents. 
+ + Args: + actors_and_critics: Optional pre-existing networks to assign + """ + if actors_and_critics is None: + self.check_strategy_dimensions() + self.create_actors() + self.create_critics() + else: + for u_id, strategy in self.learning_role.rl_strats.items(): + strategy.actor = actors_and_critics["actors"][u_id] + strategy.critic = actors_and_critics["critics"][u_id] + + self.obs_dim = actors_and_critics["obs_dim"] + self.act_dim = actors_and_critics["act_dim"] + self.unique_obs_dim = actors_and_critics["unique_obs_dim"] + + def check_strategy_dimensions(self) -> None: + """Validate that all agents have consistent dimensions.""" + foresight_list = [] + obs_dim_list = [] + act_dim_list = [] + unique_obs_dim_list = [] + num_timeseries_obs_dim_list = [] + + for strategy in self.learning_role.rl_strats.values(): + foresight_list.append(strategy.foresight) + obs_dim_list.append(strategy.obs_dim) + act_dim_list.append(strategy.act_dim) + unique_obs_dim_list.append(strategy.unique_obs_dim) + num_timeseries_obs_dim_list.append(strategy.num_timeseries_obs_dim) + + if len(set(foresight_list)) > 1: + raise ValueError( + f"All foresight values must be the same for all RL agents. THe defined learning strategies have the following foresight values: {foresight_list}" + ) + else: + self.foresight = foresight_list[0] + + if len(set(obs_dim_list)) > 1: + raise ValueError( + f"All observation dimensions must be the same. Got: {obs_dim_list}" + ) + else: + self.obs_dim = obs_dim_list[0] + + if len(set(act_dim_list)) > 1: + raise ValueError( + f"All action dimensions must be the same. Got: {act_dim_list}" + ) + else: + self.act_dim = act_dim_list[0] + + if len(set(unique_obs_dim_list)) > 1: + raise ValueError( + f"All unique_obs_dim values must be the same. Got: {unique_obs_dim_list}" + ) + else: + self.unique_obs_dim = unique_obs_dim_list[0] + + if len(set(num_timeseries_obs_dim_list)) > 1: + raise ValueError( + f"All num_timeseries_obs_dim values must be the same. 
" + f"Got: {num_timeseries_obs_dim_list}" + ) + else: + self.num_timeseries_obs_dim = num_timeseries_obs_dim_list[0] + + def create_actors(self) -> None: + """Create stochastic actor networks for all agents.""" + for strategy in self.learning_role.rl_strats.values(): + # Create PPO Actor + strategy.actor = ActorPPO( + obs_dim=self.obs_dim, + act_dim=self.act_dim, + float_type=self.float_type, + ).to(self.device) + + # Create Optimizer + strategy.actor.optimizer = AdamW( + strategy.actor.parameters(), + lr=self.learning_role.calc_lr_from_progress(1), + ) + + strategy.actor.loaded = False + + def create_critics(self) -> None: + """ + Create value networks for all agents. + """ + n_agents = len(self.learning_role.rl_strats) + + for strategy in self.learning_role.rl_strats.values(): + # Create value network + strategy.critic = CriticPPO( + n_agents=n_agents, + obs_dim=self.obs_dim, + unique_obs_dim=self.unique_obs_dim, + float_type=self.float_type, + ).to(self.device) + + # Create optimizer + strategy.critic.optimizer = AdamW( + strategy.critic.parameters(), + lr=self.learning_role.calc_lr_from_progress(1), + ) + + def extract_policy(self) -> dict: + """Extract all actor and critic networks into a dictionary.""" + actors = {} + critics = {} + + for u_id, strategy in self.learning_role.rl_strats.items(): + actors[u_id] = strategy.actor + critics[u_id] = strategy.critic + + return { + "actors": actors, + "critics": critics, + "obs_dim": self.obs_dim, + "act_dim": self.act_dim, + "unique_obs_dim": self.unique_obs_dim, + } + + def update_policy(self) -> None: + """ + Update actor and critic networks. 
+ """ + logger.debug("Updating Policy") + + strategies = list(self.learning_role.rl_strats.values()) + n_rl_agents = len(strategies) + + # Get rollout buffer + rollout_buffer = self.learning_role.rollout_buffer + + # Update learning rate + progress_remaining = self.learning_role.get_progress_remaining() + learning_rate = self.learning_role.calc_lr_from_progress(progress_remaining) + + for strategy in strategies: + for param_group in strategy.critic.optimizer.param_groups: + param_group["lr"] = learning_rate + for param_group in strategy.actor.optimizer.param_groups: + param_group["lr"] = learning_rate + + # Get last values for advantage computation + last_values = np.zeros(n_rl_agents) + dones = np.zeros(n_rl_agents) + + # Get the buffer size to index into the last stored state + buffer_size = rollout_buffer.pos if not rollout_buffer.full else rollout_buffer.buffer_size + + if buffer_size > 0: + # Get the last observation from the buffer + last_obs = rollout_buffer.observations[buffer_size-1] + last_dones = rollout_buffer.dones[buffer_size-1] + + with th.no_grad(): + for i, strategy in enumerate(strategies): + obs_tensor = th.as_tensor( + last_obs[i:i+1], + device = self.device, + dtype = self.float_type + ) + # Get value estimate from critic + last_values[i] = strategy.critic(obs_tensor).cpu().numpy().flatten()[0] + dones[i] = last_dones[i] + + # Compute advantages and returns + rollout_buffer.compute_returns_and_advantages(last_values, dones) + + # Initialize metrics storage + all_actor_losses = [] + all_critic_losses = [] + all_entropy_losses = [] + + for epoch in range(self.n_epochs): + for batch in rollout_buffer.get(self.learning_config.batch_size): + for i, strategy in enumerate(strategies): + actor = strategy.actor + critic = strategy.critic + + obs_i = batch.observations[:, i, :] + actions_i = batch.actions[:, i, :] + old_log_probs_i = batch.old_log_probs[:, i] + advantages_i = batch.advantages[:, i] + returns_i = batch.returns[:, i] + old_values_i = 
batch.old_values[:, i]
+
+                    advantages_i = (advantages_i - advantages_i.mean()) / (
+                        advantages_i.std() + 1e-8
+                    )
+
+                    log_probs, entropy = actor.evaluate_actions(
+                        obs_i,
+                        actions_i
+                    )
+                    values = critic(obs_i).flatten()
+
+                    # Importance sampling ratio
+                    ratio = th.exp(log_probs - old_log_probs_i)
+
+                    # Clipped surrogate objective
+                    policy_loss_1 = advantages_i * ratio
+                    policy_loss_2 = advantages_i * th.clamp(
+                        ratio, 1 - self.clip_range, 1 + self.clip_range
+                    )
+                    policy_loss = -th.min(policy_loss_1, policy_loss_2).mean()
+
+                    # Entropy loss
+                    entropy_loss = -self.entropy_coef * entropy.mean()
+
+                    if self.clip_range_vf is not None:
+                        # Clipped value function loss
+                        values_clipped = old_values_i + th.clamp(
+                            values - old_values_i,
+                            -self.clip_range_vf,
+                            self.clip_range_vf
+                        )
+                        value_loss_1 = F.mse_loss(values, returns_i)
+                        value_loss_2 = F.mse_loss(values_clipped, returns_i)
+                        value_loss = th.max(value_loss_1, value_loss_2)
+                    else:
+                        value_loss = F.mse_loss(values, returns_i)
+
+                    loss = policy_loss + entropy_loss + self.vf_coef * value_loss
+
+                    # Actor update
+                    actor.optimizer.zero_grad()
+                    critic.optimizer.zero_grad()
+                    loss.backward()
+
+                    # Gradient clipping
+                    th.nn.utils.clip_grad_norm_(
+                        actor.parameters(), self.max_grad_norm
+                    )
+                    th.nn.utils.clip_grad_norm_(
+                        critic.parameters(), self.max_grad_norm
+                    )
+
+                    actor.optimizer.step()
+                    critic.optimizer.step()
+
+                    # Store metrics
+                    all_actor_losses.append(policy_loss.item())
+                    all_critic_losses.append(value_loss.item())
+                    all_entropy_losses.append(entropy_loss.item())
+
+        self.n_updates += 1
+
+        # Log average metrics
+        if self.learning_role.tensor_board_logger:
+            self.learning_role.tensor_board_logger.log_scalar(
+                "ppo/actor_loss", np.mean(all_actor_losses), self.n_updates
+            )
+            self.learning_role.tensor_board_logger.log_scalar(
+                "ppo/critic_loss", np.mean(all_critic_losses), self.n_updates
+            )
+            self.learning_role.tensor_board_logger.log_scalar(
+                "ppo/entropy_loss", np.mean(all_entropy_losses), self.n_updates
+ ) + + # Clear rollout buffer + rollout_buffer.reset() + + logger.debug( + f"PPO update complete. Actor loss: {np.mean(all_actor_losses):.4f}, " + f"Value loss: {np.mean(all_critic_losses):.4f}" + ) \ No newline at end of file diff --git a/assume/reinforcement_learning/algorithms/matd3.py b/assume/reinforcement_learning/algorithms/matd3.py index 12d2a9a38..dbdd50e41 100644 --- a/assume/reinforcement_learning/algorithms/matd3.py +++ b/assume/reinforcement_learning/algorithms/matd3.py @@ -329,7 +329,7 @@ def create_actors(self) -> None: The created actor networks are associated with each unit strategy and stored as attributes. Note: - The observation dimension need to be the same, due to the centralized criic that all actors share. + The observation dimension need to be the same, due to the centralized critic that all actors share. If you have units with different observation dimensions. They need to have different critics and hence learning roles. """ diff --git a/assume/reinforcement_learning/learning_role.py b/assume/reinforcement_learning/learning_role.py index 837dd223d..23d1df97d 100644 --- a/assume/reinforcement_learning/learning_role.py +++ b/assume/reinforcement_learning/learning_role.py @@ -19,6 +19,8 @@ ) from assume.reinforcement_learning.algorithms.base_algorithm import RLAlgorithm from assume.reinforcement_learning.algorithms.matd3 import TD3 +from assume.reinforcement_learning.algorithms.maddpg import DDPG +from assume.reinforcement_learning.algorithms.mappo import PPO from assume.reinforcement_learning.buffer import ReplayBuffer from assume.reinforcement_learning.learning_utils import ( linear_schedule_func, @@ -453,6 +455,10 @@ def create_learning_algorithm(self, algorithm: RLAlgorithm): """ if algorithm == "matd3": self.rl_algorithm = TD3(learning_role=self) + elif algorithm == "maddpg": + self.rl_algorithm = DDPG(learning_role=self) + elif algorithm == "mappo": + self.rl_algorithm = PPO(learning_role=self) else: logger.error(f"Learning algorithm 
{algorithm} not implemented!") diff --git a/assume/reinforcement_learning/learning_utils.py b/assume/reinforcement_learning/learning_utils.py index 3ec682a1c..268b0ed5b 100644 --- a/assume/reinforcement_learning/learning_utils.py +++ b/assume/reinforcement_learning/learning_utils.py @@ -248,7 +248,7 @@ def transfer_weights( ) -> dict | None: """ Transfer weights from loaded model to new model. Copy only those obs- and action-slices for matching IDs. - New IDs keep their original (random) weights. Function only works if the neural network architeczture remained stable besides the input layer, namely with the same hidden layers. + New IDs keep their original (random) weights. Function only works if the neural network architecture remained stable besides the input layer, namely with the same hidden layers. Args: model (th.nn.Module): The model to transfer weights to. @@ -259,7 +259,7 @@ def transfer_weights( act_dim (int): The action dimension size. unique_obs (int): The unique observation size per agent, smaller than obs_base as these include also shared observation values. - returns: + Returns: dict | None: The updated state dictionary with transferred weights, or None if architecture mismatch. """ diff --git a/assume/reinforcement_learning/neural_network_architecture.py b/assume/reinforcement_learning/neural_network_architecture.py index a173b4b5c..bfd1032c6 100644 --- a/assume/reinforcement_learning/neural_network_architecture.py +++ b/assume/reinforcement_learning/neural_network_architecture.py @@ -6,16 +6,20 @@ from torch import nn from torch.nn import functional as F +from typing import List, Tuple, Type, Optional, Union -class CriticTD3(nn.Module): - """Initialize parameters and build model. + +class Critic(nn.Module): + """ + Base Critic class handling architecture generation and initialization. 
Args: n_agents (int): Number of agents - obs_dim (int): Dimension of each state - act_dim (int): Dimension of each action + obs_dim (int): Dimension of observation per agent + act_dim: Dimension of action per agent + float_type: Data type for parameters + unique_obs_dim: Dimension of agent-specific observations """ - def __init__( self, n_agents: int, @@ -26,39 +30,41 @@ def __init__( ): super().__init__() + # Calculate total (centralized) dimensions self.obs_dim = obs_dim + unique_obs_dim * (n_agents - 1) self.act_dim = act_dim * n_agents - # Select proper architecture based on `n_agents` + self.float_type = float_type + + # Dynamic Architecture Definition + self.hidden_sizes = self._get_architecture(n_agents) + + def _get_architecture( + self, n_agents: int + ) -> List[int]: + """Returns hidden layer sizes based on the number of agents.""" if n_agents <= 20: hidden_sizes = [256, 128] # Shallow network for small `n_agents` elif n_agents <= 50: hidden_sizes = [512, 256, 128] # Medium network else: hidden_sizes = [1024, 512, 256, 128] # Deeper network for large `n_agents` + return hidden_sizes - # First Q-network (Q1) - self.q1_layers = self._build_q_network(hidden_sizes, float_type) - - # Second Q-network (Q2) for double Q-learning - self.q2_layers = self._build_q_network(hidden_sizes, float_type) - - # Initialize weights properly - self._init_weights() - - def _build_q_network(self, hidden_sizes, float_type): + def _build_q_network(self) -> nn.ModuleList: """ - Dynamically creates a Q-network given the chosen hidden layer sizes. + Dynamically create a Q-network given the chosen hidden layer sizes. 
""" layers = nn.ModuleList() input_dim = ( self.obs_dim + self.act_dim - ) # Input includes all observations and actions + ) # Input includes all observations and actions - for h in hidden_sizes: - layers.append(nn.Linear(input_dim, h, dtype=float_type)) + for h in self.hidden_sizes: + layers.append(nn.Linear(input_dim, h, dtype=self.float_type)) + layers.append(nn.ReLU()) input_dim = h - layers.append(nn.Linear(input_dim, 1, dtype=float_type)) # Output Q-value + layers.append(nn.Linear(input_dim, 1, dtype=self.float_type)) # Output Q-value return layers @@ -72,40 +78,163 @@ def init_layer(m): self.apply(init_layer) - def forward(self, obs, actions): + +class CriticTD3(Critic): + """Initialize parameters and build model. + + Args: + n_agents (int): Number of agents + obs_dim (int): Dimension of each state + act_dim (int): Dimension of each action + """ + def __init__( + self, + n_agents: int, + obs_dim: int, + act_dim: int, + float_type, + unique_obs_dim: int + ): + super().__init__( + n_agents, + obs_dim, + act_dim, + float_type, + unique_obs_dim + ) + + # First Q-network (Q1) + self.q1_layers = self._build_q_network() + + # Second Q-network (Q2) for double Q-learning + self.q2_layers = self._build_q_network() + + def forward( + self, + obs: th.Tensor, + actions: th.Tensor + ) -> Tuple[th.Tensor, th.Tensor]: """ Forward pass through both Q-networks. 
""" - xu = th.cat([obs, actions], dim=1) # Concatenate obs & actions + xu = th.cat([obs, actions], dim=1) # Concatenate obs & actions # Compute Q1 - x1 = xu - for layer in self.q1_layers[:-1]: # All hidden layers - x1 = F.relu(layer(x1)) - x1 = self.q1_layers[-1](x1) # Output layer (no activation) + x1 = nn.Sequential(*self.q1_layers)(xu) # Compute Q2 - x2 = xu - for layer in self.q2_layers[:-1]: # All hidden layers - x2 = F.relu(layer(x2)) - x2 = self.q2_layers[-1](x2) # Output layer (no activation) + x2 = nn.Sequential(*self.q2_layers)(xu) return x1, x2 - def q1_forward(self, obs, actions): + def q1_forward( + self, + obs: th.Tensor, + actions: th.Tensor + ) -> th.Tensor: """ Compute only Q1 (used during actor updates). """ x = th.cat([obs, actions], dim=1) - for layer in self.q1_layers[:-1]: # All hidden layers - x = F.relu(layer(x)) + x = nn.Sequential(*self.q1_layers)(x) + + return x - x = self.q1_layers[-1](x) # Output layer (no activation) + +class CriticDDPG(Critic): + """Initialize parameters and build model. + + Args: + n_agents (int): Number of agents + obs_dim (int): Dimension of observation per agent + act_dim: Dimension of action per agent + float_type: Data type for parameters + unique_obs_dim: Dimension of agent-specific observations + """ + def __init__( + self, + n_agents: int, + obs_dim: int, + act_dim: int, + float_type: th.dtype, + unique_obs_dim: int, + ): + super().__init__( + n_agents, + obs_dim, + act_dim, + float_type, + unique_obs_dim + ) + + # Q-network + self.q_layers = self._build_q_network() + + # Initialize weights properly + self._init_weights() + + def forward( + self, + obs: th.Tensor, + actions: th.Tensor + ) -> th.Tensor: + """Returns Q value.""" + xu = th.cat([obs, actions], dim=1) # Concatenate obs & actions + + # Compute Q + x = nn.Sequential(*self.layers)(xu) return x +class CriticPPO(Critic): + """Initialize parameters and build PPO value network. 
+
+    Args:
+        n_agents (int): Number of agents
+        obs_dim (int): Dimension of observation per agent
+        float_type: Data type for parameters
+        unique_obs_dim: Dimension of agent-specific observations
+    """
+
+    def __init__(
+        self, 
+        n_agents: int, 
+        obs_dim: int, 
+        float_type, 
+        unique_obs_dim: int
+    ):
+        super().__init__(
+            n_agents=n_agents,
+            obs_dim=obs_dim,
+            act_dim=0,
+            float_type=float_type,
+            unique_obs_dim=unique_obs_dim
+        )
+
+        # V-network
+        self.v_layers = self._build_q_network()
+
+        # Initialize weights properly
+        self._init_weights()
+
+    def _init_weights(self) -> None:
+        """
+        Apply Orthogonal initialization.
+        """
+        def init_layer(m):
+            if isinstance(m, nn.Linear):
+                nn.init.orthogonal_(m.weight, gain=1.0)
+                nn.init.zeros_(m.bias)
+
+        self.apply(init_layer)
+
+    def forward(self, obs: th.Tensor) -> th.Tensor:
+        """Returns V value."""
+        return nn.Sequential(*self.v_layers)(obs)
+
+
 class Actor(nn.Module):
     """
     Parent class for actor networks.
@@ -122,23 +251,25 @@ class Actor(nn.Module):
         "softsign": F.softsign,
         "tanh": th.tanh,
         "sigmoid": th.sigmoid,
-        "relu": F.relu,
+        "relu": F.relu
     }
 
     def __init__(self):
         super().__init__()
 
-        self.activation = "softsign"  # or "tanh", "sigmoid", "relu"
+        self.activation = "softsign" # or "tanh", "sigmoid", "relu"
 
         if self.activation not in self.activation_function_limit:
             raise ValueError(
                 f"Activation '{self.activation}' not supported! Supported: {list(self.activation_function_limit.keys())}"
            )
+
         self.min_output, self.max_output = self.activation_function_limit[
             self.activation
         ]
 
         self.activation_function = self.activation_function_map.get(self.activation)
+
         if self.activation_function is None:
             raise ValueError(
                 f"Activation '{self.activation}' not implemented in forward pass!"
@@ -147,12 +278,12 @@ def __init__(self):
 
 class MLPActor(Actor):
     """
-    The neurnal network for the MLP actor.
+    The neural network for the MLP actor. 
""" def __init__(self, obs_dim: int, act_dim: int, float_type, *args, **kwargs): super().__init__() - + self.FC1 = nn.Linear(obs_dim, 256, dtype=float_type) self.FC2 = nn.Linear(256, 128, dtype=float_type) self.FC3 = nn.Linear(128, act_dim, dtype=float_type) @@ -181,12 +312,12 @@ def forward(self, obs): class LSTMActor(Actor): """ - The LSTM recurrent neurnal network for the actor. + The LSTM recurrent neural network for the actor. Based on "Multi-Period and Multi-Spatial Equilibrium Analysis in Imperfect Electricity Markets" by Ye at al. (2019) - Note: the original source code was not available, therefore this implementation was derived from the published paper. + Note: the original source code was not available, therefore this implementation was derived from the published paper. Adjustments to resemble final layers from MLPActor: - dense layer 2 was omitted - single output layer with softsign activation function to output actions directly instead of two output layers for mean and stddev @@ -200,7 +331,7 @@ def __init__( unique_obs_dim: int, num_timeseries_obs_dim: int, *args, - **kwargs, + **kwargs ): super().__init__() self.float_type = float_type @@ -247,14 +378,14 @@ def forward(self, obs): outputs = [] for time_step in x1.split(1, dim=2): - time_step = time_step.reshape(-1, self.num_timeseries_obs_dim) + time_step = time_step.reshape(-1, self.num_timeseris_obs_dim) h_t, c_t = self.LSTM1(time_step, (h_t, c_t)) h_t2, c_t2 = self.LSTM2(h_t, (h_t2, c_t2)) outputs += [h_t2] outputs = th.cat(outputs, dim=1) x = th.cat((outputs, x2), dim=1) - + x = F.relu(self.FC1(x)) x = self.activation_function(self.FC2(x)) @@ -262,3 +393,161 @@ def forward(self, obs): x = x.squeeze(0) return x + + +class ActorPPO(nn.Module): + """ + PPO Stochastic Actor Network. 
+ + Key differences from MLPActor (DDPG): + - Outputs mean AND log_std for Gaussian policy + - Provides log_prob for importance sampling + - Used with clipped surrogate objective + """ + + def __init__( + self, + obs_dim: int, + act_dim: int, + float_type, + log_std_init: float = 0.0, + *args, + **kwargs, + ): + """ + Initialize stochastic actor. + + Args: + obs_dim: Observation dimension + act_dim: Action dimension + float_type: Data type for parameters + log_std_init: Initial log standard deviation + """ + super().__init__() + + self.act_dim = act_dim + self.float_type = float_type + + # Policy network (outputs mean) + self.FC1 = nn.Linear(obs_dim, 256, dtype=float_type) + self.FC2 = nn.Linear(256, 128, dtype=float_type) + self.mean_layer = nn.Linear(128, act_dim, dtype=float_type) + + # Learnable log standard deviation + self.log_std = nn.Parameter( + th.ones(act_dim, dtype=float_type) * log_std_init + ) + + self._init_weights() + + def _init_weights(self) -> None: + """Apply orthogonal initialization.""" + def init_layer(m): + if isinstance(m, nn.Linear): + nn.init.orthogonal_(m.weight, gain=0.01) + nn.init.zeros_(m.bias) + + # Initialize hidden layers with larger gain + nn.init.orthogonal_(self.FC1.weight, gain=1.0) + nn.init.orthogonal_(self.FC2.weight, gain=1.0) + nn.init.zeros_(self.FC1.bias) + nn.init.zeros_(self.FC2.bias) + + # Initialize output layer with small gain + nn.init.orthogonal_(self.mean_layer.weight, gain=0.01) + nn.init.zeros_(self.mean_layer.bias) + + def forward(self, obs: th.Tensor) -> tuple[th.Tensor, th.Tensor]: + """ + Forward pass: observation → (mean, log_std). 
+ + Args: + obs: Observations [batch, obs_dim] + + Returns: + Tuple of (action_mean, log_std) + """ + x = F.relu(self.FC1(obs)) + x = F.relu(self.FC2(x)) + mean = th.tanh(self.mean_layer(x)) # Bounded to [-1, 1] + + # Expand log_std to batch size + log_std = self.log_std.expand_as(mean) + + return mean, log_std + + def get_action_and_log_prob( + self, + obs: th.Tensor, + deterministic: bool = False, + ) -> tuple[th.Tensor, th.Tensor]: + """ + Sample action and compute log probability. + + Args: + obs: Observations + deterministic: If True, return mean action + + Returns: + Tuple of (action, log_prob) + """ + mean, log_std = self.forward(obs) + std = log_std.exp() + + if deterministic: + action = mean + else: + # Sample from Gaussian + noise = th.randn_like(mean) + action = mean + std * noise + + # Clamp action to valid range + action = th.clamp(action, -1.0, 1.0) + + # Compute log probability + log_prob = self._compute_log_prob(action, mean, std) + + return action, log_prob + + def evaluate_actions( + self, + obs: th.Tensor, + actions: th.Tensor, + ) -> tuple[th.Tensor, th.Tensor, th.Tensor]: + """ + Evaluate log probability and entropy for given actions. + + Used during PPO update to compute importance ratio. 
+ + Args: + obs: Observations + actions: Actions to evaluate + + Returns: + Tuple of (log_prob, entropy, values) + """ + mean, log_std = self.forward(obs) + std = log_std.exp() + + # Log probability + log_prob = self._compute_log_prob(actions, mean, std) + + # Entropy for exploration bonus + entropy = 0.5 * (1.0 + th.log(2 * th.pi * std.pow(2))).sum(dim=-1) + + return log_prob, entropy + + def _compute_log_prob( + self, + actions: th.Tensor, + mean: th.Tensor, + std: th.Tensor, + ) -> th.Tensor: + """Compute log probability of actions under Gaussian distribution.""" + var = std.pow(2) + log_prob = -0.5 * ( + ((actions - mean).pow(2) / var) + + 2 * th.log(std) + + th.log(th.tensor(2 * th.pi)) + ) + return log_prob.sum(dim=-1) \ No newline at end of file diff --git a/assume/reinforcement_learning/rollout_buffer.py b/assume/reinforcement_learning/rollout_buffer.py new file mode 100644 index 000000000..a248584f8 --- /dev/null +++ b/assume/reinforcement_learning/rollout_buffer.py @@ -0,0 +1,261 @@ +# SPDX-FileCopyrightText: ASSUME Developers +# +# SPDX-License-Identifier: AGPL-3.0-or-later + +""" +ROLLOUT BUFFER - On-Policy Experience Storage for PPO + +Unlike the replay buffer (off-policy), the rollout buffer: +1. Stores complete trajectories from current policy +2. Computes advantages using GAE (Generalized Advantage Estimation) +3. Is cleared after each policy update (single-use data) +""" + +import numpy as np +import torch as th +from typing import NamedTuple, Generator + + +class RolloutBufferSamples(NamedTuple): + """Container for rollout buffer samples.""" + observations: th.Tensor + actions: th.Tensor + old_values: th.Tensor + old_log_probs: th.Tensor + advantages: th.Tensor + returns: th.Tensor + + +class RolloutBuffer: + """ + On-policy rollout buffer for PPO algorithm. + + Stores trajectories from the current policy and computes + GAE-based advantages for policy optimization. 
+ + Key differences from ReplayBuffer: + - Single-use: data is discarded after update + - Stores log_probs for importance sampling + - Stores values for advantage computation + - Computes advantages and returns before sampling + """ + + def __init__( + self, + buffer_size: int, + obs_dim: int, + act_dim: int, + n_rl_units: int, + device: str | th.device, + float_type: th.dtype, + gamma: float = 0.99, + gae_lambda: float = 0.95, + ): + """ + Initialize rollout buffer. + + Args: + buffer_size: Maximum number of transitions per rollout + obs_dim: Observation dimension per agent + act_dim: Action dimension per agent + n_rl_units: Number of RL agents + device: Torch device (cpu/cuda) + float_type: Data type for tensors + gamma: Discount factor for returns + gae_lambda: Lambda for GAE computation + """ + self.buffer_size = buffer_size + self.obs_dim = obs_dim + self.act_dim = act_dim + self.n_rl_units = n_rl_units + self.device = device + self.float_type = float_type + self.gamma = gamma + self.gae_lambda = gae_lambda + + # Current position and full flag + self.pos = 0 + self.full = False + self.generator_ready = False + + # Allocate buffers + self.reset() + + def reset(self) -> None: + """Clear the buffer and allocate new storage.""" + self.observations = np.zeros( + (self.buffer_size, self.n_rl_units, self.obs_dim), + dtype=np.float32, + ) + self.actions = np.zeros( + (self.buffer_size, self.n_rl_units, self.act_dim), + dtype=np.float32, + ) + self.rewards = np.zeros( + (self.buffer_size, self.n_rl_units), + dtype=np.float32, + ) + self.values = np.zeros( + (self.buffer_size, self.n_rl_units), + dtype=np.float32, + ) + self.log_probs = np.zeros( + (self.buffer_size, self.n_rl_units), + dtype=np.float32, + ) + self.dones = np.zeros( + (self.buffer_size, self.n_rl_units), + dtype=np.float32, + ) + + # Computed after rollout + self.advantages = np.zeros( + (self.buffer_size, self.n_rl_units), + dtype=np.float32, + ) + self.returns = np.zeros( + (self.buffer_size, 
self.n_rl_units), + dtype=np.float32, + ) + + self.pos = 0 + self.full = False + self.generator_ready = False + + def add( + self, + obs: np.ndarray, + action: np.ndarray, + reward: np.ndarray, + done: np.ndarray, + value: np.ndarray, + log_prob: np.ndarray, + ) -> None: + """ + Add a transition to the buffer. + + Args: + obs: Observations [n_agents, obs_dim] + action: Actions taken [n_agents, act_dim] + reward: Rewards received [n_agents] + done: Episode done flags [n_agents] + value: Value estimates [n_agents] + log_prob: Log probabilities of actions [n_agents] + """ + if self.pos >= self.buffer_size: + self.full = True + return + + self.observations[self.pos] = np.array(obs).copy() + self.actions[self.pos] = np.array(action).copy() + self.rewards[self.pos] = np.array(reward).copy() + self.dones[self.pos] = np.array(done).copy() + self.values[self.pos] = np.array(value).copy() + self.log_probs[self.pos] = np.array(log_prob).copy() + + self.pos += 1 + + def compute_returns_and_advantages( + self, + last_values: np.ndarray, + dones: np.ndarray, + ) -> None: + """ + Compute GAE advantages and returns. + + Uses Generalized Advantage Estimation (GAE) for lower variance + advantage estimates. 
+ + Args: + last_values: Value estimates for the last state [n_agents] + dones: Done flags for the last state [n_agents] + """ + last_values = np.array(last_values).flatten() + dones = np.array(dones).flatten() + + # GAE computation + last_gae_lam = np.zeros(self.n_rl_units, dtype=np.float32) + buffer_size = self.pos if not self.full else self.buffer_size + + for step in reversed(range(buffer_size)): + if step == buffer_size - 1: + next_non_terminal = 1.0 - dones + next_values = last_values + else: + next_non_terminal = 1.0 - self.dones[step + 1] + next_values = self.values[step + 1] + + # TD error + delta = ( + self.rewards[step] + + self.gamma * next_values * next_non_terminal + - self.values[step] + ) + + # GAE advantage + last_gae_lam = ( + delta + + self.gamma * self.gae_lambda * next_non_terminal * last_gae_lam + ) + self.advantages[step] = last_gae_lam + + # Returns = advantages + values + self.returns = self.advantages + self.values + self.generator_ready = True + + def get( + self, + batch_size: int | None = None, + ) -> Generator[RolloutBufferSamples, None, None]: + """ + Generate batches of samples for training. + + Args: + batch_size: Size of each batch. If None, return all data. + + Yields: + RolloutBufferSamples containing observation, action, etc. + """ + if not self.generator_ready: + raise ValueError( + "Must call compute_returns_and_advantages before sampling!" 
+ ) + + buffer_size = self.pos if not self.full else self.buffer_size + indices = np.random.permutation(buffer_size) + + if batch_size is None: + batch_size = buffer_size + + start_idx = 0 + while start_idx < buffer_size: + batch_indices = indices[start_idx : start_idx + batch_size] + yield self._get_samples(batch_indices) + start_idx += batch_size + + def _get_samples(self, indices: np.ndarray) -> RolloutBufferSamples: + """Convert numpy arrays to torch tensors for given indices.""" + return RolloutBufferSamples( + observations=th.as_tensor( + self.observations[indices], device=self.device, dtype=self.float_type + ), + actions=th.as_tensor( + self.actions[indices], device=self.device, dtype=self.float_type + ), + old_values=th.as_tensor( + self.values[indices], device=self.device, dtype=self.float_type + ), + old_log_probs=th.as_tensor( + self.log_probs[indices], device=self.device, dtype=self.float_type + ), + advantages=th.as_tensor( + self.advantages[indices], device=self.device, dtype=self.float_type + ), + returns=th.as_tensor( + self.returns[indices], device=self.device, dtype=self.float_type + ), + ) + + def size(self) -> int: + """Return current number of stored transitions.""" + return self.buffer_size if self.full else self.pos \ No newline at end of file From 83854497eee677be8750b22856828c4fcbc33656 Mon Sep 17 00:00:00 2001 From: Harshul-18 Date: Fri, 9 Jan 2026 14:51:30 +0100 Subject: [PATCH 02/44] DONE: Added DDPG, PPO in multi-agent environment in /reinforcement_learning module. 
--- .../reinforcement_learning/rollout_buffer.py | 51 +------------------ 1 file changed, 1 insertion(+), 50 deletions(-) diff --git a/assume/reinforcement_learning/rollout_buffer.py b/assume/reinforcement_learning/rollout_buffer.py index a248584f8..630dff80e 100644 --- a/assume/reinforcement_learning/rollout_buffer.py +++ b/assume/reinforcement_learning/rollout_buffer.py @@ -2,15 +2,6 @@ # # SPDX-License-Identifier: AGPL-3.0-or-later -""" -ROLLOUT BUFFER - On-Policy Experience Storage for PPO - -Unlike the replay buffer (off-policy), the rollout buffer: -1. Stores complete trajectories from current policy -2. Computes advantages using GAE (Generalized Advantage Estimation) -3. Is cleared after each policy update (single-use data) -""" - import numpy as np import torch as th from typing import NamedTuple, Generator @@ -29,15 +20,6 @@ class RolloutBufferSamples(NamedTuple): class RolloutBuffer: """ On-policy rollout buffer for PPO algorithm. - - Stores trajectories from the current policy and computes - GAE-based advantages for policy optimization. - - Key differences from ReplayBuffer: - - Single-use: data is discarded after update - - Stores log_probs for importance sampling - - Stores values for advantage computation - - Computes advantages and returns before sampling """ def __init__( @@ -53,16 +35,6 @@ def __init__( ): """ Initialize rollout buffer. - - Args: - buffer_size: Maximum number of transitions per rollout - obs_dim: Observation dimension per agent - act_dim: Action dimension per agent - n_rl_units: Number of RL agents - device: Torch device (cpu/cuda) - float_type: Data type for tensors - gamma: Discount factor for returns - gae_lambda: Lambda for GAE computation """ self.buffer_size = buffer_size self.obs_dim = obs_dim @@ -133,14 +105,6 @@ def add( ) -> None: """ Add a transition to the buffer. 
- - Args: - obs: Observations [n_agents, obs_dim] - action: Actions taken [n_agents, act_dim] - reward: Rewards received [n_agents] - done: Episode done flags [n_agents] - value: Value estimates [n_agents] - log_prob: Log probabilities of actions [n_agents] """ if self.pos >= self.buffer_size: self.full = True @@ -162,13 +126,6 @@ def compute_returns_and_advantages( ) -> None: """ Compute GAE advantages and returns. - - Uses Generalized Advantage Estimation (GAE) for lower variance - advantage estimates. - - Args: - last_values: Value estimates for the last state [n_agents] - dones: Done flags for the last state [n_agents] """ last_values = np.array(last_values).flatten() dones = np.array(dones).flatten() @@ -209,16 +166,10 @@ def get( ) -> Generator[RolloutBufferSamples, None, None]: """ Generate batches of samples for training. - - Args: - batch_size: Size of each batch. If None, return all data. - - Yields: - RolloutBufferSamples containing observation, action, etc. """ if not self.generator_ready: raise ValueError( - "Must call compute_returns_and_advantages before sampling!" + "Must call compute_returns_and_advantages before sampling." 
) buffer_size = self.pos if not self.full else self.buffer_size From 5b9763d4d65d681ccdf5b068300a755bc097e343 Mon Sep 17 00:00:00 2001 From: Harshul-18 Date: Mon, 12 Jan 2026 16:10:49 +0100 Subject: [PATCH 03/44] UPDATED ppo-input-pipeline, code-documentation DELETED rollout_buffer.py ADDED RolloutBuffer-in-buffer.py --- assume/common/base.py | 7 + .../algorithms/mappo.py | 179 +++++++++--- assume/reinforcement_learning/buffer.py | 254 +++++++++++++++++- .../reinforcement_learning/learning_role.py | 129 ++++++++- .../neural_network_architecture.py | 82 +++--- .../reinforcement_learning/rollout_buffer.py | 212 --------------- assume/strategies/learning_strategies.py | 61 ++++- 7 files changed, 633 insertions(+), 291 deletions(-) delete mode 100644 assume/reinforcement_learning/rollout_buffer.py diff --git a/assume/common/base.py b/assume/common/base.py index 33fb9d28a..32abc38c1 100644 --- a/assume/common/base.py +++ b/assume/common/base.py @@ -865,6 +865,13 @@ class LearningConfig: target_policy_noise: float = 0.2 target_noise_clip: float = 0.5 + ppo_clip_range: float | None = 0.1 + ppo_clip_range_vf: float | None = None + ppo_n_epochs: int = 10 + ppo_entropy_coef: float = 0.01 + ppo_vf_coef: float = 0.5 + ppo_gae_lambda: float = 0.95 + def __post_init__(self): """Calculate defaults that depend on other fields and validate inputs.""" if self.early_stopping_steps is None: diff --git a/assume/reinforcement_learning/algorithms/mappo.py b/assume/reinforcement_learning/algorithms/mappo.py index b3f43c5d4..f3fa7865a 100644 --- a/assume/reinforcement_learning/algorithms/mappo.py +++ b/assume/reinforcement_learning/algorithms/mappo.py @@ -28,21 +28,23 @@ class PPO(RLAlgorithm): def __init__( self, learning_role, - clip_range = 0.2, # Clipping parameter - clip_range_vf = None, - n_epochs = 10, # Number of epochs per update - entropy_coef = 0.01, # Entropy bonus coefficient - vf_coef = 0.5, # Value function loss coefficient - max_grad_norm = 0.5, # Gradient clipping + 
clip_range = 0.1, # Epsilon clipping constant preventing the policy from changing too much in a single update. + clip_range_vf = 0.1, # preventing the value function from changing too much from previous estimates + n_epochs = 30, # sample efficiency + entropy_coef = 0.02, # encourages exploration by rewarding "randomness" + vf_coef = 1.0, # balances the importance of training the Critic and training the Actor + max_grad_norm = 0.5, # Gradient clipping ): """Initialize PPO algorithm.""" super().__init__(learning_role) - self.clip_range = clip_range - self.clip_range_vf = clip_range_vf - self.n_epochs = n_epochs - self.entropy_coef = entropy_coef - self.vf_coef = vf_coef + config = self.learning_config + + self.clip_range = clip_range if clip_range is not None else getattr(config, 'ppo_clip_range', 0.2) + self.clip_range_vf = clip_range_vf if clip_range_vf is not None else getattr(config, 'ppo_clip_range_vf', None) + self.n_epochs = n_epochs if n_epochs is not None else getattr(config, 'ppo_n_epochs', 10) + self.entropy_coef = entropy_coef if entropy_coef is not None else getattr(config, 'ppo_entropy_coef', 0.01) + self.vf_coef = vf_coef if vf_coef is not None else getattr(config, 'ppo_vf_coef', 0.5) self.max_grad_norm = max_grad_norm # Update counter @@ -274,6 +276,21 @@ def update_policy(self) -> None: # Get rollout buffer rollout_buffer = self.learning_role.rollout_buffer + + # Check if rollout buffer has data + if rollout_buffer is None or rollout_buffer.pos == 0: + logger.debug("Rollout buffer is empty, skipping policy update") + return + + # Accumulate data if we don't have enough for a full batch + # This decouples train_freq from the required rollout length + if rollout_buffer.pos < self.learning_role.learning_config.batch_size: + logger.debug( + f"Rollout buffer has {rollout_buffer.pos} samples, " + f"waiting for {self.learning_role.learning_config.batch_size} (batch_size). " + "Skipping update to accumulate more on-policy data." 
+ ) + return # Update learning rate progress_remaining = self.learning_role.get_progress_remaining() @@ -293,16 +310,37 @@ def update_policy(self) -> None: buffer_size = rollout_buffer.pos if not rollout_buffer.full else rollout_buffer.buffer_size if buffer_size > 0: - # Get the last observation from the buffer - last_obs = rollout_buffer.observations[buffer_size-1] - last_dones = rollout_buffer.dones[buffer_size-1] + # Use the LAST observation as the bootstrap for the REST of the buffer. + # We sacrifice the last step (pos-1) to serve as s_{t+1} for the step before it. + # This ensures V(s_{t+1}) is calculating using the REAL next state, not self-referential. + + last_idx = buffer_size - 1 + last_obs = rollout_buffer.observations[last_idx] + last_dones = rollout_buffer.dones[last_idx] + + # Reduce buffer size by 1 so as to not train on the bootstrap step + rollout_buffer.pos -= 1 + if rollout_buffer.full: + rollout_buffer.full = False # If it was full, it's not anymore + + # Prepare unique observations for centralized critic + last_unique_obs = last_obs[:, self.obs_dim - self.unique_obs_dim :] with th.no_grad(): for i, strategy in enumerate(strategies): + # Construct centralized observation + obs_i = last_obs[i : i + 1] + other_unique = np.concatenate( + (last_unique_obs[:i], last_unique_obs[i + 1 :]), axis=0 + ) + centralized_obs = np.concatenate( + (obs_i, other_unique.reshape(1, -1)), axis=1 + ) + obs_tensor = th.as_tensor( - last_obs[i:i+1], - device = self.device, - dtype = self.float_type + centralized_obs, + device=self.device, + dtype=self.float_type, ) # Get value estimate from critic last_values[i] = strategy.critic(obs_tensor).cpu().numpy().flatten()[0] @@ -315,14 +353,54 @@ def update_policy(self) -> None: all_actor_losses = [] all_critic_losses = [] all_entropy_losses = [] + + # Initialize unit_params for gradient logging + # Use an empty list that will be dynamically extended + unit_params = [] + step_count = 0 + + # Helper to create a new step entry 
+ def create_step_entry(): + return { + u_id: { + "actor_loss": None, + "actor_total_grad_norm": None, + "actor_max_grad_norm": None, + "critic_loss": None, + "critic_total_grad_norm": None, + "critic_max_grad_norm": None, + } + for u_id in self.learning_role.rl_strats.keys() + } for epoch in range(self.n_epochs): for batch in rollout_buffer.get(self.learning_config.batch_size): + current_batch_size = batch.observations.shape[0] + + # Precompute unique observation parts for centralized critic + unique_obs_from_others = batch.observations[ + :, :, self.obs_dim - self.unique_obs_dim : + ].reshape(current_batch_size, n_rl_agents, -1) + for i, strategy in enumerate(strategies): actor = strategy.actor critic = strategy.critic obs_i = batch.observations[:, i, :] + + # Construct centralized state + other_unique_obs = th.cat( + (unique_obs_from_others[:, :i], unique_obs_from_others[:, i + 1 :]), + dim=1, + ) + all_states = th.cat( + ( + obs_i.reshape(current_batch_size, -1), + other_unique_obs.reshape(current_batch_size, -1), + ), + dim=1, + ) + actions_i = batch.actions[:, i, :] old_log_probs_i = batch.old_log_probs[:, i] advantages_i = batch.advantages[:, i] @@ -337,7 +415,7 @@ def update_policy(self) -> None: obs_i, actions_i ) - values = critic(obs_i).flatten() + values = critic(all_states).flatten() # Importance sampling ratio ratio = th.exp(log_probs - old_log_probs_i) @@ -347,12 +425,12 @@ def update_policy(self) -> None: policy_loss_2 = advantages_i * th.clamp( ratio, 1 - self.clip_range, 1 + self.clip_range ) - policy_loss = -th.min(policy_loss_1, policy_loss_2) + policy_loss = -th.min(policy_loss_1, policy_loss_2).mean() # Entropy loss entropy_loss = -self.entropy_coef * entropy.mean() - if self.clip_rnage_vf is not None: + if self.clip_range_vf is not None: # Clipped value function loss values_clipped = old_values_i + th.clamp( values - old_values_i, @@ -372,11 +450,24 @@ def update_policy(self) -> None: critic.optimizer.zero_grad() loss.backward() + # Calculate 
gradient norms BEFORE clipping + actor_params = list(actor.parameters()) + critic_params = list(critic.parameters()) + + actor_max_grad_norm = max( + (p.grad.norm().item() for p in actor_params if p.grad is not None), + default=0.0 + ) + critic_max_grad_norm = max( + (p.grad.norm().item() for p in critic_params if p.grad is not None), + default=0.0 + ) + # Gradient clipping - th.nn.utils.clip_grad_norm_( + actor_total_grad_norm = th.nn.utils.clip_grad_norm_( actor.parameters(), self.max_grad_norm ) - th.nn.utils.clip_grad_norm_( + critic_total_grad_norm = th.nn.utils.clip_grad_norm_( critic.parameters(), self.max_grad_norm ) @@ -387,20 +478,44 @@ def update_policy(self) -> None: all_actor_losses.append(policy_loss.item()) all_critic_losses.append(value_loss.item()) all_entropy_losses.append(entropy_loss.item()) + + # Ensure we have an entry for this step + if step_count >= len(unit_params): + unit_params.append(create_step_entry()) + + # Store per-unit gradient params for this step + unit_params[step_count][strategy.unit_id]["actor_loss"] = policy_loss.item() + unit_params[step_count][strategy.unit_id]["critic_loss"] = value_loss.item() + unit_params[step_count][strategy.unit_id]["actor_total_grad_norm"] = actor_total_grad_norm.item() if isinstance(actor_total_grad_norm, th.Tensor) else actor_total_grad_norm + unit_params[step_count][strategy.unit_id]["actor_max_grad_norm"] = actor_max_grad_norm + unit_params[step_count][strategy.unit_id]["critic_total_grad_norm"] = critic_total_grad_norm.item() if isinstance(critic_total_grad_norm, th.Tensor) else critic_total_grad_norm + unit_params[step_count][strategy.unit_id]["critic_max_grad_norm"] = critic_max_grad_norm + + step_count += 1 self.n_updates += 1 # Log average metrics - if self.learning_role.tensor_board_logger: - self.learning_role.tensor_board_logger.log_scalar( - "ppo/actor_loss", np.mean(all_actor_losses), self.n_updates - ) - self.learning_role.tensor_board_logger.log_scalar( - "ppo/critic_loss", 
np.mean(all_critic_losses), self.n_updates - ) - self.learning_role.tensor_board_logger.log_scalar( - "ppo/entropy_loss", np.mean(all_entropy_losses), self.n_updates - ) + # Log average metrics + # if self.learning_role.tensor_board_logger: + # self.learning_role.tensor_board_logger.log_scalar( + # "ppo/actor_loss", np.mean(all_actor_losses), self.n_updates + # ) + # self.learning_role.tensor_board_logger.log_scalar( + # "ppo/critic_loss", np.mean(all_critic_losses), self.n_updates + # ) + # self.learning_role.tensor_board_logger.log_scalar( + # "ppo/entropy_loss", np.mean(all_entropy_losses), self.n_updates + # ) + # if all_actor_losses: + # logger.info( + # f"PPO Update {self.n_updates} - Actor loss: {np.mean(all_actor_losses):.4f}, " + # f"Critic loss: {np.mean(all_critic_losses):.4f}, " + # f"Entropy loss: {np.mean(all_entropy_losses):.4f}" + # ) + + # Write gradient params to output + self.learning_role.write_rl_grad_params_to_output(learning_rate, unit_params) # Clear rollout buffer rollout_buffer.reset() diff --git a/assume/reinforcement_learning/buffer.py b/assume/reinforcement_learning/buffer.py index 534310b6d..d021452a4 100644 --- a/assume/reinforcement_learning/buffer.py +++ b/assume/reinforcement_learning/buffer.py @@ -3,7 +3,7 @@ # SPDX-License-Identifier: AGPL-3.0-or-later import warnings -from typing import NamedTuple +from typing import NamedTuple, Generator import numpy as np import torch as th @@ -172,3 +172,255 @@ def sample(self, batch_size: int) -> ReplayBufferSamples: ) return ReplayBufferSamples(*tuple(map(self.to_torch, data))) + +class RolloutBufferSamples(NamedTuple): + """ + Container for roll buffer samples. It holds one batch of training samples + from PPO's rollout buffer. 
+ """ + observations: th.Tensor # states/observations the agent saw + actions: th.Tensor # actions the agent took + old_values: th.Tensor # critic's value estimates + old_log_probs: th.Tensor # log_probability of taking each action + advantages: th.Tensor # generalized advantage estimates + returns: th.Tensor # expected returns + +class RolloutBuffer: + """ + On-policy rollout buffer for PPO algorithm. This is different from TD3/DDPG which keep old data in a replay buffer. The buffer stores data for all the agents together. + + buffer_size (int): maximum number of transitions the buffer can store before training. + obs_dim (int): dimension of the observation space. + act_dim (int): dimension of the action space. + n_rl_units (int): number of RL agents in the multi-agent system. + device (str | th.device): specifies the device for training. + float_type (th.dtype): precision of floating-point numbers. + gamma (float): discount factor for defining how much to value future rewards. + gae_lambda (float): GAE (Generalized Advantage Estimationn) smoothing parameter. 
+ """ + + def __init__( + self, + buffer_size: int, + obs_dim: int, + act_dim: int, + n_rl_units: int, + device: str | th.device, + float_type: th.dtype, + gamma: float = 0.99, + gae_lambda: float = 0.98 + ): + """Initialize the rollout buffer.""" + self.buffer_size = buffer_size + self.obs_dim = obs_dim + self.act_dim = act_dim + self.n_rl_units = n_rl_units + self.device = device + self.float_type = float_type + self.gamma = gamma + self.gae_lambda = gae_lambda + + # Current position and full flag + self.pos = 0 + self.full = False + self.generator_ready = False + + # Allocate buffers + self.reset() + + def reset(self) -> None: + """Clear the buffer and allocate new storage.""" + self.observations = np.zeros( + ( + self.buffer_size, + self.n_rl_units, + self.obs_dim + ), + dtype = np.float32 + ) + self.actions = np.zeros( + ( + self.buffer_size, + self.n_rl_units, + self.act_dim + ), + dtype = np.float32 + ) + self.rewards = np.zeros( + ( + self.buffer_size, + self.n_rl_units + ), + dtype = np.float32 + ) + self.values = np.zeros( + ( + self.buffer_size, + self.n_rl_units + ), + dtype = np.float32 + ) + self.log_probs = np.zeros( + ( + self.buffer_size, + self.n_rl_units + ), + dtype = np.float32 + ) + self.dones = np.zeros( + ( + self.buffer_size, + self.n_rl_units + ), + dtype = np.float32 + ) + + # Computed after rollout + self.advantages = np.zeros( + ( + self.buffer_size, + self.n_rl_units + ), + dtype = np.float32 + ) + self.returns = np.zeros( + ( + self.buffer_size, + self.n_rl_units + ), + dtype = np.float32 + ) + + self.pos = 0 + self.full = False + self.generator_ready = False + + def add( + self, + obs: np.ndarray, + action: np.ndarray, + reward: np.ndarray, + done: np.ndarray, + value: np.ndarray, + log_prob: np.ndarray + ) -> None: + """Add a transition to the buffer.""" + if self.pos >= self.buffer_size: + self.full = True + return + + self.observations[self.pos] = np.array(obs).copy() + self.actions[self.pos] = np.array(action).copy() + 
self.rewards[self.pos] = np.array(reward).flatten().copy()
+ self.dones[self.pos] = np.array(done).flatten().copy()
+ self.values[self.pos] = np.array(value).flatten().copy()
+ self.log_probs[self.pos] = np.array(log_prob).flatten().copy()
+ # flattening the rewards, dones, values, log_probs array to (n_units,) size
+
+ self.pos += 1
+
+ def compute_returns_and_advantages(
+ self,
+ last_values: np.ndarray,
+ dones: np.ndarray
+ ) -> None:
+ """Compute GAE advantages and returns."""
+ # taking the final value estimates and episode-end flags,
+ # and making them flat arrays providing one number per agent.
+ last_values = np.array(last_values).flatten()
+ dones = np.array(dones).flatten()
+
+ # GAE computation
+ # starting with running total of zero for each agent.
+ last_gae_lam = np.zeros(self.n_rl_units, dtype=np.float32)
+ buffer_size = self.pos if not self.full else self.buffer_size
+
+ # backward loop
+ for step in reversed(range(buffer_size)):
+ if step == buffer_size - 1:
+ # if at the last step, use the last_values given as input
+ next_non_terminal = 1.0 - dones
+ next_values = last_values
+ else:
+ # for all the other steps, get the next value and next episode flag.
+ next_non_terminal = 1.0 - self.dones[step + 1]
+ next_values = self.values[step + 1]
+
+ # TD error
+ delta = (
+ self.rewards[step]
+ + self.gamma * next_values * next_non_terminal
+ - self.values[step]
+ )
+
+ # GAE advantage
+ last_gae_lam = (
+ delta
+ + self.gamma * self.gae_lambda * next_non_terminal * last_gae_lam
+ )
+ self.advantages[step] = last_gae_lam
+
+ # Returns = advantages + values
+ self.returns = self.advantages + self.values
+ self.generator_ready = True
+
+ def get(
+ self,
+ batch_size: int | None = None
+ ) -> Generator[RolloutBufferSamples, None, None]:
+ """Generate batches of samples for training."""
+ if not self.generator_ready:
+ raise ValueError(
+ "Must call compute_returns_and_advantages before sampling." 
+ ) + + buffer_size = self.pos if not self.full else self.buffer_size + indices = np.random.permutation(buffer_size) + + if batch_size is None: + batch_size = buffer_size + + start_idx = 0 + while start_idx < buffer_size: + batch_indices = indices[start_idx : start_idx + batch_size] + yield self._get_samples(batch_indices) + start_idx += batch_size + + def _get_samples(self, indices: np.ndarray) -> RolloutBufferSamples: + """Convert numpy arrays to torch tensors for given indices.""" + return RolloutBufferSamples( + observations = th.as_tensor( + self.observations[indices], + device = self.device, + dtype = self.float_type + ), + actions = th.as_tensor( + self.actions[indices], + device = self.device, + dtype = self.float_type + ), + old_values = th.as_tensor( + self.values[indices], + device = self.device, + dtype = self.float_type + ), + old_log_probs = th.as_tensor( + self.log_probs[indices], + device = self.device, + dtype = self.float_type + ), + advantages = th.as_tensor( + self.advantages[indices], + device = self.device, + dtype = self.float_type + ), + returns = th.as_tensor( + self.returns[indices], + device = self.device, + dtype = self.float_type + ) + ) + + def size(self) -> int: + """Return current number of stored transitions.""" + return self.buffer_size if self.full else self.pos diff --git a/assume/reinforcement_learning/learning_role.py b/assume/reinforcement_learning/learning_role.py index 23d1df97d..32966b0aa 100644 --- a/assume/reinforcement_learning/learning_role.py +++ b/assume/reinforcement_learning/learning_role.py @@ -7,6 +7,7 @@ from datetime import datetime from pathlib import Path +import numpy as np import pandas as pd import torch as th from mango import Role @@ -21,7 +22,10 @@ from assume.reinforcement_learning.algorithms.matd3 import TD3 from assume.reinforcement_learning.algorithms.maddpg import DDPG from assume.reinforcement_learning.algorithms.mappo import PPO -from assume.reinforcement_learning.buffer import ReplayBuffer +from 
assume.reinforcement_learning.buffer import ( + ReplayBuffer, + RolloutBuffer +) from assume.reinforcement_learning.learning_utils import ( linear_schedule_func, transform_buffer_data, @@ -54,6 +58,7 @@ def __init__( # how many learning roles do exist and how are they named self.buffer: ReplayBuffer = None + self.rollout_buffer: RolloutBuffer = None # For on-policy algorithms (PPO) self.episodes_done = 0 self.rl_strats: dict[int, LearningStrategy] = {} self.learning_config = learning_config @@ -123,6 +128,10 @@ def __init__( self.all_rewards = defaultdict(lambda: defaultdict(list)) self.all_regrets = defaultdict(lambda: defaultdict(list)) self.all_profits = defaultdict(lambda: defaultdict(list)) + # PPO algorithm specific caches for on-policy learning + self.all_values = defaultdict(lambda: defaultdict(list)) + self.all_log_probs = defaultdict(lambda: defaultdict(list)) + self.all_dones = defaultdict(lambda: defaultdict(list)) def on_ready(self): """ @@ -234,6 +243,10 @@ async def store_to_buffer_and_update(self) -> None: current_noises = self.all_noises current_regrets = self.all_regrets current_profits = self.all_profits + # PPO specific caches + current_values = self.all_values + current_log_probs = self.all_log_probs + current_dones = self.all_dones # Reset cache dicts immediately with new defaultdicts self.all_obs = defaultdict(lambda: defaultdict(list)) @@ -242,6 +255,10 @@ async def store_to_buffer_and_update(self) -> None: self.all_noises = defaultdict(lambda: defaultdict(list)) self.all_regrets = defaultdict(lambda: defaultdict(list)) self.all_profits = defaultdict(lambda: defaultdict(list)) + # PPO specific resets + self.all_values = defaultdict(lambda: defaultdict(list)) + self.all_log_probs = defaultdict(lambda: defaultdict(list)) + self.all_dones = defaultdict(lambda: defaultdict(list)) # Get timestamps from cache we took all_timestamps = sorted(current_obs.keys()) @@ -257,6 +274,9 @@ async def store_to_buffer_and_update(self) -> None: "noises": {t: 
current_noises[t] for t in timestamps_to_process}, "regret": {t: current_regrets[t] for t in timestamps_to_process}, "profit": {t: current_profits[t] for t in timestamps_to_process}, + "values": {t: current_values[t] for t in timestamps_to_process}, + "log_probs": {t: current_log_probs[t] for t in timestamps_to_process}, + "dones": {t: current_dones[t] for t in timestamps_to_process} } # write data to output agent @@ -289,12 +309,84 @@ async def _store_to_buffer_and_update_sync(self, cache, device) -> None: ) return - # rewrite dict so that obs.shape == (n_rl_units, obs_dim) and sorted by keys and store in buffer - self.buffer.add( - obs=transform_buffer_data(cache["obs"], device), - actions=transform_buffer_data(cache["actions"], device), - reward=transform_buffer_data(cache["rewards"], device), - ) + # check which buffer type to use based on algorithm + if self.learning_config.algorithm == "mappo": + # for PPO use on-policy RolloutBuffer + # Add each transition to the rollout buffer + for timestamp in sorted(cache["obs"].keys()): + obs_data = transform_buffer_data( + { + timestamp: cache["obs"][timestamp] + }, + device + ) + actions_data = transform_buffer_data( + { + timestamp: cache["actions"][timestamp] + }, + device + ) + rewards_data = transform_buffer_data( + { + timestamp: cache["rewards"][timestamp] + }, + device + ) + + if cache["values"].get(timestamp): + values_data = transform_buffer_data( + { + timestamp: cache["values"][timestamp] + }, + device + ) + else: + values_data = np.zeros(len(self.values_data)) + + if cache["log_probs"].get(timestamp): + log_probs_data = transform_buffer_data( + { + timestamp: cache["log_probs"][timestamp] + }, + device + ) + else: + log_probs_data = np.zeros(len(self.log_probs_data)) + + if cache["dones"].get(timestamp): + dones_data = transform_buffer_data( + { + timestamp: cache["dones"][timestamp] + }, + device + ) + else: + dones_data = np.zeros(len(self.rl_strats)) + + # Helper to convert to numpy + def 
to_numpy(data): + if isinstance(data, th.Tensor): + return data.cpu().numpy() + return np.array(data) + + # Add to rollout buffer + if self.rollout_buffer is not None: + self.rollout_buffer.add( + obs = to_numpy(obs_data), + action = to_numpy(actions_data), + reward = to_numpy(rewards_data), + done = to_numpy(dones_data), + value = to_numpy(values_data), + log_prob = to_numpy(log_probs_data) + ) + else: + # for TD3/DDPG use off-policy ReplayBuffer + # rewrite dict so that obs.shape == (n_rl_units, obs_dim) and sorted by keys and store in buffer + self.buffer.add( + obs = transform_buffer_data(cache["obs"], device), + actions = transform_buffer_data(cache["actions"], device), + reward = transform_buffer_data(cache["rewards"], device), + ) if ( self.episodes_done @@ -347,6 +439,27 @@ def add_reward_to_cache(self, unit_id, start, reward, regret, profit) -> None: self.all_regrets[start][unit_id].append(regret) self.all_profits[start][unit_id].append(profit) + def add_ppo_data_to_cache( + self, + unit_id, + start, + value, + log_prob, + done=False + ) -> None: + """ + Add PPO specific data to the cache dict, per unit_id. + + Args: + unit_id (str): The id of the unit. + value (float): The value estimate V(s) from the critic. + log_prob (float): The log probability of the action. + done (bool): Whether a terminal state or not. + """ + self.all_values[start][unit_id].append(value) + self.all_log_probs[start][unit_id].append(log_prob) + self.all_dones[start][unit_id].append(float(done)) + def load_inter_episodic_data(self, inter_episodic_data): """ Load the inter-episodic data from the dict stored across simulation runs. 
@@ -361,6 +474,7 @@ def load_inter_episodic_data(self, inter_episodic_data): self.rl_eval = inter_episodic_data["all_eval"] self.avg_rewards = inter_episodic_data["avg_all_eval"] self.buffer = inter_episodic_data["buffer"] + self.rollout_buffer = inter_episodic_data["rollout_buffer"] self.initialize_policy(inter_episodic_data["actors_and_critics"]) @@ -390,6 +504,7 @@ def get_inter_episodic_data(self): "all_eval": self.rl_eval, "avg_all_eval": self.avg_rewards, "buffer": self.buffer, + "rollout_buffer": self.rollout_buffer, "actors_and_critics": self.rl_algorithm.extract_policy(), } diff --git a/assume/reinforcement_learning/neural_network_architecture.py b/assume/reinforcement_learning/neural_network_architecture.py index bfd1032c6..87c1cb2a8 100644 --- a/assume/reinforcement_learning/neural_network_architecture.py +++ b/assume/reinforcement_learning/neural_network_architecture.py @@ -183,7 +183,7 @@ def forward( xu = th.cat([obs, actions], dim=1) # Concatenate obs & actions # Compute Q - x = nn.Sequential(*self.layers)(xu) + x = nn.Sequential(*self.q_layers)(xu) return x @@ -232,7 +232,10 @@ def init_layer(m): def forward(self, obs: th.Tensor) -> th.Tensor: """Returns V value.""" - return self.v_net(obs) + x = obs + for layer in self.v_layers: + x = layer(x) + return x class Actor(nn.Module): @@ -396,15 +399,20 @@ def forward(self, obs): class ActorPPO(nn.Module): - """ - PPO Stochastic Actor Network. - - Key differences from MLPActor (DDPG): - - Outputs mean AND log_std for Gaussian policy - - Provides log_prob for importance sampling - - Used with clipped surrogate objective - """ + activation_function_limit = { + "softsign": (-1, 1), + "tanh": (-1, 1), + "sigmoid": (0, 1), + "relu": (0, float("inf")), + } + activation_function_map = { + "softsign": F.softsign, + "tanh": th.tanh, + "sigmoid": th.sigmoid, + "relu": F.relu + } + def __init__( self, obs_dim: int, @@ -414,20 +422,22 @@ def __init__( *args, **kwargs, ): - """ - Initialize stochastic actor. 
- - Args: - obs_dim: Observation dimension - act_dim: Action dimension - float_type: Data type for parameters - log_std_init: Initial log standard deviation - """ super().__init__() self.act_dim = act_dim self.float_type = float_type + self.activation = "softsign" # or "tanh", "sigmoid", "relu" + + if self.activation not in self.activation_function_limit: + raise ValueError( + f"Activation '{self.activation}' not supported! Supported: {list(self.activation_function_limit.keys())}" + ) + + self.min_output, self.max_output = self.activation_function_limit[ + self.activation + ] + # Policy network (outputs mean) self.FC1 = nn.Linear(obs_dim, 256, dtype=float_type) self.FC2 = nn.Linear(256, 128, dtype=float_type) @@ -457,21 +467,31 @@ def init_layer(m): nn.init.orthogonal_(self.mean_layer.weight, gain=0.01) nn.init.zeros_(self.mean_layer.bias) - def forward(self, obs: th.Tensor) -> tuple[th.Tensor, th.Tensor]: - """ - Forward pass: observation → (mean, log_std). + def forward(self, obs: th.Tensor, deterministic: bool = False) -> th.Tensor: + """Forward pass""" + x = F.relu(self.FC1(obs)) + x = F.relu(self.FC2(x)) + mean = th.tanh(self.mean_layer(x)) # Bounded to [-1, 1] - Args: - obs: Observations [batch, obs_dim] - - Returns: - Tuple of (action_mean, log_std) + if deterministic: + return mean + + # Sample from Gaussian during training + log_std = self.log_std.expand_as(mean) + std = log_std.exp() + noise = th.randn_like(mean) + action = mean + std * noise + + # Clamp to valid range + return th.clamp(action, -1.0, 1.0) + + def get_distribution(self, obs: th.Tensor) -> tuple[th.Tensor, th.Tensor]: + """ + Get the policy distribution parameters. 
""" x = F.relu(self.FC1(obs)) x = F.relu(self.FC2(x)) mean = th.tanh(self.mean_layer(x)) # Bounded to [-1, 1] - - # Expand log_std to batch size log_std = self.log_std.expand_as(mean) return mean, log_std @@ -491,7 +511,7 @@ def get_action_and_log_prob( Returns: Tuple of (action, log_prob) """ - mean, log_std = self.forward(obs) + mean, log_std = self.get_distribution(obs) std = log_std.exp() if deterministic: @@ -526,7 +546,7 @@ def evaluate_actions( Returns: Tuple of (log_prob, entropy, values) """ - mean, log_std = self.forward(obs) + mean, log_std = self.get_distribution(obs) std = log_std.exp() # Log probability diff --git a/assume/reinforcement_learning/rollout_buffer.py b/assume/reinforcement_learning/rollout_buffer.py deleted file mode 100644 index 630dff80e..000000000 --- a/assume/reinforcement_learning/rollout_buffer.py +++ /dev/null @@ -1,212 +0,0 @@ -# SPDX-FileCopyrightText: ASSUME Developers -# -# SPDX-License-Identifier: AGPL-3.0-or-later - -import numpy as np -import torch as th -from typing import NamedTuple, Generator - - -class RolloutBufferSamples(NamedTuple): - """Container for rollout buffer samples.""" - observations: th.Tensor - actions: th.Tensor - old_values: th.Tensor - old_log_probs: th.Tensor - advantages: th.Tensor - returns: th.Tensor - - -class RolloutBuffer: - """ - On-policy rollout buffer for PPO algorithm. - """ - - def __init__( - self, - buffer_size: int, - obs_dim: int, - act_dim: int, - n_rl_units: int, - device: str | th.device, - float_type: th.dtype, - gamma: float = 0.99, - gae_lambda: float = 0.95, - ): - """ - Initialize rollout buffer. 
- """ - self.buffer_size = buffer_size - self.obs_dim = obs_dim - self.act_dim = act_dim - self.n_rl_units = n_rl_units - self.device = device - self.float_type = float_type - self.gamma = gamma - self.gae_lambda = gae_lambda - - # Current position and full flag - self.pos = 0 - self.full = False - self.generator_ready = False - - # Allocate buffers - self.reset() - - def reset(self) -> None: - """Clear the buffer and allocate new storage.""" - self.observations = np.zeros( - (self.buffer_size, self.n_rl_units, self.obs_dim), - dtype=np.float32, - ) - self.actions = np.zeros( - (self.buffer_size, self.n_rl_units, self.act_dim), - dtype=np.float32, - ) - self.rewards = np.zeros( - (self.buffer_size, self.n_rl_units), - dtype=np.float32, - ) - self.values = np.zeros( - (self.buffer_size, self.n_rl_units), - dtype=np.float32, - ) - self.log_probs = np.zeros( - (self.buffer_size, self.n_rl_units), - dtype=np.float32, - ) - self.dones = np.zeros( - (self.buffer_size, self.n_rl_units), - dtype=np.float32, - ) - - # Computed after rollout - self.advantages = np.zeros( - (self.buffer_size, self.n_rl_units), - dtype=np.float32, - ) - self.returns = np.zeros( - (self.buffer_size, self.n_rl_units), - dtype=np.float32, - ) - - self.pos = 0 - self.full = False - self.generator_ready = False - - def add( - self, - obs: np.ndarray, - action: np.ndarray, - reward: np.ndarray, - done: np.ndarray, - value: np.ndarray, - log_prob: np.ndarray, - ) -> None: - """ - Add a transition to the buffer. 
- """ - if self.pos >= self.buffer_size: - self.full = True - return - - self.observations[self.pos] = np.array(obs).copy() - self.actions[self.pos] = np.array(action).copy() - self.rewards[self.pos] = np.array(reward).copy() - self.dones[self.pos] = np.array(done).copy() - self.values[self.pos] = np.array(value).copy() - self.log_probs[self.pos] = np.array(log_prob).copy() - - self.pos += 1 - - def compute_returns_and_advantages( - self, - last_values: np.ndarray, - dones: np.ndarray, - ) -> None: - """ - Compute GAE advantages and returns. - """ - last_values = np.array(last_values).flatten() - dones = np.array(dones).flatten() - - # GAE computation - last_gae_lam = np.zeros(self.n_rl_units, dtype=np.float32) - buffer_size = self.pos if not self.full else self.buffer_size - - for step in reversed(range(buffer_size)): - if step == buffer_size - 1: - next_non_terminal = 1.0 - dones - next_values = last_values - else: - next_non_terminal = 1.0 - self.dones[step + 1] - next_values = self.values[step + 1] - - # TD error - delta = ( - self.rewards[step] - + self.gamma * next_values * next_non_terminal - - self.values[step] - ) - - # GAE advantage - last_gae_lam = ( - delta - + self.gamma * self.gae_lambda * next_non_terminal * last_gae_lam - ) - self.advantages[step] = last_gae_lam - - # Returns = advantages + values - self.returns = self.advantages + self.values - self.generator_ready = True - - def get( - self, - batch_size: int | None = None, - ) -> Generator[RolloutBufferSamples, None, None]: - """ - Generate batches of samples for training. - """ - if not self.generator_ready: - raise ValueError( - "Must call compute_returns_and_advantages before sampling." 
- ) - - buffer_size = self.pos if not self.full else self.buffer_size - indices = np.random.permutation(buffer_size) - - if batch_size is None: - batch_size = buffer_size - - start_idx = 0 - while start_idx < buffer_size: - batch_indices = indices[start_idx : start_idx + batch_size] - yield self._get_samples(batch_indices) - start_idx += batch_size - - def _get_samples(self, indices: np.ndarray) -> RolloutBufferSamples: - """Convert numpy arrays to torch tensors for given indices.""" - return RolloutBufferSamples( - observations=th.as_tensor( - self.observations[indices], device=self.device, dtype=self.float_type - ), - actions=th.as_tensor( - self.actions[indices], device=self.device, dtype=self.float_type - ), - old_values=th.as_tensor( - self.values[indices], device=self.device, dtype=self.float_type - ), - old_log_probs=th.as_tensor( - self.log_probs[indices], device=self.device, dtype=self.float_type - ), - advantages=th.as_tensor( - self.advantages[indices], device=self.device, dtype=self.float_type - ), - returns=th.as_tensor( - self.returns[indices], device=self.device, dtype=self.float_type - ), - ) - - def size(self) -> int: - """Return current number of stored transitions.""" - return self.buffer_size if self.full else self.pos \ No newline at end of file diff --git a/assume/strategies/learning_strategies.py b/assume/strategies/learning_strategies.py index a55db0cb0..c15c56031 100644 --- a/assume/strategies/learning_strategies.py +++ b/assume/strategies/learning_strategies.py @@ -262,6 +262,7 @@ def get_actions(self, next_observation): ----- In learning mode, actions incorporate noise for exploration. Initial exploration relies solely on noise to cover the action space broadly. + For PPO, we also store log_prob and value estimates for later use. 
""" # distinction whether we are in learning mode or not to handle exploration realised with noise @@ -283,15 +284,37 @@ def get_actions(self, next_observation): # ============================================================================= # only use noise as the action to enforce exploration curr_action = noise + + # For PPO, store dummy log_prob and value during initial exploration + if self.algorithm == "mappo": + self._last_log_prob = th.tensor(0.0, device=self.device) + self._last_value = th.tensor(0.0, device=self.device) else: - # if we are not in the initial exploration phase we chose the action with the actor neural net - # and add noise to the action - curr_action = self.actor(next_observation).detach() - noise = self.action_noise.noise( - device=self.device, dtype=self.float_type - ) - curr_action += noise + # Check if we're using PPO algorithm + if self.algorithm == "mappo": + # PPO: use get_action_and_log_prob for proper stochastic sampling + curr_action, log_prob = self.actor.get_action_and_log_prob(next_observation.unsqueeze(0)) + curr_action = curr_action.squeeze(0).detach() + self._last_log_prob = log_prob.squeeze(0).detach() + + # Get value estimate from critic (if available) + if hasattr(self.learning_role, 'critics') and self.unit_id in self.learning_role.critics: + critic = self.learning_role.critics[self.unit_id] + self._last_value = critic(next_observation.unsqueeze(0)).squeeze().detach() + else: + self._last_value = th.tensor(0.0, device=self.device) + + # PPO uses stochastic policy, no external noise needed + noise = th.zeros_like(curr_action, dtype=self.float_type) + else: + # TD3/DDPG: if we are not in the initial exploration phase we chose the action with the actor neural net + # and add noise to the action + curr_action = self.actor(next_observation).detach() + noise = self.action_noise.noise( + device=self.device, dtype=self.float_type + ) + curr_action += noise # make sure that noise adding does not exceed the actual output of the 
NN as it pushes results in a direction that actor can't even reach curr_action = th.clamp( @@ -299,7 +322,11 @@ def get_actions(self, next_observation): ) else: # if we are not in learning mode we just use the actor neural net to get the action without adding noise - curr_action = self.actor(next_observation).detach() + if self.algorithm == "mappo": + # For PPO evaluation, use deterministic action (mean) + curr_action = self.actor(next_observation, deterministic=True).detach() + else: + curr_action = self.actor(next_observation).detach() # noise is an tensor with zeros, because we are not in learning mode noise = th.zeros_like(curr_action, dtype=self.float_type) @@ -493,6 +520,15 @@ def calculate_bids( if self.learning_mode: self.learning_role.add_actions_to_cache(self.unit_id, start, actions, noise) + # For PPO, also cache value estimates and log probabilities + if self.algorithm == "mappo" and hasattr(self, '_last_log_prob'): + self.learning_role.add_ppo_data_to_cache( + self.unit_id, + start, + getattr(self, '_last_value', 0.0), + self._last_log_prob.item() if hasattr(self._last_log_prob, 'item') else self._last_log_prob, + done=False + ) return bids @@ -795,6 +831,15 @@ def calculate_bids( if self.learning_mode: self.learning_role.add_actions_to_cache(self.unit_id, start, actions, noise) + # For PPO, also cache value estimates and log probabilities + if self.algorithm == "mappo" and hasattr(self, '_last_log_prob'): + self.learning_role.add_ppo_data_to_cache( + self.unit_id, + start, + getattr(self, '_last_value', 0.0), + self._last_log_prob.item() if hasattr(self._last_log_prob, 'item') else self._last_log_prob, + done=False + ) return bids From 9082e0b28f2a6014273c0076cad49028aa8524b5 Mon Sep 17 00:00:00 2001 From: Harshul-18 Date: Wed, 14 Jan 2026 04:00:06 +0100 Subject: [PATCH 04/44] FIX: initial values_data assignment --- assume/reinforcement_learning/learning_role.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git 
a/assume/reinforcement_learning/learning_role.py b/assume/reinforcement_learning/learning_role.py index 32966b0aa..19e533468 100644 --- a/assume/reinforcement_learning/learning_role.py +++ b/assume/reinforcement_learning/learning_role.py @@ -341,7 +341,7 @@ async def _store_to_buffer_and_update_sync(self, cache, device) -> None: device ) else: - values_data = np.zeros(len(self.values_data)) + values_data = np.zeros(len(self.rl_strats)) if cache["log_probs"].get(timestamp): log_probs_data = transform_buffer_data( @@ -351,7 +351,7 @@ async def _store_to_buffer_and_update_sync(self, cache, device) -> None: device ) else: - log_probs_data = np.zeros(len(self.log_probs_data)) + log_probs_data = np.zeros(len(self.rl_strats)) if cache["dones"].get(timestamp): dones_data = transform_buffer_data( From e6d0056c4bd1fbfa33aac8d5f54f600b3bc50635 Mon Sep 17 00:00:00 2001 From: kim-mskw Date: Wed, 14 Jan 2026 12:07:17 +0100 Subject: [PATCH 05/44] - started making proper config definition --- assume/common/base.py | 162 ++++++++++++------ .../algorithms/matd3.py | 2 +- examples/inputs/example_02a/config.yaml | 30 +++- 3 files changed, 128 insertions(+), 66 deletions(-) diff --git a/assume/common/base.py b/assume/common/base.py index 32abc38c1..33df0e866 100644 --- a/assume/common/base.py +++ b/assume/common/base.py @@ -4,7 +4,7 @@ import logging from collections import defaultdict -from dataclasses import dataclass +from dataclasses import dataclass, field from datetime import datetime, timedelta import numpy as np @@ -753,38 +753,25 @@ def remove_empty_bids(self, bids: list) -> list: @dataclass -class LearningConfig: - """ - A class for the learning configuration. +class AlgorithmConfig: + """Base configuration for algorithm-specific parameters.""" - Parameters: - learning_mode (bool): Should we use learning mode at all? If False, the learning bidding strategy is - loaded from trained_policies_load_path and no training occurs. Default is False. 
- evaluation_mode (bool): This setting is modified internally. Whether to run in evaluation mode. If True, the agent uses the learned policy - without exploration noise and no training updates occur. Default is False. - continue_learning (bool): Whether to use pre-learned strategies and then continue learning. - If True, loads existing policies from trained_policies_load_path and continues training. Default is False. - trained_policies_save_path (str | None): The directory path - relative to the scenario's inputs_path - where newly trained RL policies (actor and - critic networks) will be saved. Only needed when learning_mode is True. Value is set in setup_world(). Defaults to None. - trained_policies_load_path (str | None): The directory path - relative to the scenario's inputs_path - from which pre-trained policies should be - loaded. Needed when continue_learning is True or using pre-trained strategies. Default is None. - min_bid_price (float | None): The minimum bid price which limits the action of the actor to this price. - Used to constrain the actor's output to a realistic price range. Default is -100.0. - max_bid_price (float | None): The maximum bid price which limits the action of the actor to this price. - Used to constrain the actor's output to a realistic price range. Default is 100.0. +@dataclass +class MATD3Config(AlgorithmConfig): + """ + Configuration for MATD3 algorithm parameters. - device (str): The device to use for PyTorch computations. Options include "cpu", "cuda", or specific - CUDA devices like "cuda:0". Default is "cpu". + Parameters: + gamma (float): The discount factor for future rewards, ranging from 0 to 1. Higher values give more + weight to long-term rewards in decision-making. Default is 0.99. + actor_architecture (str): The architecture of the neural networks used for the actors. Options include + "mlp" (Multi-Layer Perceptron) and "lstm" (Long Short-Term Memory). Default is "mlp". 
episodes_collecting_initial_experience (int): The number of episodes at the start during which random actions are chosen instead of using the actor network. This helps populate the replay buffer with diverse experiences. Default is 5. exploration_noise_std (float): The standard deviation of Gaussian noise added to actions during exploration in the environment. Higher values encourage more exploration. Default is 0.2. - training_episodes (int): The number of training episodes, where one episode is the entire simulation - horizon specified in the general config. Default is 100. - validation_episodes_interval (int): The interval (in episodes) at which validation episodes are run - to evaluate the current policy's performance without training updates. Default is 5. train_freq (str): Defines the frequency in time steps at which the actor and critic networks are updated. Accepts time strings like "24h" for 24 hours or "1d" for 1 day. Default is "24h". batch_size (int): The batch size of experiences sampled from the replay buffer for each training update. @@ -802,15 +789,6 @@ class LearningConfig: early_stopping_threshold (float): The minimum improvement in moving average reward required to avoid early stopping. If the reward improvement is less than this threshold over early_stopping_steps, training is terminated early. Default is 0.05. - - algorithm (str): Specifies which reinforcement learning algorithm to use. Currently, only "matd3" - (Multi-Agent Twin Delayed Deep Deterministic Policy Gradient) is implemented. Default is "matd3". - replay_buffer_size (int): The maximum number of transitions stored in the replay buffer for experience replay. - Larger buffers allow for more diverse training samples. Default is 500000. - gamma (float): The discount factor for future rewards, ranging from 0 to 1. Higher values give more - weight to long-term rewards in decision-making. Default is 0.99. 
- actor_architecture (str): The architecture of the neural networks used for the actors. Options include - "mlp" (Multi-Layer Perceptron) and "lstm" (Long Short-Term Memory). Default is "mlp". policy_delay (int): The frequency (in gradient steps) at which the actor policy is updated. TD3 updates the critic more frequently than the actor to stabilize training. Default is 2. noise_sigma (float): The standard deviation of the Ornstein-Uhlenbeck or Gaussian noise distribution @@ -827,6 +805,97 @@ class LearningConfig: critic updates. This smoothing helps prevent overfitting to narrow policy peaks. Default is 0.2. target_noise_clip (float): The maximum absolute value for clipping the target policy noise. Prevents the noise from being too large. Default is 0.5. + """ + + actor_architecture: str = "mlp" + batch_size: int = 128 + episodes_collecting_initial_experience: int = 5 + gamma: float = 0.99 + gradient_steps: int = 100 + noise_dt: int = 1 + noise_scale: int = 1 + noise_sigma: float = 0.1 + action_noise_schedule: str | None = None + train_freq: str = "24h" + policy_delay: int = 2 + tau: float = 0.005 + target_policy_noise: float = 0.2 + target_noise_clip: float = 0.5 + replay_buffer_size: int = 50000 + + +@dataclass +class PPOConfig(AlgorithmConfig): + """ + Configuration for PPO algorithm parameters. + + Parameters: + actor_architecture (str): The architecture of the neural networks used for the actors. Options include + "mlp" (Multi-Layer Perceptron) and "lstm" (Long Short-Term Memory). Default is "mlp". + batch_size (int): The batch size of experiences sampled from the replay buffer for each training update. + Larger batches provide more stable gradients but require more memory. Default is 128. + clip_ratio (float): The clipping ratio for the PPO surrogate objective. Controls how much the new policy + can deviate from the old policy during updates. Typical values are between 0.1 and 0.3. Default is 0.1. 
+ entropy_coef (float): The coefficient for the entropy bonus added to the loss function. + Encourages exploration by penalizing low-entropy policies. Default is 0.01. + gae_lambda (float): The lambda parameter for Generalized Advantage Estimation (GAE). + Balances bias and variance in advantage estimates. Typical values are between 0.9 and 0.98. Default is 0.95. + gamma (float): The discount factor for future rewards, ranging from 0 to 1. Higher values give more + weight to long-term rewards in decision-making. Default is 0.99. + max_grad_norm (float): The maximum norm for gradient clipping. Prevents exploding gradients by + capping their magnitude during backpropagation. Default is 0.5. + train_freq (str): Defines the frequency in time steps at which the actor and critic networks are updated. + Accepts time strings like "24h" for 24 hours or "1d" for 1 day. Default is "24h". + vf_coef (float): The coefficient for the value function loss term in the overall loss function. + Balances the importance of value function accuracy versus policy improvement. Default is 0.5. + n_epochs (int): The number of epochs to perform for each training update. + More epochs can lead to better learning but increase computation time. Default is 10. + """ + + actor_architecture: str = "mlp" + batch_size: int = 128 + clip_ratio: float = 0.1 + entropy_coef: float = 0.01 + gae_lambda: float = 0.95 + gamma: float = 0.99 + max_grad_norm: float = 0.5 + train_freq: str = "24h" + vf_coef: float = 0.5 + n_epochs: int = 10 + + +@dataclass +class LearningConfig: + """ + A class for the learning configuration. + + Parameters: + learning_mode (bool): Should we use learning mode at all? If False, the learning bidding strategy is + loaded from trained_policies_load_path and no training occurs. Default is False. + evaluation_mode (bool): This setting is modified internally. Whether to run in evaluation mode. 
If True, the agent uses the learned policy + without exploration noise and no training updates occur. Default is False. + continue_learning (bool): Whether to use pre-learned strategies and then continue learning. + If True, loads existing policies from trained_policies_load_path and continues training. Default is False. + trained_policies_save_path (str | None): The directory path - relative to the scenario's inputs_path - where newly trained RL policies (actor and + critic networks) will be saved. Only needed when learning_mode is True. Value is set in setup_world(). Defaults to None. + trained_policies_load_path (str | None): The directory path - relative to the scenario's inputs_path - from which pre-trained policies should be + loaded. Needed when continue_learning is True or using pre-trained strategies. Default is None. + + min_bid_price (float | None): The minimum bid price which limits the action of the actor to this price. + Used to constrain the actor's output to a realistic price range. Default is -100.0. + max_bid_price (float | None): The maximum bid price which limits the action of the actor to this price. + Used to constrain the actor's output to a realistic price range. Default is 100.0. + device (str): The device to use for PyTorch computations. Options include "cpu", "cuda", or specific + CUDA devices like "cuda:0". Default is "cpu". + training_episodes (int): The number of training episodes, where one episode is the entire simulation + horizon specified in the general config. Default is 100. + validation_episodes_interval (int): The interval (in episodes) at which validation episodes are run + to evaluate the current policy's performance without training updates. Default is 5. + algorithm (str): Specifies which reinforcement learning algorithm to use. Currently, only "matd3" + (Multi-Agent Twin Delayed Deep Deterministic Policy Gradient) is implemented. Default is "matd3". 
+ replay_buffer_size (int): The maximum number of transitions stored in the replay buffer for experience replay. + Larger buffers allow for more diverse training samples. Default is 500000. + """ @@ -840,37 +909,18 @@ class LearningConfig: max_bid_price: float | None = 100.0 device: str = "cpu" - episodes_collecting_initial_experience: int = 5 - exploration_noise_std: float = 0.2 training_episodes: int = 100 validation_episodes_interval: int = 5 - train_freq: str = "24h" - batch_size: int = 128 - gradient_steps: int = 100 learning_rate: float = 0.001 learning_rate_schedule: str | None = None early_stopping_steps: int | None = None early_stopping_threshold: float = 0.05 algorithm: str = "matd3" - replay_buffer_size: int = 50000 - gamma: float = 0.99 - actor_architecture: str = "mlp" - policy_delay: int = 2 - noise_sigma: float = 0.1 - noise_scale: int = 1 - noise_dt: int = 1 - action_noise_schedule: str | None = None - tau: float = 0.005 - target_policy_noise: float = 0.2 - target_noise_clip: float = 0.5 - ppo_clip_range: float | None = 0.1 - ppo_clip_range_vf: float | None = None - ppo_n_epochs: int = 10 - ppo_entropy_coef: float = 0.01 - ppo_vf_coef: float = 0.5 - ppo_gae_lambda: float = 0.95 + # Nested algorithm configurations + matd3: MATD3Config = field(default_factory=MATD3Config) + ppo: PPOConfig = field(default_factory=PPOConfig) def __post_init__(self): """Calculate defaults that depend on other fields and validate inputs.""" diff --git a/assume/reinforcement_learning/algorithms/matd3.py b/assume/reinforcement_learning/algorithms/matd3.py index dbdd50e41..813f57cb5 100644 --- a/assume/reinforcement_learning/algorithms/matd3.py +++ b/assume/reinforcement_learning/algorithms/matd3.py @@ -457,7 +457,7 @@ def update_policy(self): """ - logger.debug("Updating Policy") + logger.debug("Updating Policy (TD3)") # Stack strategies for easier access strategies = list(self.learning_role.rl_strats.values()) diff --git a/examples/inputs/example_02a/config.yaml 
b/examples/inputs/example_02a/config.yaml index a1a6cc9c1..5fa5af48a 100644 --- a/examples/inputs/example_02a/config.yaml +++ b/examples/inputs/example_02a/config.yaml @@ -17,17 +17,29 @@ base: algorithm: matd3 learning_rate: 0.001 training_episodes: 100 - episodes_collecting_initial_experience: 5 - train_freq: 100h - gradient_steps: 10 - batch_size: 128 - gamma: 0.99 device: cpu - action_noise_schedule: linear - noise_sigma: 0.1 - noise_scale: 1 - noise_dt: 1 validation_episodes_interval: 5 + matd3: + actor_architecture: mlp + batch_size: 64 + episodes_collecting_initial_experience: 3 + gamma: 0.99 + gradient_steps: 10 + noise_dt: 1 + noise_scale: 1 + noise_sigma: 0.1 + action_noise_schedule: linear + train_freq: 24h + ppo: + actor_architecture: dist + batch_size: 11 + clip_ratio: 0.05 + entropy_coef: 0.005 + gae_lambda: 0.95 + gamma: 0.99 + max_grad_norm: 0.3 + train_freq: 33h + vf_coef: 0.75 markets_config: EOM: From 537f9e8e382953fbb4c56a979d5cf54060a9f0e3 Mon Sep 17 00:00:00 2001 From: kim-mskw Date: Wed, 14 Jan 2026 12:25:00 +0100 Subject: [PATCH 06/44] outsource activation_function_limit --- .../reinforcement_learning/learning_utils.py | 16 ++ .../neural_network_architecture.py | 147 +++++------------- 2 files changed, 59 insertions(+), 104 deletions(-) diff --git a/assume/reinforcement_learning/learning_utils.py b/assume/reinforcement_learning/learning_utils.py index 268b0ed5b..8d72f4aeb 100644 --- a/assume/reinforcement_learning/learning_utils.py +++ b/assume/reinforcement_learning/learning_utils.py @@ -26,6 +26,22 @@ class ObsActRew(TypedDict): Schedule = Callable[[float], float] +class ActivationLimits(TypedDict): + """Output limits for activation functions.""" + + min: float + max: float + func: Callable[[th.Tensor], th.Tensor] + + +activation_function_limit: dict[str, ActivationLimits] = { + "tanh": {"min": -1, "max": 1, "func": th.tanh}, + "sigmoid": {"min": 0, "max": 1, "func": th.sigmoid}, + "relu": {"min": 0, "max": float("inf"), "func": 
th.nn.functional.relu}, + "softsign": {"min": -1, "max": 1, "func": th.nn.functional.softsign}, +} + + # Ornstein-Uhlenbeck Noise # from https://github.com/songrotek/DDPG/blob/master/ou_noise.py class OUNoise: diff --git a/assume/reinforcement_learning/neural_network_architecture.py b/assume/reinforcement_learning/neural_network_architecture.py index 87c1cb2a8..9557b4ba1 100644 --- a/assume/reinforcement_learning/neural_network_architecture.py +++ b/assume/reinforcement_learning/neural_network_architecture.py @@ -2,12 +2,11 @@ # # SPDX-License-Identifier: AGPL-3.0-or-later + import torch as th from torch import nn from torch.nn import functional as F -from typing import List, Tuple, Type, Optional, Union - class Critic(nn.Module): """ @@ -20,6 +19,7 @@ class Critic(nn.Module): float_type: Data type for parameters unique_obs_dim: Dimension of agent-specific observations """ + def __init__( self, n_agents: int, @@ -39,9 +39,7 @@ def __init__( # Dynamic Architecture Definition self.hidden_sizes = self._get_architecture(n_agents) - def _get_architecture( - self, n_agents: int - ) -> List[int]: + def _get_architecture(self, n_agents: int) -> list[int]: """Returns hidden layer sizes based on the number of agents.""" if n_agents <= 20: hidden_sizes = [256, 128] # Shallow network for small `n_agents` @@ -58,13 +56,13 @@ def _build_q_network(self) -> nn.ModuleList: layers = nn.ModuleList() input_dim = ( self.obs_dim + self.act_dim - ) # Input includes all observations and actions + ) # Input includes all observations and actions for h in self.hidden_sizes: layers.append(nn.Linear(input_dim, h, dtype=self.float_type)) layers.append(nn.ReLU()) input_dim = h - layers.append(nn.Linear(input_dim, 1, dtype=self.float_type)) # Output Q-value + layers.append(nn.Linear(input_dim, 1, dtype=self.float_type)) # Output Q-value return layers @@ -87,21 +85,11 @@ class CriticTD3(Critic): obs_dim (int): Dimension of each state act_dim (int): Dimension of each action """ + def __init__( - 
self, - n_agents: int, - obs_dim: int, - act_dim: int, - float_type, - unique_obs_dim: int + self, n_agents: int, obs_dim: int, act_dim: int, float_type, unique_obs_dim: int ): - super().__init__( - n_agents, - obs_dim, - act_dim, - float_type, - unique_obs_dim - ) + super().__init__(n_agents, obs_dim, act_dim, float_type, unique_obs_dim) # First Q-network (Q1) self.q1_layers = self._build_q_network() @@ -110,14 +98,12 @@ def __init__( self.q2_layers = self._build_q_network() def forward( - self, - obs: th.Tensor, - actions: th.Tensor - ) -> Tuple[th.Tensor, th.Tensor]: + self, obs: th.Tensor, actions: th.Tensor + ) -> tuple[th.Tensor, th.Tensor]: """ Forward pass through both Q-networks. """ - xu = th.cat([obs, actions], dim=1) # Concatenate obs & actions + xu = th.cat([obs, actions], dim=1) # Concatenate obs & actions # Compute Q1 x1 = nn.Sequential(*self.q1_layers)(xu) @@ -127,11 +113,7 @@ def forward( return x1, x2 - def q1_forward( - self, - obs: th.Tensor, - actions: th.Tensor - ) -> th.Tensor: + def q1_forward(self, obs: th.Tensor, actions: th.Tensor) -> th.Tensor: """ Compute only Q1 (used during actor updates). 
""" @@ -152,6 +134,7 @@ class CriticDDPG(Critic): float_type: Data type for parameters unique_obs_dim: Dimension of agent-specific observations """ + def __init__( self, n_agents: int, @@ -160,27 +143,17 @@ def __init__( float_type: th.dtype, unique_obs_dim: int, ): - super().__init__( - n_agents, - obs_dim, - act_dim, - float_type, - unique_obs_dim - ) + super().__init__(n_agents, obs_dim, act_dim, float_type, unique_obs_dim) # Q-network self.q_layers = self._build_q_network() - + # Initialize weights properly self._init_weights() - def forward( - self, - obs: th.Tensor, - actions: th.Tensor - ) -> th.Tensor: + def forward(self, obs: th.Tensor, actions: th.Tensor) -> th.Tensor: """Returns Q value.""" - xu = th.cat([obs, actions], dim=1) # Concatenate obs & actions + xu = th.cat([obs, actions], dim=1) # Concatenate obs & actions # Compute Q x = nn.Sequential(*self.q_layers)(xu) @@ -198,19 +171,13 @@ class CriticPPO(Critic): unique_obs_dim: Dimension of agent-specific observations """ - def __init__( - self, - n_agents: int, - obs_dim: int, - float_type, - unique_obs_dim: int - ): + def __init__(self, n_agents: int, obs_dim: int, float_type, unique_obs_dim: int): super().__init__( n_agents=n_agents, obs_dim=obs_dim, act_dim=0, float_type=float_type, - unique_obs_dim=unique_obs_dim + unique_obs_dim=unique_obs_dim, ) # V-network @@ -223,11 +190,12 @@ def _init_weights(self) -> None: """ Apply Orthogonal initialization. """ + def init_layer(m): if isinstance(m, nn.Linear): nn.init.orthogonal_(m.weight, gain=1.0) nn.init.zeros_(m.bias) - + self.apply(init_layer) def forward(self, obs: th.Tensor) -> th.Tensor: @@ -243,30 +211,16 @@ class Actor(nn.Module): Parent class for actor networks. 
""" - activation_function_limit = { - "softsign": (-1, 1), - "tanh": (-1, 1), - "sigmoid": (0, 1), - "relu": (0, float("inf")), - } - - activation_function_map = { - "softsign": F.softsign, - "tanh": th.tanh, - "sigmoid": th.sigmoid, - "relu": F.relu - } - def __init__(self): super().__init__() - self.activation = "softsign" # or "tanh", "sigmoid", "relu" + self.activation = "softsign" # or "tanh", "sigmoid", "relu" if self.activation not in self.activation_function_limit: raise ValueError( f"Activation '{self.activation}' not supported! Supported: {list(self.activation_function_limit.keys())}" ) - + self.min_output, self.max_output = self.activation_function_limit[ self.activation ] @@ -286,7 +240,7 @@ class MLPActor(Actor): def __init__(self, obs_dim: int, act_dim: int, float_type, *args, **kwargs): super().__init__() - + self.FC1 = nn.Linear(obs_dim, 256, dtype=float_type) self.FC2 = nn.Linear(256, 128, dtype=float_type) self.FC3 = nn.Linear(128, act_dim, dtype=float_type) @@ -320,7 +274,7 @@ class LSTMActor(Actor): Based on "Multi-Period and Multi-Spatial Equilibrium Analysis in Imperfect Electricity Markets" by Ye at al. (2019) - Note: the original source code was not available, therefore this implementation was derived from the published paper. + Note: the original source code was not available, therefore this implementation was derived from the published paper. 
Adjustments to resemble final layers from MLPActor: - dense layer 2 was omitted - single output layer with softsign activation function to output actions directly instead of two output layers for mean and stddev @@ -334,7 +288,7 @@ def __init__( unique_obs_dim: int, num_timeseries_obs_dim: int, *args, - **kwargs + **kwargs, ): super().__init__() self.float_type = float_type @@ -388,7 +342,7 @@ def forward(self, obs): outputs = th.cat(outputs, dim=1) x = th.cat((outputs, x2), dim=1) - + x = F.relu(self.FC1(x)) x = self.activation_function(self.FC2(x)) @@ -399,20 +353,6 @@ def forward(self, obs): class ActorPPO(nn.Module): - activation_function_limit = { - "softsign": (-1, 1), - "tanh": (-1, 1), - "sigmoid": (0, 1), - "relu": (0, float("inf")), - } - - activation_function_map = { - "softsign": F.softsign, - "tanh": th.tanh, - "sigmoid": th.sigmoid, - "relu": F.relu - } - def __init__( self, obs_dim: int, @@ -427,13 +367,13 @@ def __init__( self.act_dim = act_dim self.float_type = float_type - self.activation = "softsign" # or "tanh", "sigmoid", "relu" + self.activation = "softsign" # or "tanh", "sigmoid", "relu" if self.activation not in self.activation_function_limit: raise ValueError( f"Activation '{self.activation}' not supported! 
Supported: {list(self.activation_function_limit.keys())}" ) - + self.min_output, self.max_output = self.activation_function_limit[ self.activation ] @@ -444,25 +384,24 @@ def __init__( self.mean_layer = nn.Linear(128, act_dim, dtype=float_type) # Learnable log standard deviation - self.log_std = nn.Parameter( - th.ones(act_dim, dtype=float_type) * log_std_init - ) + self.log_std = nn.Parameter(th.ones(act_dim, dtype=float_type) * log_std_init) self._init_weights() def _init_weights(self) -> None: """Apply orthogonal initialization.""" + def init_layer(m): if isinstance(m, nn.Linear): nn.init.orthogonal_(m.weight, gain=0.01) nn.init.zeros_(m.bias) - + # Initialize hidden layers with larger gain nn.init.orthogonal_(self.FC1.weight, gain=1.0) nn.init.orthogonal_(self.FC2.weight, gain=1.0) nn.init.zeros_(self.FC1.bias) nn.init.zeros_(self.FC2.bias) - + # Initialize output layer with small gain nn.init.orthogonal_(self.mean_layer.weight, gain=0.01) nn.init.zeros_(self.mean_layer.bias) @@ -472,16 +411,16 @@ def forward(self, obs: th.Tensor, deterministic: bool = False) -> th.Tensor: x = F.relu(self.FC1(obs)) x = F.relu(self.FC2(x)) mean = th.tanh(self.mean_layer(x)) # Bounded to [-1, 1] - + if deterministic: return mean - + # Sample from Gaussian during training log_std = self.log_std.expand_as(mean) std = log_std.exp() noise = th.randn_like(mean) action = mean + std * noise - + # Clamp to valid range return th.clamp(action, -1.0, 1.0) @@ -493,7 +432,7 @@ def get_distribution(self, obs: th.Tensor) -> tuple[th.Tensor, th.Tensor]: x = F.relu(self.FC2(x)) mean = th.tanh(self.mean_layer(x)) # Bounded to [-1, 1] log_std = self.log_std.expand_as(mean) - + return mean, log_std def get_action_and_log_prob( @@ -503,11 +442,11 @@ def get_action_and_log_prob( ) -> tuple[th.Tensor, th.Tensor]: """ Sample action and compute log probability. 
- + Args: obs: Observations deterministic: If True, return mean action - + Returns: Tuple of (action, log_prob) """ @@ -536,13 +475,13 @@ def evaluate_actions( ) -> tuple[th.Tensor, th.Tensor, th.Tensor]: """ Evaluate log probability and entropy for given actions. - + Used during PPO update to compute importance ratio. - + Args: obs: Observations actions: Actions to evaluate - + Returns: Tuple of (log_prob, entropy, values) """ @@ -570,4 +509,4 @@ def _compute_log_prob( + 2 * th.log(std) + th.log(th.tensor(2 * th.pi)) ) - return log_prob.sum(dim=-1) \ No newline at end of file + return log_prob.sum(dim=-1) From afe007727e5b4dedea9518cdc920944f56e4cacd Mon Sep 17 00:00:00 2001 From: kim-mskw Date: Wed, 14 Jan 2026 13:03:58 +0100 Subject: [PATCH 07/44] - make algorithm specific extra info instead of many if mappo statements - suggest subclass of A2C algorithms so that we can outsource loading and saving function to base algorithm class and inherit form it --- .../algorithms/base_algorithm.py | 426 +++++++++++++++- .../algorithms/maddpg.py | 431 ++++------------- .../algorithms/mappo.py | 428 +++++++---------- .../algorithms/matd3.py | 453 +++--------------- .../reinforcement_learning/learning_role.py | 96 ++-- 5 files changed, 780 insertions(+), 1054 deletions(-) diff --git a/assume/reinforcement_learning/algorithms/base_algorithm.py b/assume/reinforcement_learning/algorithms/base_algorithm.py index 9ab5b258b..120a9bb11 100644 --- a/assume/reinforcement_learning/algorithms/base_algorithm.py +++ b/assume/reinforcement_learning/algorithms/base_algorithm.py @@ -1,12 +1,17 @@ # SPDX-FileCopyrightText: ASSUME Developers # # SPDX-License-Identifier: AGPL-3.0-or-later - +import json import logging +import os import torch as th +from torch.optim import AdamW from assume.reinforcement_learning.algorithms import actor_architecture_aliases +from assume.reinforcement_learning.learning_utils import ( + transfer_weights, +) logger = logging.getLogger(__name__) @@ -89,4 +94,421 @@ 
def load_params(self, directory: str) -> None: """ Load learning params - abstract method to be implemented by the Learning Algorithm """ - pass + + +class A2CAlgorithm(RLAlgorithm): + """ + The base A2C model class. To implement your own A2C algorithm, you need to subclass this class and implement the `update_policy` method. + + Args: + learning_role (Learning Role object): Learning object + """ + + def __init__( + self, + # init learning_role as object of Learning class + learning_role, + ): + super().__init__( + learning_role, + ) + + def save_params(self, directory): + """ + This method saves the parameters of both the actor and critic networks associated with the learning role. It organizes the + saved parameters into separate directories for critics and actors within the specified base directory. + + Args: + directory (str): The base directory for saving the parameters. + """ + self.save_critic_params(directory=f"{directory}/critics") + self.save_actor_params(directory=f"{directory}/actors") + + def save_critic_params(self, directory): + """ + Save the parameters of critic networks. + + This method saves the parameters of the critic networks, including the critic's state_dict, critic_target's state_dict, + and the critic's optimizer state_dict. It organizes the saved parameters into a directory structure specific to the critic + associated with each learning strategy. + + Args: + directory (str): The base directory for saving the parameters. 
+ """ + os.makedirs(directory, exist_ok=True) + for u_id, strategy in self.learning_role.rl_strats.items(): + obj = { + "critic": strategy.critics.state_dict(), + "critic_target": strategy.target_critics.state_dict(), + "critic_optimizer": strategy.critics.optimizer.state_dict(), + } + path = f"{directory}/critic_{u_id}.pt" + th.save(obj, path) + + # record the exact order of u_ids and save it with critics to ensure that the same order is used when loading the parameters + u_id_list = [str(u) for u in self.learning_role.rl_strats.keys()] + mapping = {"u_id_order": u_id_list} + map_path = os.path.join(directory, "u_id_order.json") + with open(map_path, "w") as f: + json.dump(mapping, f, indent=2) + + def save_actor_params(self, directory): + """ + Save the parameters of actor networks. + + This method saves the parameters of the actor networks, including the actor's state_dict, actor_target's state_dict, and + the actor's optimizer state_dict. It organizes the saved parameters into a directory structure specific to the actor + associated with each learning strategy. + + Args: + directory (str): The base directory for saving the parameters. + """ + os.makedirs(directory, exist_ok=True) + for u_id, strategy in self.learning_role.rl_strats.items(): + obj = { + "actor": strategy.actor.state_dict(), + "actor_target": strategy.actor_target.state_dict(), + "actor_optimizer": strategy.actor.optimizer.state_dict(), + } + path = f"{directory}/actor_{u_id}.pt" + th.save(obj, path) + + def load_params(self, directory: str) -> None: + """ + Load the parameters of both actor and critic networks. + + This method loads the parameters of both the actor and critic networks associated with the learning role from the specified + directory. It uses the `load_critic_params` and `load_actor_params` methods to load the respective parameters. + + Args: + directory (str): The directory from which the parameters should be loaded. 
+ """ + self.load_critic_params(directory) + self.load_actor_params(directory) + + def load_critic_params(self, directory: str) -> None: + """ + Load critic, target_critic, and optimizer states for each agent strategy. + If agent count differs between saved and current model, performs weight transfer for both networks. + Args: + directory (str): The directory from which the parameters should be loaded. + """ + logger.info("Loading critic parameters...") + + if not os.path.exists(directory): + logger.warning( + "Specified directory does not exist. Using randomly initialized critics." + ) + return + + map_path = os.path.join(directory, "critics", "u_id_order.json") + if os.path.exists(map_path): + # read the saved order of u_ids from critics save directory + with open(map_path) as f: + loaded_id_order = json.load(f).get("u_id_order", []) + else: + logger.warning("No u_id_order.json: assuming same order as current.") + loaded_id_order = [str(u) for u in self.learning_role.rl_strats.keys()] + + new_id_order = [str(u) for u in self.learning_role.rl_strats.keys()] + direct_load = loaded_id_order == new_id_order + + if direct_load: + logger.info("Agents order unchanged. Loading critic weights directly.") + else: + logger.info( + f"Agents length and/or order mismatch: n_old={len(loaded_id_order)}, n_new={len(new_id_order)}. Transferring weights for critics and target critics." + ) + + for u_id, strategy in self.learning_role.rl_strats.items(): + critic_path = os.path.join(directory, "critics", f"critic_{u_id}.pt") + if not os.path.exists(critic_path): + logger.warning(f"No saved critic for {u_id}; skipping.") + continue + + try: + critic_params = th.load(critic_path, weights_only=True) + for key in ("critic", "critic_target", "critic_optimizer"): + if key not in critic_params: + logger.warning( + f"Missing {key} in critic params for {u_id}; skipping." 
+ ) + continue + + if direct_load: + strategy.critics.load_state_dict(critic_params["critic"]) + strategy.target_critics.load_state_dict( + critic_params["critic_target"] + ) + strategy.critics.optimizer.load_state_dict( + critic_params["critic_optimizer"] + ) + logger.debug(f"Loaded critic for {u_id} directly.") + else: + critic_weights = transfer_weights( + model=strategy.critics, + loaded_state=critic_params["critic"], + loaded_id_order=loaded_id_order, + new_id_order=new_id_order, + obs_base=strategy.obs_dim, + act_dim=strategy.act_dim, + unique_obs=strategy.unique_obs_dim, + ) + target_critic_weights = transfer_weights( + model=strategy.target_critics, + loaded_state=critic_params["critic_target"], + loaded_id_order=loaded_id_order, + new_id_order=new_id_order, + obs_base=strategy.obs_dim, + act_dim=strategy.act_dim, + unique_obs=strategy.unique_obs_dim, + ) + + if critic_weights is None or target_critic_weights is None: + logger.warning( + f"Critic weights transfer failed for {u_id}; skipping." + ) + continue + + strategy.critics.load_state_dict(critic_weights) + strategy.target_critics.load_state_dict(target_critic_weights) + logger.debug(f"Critic weights transferred for {u_id}.") + + except Exception as e: + logger.warning(f"Failed to load critic for {u_id}: {e}") + + def load_actor_params(self, directory: str) -> None: + """ + Load the parameters of actor networks from a specified directory. + + This method loads the parameters of actor networks, including the actor's state_dict, actor_target's state_dict, and + the actor's optimizer state_dict, from the specified directory. It iterates through the learning strategies associated + with the learning role, loads the respective parameters, and updates the actor and target actor networks accordingly. + + Args: + directory (str): The directory from which the parameters should be loaded. 
+ """ + logger.info("Loading actor parameters...") + if not os.path.exists(directory): + logger.warning( + "Specified directory for loading the actors does not exist! Starting with randomly initialized values!" + ) + return + + for u_id, strategy in self.learning_role.rl_strats.items(): + try: + actor_params = self.load_obj( + directory=f"{directory}/actors/actor_{str(u_id)}.pt" + ) + strategy.actor.load_state_dict(actor_params["actor"]) + strategy.actor_target.load_state_dict(actor_params["actor_target"]) + strategy.actor.optimizer.load_state_dict( + actor_params["actor_optimizer"] + ) + + # add a tag to the strategy to indicate that the actor was loaded + strategy.actor.loaded = True + except Exception: + logger.warning(f"No actor values loaded for agent {u_id}") + + def initialize_policy(self, actors_and_critics: dict = None) -> None: + """ + Create actor and critic networks for reinforcement learning. + + If `actors_and_critics` is None, this method creates new actor and critic networks. + If `actors_and_critics` is provided, it assigns existing networks to the respective attributes. + + Args: + actors_and_critics (dict): The actor and critic networks to be assigned. + + """ + if actors_and_critics is None: + self.check_strategy_dimensions() + self.create_actors() + self.create_critics() + + else: + for u_id, strategy in self.learning_role.rl_strats.items(): + strategy.actor = actors_and_critics["actors"][u_id] + strategy.actor_target = actors_and_critics["actor_targets"][u_id] + + strategy.critics = actors_and_critics["critics"][u_id] + strategy.target_critics = actors_and_critics["target_critics"][u_id] + + self.obs_dim = actors_and_critics["obs_dim"] + self.act_dim = actors_and_critics["act_dim"] + self.unique_obs_dim = actors_and_critics["unique_obs_dim"] + + def check_strategy_dimensions(self) -> None: + """ + Iterate over all learning strategies and check if the dimensions of observations and actions are the same. 
+ Also check if the unique observation dimensions are the same. If not, raise a ValueError. + This is important for the TD3 algorithm, as it uses a centralized critic that requires consistent dimensions across all agents. + """ + foresight_list = [] + obs_dim_list = [] + act_dim_list = [] + unique_obs_dim_list = [] + num_timeseries_obs_dim_list = [] + + for strategy in self.learning_role.rl_strats.values(): + foresight_list.append(strategy.foresight) + obs_dim_list.append(strategy.obs_dim) + act_dim_list.append(strategy.act_dim) + unique_obs_dim_list.append(strategy.unique_obs_dim) + num_timeseries_obs_dim_list.append(strategy.num_timeseries_obs_dim) + + if len(set(foresight_list)) > 1: + raise ValueError( + f"All foresight values must be the same for all RL agents. The defined learning strategies have the following foresight values: {foresight_list}" + ) + else: + self.foresight = foresight_list[0] + + if len(set(act_dim_list)) > 1: + raise ValueError( + f"All action dimensions must be the same for all RL agents. The defined learning strategies have the following action dimensions: {act_dim_list}" + ) + else: + self.act_dim = act_dim_list[0] + + if len(set(unique_obs_dim_list)) > 1: + raise ValueError( + f"All unique_obs_dim values must be the same for all RL agents. The defined learning strategies have the following unique_obs_dim values: {unique_obs_dim_list}" + ) + else: + self.unique_obs_dim = unique_obs_dim_list[0] + + if len(set(num_timeseries_obs_dim_list)) > 1: + raise ValueError( + f"All num_timeseries_obs_dim values must be the same for all RL agents. The defined learning strategies have the following num_timeseries_obs_dim values: {num_timeseries_obs_dim_list}" + ) + else: + self.num_timeseries_obs_dim = num_timeseries_obs_dim_list[0] + + # Check last, as other cases should fail before! + if len(set(obs_dim_list)) > 1: + raise ValueError( + f"All observation dimensions must be the same for all RL agents. 
The defined learning strategies have the following observation dimensions: {obs_dim_list}" + ) + else: + self.obs_dim = obs_dim_list[0] + + def create_actors(self) -> None: + """ + Create actor networks for reinforcement learning for each unit strategy. + + This method initializes actor networks and their corresponding target networks for each unit strategy. + The actors are designed to map observations to action probabilities in a reinforcement learning setting. + + The created actor networks are associated with each unit strategy and stored as attributes. + + Note: + The observation dimension need to be the same, due to the centralized critic that all actors share. + If you have units with different observation dimensions. They need to have different critics and hence learning roles. + + """ + + for strategy in self.learning_role.rl_strats.values(): + strategy.actor = self.actor_architecture_class( + obs_dim=self.obs_dim, + act_dim=self.act_dim, + float_type=self.float_type, + unique_obs_dim=self.unique_obs_dim, + num_timeseries_obs_dim=self.num_timeseries_obs_dim, + ).to(self.device) + + strategy.actor_target = self.actor_architecture_class( + obs_dim=self.obs_dim, + act_dim=self.act_dim, + float_type=self.float_type, + unique_obs_dim=self.unique_obs_dim, + num_timeseries_obs_dim=self.num_timeseries_obs_dim, + ).to(self.device) + + strategy.actor_target.load_state_dict(strategy.actor.state_dict()) + strategy.actor_target.train(mode=False) + + strategy.actor.optimizer = AdamW( + strategy.actor.parameters(), + lr=self.learning_role.calc_lr_from_progress( + 1 + ), # 1=100% of simulation remaining, uses learning_rate from config as starting point + ) + + strategy.actor.loaded = False + + def create_critics(self) -> None: + """ + Create critic networks for reinforcement learning. + + This method initializes critic networks for each agent in the reinforcement learning setup. 
+ + Note: + The observation dimension need to be the same, due to the centralized criic that all actors share. + If you have units with different observation dimensions. They need to have different critics and hence learning roles. + """ + n_agents = len(self.learning_role.rl_strats) + + for strategy in self.learning_role.rl_strats.values(): + strategy.critics = self.critic_architecture_class( + n_agents=n_agents, + obs_dim=self.obs_dim, + act_dim=self.act_dim, + unique_obs_dim=self.unique_obs_dim, + float_type=self.float_type, + ).to(self.device) + + strategy.target_critics = self.critic_architecture_class( + n_agents=n_agents, + obs_dim=self.obs_dim, + act_dim=self.act_dim, + unique_obs_dim=self.unique_obs_dim, + float_type=self.float_type, + ).to(self.device) + + strategy.target_critics.load_state_dict(strategy.critics.state_dict()) + strategy.target_critics.train(mode=False) + + strategy.critics.optimizer = AdamW( + strategy.critics.parameters(), + lr=self.learning_role.calc_lr_from_progress( + 1 + ), # 1 = 100% of simulation remaining, uses learning_rate from config as starting point + ) + + def extract_policy(self) -> dict: + """ + Extract actor and critic networks. + + This method extracts the actor and critic networks associated with each learning strategy and organizes them into a + dictionary structure. The extracted networks include actors, actor_targets, critics, and target_critics. The resulting + dictionary is typically used for saving and sharing these networks. + + Returns: + dict: The extracted actor and critic networks. 
+ """ + actors = {} + actor_targets = {} + + critics = {} + target_critics = {} + + for u_id, strategy in self.learning_role.rl_strats.items(): + actors[u_id] = strategy.actor + actor_targets[u_id] = strategy.actor_target + + critics[u_id] = strategy.critics + target_critics[u_id] = strategy.target_critics + + actors_and_critics = { + "actors": actors, + "actor_targets": actor_targets, + "critics": critics, + "target_critics": target_critics, + "obs_dim": self.obs_dim, + "act_dim": self.act_dim, + "unique_obs_dim": self.unique_obs_dim, + } + + return actors_and_critics diff --git a/assume/reinforcement_learning/algorithms/maddpg.py b/assume/reinforcement_learning/algorithms/maddpg.py index a49ac7c83..2eaaad3e6 100644 --- a/assume/reinforcement_learning/algorithms/maddpg.py +++ b/assume/reinforcement_learning/algorithms/maddpg.py @@ -22,30 +22,26 @@ - Decentralized Execution: Each actor only uses its own observation """ -import json import logging -import os import torch as th from torch.nn import functional as F -from torch.optim import AdamW -from assume.reinforcement_learning.algorithms.base_algorithm import RLAlgorithm +from assume.reinforcement_learning.algorithms.base_algorithm import A2CAlgorithm from assume.reinforcement_learning.learning_utils import ( polyak_update, - transfer_weights, ) from assume.reinforcement_learning.neural_network_architecture import CriticDDPG logger = logging.getLogger(__name__) -class DDPG(RLAlgorithm): +class DDPG(A2CAlgorithm): """ Deep Deterministic Policy Gradient (DDPG) Algorithm. - + Extended to multi-agent settings (MADDPG) for electricity market simulations. 
- + Key Features: - Single critic network (vs twin critics in TD3) - Updates actor every step (no policy delay) @@ -56,331 +52,77 @@ class DDPG(RLAlgorithm): def __init__(self, learning_role): """Initialize DDPG algorithm.""" super().__init__(learning_role) - + # Gradient step counter self.n_updates = 0 - + # Gradient clipping threshold self.grad_clip_norm = 1.0 - # ========================================================================= - # CHECKPOINT SAVING METHODS - # ========================================================================= - - def save_params(self, directory: str) -> None: - """Save all actor and critic network parameters to disk.""" - self.save_critic_params(directory=f"{directory}/critics") - self.save_actor_params(directory=f"{directory}/actors") - - def save_critic_params(self, directory: str) -> None: - """Save critic network parameters for all agents.""" - os.makedirs(directory, exist_ok=True) - - for u_id, strategy in self.learning_role.rl_strats.items(): - obj = { - "critic": strategy.critic.state_dict(), - "critic_target": strategy.target_critic.state_dict(), - "critic_optimizer": strategy.critic.optimizer.state_dict(), - } - path = f"{directory}/critic_{u_id}.pt" - th.save(obj, path) - - # Save unit ID order for weight transfer - u_id_list = [str(u) for u in self.learning_role.rl_strats.keys()] - mapping = {"u_id_order": u_id_list} - map_path = os.path.join(directory, "u_id_order.json") - with open(map_path, "w") as f: - json.dump(mapping, f, indent=2) - - def save_actor_params(self, directory: str) -> None: - """Save actor network parameters for all agents.""" - os.makedirs(directory, exist_ok=True) - - for u_id, strategy in self.learning_role.rl_strats.items(): - obj = { - "actor": strategy.actor.state_dict(), - "actor_target": strategy.actor_target.state_dict(), - "actor_optimizer": strategy.actor.optimizer.state_dict(), - } - path = f"{directory}/actor_{u_id}.pt" - th.save(obj, path) - - # 
========================================================================= - # CHECKPOINT LOADING METHODS - # ========================================================================= - - def load_params(self, directory: str) -> None: - """Load all actor and critic parameters from disk.""" - self.load_critic_params(directory) - self.load_actor_params(directory) - - def load_critic_params(self, directory: str) -> None: - """Load critic parameters with support for agent count changes.""" - logger.info("Loading critic parameters...") - - if not os.path.exists(directory): - logger.warning( - "Specified directory does not exist. Using randomly initialized critics." - ) - return - - # Load saved unit ID order - map_path = os.path.join(directory, "critics", "u_id_order.json") - if os.path.exists(map_path): - with open(map_path) as f: - loaded_id_order = json.load(f).get("u_id_order", []) - else: - logger.warning("No u_id_order.json: assuming same order as current.") - loaded_id_order = [str(u) for u in self.learning_role.rl_strats.keys()] - - new_id_order = [str(u) for u in self.learning_role.rl_strats.keys()] - direct_load = loaded_id_order == new_id_order - - if direct_load: - logger.info("Agents order unchanged. Loading critic weights directly.") - else: - logger.info( - f"Agents mismatch: n_old={len(loaded_id_order)}, " - f"n_new={len(new_id_order)}. Transferring weights." 
- ) - - for u_id, strategy in self.learning_role.rl_strats.items(): - critic_path = os.path.join(directory, "critics", f"critic_{u_id}.pt") - if not os.path.exists(critic_path): - logger.warning(f"No saved critic for {u_id}; skipping.") - continue - - try: - critic_params = th.load(critic_path, weights_only=True) - - for key in ("critic", "critic_target", "critic_optimizer"): - if key not in critic_params: - logger.warning(f"Missing {key} in critic params for {u_id}.") - continue - - if direct_load: - strategy.critic.load_state_dict(critic_params["critic"]) - strategy.target_critic.load_state_dict(critic_params["critic_target"]) - strategy.critic.optimizer.load_state_dict(critic_params["critic_optimizer"]) - else: - # Weight transfer for agent count changes - critic_weights = transfer_weights( - model=strategy.critic, - loaded_state=critic_params["critic"], - loaded_id_order=loaded_id_order, - new_id_order=new_id_order, - obs_base=strategy.obs_dim, - act_dim=strategy.act_dim, - unique_obs=strategy.unique_obs_dim, - ) - target_critic_weights = transfer_weights( - model=strategy.target_critic, - loaded_state=critic_params["critic_target"], - loaded_id_order=loaded_id_order, - new_id_order=new_id_order, - obs_base=strategy.obs_dim, - act_dim=strategy.act_dim, - unique_obs=strategy.unique_obs_dim, - ) - - if critic_weights is None or target_critic_weights is None: - logger.warning(f"Weights transfer failed for {u_id}.") - continue - - strategy.critic.load_state_dict(critic_weights) - strategy.target_critic.load_state_dict(target_critic_weights) - - except Exception as e: - logger.warning(f"Failed to load critic for {u_id}: {e}") - - def load_actor_params(self, directory: str) -> None: - """Load actor network parameters from disk.""" - logger.info("Loading actor parameters...") - - if not os.path.exists(directory): - logger.warning( - "Specified directory for actors does not exist! " - "Starting with randomly initialized values!" 
- ) - return - - for u_id, strategy in self.learning_role.rl_strats.items(): - try: - actor_params = self.load_obj( - directory=f"{directory}/actors/actor_{str(u_id)}.pt" - ) - - strategy.actor.load_state_dict(actor_params["actor"]) - strategy.actor_target.load_state_dict(actor_params["actor_target"]) - strategy.actor.optimizer.load_state_dict(actor_params["actor_optimizer"]) - strategy.actor.loaded = True - - except Exception: - logger.warning(f"No actor values loaded for agent {u_id}") - - # ========================================================================= - # NETWORK INITIALIZATION - # ========================================================================= + self.actor_architecture_class = None # define actor class here with kwargs pop or something similar so that parent class storage etc works properly + self.critic_architecture_class = CriticDDPG - def initialize_policy(self, actors_and_critics: dict = None) -> None: + def get_actions(self, next_observation): """ - Initialize actor and critic networks for all agents. - - Args: - actors_and_critics: Optional pre-existing networks to assign + Determines actions based on the current observation, applying noise for exploration if in learning mode. + + Args + ---- + next_observation : torch.Tensor + Observation data influencing bid price and direction. + + Returns + ------- + torch.Tensor + Actions that include bid price and direction. + torch.Tensor + Noise component which is already added to actions for exploration, if applicable. + + Notes + ----- + In learning mode, actions incorporate noise for exploration. Initial exploration relies + solely on noise to cover the action space broadly. + For PPO, we also store log_prob and value estimates for later use. 
""" - if actors_and_critics is None: - self.check_strategy_dimensions() - self.create_actors() - self.create_critics() - else: - for u_id, strategy in self.learning_role.rl_strats.items(): - strategy.actor = actors_and_critics["actors"][u_id] - strategy.actor_target = actors_and_critics["actor_targets"][u_id] - strategy.critic = actors_and_critics["critics"][u_id] - strategy.target_critic = actors_and_critics["target_critics"][u_id] - - self.obs_dim = actors_and_critics["obs_dim"] - self.act_dim = actors_and_critics["act_dim"] - self.unique_obs_dim = actors_and_critics["unique_obs_dim"] - - def check_strategy_dimensions(self) -> None: - """Validate that all agents have consistent dimensions.""" - obs_dim_list = [] - act_dim_list = [] - unique_obs_dim_list = [] - num_timeseries_obs_dim_list = [] - - for strategy in self.learning_role.rl_strats.values(): - obs_dim_list.append(strategy.obs_dim) - act_dim_list.append(strategy.act_dim) - unique_obs_dim_list.append(strategy.unique_obs_dim) - num_timeseries_obs_dim_list.append(strategy.num_timeseries_obs_dim) - - if len(set(obs_dim_list)) > 1: - raise ValueError( - f"All observation dimensions must be the same. " - f"Got: {obs_dim_list}" - ) - else: - self.obs_dim = obs_dim_list[0] - if len(set(act_dim_list)) > 1: - raise ValueError( - f"All action dimensions must be the same. 
" - f"Got: {act_dim_list}" - ) - else: - self.act_dim = act_dim_list[0] + # distinction whether we are in learning mode or not to handle exploration realised with noise + if self.learning_mode and not self.evaluation_mode: + # if we are in learning mode the first x episodes we want to explore the entire action space + # to get a good initial experience, in the area around the costs of the agent + if self.collect_initial_experience_mode: + # define current action as solely noise + noise = th.normal( + mean=0.0, + std=self.exploration_noise_std, + size=(self.act_dim,), + dtype=self.float_type, + device=self.device, + ) - if len(set(unique_obs_dim_list)) > 1: - raise ValueError( - f"All unique_obs_dim values must be the same. " - f"Got: {unique_obs_dim_list}" - ) - else: - self.unique_obs_dim = unique_obs_dim_list[0] + # ============================================================================= + # 2.1 Get Actions and handle exploration + # ============================================================================= + # only use noise as the action to enforce exploration + curr_action = noise + + else: + # and add noise to the action + curr_action = self.actor(next_observation).detach() + noise = self.action_noise.noise( + device=self.device, dtype=self.float_type + ) + curr_action += noise - if len(set(num_timeseries_obs_dim_list)) > 1: - raise ValueError( - f"All num_timeseries_obs_dim values must be the same. 
" - f"Got: {num_timeseries_obs_dim_list}" - ) + # make sure that noise adding does not exceed the actual output of the NN as it pushes results in a direction that actor can't even reach + curr_action = th.clamp( + curr_action, self.actor.min_output, self.actor.max_output + ) else: - self.num_timeseries_obs_dim = num_timeseries_obs_dim_list[0] - - def create_actors(self) -> None: - """Create actor (policy) networks for all agents.""" - for strategy in self.learning_role.rl_strats.values(): - # Create main actor network - strategy.actor = self.actor_architecture_class( - obs_dim=self.obs_dim, - act_dim=self.act_dim, - float_type=self.float_type, - unique_obs_dim=self.unique_obs_dim, - num_timeseries_obs_dim=self.num_timeseries_obs_dim, - ).to(self.device) - - # Create target actor network - strategy.actor_target = self.actor_architecture_class( - obs_dim=self.obs_dim, - act_dim=self.act_dim, - float_type=self.float_type, - unique_obs_dim=self.unique_obs_dim, - num_timeseries_obs_dim=self.num_timeseries_obs_dim, - ).to(self.device) - - # Initialize target with same weights - strategy.actor_target.load_state_dict(strategy.actor.state_dict()) - strategy.actor_target.train(mode=False) - - # Create optimizer - strategy.actor.optimizer = AdamW( - strategy.actor.parameters(), - lr=self.learning_role.calc_lr_from_progress(1), - ) + curr_action = self.actor(next_observation).detach() + # noise is an tensor with zeros, because we are not in learning mode + noise = th.zeros_like(curr_action, dtype=self.float_type) - strategy.actor.loaded = False - - def create_critics(self) -> None: - """ - Create critic (Q-function) networks for all agents. - - Key difference from TD3: Uses single critic instead of twin critics. 
- """ - n_agents = len(self.learning_role.rl_strats) - - for strategy in self.learning_role.rl_strats.values(): - # Create main critic (single Q-network, not twin) - strategy.critic = CriticDDPG( - n_agents=n_agents, - obs_dim=self.obs_dim, - act_dim=self.act_dim, - unique_obs_dim=self.unique_obs_dim, - float_type=self.float_type, - ).to(self.device) - - # Create target critic - strategy.target_critic = CriticDDPG( - n_agents=n_agents, - obs_dim=self.obs_dim, - act_dim=self.act_dim, - unique_obs_dim=self.unique_obs_dim, - float_type=self.float_type, - ).to(self.device) - - # Initialize target with same weights - strategy.target_critic.load_state_dict(strategy.critic.state_dict()) - strategy.target_critic.train(mode=False) - - # Create optimizer - strategy.critic.optimizer = AdamW( - strategy.critic.parameters(), - lr=self.learning_role.calc_lr_from_progress(1), - ) - - def extract_policy(self) -> dict: - """Extract all actor and critic networks into a dictionary.""" - actors = {} - actor_targets = {} - critics = {} - target_critics = {} - - for u_id, strategy in self.learning_role.rl_strats.items(): - actors[u_id] = strategy.actor - actor_targets[u_id] = strategy.actor_target - critics[u_id] = strategy.critic - target_critics[u_id] = strategy.target_critic - - return { - "actors": actors, - "actor_targets": actor_targets, - "critics": critics, - "target_critics": target_critics, - "obs_dim": self.obs_dim, - "act_dim": self.act_dim, - "unique_obs_dim": self.unique_obs_dim, - } + return curr_action, noise # ========================================================================= # CORE TRAINING: POLICY UPDATE @@ -389,7 +131,7 @@ def extract_policy(self) -> dict: def update_policy(self) -> None: """ Update actor and critic networks using the DDPG algorithm. - + Key differences from TD3: 1. Uses single critic (no twin Q-learning) 2. 
Updates actor every step (no policy delay) @@ -418,7 +160,9 @@ def update_policy(self) -> None: # Update noise and learning rate schedules progress_remaining = self.learning_role.get_progress_remaining() - updated_noise_decay = self.learning_role.calc_noise_from_progress(progress_remaining) + updated_noise_decay = self.learning_role.calc_noise_from_progress( + progress_remaining + ) learning_rate = self.learning_role.calc_lr_from_progress(progress_remaining) for strategy in strategies: @@ -436,7 +180,7 @@ def update_policy(self) -> None: transitions = self.learning_role.buffer.sample( self.learning_config.batch_size ) - + states, actions, next_states, rewards = ( transitions.observations, transitions.actions, @@ -446,10 +190,12 @@ def update_policy(self) -> None: # Compute target actions (no smoothing noise in DDPG) with th.no_grad(): - next_actions = th.stack([ - strategy.actor_target(next_states[:, i, :]).clamp(-1, 1) - for i, strategy in enumerate(strategies) - ]) + next_actions = th.stack( + [ + strategy.actor_target(next_states[:, i, :]).clamp(-1, 1) + for i, strategy in enumerate(strategies) + ] + ) next_actions = next_actions.transpose(0, 1).contiguous() next_actions = next_actions.view(-1, n_rl_agents * self.act_dim) @@ -459,7 +205,7 @@ def update_policy(self) -> None: unique_obs_from_others = states[ :, :, self.obs_dim - self.unique_obs_dim : ].reshape(self.learning_config.batch_size, n_rl_agents, -1) - + next_unique_obs_from_others = next_states[ :, :, self.obs_dim - self.unique_obs_dim : ].reshape(self.learning_config.batch_size, n_rl_agents, -1) @@ -482,7 +228,10 @@ def update_policy(self) -> None: dim=1, ) other_next_unique_obs = th.cat( - (next_unique_obs_from_others[:, :i], next_unique_obs_from_others[:, i + 1 :]), + ( + next_unique_obs_from_others[:, :i], + next_unique_obs_from_others[:, i + 1 :], + ), dim=1, ) @@ -495,8 +244,12 @@ def update_policy(self) -> None: ) all_next_states = th.cat( ( - next_states[:, i, 
:].reshape(self.learning_config.batch_size, -1), - other_next_unique_obs.reshape(self.learning_config.batch_size, -1), + next_states[:, i, :].reshape( + self.learning_config.batch_size, -1 + ), + other_next_unique_obs.reshape( + self.learning_config.batch_size, -1 + ), ), dim=1, ) @@ -529,8 +282,12 @@ def update_policy(self) -> None: ) strategy.critic.optimizer.step() - unit_params[step][strategy.unit_id]["critic_total_grad_norm"] = total_norm - unit_params[step][strategy.unit_id]["critic_max_grad_norm"] = max_grad_norm + unit_params[step][strategy.unit_id]["critic_total_grad_norm"] = ( + total_norm + ) + unit_params[step][strategy.unit_id]["critic_max_grad_norm"] = ( + max_grad_norm + ) # ================================================================= # ACTOR UPDATE (every step, no delay in DDPG) @@ -582,8 +339,12 @@ def update_policy(self) -> None: ) strategy.actor.optimizer.step() - unit_params[step][strategy.unit_id]["actor_total_grad_norm"] = total_norm - unit_params[step][strategy.unit_id]["actor_max_grad_norm"] = max_grad_norm + unit_params[step][strategy.unit_id]["actor_total_grad_norm"] = ( + total_norm + ) + unit_params[step][strategy.unit_id]["actor_max_grad_norm"] = ( + max_grad_norm + ) # ================================================================= # TARGET NETWORK UPDATES (Polyak averaging) diff --git a/assume/reinforcement_learning/algorithms/mappo.py b/assume/reinforcement_learning/algorithms/mappo.py index f3fa7865a..7596e99ce 100644 --- a/assume/reinforcement_learning/algorithms/mappo.py +++ b/assume/reinforcement_learning/algorithms/mappo.py @@ -1,269 +1,153 @@ # SPDX-FileCopyrightText: ASSUME Developers # # SPDX-License-Identifier: AGPL-3.0-or-later -import json import logging -import os import numpy as np import torch as th from torch.nn import functional as F -from torch.optim import AdamW -from assume.reinforcement_learning.algorithms.base_algorithm import RLAlgorithm -from assume.reinforcement_learning.learning_utils import 
polyak_update +from assume.reinforcement_learning.algorithms.base_algorithm import A2CAlgorithm from assume.reinforcement_learning.neural_network_architecture import ( ActorPPO, - CriticPPO + CriticPPO, ) -from assume.reinforcement_learning.rollout_buffer import RolloutBuffer logger = logging.getLogger(__name__) -class PPO(RLAlgorithm): + +class PPO(A2CAlgorithm): """ Proximal Policy Optimization (PPO) Algorithm. """ def __init__( - self, + self, learning_role, - clip_range = 0.1, # Epsilon clipping constant preventing the policy from changing too much in a single update. - clip_range_vf = 0.1, # preventing the value function from changing too much from previous estimates - n_epochs = 30, # sample efficiency - entropy_coef = 0.02, # encourages exploration by rewarding "randomness" - vf_coef = 1.0, # balances the importance of training the Critic and training the Actor - max_grad_norm = 0.5, # Gradient clipping + clip_range=0.1, # Epsilon clipping constant preventing the policy from changing too much in a single update. 
+ clip_range_vf=0.1, # preventing the value function from changing too much from previous estimates + n_epochs=30, # sample efficiency + entropy_coef=0.02, # encourages exploration by rewarding "randomness" + vf_coef=1.0, # balances the importance of training the Critic and training the Actor + max_grad_norm=0.5, # Gradient clipping ): """Initialize PPO algorithm.""" super().__init__(learning_role) config = self.learning_config - - self.clip_range = clip_range if clip_range is not None else getattr(config, 'ppo_clip_range', 0.2) - self.clip_range_vf = clip_range_vf if clip_range_vf is not None else getattr(config, 'ppo_clip_range_vf', None) - self.n_epochs = n_epochs if n_epochs is not None else getattr(config, 'ppo_n_epochs', 10) - self.entropy_coef = entropy_coef if entropy_coef is not None else getattr(config, 'ppo_entropy_coef', 0.01) - self.vf_coef = vf_coef if vf_coef is not None else getattr(config, 'ppo_vf_coef', 0.5) + + self.clip_range = ( + clip_range + if clip_range is not None + else getattr(config, "ppo_clip_range", 0.2) + ) + self.clip_range_vf = ( + clip_range_vf + if clip_range_vf is not None + else getattr(config, "ppo_clip_range_vf", None) + ) + self.n_epochs = ( + n_epochs if n_epochs is not None else getattr(config, "ppo_n_epochs", 10) + ) + self.entropy_coef = ( + entropy_coef + if entropy_coef is not None + else getattr(config, "ppo_entropy_coef", 0.01) + ) + self.vf_coef = ( + vf_coef if vf_coef is not None else getattr(config, "ppo_vf_coef", 0.5) + ) self.max_grad_norm = max_grad_norm + self.actor_architecture_class = ActorPPO + self.critic_architecture_class = CriticPPO + # Update counter self.n_updates = 0 - def save_params(self, directory: str) -> None: - """Save all actor and critic network parameters to disk.""" - self.save_critic_params(directory=f"{directory}/critics") - self.save_actor_params(directory=f"{directory}/actors") - - def save_critic_params(self, directory: str) -> None: - """Save value network parameters for all 
agents.""" - os.makedirs(directory, exist_ok=True) - - for u_id, strategy in self.learning_role.rl_strats.items(): - obj = { - "critic": strategy.critic.state_dict(), - "critic_optimizer": strategy.critic.optimizer.state_dict(), - } - path = f"{directory}/critic_{u_id}.pt" - th.save(obj, path) - - # Save unit ID order - u_id_list = [str(u) for u in self.learning_role.rl_strats.keys()] - mapping = {"u_id_order": u_id_list} - map_path = os.path.join(directory, "u_id_order.json") - with open(map_path, "w") as f: - json.dump(mapping, f, indent=2) - - def save_actor_params(self, directory: str) -> None: - """Save actor network parameters for all agents.""" - os.makedirs(directory, exist_ok=True) - - for u_id, strategy in self.learning_role.rl_strats.items(): - obj = { - "actor": strategy.actor.state_dict(), - "actor_optimizer": strategy.actor.optimizer.state_dict(), - } - path = f"{directory}/actor_{u_id}.pt" - th.save(obj, path) - - def load_params(self, directory: str) -> None: - """Load all actor and critic parameters from disk.""" - self.load_critic_params(directory) - self.load_actor_params(directory) + def get_actions(self, next_observation): + """ + Determines actions based on the current observation, applying noise for exploration if in learning mode. + + Args + ---- + next_observation : torch.Tensor + Observation data influencing bid price and direction. + + Returns + ------- + torch.Tensor + Actions that include bid price and direction. + torch.Tensor + Noise component which is already added to actions for exploration, if applicable. + + Notes + ----- + In learning mode, actions incorporate noise for exploration. Initial exploration relies + solely on noise to cover the action space broadly. + For PPO, we also store log_prob and value estimates for later use. 
+ """ - def load_critic_params(self, directory: str) -> None: - """Load critic parameters.""" - logger.info("Loading PPO critic parameters...") + # distinction whether we are in learning mode or not to handle exploration realised with noise + if self.learning_mode and not self.evaluation_mode: + # if we are in learning mode the first x episodes we want to explore the entire action space + # to get a good initial experience, in the area around the costs of the agent + if self.collect_initial_experience_mode: + # define current action as solely noise + noise = th.normal( + mean=0.0, + std=self.exploration_noise_std, + size=(self.act_dim,), + dtype=self.float_type, + device=self.device, + ) - if not os.path.exists(directory): - logger.warning( - "Specified directory does not exist. Using randomly initialized critics." - ) - return + # ============================================================================= + # 2.1 Get Actions and handle exploration + # ============================================================================= + # only use noise as the action to enforce exploration + curr_action = noise - for u_id, strategy in self.learning_role.rl_strats.items(): - critic_path = os.path.join(directory, "critics", f"critic_{u_id}.pt") - if not os.path.exists(critic_path): - logger.warning(f"No saved critic for {u_id}; skipping.") - continue - - try: - critic_params = th.load(critic_path, weights_only=True) - strategy.critic.load_state_dict(critic_params["critic"]) - strategy.critic.optimizer.load_state_dict(critic_params["critic_optimizer"]) - except Exception as e: - logger.warning(f"Failed to load critic for {u_id}: {e}") - - def load_actor_params(self, directory: str) -> None: - """Load actor network parameters from disk.""" - logger.info("Loading PPO actor parameters...") - - if not os.path.exists(directory): - logger.warning( - "Specified directory for actors does not exist! " - "Starting with randomly initialized values!" 
- ) - return + self._last_log_prob = th.tensor(0.0, device=self.device) + self._last_value = th.tensor(0.0, device=self.device) - for u_id, strategy in self.learning_role.rl_strats.items(): - try: - actor_params = self.load_obj( - directory=f"{directory}/actors/actor_{str(u_id)}.pt" + else: + # PPO: use get_action_and_log_prob for proper stochastic sampling + curr_action, log_prob = self.actor.get_action_and_log_prob( + next_observation.unsqueeze(0) ) - - strategy.actor.load_state_dict(actor_params["actor"]) - strategy.actor.optimizer.load_state_dict(actor_params["actor_optimizer"]) - strategy.actor.loaded = True - - except Exception: - logger.warning(f"No actor values loaded for agent {u_id}") - - def initialize_policy(self, actors_and_critics: dict = None) -> None: - """ - Initialize actor and critic networks for all agents. - - Args: - actors_and_critics: Optional pre-existing networks to assign - """ - if actors_and_critics is None: - self.check_strategy_dimensions() - self.create_actors() - self.create_critics() - else: - for u_id, strategy in self.learning_role.rl_strats.items(): - strategy.actor = actors_and_critics["actors"][u_id] - strategy.critic = actors_and_critics["critics"][u_id] - - self.obs_dim = actors_and_critics["obs_dim"] - self.act_dim = actors_and_critics["act_dim"] - self.unique_obs_dim = actors_and_critics["unique_obs_dim"] - - def check_strategy_dimensions(self) -> None: - """Validate that all agents have consistent dimensions.""" - foresight_list = [] - obs_dim_list = [] - act_dim_list = [] - unique_obs_dim_list = [] - num_timeseries_obs_dim_list = [] - - for strategy in self.learning_role.rl_strats.values(): - foresight_list.append(strategy.foresight) - obs_dim_list.append(strategy.obs_dim) - act_dim_list.append(strategy.act_dim) - unique_obs_dim_list.append(strategy.unique_obs_dim) - num_timeseries_obs_dim_list.append(strategy.num_timeseries_obs_dim) - - if len(set(foresight_list)) > 1: - raise ValueError( - f"All foresight values must 
be the same for all RL agents. THe defined learning strategies have the following foresight values: {foresight_list}" - ) - else: - self.foresight = foresight_list[0] - - if len(set(obs_dim_list)) > 1: - raise ValueError( - f"All observation dimensions must be the same. Got: {obs_dim_list}" - ) - else: - self.obs_dim = obs_dim_list[0] - - if len(set(act_dim_list)) > 1: - raise ValueError( - f"All action dimensions must be the same. Got: {act_dim_list}" - ) - else: - self.act_dim = act_dim_list[0] + curr_action = curr_action.squeeze(0).detach() + self._last_log_prob = log_prob.squeeze(0).detach() + + # Get value estimate from critic (if available) + if ( + hasattr(self.learning_role, "critics") + and self.unit_id in self.learning_role.critics + ): + critic = self.learning_role.critics[self.unit_id] + self._last_value = ( + critic(next_observation.unsqueeze(0)).squeeze().detach() + ) + else: + self._last_value = th.tensor(0.0, device=self.device) - if len(set(unique_obs_dim_list)) > 1: - raise ValueError( - f"All unique_obs_dim values must be the same. Got: {unique_obs_dim_list}" - ) - else: - self.unique_obs_dim = unique_obs_dim_list[0] + # PPO uses stochastic policy, no external noise needed + noise = th.zeros_like(curr_action, dtype=self.float_type) - if len(set(num_timeseries_obs_dim_list)) > 1: - raise ValueError( - f"All num_timeseries_obs_dim values must be the same. 
" - f"Got: {num_timeseries_obs_dim_list}" - ) + # make sure that noise adding does not exceed the actual output of the NN as it pushes results in a direction that actor can't even reach + curr_action = th.clamp( + curr_action, self.actor.min_output, self.actor.max_output + ) else: - self.num_timeseries_obs_dim = num_timeseries_obs_dim_list[0] - - def create_actors(self) -> None: - """Create stochastic actor networks for all agents.""" - for strategy in self.learning_role.rl_strats.values(): - # Create PPO Actor - strategy.actor = ActorPPO( - obs_dim=self.obs_dim, - act_dim=self.act_dim, - float_type=self.float_type, - ).to(self.device) - - # Create Optimizer - strategy.actor.optimizer = AdamW( - strategy.actor.parameters(), - lr=self.learning_role.calc_lr_from_progress(1), - ) + # if we are not in learning mode we just use the actor neural net to get the action without adding noise - strategy.actor.loaded = False + # For PPO evaluation, use deterministic action (mean) + curr_action = self.actor(next_observation, deterministic=True).detach() - def create_critics(self) -> None: - """ - Create value networks for all agents. 
- """ - n_agents = len(self.learning_role.rl_strats) - - for strategy in self.learning_role.rl_strats.values(): - # Create value network - strategy.critic = CriticPPO( - n_agents=n_agents, - obs_dim=self.obs_dim, - unique_obs_dim=self.unique_obs_dim, - float_type=self.float_type, - ).to(self.device) - - # Create optimizer - strategy.critic.optimizer = AdamW( - strategy.critic.parameters(), - lr=self.learning_role.calc_lr_from_progress(1), - ) - - def extract_policy(self) -> dict: - """Extract all actor and critic networks into a dictionary.""" - actors = {} - critics = {} + # noise is an tensor with zeros, because we are not in learning mode + noise = th.zeros_like(curr_action, dtype=self.float_type) - for u_id, strategy in self.learning_role.rl_strats.items(): - actors[u_id] = strategy.actor - critics[u_id] = strategy.critic - - return { - "actors": actors, - "critics": critics, - "obs_dim": self.obs_dim, - "act_dim": self.act_dim, - "unique_obs_dim": self.unique_obs_dim, - } + return curr_action, noise def update_policy(self) -> None: """ @@ -276,7 +160,7 @@ def update_policy(self) -> None: # Get rollout buffer rollout_buffer = self.learning_role.rollout_buffer - + # Check if rollout buffer has data if rollout_buffer is None or rollout_buffer.pos == 0: logger.debug("Rollout buffer is empty, skipping policy update") @@ -307,22 +191,26 @@ def update_policy(self) -> None: dones = np.zeros(n_rl_agents) # Get the buffer size to index into the last stored state - buffer_size = rollout_buffer.pos if not rollout_buffer.full else rollout_buffer.buffer_size + buffer_size = ( + rollout_buffer.pos + if not rollout_buffer.full + else rollout_buffer.buffer_size + ) if buffer_size > 0: # Use the LAST observation as the bootstrap for the REST of the buffer. # We sacrifice the last step (pos-1) to serve as s_{t+1} for the step before it. # This ensures V(s_{t+1}) is calculating using the REAL next state, not self-referential. 
- + last_idx = buffer_size - 1 last_obs = rollout_buffer.observations[last_idx] last_dones = rollout_buffer.dones[last_idx] - + # Reduce buffer size by 1 so as to not train on the bootstrap step rollout_buffer.pos -= 1 if rollout_buffer.full: - rollout_buffer.full = False # If it was full, it's not anymore - + rollout_buffer.full = False # If it was full, it's not anymore + # Prepare unique observations for centralized critic last_unique_obs = last_obs[:, self.obs_dim - self.unique_obs_dim :] @@ -343,7 +231,9 @@ def update_policy(self) -> None: dtype=self.float_type, ) # Get value estimate from critic - last_values[i] = strategy.critic(obs_tensor).cpu().numpy().flatten()[0] + last_values[i] = ( + strategy.critic(obs_tensor).cpu().numpy().flatten()[0] + ) dones[i] = last_dones[i] # Compute advantages and returns @@ -353,12 +243,12 @@ def update_policy(self) -> None: all_actor_losses = [] all_critic_losses = [] all_entropy_losses = [] - + # Initialize unit_params for gradient logging # Use an empty list that will be dynamically extended unit_params = [] step_count = 0 - + # Helper to create a new step entry def create_step_entry(): return { @@ -387,10 +277,13 @@ def create_step_entry(): critic = strategy.critic obs_i = batch.observations[:, i, :] - + # Construct centralized state other_unique_obs = th.cat( - (unique_obs_from_others[:, :i], unique_obs_from_others[:, i + 1 :]), + ( + unique_obs_from_others[:, :i], + unique_obs_from_others[:, i + 1 :], + ), dim=1, ) all_states = th.cat( @@ -411,10 +304,7 @@ def create_step_entry(): advantages_i.std() + 1e-8 ) - log_probs, entropy = actor.evaluate_actions( - obs_i, - actions_i - ) + log_probs, entropy = actor.evaluate_actions(obs_i, actions_i) values = critic(all_states).flatten() # Importance sampling ratio @@ -435,7 +325,7 @@ def create_step_entry(): values_clipped = old_values_i + th.clamp( values - old_values_i, -self.clip_range_vf, - self.clip_range_vf + self.clip_range_vf, ) value_loss_1 = F.mse_loss(values, 
returns_i) value_loss_2 = F.mse_loss(values_clipped, returns_i) @@ -453,14 +343,22 @@ def create_step_entry(): # Calculate gradient norms BEFORE clipping actor_params = list(actor.parameters()) critic_params = list(critic.parameters()) - + actor_max_grad_norm = max( - (p.grad.norm().item() for p in actor_params if p.grad is not None), - default=0.0 + ( + p.grad.norm().item() + for p in actor_params + if p.grad is not None + ), + default=0.0, ) critic_max_grad_norm = max( - (p.grad.norm().item() for p in critic_params if p.grad is not None), - default=0.0 + ( + p.grad.norm().item() + for p in critic_params + if p.grad is not None + ), + default=0.0, ) # Gradient clipping @@ -478,19 +376,39 @@ def create_step_entry(): all_actor_losses.append(policy_loss.item()) all_critic_losses.append(value_loss.item()) all_entropy_losses.append(entropy_loss.item()) - + # Ensure we have an entry for this step if step_count >= len(unit_params): unit_params.append(create_step_entry()) - + # Store per-unit gradient params for this step - unit_params[step_count][strategy.unit_id]["actor_loss"] = policy_loss.item() - unit_params[step_count][strategy.unit_id]["critic_loss"] = value_loss.item() - unit_params[step_count][strategy.unit_id]["actor_total_grad_norm"] = actor_total_grad_norm.item() if isinstance(actor_total_grad_norm, th.Tensor) else actor_total_grad_norm - unit_params[step_count][strategy.unit_id]["actor_max_grad_norm"] = actor_max_grad_norm - unit_params[step_count][strategy.unit_id]["critic_total_grad_norm"] = critic_total_grad_norm.item() if isinstance(critic_total_grad_norm, th.Tensor) else critic_total_grad_norm - unit_params[step_count][strategy.unit_id]["critic_max_grad_norm"] = critic_max_grad_norm - + unit_params[step_count][strategy.unit_id]["actor_loss"] = ( + policy_loss.item() + ) + unit_params[step_count][strategy.unit_id]["critic_loss"] = ( + value_loss.item() + ) + unit_params[step_count][strategy.unit_id][ + "actor_total_grad_norm" + ] = ( + 
actor_total_grad_norm.item() + if isinstance(actor_total_grad_norm, th.Tensor) + else actor_total_grad_norm + ) + unit_params[step_count][strategy.unit_id]["actor_max_grad_norm"] = ( + actor_max_grad_norm + ) + unit_params[step_count][strategy.unit_id][ + "critic_total_grad_norm" + ] = ( + critic_total_grad_norm.item() + if isinstance(critic_total_grad_norm, th.Tensor) + else critic_total_grad_norm + ) + unit_params[step_count][strategy.unit_id][ + "critic_max_grad_norm" + ] = critic_max_grad_norm + step_count += 1 self.n_updates += 1 @@ -523,4 +441,4 @@ def create_step_entry(): logger.debug( f"PPO update complete. Actor loss: {np.mean(all_actor_losses):.4f}, " f"Value loss: {np.mean(all_critic_losses):.4f}" - ) \ No newline at end of file + ) diff --git a/assume/reinforcement_learning/algorithms/matd3.py b/assume/reinforcement_learning/algorithms/matd3.py index 813f57cb5..2d5996387 100644 --- a/assume/reinforcement_learning/algorithms/matd3.py +++ b/assume/reinforcement_learning/algorithms/matd3.py @@ -2,25 +2,21 @@ # # SPDX-License-Identifier: AGPL-3.0-or-later -import json import logging -import os import torch as th from torch.nn import functional as F -from torch.optim import AdamW -from assume.reinforcement_learning.algorithms.base_algorithm import RLAlgorithm +from assume.reinforcement_learning.algorithms.base_algorithm import A2CAlgorithm from assume.reinforcement_learning.learning_utils import ( polyak_update, - transfer_weights, ) from assume.reinforcement_learning.neural_network_architecture import CriticTD3 logger = logging.getLogger(__name__) -class TD3(RLAlgorithm): +class TD3(A2CAlgorithm): """ Twin Delayed Deep Deterministic Policy Gradients (TD3). Addressing Function Approximation Error in Actor-Critic Methods. 
@@ -38,405 +34,68 @@ def __init__(self, learning_role): self.n_updates = 0 self.grad_clip_norm = 1.0 - def save_params(self, directory): - """ - This method saves the parameters of both the actor and critic networks associated with the learning role. It organizes the - saved parameters into separate directories for critics and actors within the specified base directory. - - Args: - directory (str): The base directory for saving the parameters. - """ - self.save_critic_params(directory=f"{directory}/critics") - self.save_actor_params(directory=f"{directory}/actors") - - def save_critic_params(self, directory): - """ - Save the parameters of critic networks. - - This method saves the parameters of the critic networks, including the critic's state_dict, critic_target's state_dict, - and the critic's optimizer state_dict. It organizes the saved parameters into a directory structure specific to the critic - associated with each learning strategy. - - Args: - directory (str): The base directory for saving the parameters. - """ - os.makedirs(directory, exist_ok=True) - for u_id, strategy in self.learning_role.rl_strats.items(): - obj = { - "critic": strategy.critics.state_dict(), - "critic_target": strategy.target_critics.state_dict(), - "critic_optimizer": strategy.critics.optimizer.state_dict(), - } - path = f"{directory}/critic_{u_id}.pt" - th.save(obj, path) - - # record the exact order of u_ids and save it with critics to ensure that the same order is used when loading the parameters - u_id_list = [str(u) for u in self.learning_role.rl_strats.keys()] - mapping = {"u_id_order": u_id_list} - map_path = os.path.join(directory, "u_id_order.json") - with open(map_path, "w") as f: - json.dump(mapping, f, indent=2) - - def save_actor_params(self, directory): - """ - Save the parameters of actor networks. 
+ self.actor_architecture_class = None # define actor class here with kwargs pop or something similar so that parent class storage etc works properly + self.critic_architecture_class = CriticTD3 - This method saves the parameters of the actor networks, including the actor's state_dict, actor_target's state_dict, and - the actor's optimizer state_dict. It organizes the saved parameters into a directory structure specific to the actor - associated with each learning strategy. - - Args: - directory (str): The base directory for saving the parameters. - """ - os.makedirs(directory, exist_ok=True) - for u_id, strategy in self.learning_role.rl_strats.items(): - obj = { - "actor": strategy.actor.state_dict(), - "actor_target": strategy.actor_target.state_dict(), - "actor_optimizer": strategy.actor.optimizer.state_dict(), - } - path = f"{directory}/actor_{u_id}.pt" - th.save(obj, path) - - def load_params(self, directory: str) -> None: - """ - Load the parameters of both actor and critic networks. - - This method loads the parameters of both the actor and critic networks associated with the learning role from the specified - directory. It uses the `load_critic_params` and `load_actor_params` methods to load the respective parameters. - - Args: - directory (str): The directory from which the parameters should be loaded. - """ - self.load_critic_params(directory) - self.load_actor_params(directory) - - def load_critic_params(self, directory: str) -> None: + def get_actions(self, next_observation): """ - Load critic, target_critic, and optimizer states for each agent strategy. - If agent count differs between saved and current model, performs weight transfer for both networks. - Args: - directory (str): The directory from which the parameters should be loaded. + Determines actions based on the current observation, applying noise for exploration if in learning mode. + + Args + ---- + next_observation : torch.Tensor + Observation data influencing bid price and direction. 
+ + Returns + ------- + torch.Tensor + Actions that include bid price and direction. + torch.Tensor + Noise component which is already added to actions for exploration, if applicable. + + Notes + ----- + In learning mode, actions incorporate noise for exploration. Initial exploration relies + solely on noise to cover the action space broadly. + For PPO, we also store log_prob and value estimates for later use. """ - logger.info("Loading critic parameters...") - if not os.path.exists(directory): - logger.warning( - "Specified directory does not exist. Using randomly initialized critics." - ) - return - - map_path = os.path.join(directory, "critics", "u_id_order.json") - if os.path.exists(map_path): - # read the saved order of u_ids from critics save directory - with open(map_path) as f: - loaded_id_order = json.load(f).get("u_id_order", []) - else: - logger.warning("No u_id_order.json: assuming same order as current.") - loaded_id_order = [str(u) for u in self.learning_role.rl_strats.keys()] - - new_id_order = [str(u) for u in self.learning_role.rl_strats.keys()] - direct_load = loaded_id_order == new_id_order - - if direct_load: - logger.info("Agents order unchanged. Loading critic weights directly.") - else: - logger.info( - f"Agents length and/or order mismatch: n_old={len(loaded_id_order)}, n_new={len(new_id_order)}. Transferring weights for critics and target critics." - ) - - for u_id, strategy in self.learning_role.rl_strats.items(): - critic_path = os.path.join(directory, "critics", f"critic_{u_id}.pt") - if not os.path.exists(critic_path): - logger.warning(f"No saved critic for {u_id}; skipping.") - continue - - try: - critic_params = th.load(critic_path, weights_only=True) - for key in ("critic", "critic_target", "critic_optimizer"): - if key not in critic_params: - logger.warning( - f"Missing {key} in critic params for {u_id}; skipping." 
- ) - continue - - if direct_load: - strategy.critics.load_state_dict(critic_params["critic"]) - strategy.target_critics.load_state_dict( - critic_params["critic_target"] - ) - strategy.critics.optimizer.load_state_dict( - critic_params["critic_optimizer"] - ) - logger.debug(f"Loaded critic for {u_id} directly.") - else: - critic_weights = transfer_weights( - model=strategy.critics, - loaded_state=critic_params["critic"], - loaded_id_order=loaded_id_order, - new_id_order=new_id_order, - obs_base=strategy.obs_dim, - act_dim=strategy.act_dim, - unique_obs=strategy.unique_obs_dim, - ) - target_critic_weights = transfer_weights( - model=strategy.target_critics, - loaded_state=critic_params["critic_target"], - loaded_id_order=loaded_id_order, - new_id_order=new_id_order, - obs_base=strategy.obs_dim, - act_dim=strategy.act_dim, - unique_obs=strategy.unique_obs_dim, - ) - - if critic_weights is None or target_critic_weights is None: - logger.warning( - f"Critic weights transfer failed for {u_id}; skipping." - ) - continue - - strategy.critics.load_state_dict(critic_weights) - strategy.target_critics.load_state_dict(target_critic_weights) - logger.debug(f"Critic weights transferred for {u_id}.") - - except Exception as e: - logger.warning(f"Failed to load critic for {u_id}: {e}") - - def load_actor_params(self, directory: str) -> None: - """ - Load the parameters of actor networks from a specified directory. - - This method loads the parameters of actor networks, including the actor's state_dict, actor_target's state_dict, and - the actor's optimizer state_dict, from the specified directory. It iterates through the learning strategies associated - with the learning role, loads the respective parameters, and updates the actor and target actor networks accordingly. - - Args: - directory (str): The directory from which the parameters should be loaded. 
- """ - logger.info("Loading actor parameters...") - if not os.path.exists(directory): - logger.warning( - "Specified directory for loading the actors does not exist! Starting with randomly initialized values!" - ) - return - - for u_id, strategy in self.learning_role.rl_strats.items(): - try: - actor_params = self.load_obj( - directory=f"{directory}/actors/actor_{str(u_id)}.pt" - ) - strategy.actor.load_state_dict(actor_params["actor"]) - strategy.actor_target.load_state_dict(actor_params["actor_target"]) - strategy.actor.optimizer.load_state_dict( - actor_params["actor_optimizer"] + # distinction whether we are in learning mode or not to handle exploration realised with noise + if self.learning_mode and not self.evaluation_mode: + # if we are in learning mode the first x episodes we want to explore the entire action space + # to get a good initial experience, in the area around the costs of the agent + if self.collect_initial_experience_mode: + # define current action as solely noise + noise = th.normal( + mean=0.0, + std=self.exploration_noise_std, + size=(self.act_dim,), + dtype=self.float_type, + device=self.device, ) - # add a tag to the strategy to indicate that the actor was loaded - strategy.actor.loaded = True - except Exception: - logger.warning(f"No actor values loaded for agent {u_id}") - - def initialize_policy(self, actors_and_critics: dict = None) -> None: - """ - Create actor and critic networks for reinforcement learning. - - If `actors_and_critics` is None, this method creates new actor and critic networks. - If `actors_and_critics` is provided, it assigns existing networks to the respective attributes. - - Args: - actors_and_critics (dict): The actor and critic networks to be assigned. 
- - """ - if actors_and_critics is None: - self.check_strategy_dimensions() - self.create_actors() - self.create_critics() - - else: - for u_id, strategy in self.learning_role.rl_strats.items(): - strategy.actor = actors_and_critics["actors"][u_id] - strategy.actor_target = actors_and_critics["actor_targets"][u_id] - - strategy.critics = actors_and_critics["critics"][u_id] - strategy.target_critics = actors_and_critics["target_critics"][u_id] - - self.obs_dim = actors_and_critics["obs_dim"] - self.act_dim = actors_and_critics["act_dim"] - self.unique_obs_dim = actors_and_critics["unique_obs_dim"] - - def check_strategy_dimensions(self) -> None: - """ - Iterate over all learning strategies and check if the dimensions of observations and actions are the same. - Also check if the unique observation dimensions are the same. If not, raise a ValueError. - This is important for the TD3 algorithm, as it uses a centralized critic that requires consistent dimensions across all agents. - """ - foresight_list = [] - obs_dim_list = [] - act_dim_list = [] - unique_obs_dim_list = [] - num_timeseries_obs_dim_list = [] - - for strategy in self.learning_role.rl_strats.values(): - foresight_list.append(strategy.foresight) - obs_dim_list.append(strategy.obs_dim) - act_dim_list.append(strategy.act_dim) - unique_obs_dim_list.append(strategy.unique_obs_dim) - num_timeseries_obs_dim_list.append(strategy.num_timeseries_obs_dim) - - if len(set(foresight_list)) > 1: - raise ValueError( - f"All foresight values must be the same for all RL agents. The defined learning strategies have the following foresight values: {foresight_list}" - ) - else: - self.foresight = foresight_list[0] - - if len(set(act_dim_list)) > 1: - raise ValueError( - f"All action dimensions must be the same for all RL agents. 
The defined learning strategies have the following action dimensions: {act_dim_list}" - ) - else: - self.act_dim = act_dim_list[0] - - if len(set(unique_obs_dim_list)) > 1: - raise ValueError( - f"All unique_obs_dim values must be the same for all RL agents. The defined learning strategies have the following unique_obs_dim values: {unique_obs_dim_list}" - ) - else: - self.unique_obs_dim = unique_obs_dim_list[0] - - if len(set(num_timeseries_obs_dim_list)) > 1: - raise ValueError( - f"All num_timeseries_obs_dim values must be the same for all RL agents. The defined learning strategies have the following num_timeseries_obs_dim values: {num_timeseries_obs_dim_list}" - ) - else: - self.num_timeseries_obs_dim = num_timeseries_obs_dim_list[0] + # ============================================================================= + # 2.1 Get Actions and handle exploration + # ============================================================================= + # only use noise as the action to enforce exploration + curr_action = noise + + else: + # and add noise to the action + curr_action = self.actor(next_observation).detach() + noise = self.action_noise.noise( + device=self.device, dtype=self.float_type + ) + curr_action += noise - # Check last, as other cases should fail before! - if len(set(obs_dim_list)) > 1: - raise ValueError( - f"All observation dimensions must be the same for all RL agents. The defined learning strategies have the following observation dimensions: {obs_dim_list}" - ) + # make sure that noise adding does not exceed the actual output of the NN as it pushes results in a direction that actor can't even reach + curr_action = th.clamp( + curr_action, self.actor.min_output, self.actor.max_output + ) else: - self.obs_dim = obs_dim_list[0] - - def create_actors(self) -> None: - """ - Create actor networks for reinforcement learning for each unit strategy. - - This method initializes actor networks and their corresponding target networks for each unit strategy. 
- The actors are designed to map observations to action probabilities in a reinforcement learning setting. - - The created actor networks are associated with each unit strategy and stored as attributes. - - Note: - The observation dimension need to be the same, due to the centralized critic that all actors share. - If you have units with different observation dimensions. They need to have different critics and hence learning roles. - - """ - - for strategy in self.learning_role.rl_strats.values(): - strategy.actor = self.actor_architecture_class( - obs_dim=self.obs_dim, - act_dim=self.act_dim, - float_type=self.float_type, - unique_obs_dim=self.unique_obs_dim, - num_timeseries_obs_dim=self.num_timeseries_obs_dim, - ).to(self.device) - - strategy.actor_target = self.actor_architecture_class( - obs_dim=self.obs_dim, - act_dim=self.act_dim, - float_type=self.float_type, - unique_obs_dim=self.unique_obs_dim, - num_timeseries_obs_dim=self.num_timeseries_obs_dim, - ).to(self.device) - - strategy.actor_target.load_state_dict(strategy.actor.state_dict()) - strategy.actor_target.train(mode=False) - - strategy.actor.optimizer = AdamW( - strategy.actor.parameters(), - lr=self.learning_role.calc_lr_from_progress( - 1 - ), # 1=100% of simulation remaining, uses learning_rate from config as starting point - ) - - strategy.actor.loaded = False - - def create_critics(self) -> None: - """ - Create critic networks for reinforcement learning. - - This method initializes critic networks for each agent in the reinforcement learning setup. - - Note: - The observation dimension need to be the same, due to the centralized criic that all actors share. - If you have units with different observation dimensions. They need to have different critics and hence learning roles. 
- """ - n_agents = len(self.learning_role.rl_strats) - - for strategy in self.learning_role.rl_strats.values(): - strategy.critics = CriticTD3( - n_agents=n_agents, - obs_dim=self.obs_dim, - act_dim=self.act_dim, - unique_obs_dim=self.unique_obs_dim, - float_type=self.float_type, - ).to(self.device) - - strategy.target_critics = CriticTD3( - n_agents=n_agents, - obs_dim=self.obs_dim, - act_dim=self.act_dim, - unique_obs_dim=self.unique_obs_dim, - float_type=self.float_type, - ).to(self.device) - - strategy.target_critics.load_state_dict(strategy.critics.state_dict()) - strategy.target_critics.train(mode=False) - - strategy.critics.optimizer = AdamW( - strategy.critics.parameters(), - lr=self.learning_role.calc_lr_from_progress( - 1 - ), # 1 = 100% of simulation remaining, uses learning_rate from config as starting point - ) - - def extract_policy(self) -> dict: - """ - Extract actor and critic networks. - - This method extracts the actor and critic networks associated with each learning strategy and organizes them into a - dictionary structure. The extracted networks include actors, actor_targets, critics, and target_critics. The resulting - dictionary is typically used for saving and sharing these networks. - - Returns: - dict: The extracted actor and critic networks. 
- """ - actors = {} - actor_targets = {} - - critics = {} - target_critics = {} - - for u_id, strategy in self.learning_role.rl_strats.items(): - actors[u_id] = strategy.actor - actor_targets[u_id] = strategy.actor_target - - critics[u_id] = strategy.critics - target_critics[u_id] = strategy.target_critics - - actors_and_critics = { - "actors": actors, - "actor_targets": actor_targets, - "critics": critics, - "target_critics": target_critics, - "obs_dim": self.obs_dim, - "act_dim": self.act_dim, - "unique_obs_dim": self.unique_obs_dim, - } - - return actors_and_critics + curr_action = self.actor(next_observation).detach() + # noise is an tensor with zeros, because we are not in learning mode + noise = th.zeros_like(curr_action, dtype=self.float_type) def update_policy(self): """ diff --git a/assume/reinforcement_learning/learning_role.py b/assume/reinforcement_learning/learning_role.py index 19e533468..1e27ebd95 100644 --- a/assume/reinforcement_learning/learning_role.py +++ b/assume/reinforcement_learning/learning_role.py @@ -19,13 +19,10 @@ timestamp2datetime, ) from assume.reinforcement_learning.algorithms.base_algorithm import RLAlgorithm -from assume.reinforcement_learning.algorithms.matd3 import TD3 from assume.reinforcement_learning.algorithms.maddpg import DDPG from assume.reinforcement_learning.algorithms.mappo import PPO -from assume.reinforcement_learning.buffer import ( - ReplayBuffer, - RolloutBuffer -) +from assume.reinforcement_learning.algorithms.matd3 import TD3 +from assume.reinforcement_learning.buffer import ReplayBuffer, RolloutBuffer from assume.reinforcement_learning.learning_utils import ( linear_schedule_func, transform_buffer_data, @@ -58,7 +55,7 @@ def __init__( # how many learning roles do exist and how are they named self.buffer: ReplayBuffer = None - self.rollout_buffer: RolloutBuffer = None # For on-policy algorithms (PPO) + self.rollout_buffer: RolloutBuffer = None # For on-policy algorithms (PPO) self.episodes_done = 0 
self.rl_strats: dict[int, LearningStrategy] = {} self.learning_config = learning_config @@ -276,7 +273,7 @@ async def store_to_buffer_and_update(self) -> None: "profit": {t: current_profits[t] for t in timestamps_to_process}, "values": {t: current_values[t] for t in timestamps_to_process}, "log_probs": {t: current_log_probs[t] for t in timestamps_to_process}, - "dones": {t: current_dones[t] for t in timestamps_to_process} + "dones": {t: current_dones[t] for t in timestamps_to_process}, } # write data to output agent @@ -315,50 +312,32 @@ async def _store_to_buffer_and_update_sync(self, cache, device) -> None: # Add each transition to the rollout buffer for timestamp in sorted(cache["obs"].keys()): obs_data = transform_buffer_data( - { - timestamp: cache["obs"][timestamp] - }, - device + {timestamp: cache["obs"][timestamp]}, device ) actions_data = transform_buffer_data( - { - timestamp: cache["actions"][timestamp] - }, - device + {timestamp: cache["actions"][timestamp]}, device ) rewards_data = transform_buffer_data( - { - timestamp: cache["rewards"][timestamp] - }, - device + {timestamp: cache["rewards"][timestamp]}, device ) - + if cache["values"].get(timestamp): values_data = transform_buffer_data( - { - timestamp: cache["values"][timestamp] - }, - device + {timestamp: cache["values"][timestamp]}, device ) else: values_data = np.zeros(len(self.rl_strats)) - + if cache["log_probs"].get(timestamp): log_probs_data = transform_buffer_data( - { - timestamp: cache["log_probs"][timestamp] - }, - device + {timestamp: cache["log_probs"][timestamp]}, device ) else: log_probs_data = np.zeros(len(self.rl_strats)) if cache["dones"].get(timestamp): dones_data = transform_buffer_data( - { - timestamp: cache["dones"][timestamp] - }, - device + {timestamp: cache["dones"][timestamp]}, device ) else: dones_data = np.zeros(len(self.rl_strats)) @@ -372,20 +351,20 @@ def to_numpy(data): # Add to rollout buffer if self.rollout_buffer is not None: self.rollout_buffer.add( - obs = 
to_numpy(obs_data), - action = to_numpy(actions_data), - reward = to_numpy(rewards_data), - done = to_numpy(dones_data), - value = to_numpy(values_data), - log_prob = to_numpy(log_probs_data) + obs=to_numpy(obs_data), + action=to_numpy(actions_data), + reward=to_numpy(rewards_data), + done=to_numpy(dones_data), + value=to_numpy(values_data), + log_prob=to_numpy(log_probs_data), ) else: # for TD3/DDPG use off-policy ReplayBuffer # rewrite dict so that obs.shape == (n_rl_units, obs_dim) and sorted by keys and store in buffer self.buffer.add( - obs = transform_buffer_data(cache["obs"], device), - actions = transform_buffer_data(cache["actions"], device), - reward = transform_buffer_data(cache["rewards"], device), + obs=transform_buffer_data(cache["obs"], device), + actions=transform_buffer_data(cache["actions"], device), + reward=transform_buffer_data(cache["rewards"], device), ) if ( @@ -405,7 +384,7 @@ def add_observation_to_cache(self, unit_id, start, observation) -> None: """ self.all_obs[start][unit_id].append(observation) - def add_actions_to_cache(self, unit_id, start, action, noise) -> None: + def add_actions_to_cache(self, unit_id, start, action, extra_info) -> None: """ Add the action and noise to the cache dict, per unit_id. 
@@ -424,7 +403,15 @@ def add_actions_to_cache(self, unit_id, start, action, noise) -> None:
             return
 
         self.all_actions[start][unit_id].append(action)
-        self.all_noises[start][unit_id].append(noise)
+
+        if isinstance(extra_info, th.Tensor) and extra_info.shape == action.shape:
+            self.all_noises[start][unit_id].append(extra_info)  # TD3/DDPG: exploration noise tensor
+        else:
+            self.all_log_probs[start][unit_id].append(
+                extra_info["log_probs"]
+            )  # PPO: dict with log_probs, value estimate, and done flag
+            self.all_values[start][unit_id].append(extra_info["value"])
+            self.all_dones[start][unit_id].append(float(extra_info["done"]))
 
     def add_reward_to_cache(self, unit_id, start, reward, regret, profit) -> None:
         """
@@ -439,27 +426,6 @@ def add_reward_to_cache(self, unit_id, start, reward, regret, profit) -> None:
         self.all_regrets[start][unit_id].append(regret)
         self.all_profits[start][unit_id].append(profit)
 
-    def add_ppo_data_to_cache(
-        self,
-        unit_id,
-        start,
-        value,
-        log_prob,
-        done=False
-    ) -> None:
-        """
-        Add PPO specific data to the cache dict, per unit_id.
-
-        Args:
-            unit_id (str): The id of the unit.
-            value (float): The value estimate V(s) from the critic.
-            log_prob (float): The log probability of the action.
-            done (bool): Whether a terminal state or not.
-        """
-        self.all_values[start][unit_id].append(value)
-        self.all_log_probs[start][unit_id].append(log_prob)
-        self.all_dones[start][unit_id].append(float(done))
-
     def load_inter_episodic_data(self, inter_episodic_data):
         """
         Load the inter-episodic data from the dict stored across simulation runs. 
From 4c76dc419d79bb24fc60148cc5146a140516c4fc Mon Sep 17 00:00:00 2001 From: kim-mskw Date: Wed, 14 Jan 2026 13:41:40 +0100 Subject: [PATCH 08/44] ad buffer and algo doku --- assume/strategies/learning_strategies.py | 142 ++++++----------------- docs/source/learning_algorithm.rst | 121 ++++++++++++++++--- 2 files changed, 141 insertions(+), 122 deletions(-) diff --git a/assume/strategies/learning_strategies.py b/assume/strategies/learning_strategies.py index c15c56031..0e4116c2f 100644 --- a/assume/strategies/learning_strategies.py +++ b/assume/strategies/learning_strategies.py @@ -242,96 +242,6 @@ def get_individual_observations( return np.array([]) - def get_actions(self, next_observation): - """ - Determines actions based on the current observation, applying noise for exploration if in learning mode. - - Args - ---- - next_observation : torch.Tensor - Observation data influencing bid price and direction. - - Returns - ------- - torch.Tensor - Actions that include bid price and direction. - torch.Tensor - Noise component which is already added to actions for exploration, if applicable. - - Notes - ----- - In learning mode, actions incorporate noise for exploration. Initial exploration relies - solely on noise to cover the action space broadly. - For PPO, we also store log_prob and value estimates for later use. 
- """ - - # distinction whether we are in learning mode or not to handle exploration realised with noise - if self.learning_mode and not self.evaluation_mode: - # if we are in learning mode the first x episodes we want to explore the entire action space - # to get a good initial experience, in the area around the costs of the agent - if self.collect_initial_experience_mode: - # define current action as solely noise - noise = th.normal( - mean=0.0, - std=self.exploration_noise_std, - size=(self.act_dim,), - dtype=self.float_type, - device=self.device, - ) - - # ============================================================================= - # 2.1 Get Actions and handle exploration - # ============================================================================= - # only use noise as the action to enforce exploration - curr_action = noise - - # For PPO, store dummy log_prob and value during initial exploration - if self.algorithm == "mappo": - self._last_log_prob = th.tensor(0.0, device=self.device) - self._last_value = th.tensor(0.0, device=self.device) - - else: - # Check if we're using PPO algorithm - if self.algorithm == "mappo": - # PPO: use get_action_and_log_prob for proper stochastic sampling - curr_action, log_prob = self.actor.get_action_and_log_prob(next_observation.unsqueeze(0)) - curr_action = curr_action.squeeze(0).detach() - self._last_log_prob = log_prob.squeeze(0).detach() - - # Get value estimate from critic (if available) - if hasattr(self.learning_role, 'critics') and self.unit_id in self.learning_role.critics: - critic = self.learning_role.critics[self.unit_id] - self._last_value = critic(next_observation.unsqueeze(0)).squeeze().detach() - else: - self._last_value = th.tensor(0.0, device=self.device) - - # PPO uses stochastic policy, no external noise needed - noise = th.zeros_like(curr_action, dtype=self.float_type) - else: - # TD3/DDPG: if we are not in the initial exploration phase we chose the action with the actor neural net - # and add noise 
to the action - curr_action = self.actor(next_observation).detach() - noise = self.action_noise.noise( - device=self.device, dtype=self.float_type - ) - curr_action += noise - - # make sure that noise adding does not exceed the actual output of the NN as it pushes results in a direction that actor can't even reach - curr_action = th.clamp( - curr_action, self.actor.min_output, self.actor.max_output - ) - else: - # if we are not in learning mode we just use the actor neural net to get the action without adding noise - if self.algorithm == "mappo": - # For PPO evaluation, use deterministic action (mean) - curr_action = self.actor(next_observation, deterministic=True).detach() - else: - curr_action = self.actor(next_observation).detach() - # noise is an tensor with zeros, because we are not in learning mode - noise = th.zeros_like(curr_action, dtype=self.float_type) - - return curr_action, noise - class EnergyLearningStrategy(TorchLearningStrategy, MinMaxStrategy): """ @@ -479,7 +389,9 @@ def calculate_bids( # ============================================================================= # 2. Get the Actions, based on the observations # ============================================================================= - actions, noise = self.get_actions(next_observation) + # Depending on the algorithm, we call specific function that passes obs through actor and generates actions + # extra_info is either noise (MATD3) or log_probs (PPO) + actions, extra_info = self.get_actions(self, next_observation) # ============================================================================= # 3. 
Transform Actions into bids @@ -519,15 +431,19 @@ def calculate_bids( ] if self.learning_mode: - self.learning_role.add_actions_to_cache(self.unit_id, start, actions, noise) + self.learning_role.add_actions_to_cache( + self.unit_id, start, actions, extra_info + ) # For PPO, also cache value estimates and log probabilities - if self.algorithm == "mappo" and hasattr(self, '_last_log_prob'): + if self.algorithm == "mappo" and hasattr(self, "_last_log_prob"): self.learning_role.add_ppo_data_to_cache( - self.unit_id, - start, - getattr(self, '_last_value', 0.0), - self._last_log_prob.item() if hasattr(self._last_log_prob, 'item') else self._last_log_prob, - done=False + self.unit_id, + start, + getattr(self, "_last_value", 0.0), + self._last_log_prob.item() + if hasattr(self._last_log_prob, "item") + else self._last_log_prob, + done=False, ) return bids @@ -808,7 +724,9 @@ def calculate_bids( # ============================================================================= # 2. Get the Actions, based on the observations # ============================================================================= - actions, noise = self.get_actions(next_observation) + # Depending on the algorithm, we call specific function that passes obs through actor and generates actions + # extra_info is either noise (MATD3) or log_probs (PPO) + actions, extra_info = self.get_actions(self, next_observation) # ============================================================================= # 3. 
Transform Actions into bids @@ -830,15 +748,19 @@ def calculate_bids( ] if self.learning_mode: - self.learning_role.add_actions_to_cache(self.unit_id, start, actions, noise) + self.learning_role.add_actions_to_cache( + self.unit_id, start, actions, extra_info + ) # For PPO, also cache value estimates and log probabilities - if self.algorithm == "mappo" and hasattr(self, '_last_log_prob'): + if self.algorithm == "mappo" and hasattr(self, "_last_log_prob"): self.learning_role.add_ppo_data_to_cache( - self.unit_id, - start, - getattr(self, '_last_value', 0.0), - self._last_log_prob.item() if hasattr(self._last_log_prob, 'item') else self._last_log_prob, - done=False + self.unit_id, + start, + getattr(self, "_last_value", 0.0), + self._last_log_prob.item() + if hasattr(self._last_log_prob, "item") + else self._last_log_prob, + done=False, ) return bids @@ -1003,7 +925,9 @@ def calculate_bids( # ============================================================================= # Get the Actions, based on the observations # ============================================================================= - actions, noise = self.get_actions(next_observation) + # Depending on the algorithm, we call specific function that passes obs through actor and generates actions + # extra_info is either noise (MATD3) or log_probs (PPO) + actions, extra_info = self.get_actions(self, next_observation) # ============================================================================= # 3. 
Transform Actions into bids @@ -1049,7 +973,9 @@ def calculate_bids( ) if self.learning_mode: - self.learning_role.add_actions_to_cache(self.unit_id, start, actions, noise) + self.learning_role.add_actions_to_cache( + self.unit_id, start, actions, extra_info + ) return bids diff --git a/docs/source/learning_algorithm.rst b/docs/source/learning_algorithm.rst index 640f2663e..dee241468 100644 --- a/docs/source/learning_algorithm.rst +++ b/docs/source/learning_algorithm.rst @@ -130,10 +130,60 @@ Note, that the specific implementation of each network architecture is defined i [2] Y. Ye, D. Qiu, J. Li and G. Strbac, "Multi-Period and Multi-Spatial Equilibrium Analysis in Imperfect Electricity Markets: A Novel Multi-Agent Deep Reinforcement Learning Approach," in IEEE Access, vol. 7, pp. 130515-130529, 2019, doi: 10.1109/ACCESS.2019.2940005. -.. _replay-buffer: +DDPG (Deep Deterministic Policy Gradient) +------------------------------------------ + +DDPG is a single-agent off-policy algorithm that serves as the foundation for TD3. While TD3 improves upon DDPG with twin critics and delayed updates, +DDPG itself remains a powerful baseline for continuous control tasks. + +Original paper: https://arxiv.org/abs/1509.02971 + +OpenAI Spinning Guide for DDPG: https://spinningup.openai.com/en/latest/algorithms/ddpg.html + +DDPG extends the deterministic policy gradient to off-policy settings using a single critic network and an actor network. +The algorithm updates the critic towards the Bellman target: + +.. math:: + y = r + \gamma Q_{\theta'}(s', \pi_{\phi'}(s')) + +And updates the actor using the deterministic policy gradient: + +.. math:: + \nabla_\phi J(\phi) = \mathbb{E}[(\nabla_a Q(s, a) |_{a=\pi(s)} \nabla_\phi \pi(s))] + +In ASSUME, DDPG is implemented in a multi-agent setting (MADDPG) with centralized training and decentralized execution. 
+The main differences from TD3 are: only one critic network, actor updates every step (no policy delay), and no target action smoothing.
+The implementation follows the same structure as TD3, with actors and critics initialized via :func:`assume.reinforcement_learning.algorithms.maddpg.DDPG.initialize_policy`
+and policy updates performed in :func:`assume.reinforcement_learning.algorithms.maddpg.DDPG.update_policy`.
+
+PPO (Proximal Policy Optimization)
+-----------------------------------
+
+PPO is a state-of-the-art on-policy algorithm that uses a clipped surrogate objective to ensure stable policy updates.
+Unlike off-policy methods like TD3 and DDPG, PPO requires fresh data collected under the current policy.
+
+Original paper: https://arxiv.org/abs/1707.06347
+
+OpenAI Spinning Guide for PPO: https://spinningup.openai.com/en/latest/algorithms/ppo.html
+
+PPO improves upon policy gradient methods by limiting policy updates with a clipping mechanism:
+
+.. math::
+    L^{CLIP}(\theta) = \hat{\mathbb{E}}_t[\min(r_t(\theta) \hat{A}_t, \text{clip}(r_t(\theta), 1-\epsilon, 1+\epsilon) \hat{A}_t)]
+
+where :math:`r_t(\theta)` is the importance sampling ratio and :math:`\hat{A}_t` is the advantage estimate using Generalized Advantage Estimation (GAE).
+
+In ASSUME, PPO uses a rollout buffer instead of a replay buffer, collecting on-policy experiences and training over multiple epochs.
+The algorithm maintains both an actor and a centralized critic, similar to other methods, but updates them using the clipped surrogate objective.
+Network initialization is handled by :func:`assume.reinforcement_learning.algorithms.mappo.PPO.initialize_policy`
+and policy updates occur in :func:`assume.reinforcement_learning.algorithms.mappo.PPO.update_policy`.
+The rollout buffer enables efficient multi-pass training over the same batch of experiences, improving sample efficiency.
+For more details on how the rollout buffer works, see :ref:`buffer`.
+
+.. 
_buffer: ############## -Replay Buffer +Buffer ############## This chapter gives you an insight into the general usage of buffers in reinforcement learning and how they are implemented in ASSUME. @@ -142,22 +192,16 @@ This chapter gives you an insight into the general usage of buffers in reinforce Why do we need buffers? ======================= -In reinforcement learning, a buffer, often referred to as a replay buffer, is a crucial component in algorithms like for Experience Replay. -It serves as a memory for the agent's past experiences, storing tuples of observations, actions, rewards, and subsequent observations. - -Instead of immediately using each new experience for training, the experiences are stored in the buffer. During the training process, -a batch of experiences is randomly sampled from the replay buffer. This random sampling breaks the temporal correlation in the data, contributing to a more stable learning process. - -The replay buffer improves sample efficiency by allowing the agent to reuse and learn from past experiences multiple times. -This reduces the reliance on new experiences and makes better use of the available data. It also helps mitigate the effects of non-stationarity in the environment, -as the agent is exposed to a diverse set of experiences. +In reinforcement learning, a buffer is a crucial component for storing +the agent's past experiences as tuples of observations, actions, rewards, +and subsequent observations. -Overall, the replay buffer is instrumental in stabilizing the learning process in reinforcement learning algorithms, +Overall, the buffer is instrumental in stabilizing the learning process in reinforcement learning algorithms, enhancing their robustness and performance by providing a diverse and non-correlated set of training samples. -How are they used in ASSUME? -============================ +How are buffers implemented in ASSUME? 
+====================================== In principal ASSUME allows for different buffers to be implemented. They just need to adhere to the structure presented in the base buffer. Here we will present the different buffers already implemented, which is only one, yet. @@ -170,3 +214,52 @@ Yet, the buffer is quite large to store all observations also from multiple agen After a certain round of training runs which is defined in the config file the RL strategy is updated by calling the update function of the respective algorithms which calls the sample function of the replay buffer. The sample function returns a batch of experiences which is then used to update the RL strategy. For more information on the learning capabilities of ASSUME, see :doc:`learning`. + +Instead of immediately using each new experience for training, the experiences are stored in the buffer. During the training process, +a batch of experiences is randomly sampled from the buffer. This random sampling breaks the temporal correlation in the data, contributing to a more stable learning process. + +The buffer improves sample efficiency by allowing the agent to reuse and learn from past experiences multiple times. +This reduces the reliance on new experiences and makes better use of the available data. It also helps mitigate the effects of non-stationarity in the environment, +as the agent is exposed to a diverse set of experiences. + + +The rollout buffer +------------------ + +A rollout buffer is a specialized type of experience storage designed for on-policy reinforcement learning algorithms like PPO (Proximal Policy Optimization). +Unlike replay buffers that store and reuse experiences from multiple past policies, rollout buffers only store experiences collected by the current policy. 
+ +The key characteristics of a rollout buffer are: + +* **On-policy storage**: Only stores trajectories from the current policy version +* **Single-use data**: Experiences are used once for training, then discarded +* **Temporal structure**: Maintains the sequential order of experiences for advantage computation +* **Additional metadata**: Stores policy-specific information like old log probabilities and value estimates + +This design makes rollout buffers particularly suitable for policy gradient methods that require fresh, on-policy data for stable learning. + +The rollout buffer for PPO is implemented as a fixed-size circular buffer that stores one complete rollout of experiences. +Unlike the replay buffer, it is completely reset after each training update to ensure only on-policy data is used. + +The buffer stores the following information for each timestep: + +* **Observations**: The state observed by each agent +* **Actions**: The actions taken by each agent +* **Rewards**: The rewards received by each agent +* **Old log probabilities**: The log probability of the action under the policy that collected it +* **Old values**: The value function estimate at that state +* **Dones**: Whether the episode terminated + +After a complete rollout is collected (determined by the ``train_freq`` parameter in the config), the buffer computes: + +* **Returns**: Discounted sum of future rewards for each timestep +* **Advantages**: GAE-based advantage estimates that guide policy improvement + +The learning role collects experiences after each environment step by calling the buffer's add function. +Once the buffer accumulates enough data (specified by ``batch_size``), the PPO algorithm's update function +is triggered, which retrieves mini-batches from the buffer for multiple training epochs (specified by ``ppo_n_epochs``). + +After training is complete, the buffer is reset, and the cycle begins again with the updated policy. 
+This ensures that PPO always learns from fresh, on-policy experiences, which is critical for the algorithm's stability and performance. + +For more information on how PPO uses the rollout buffer, see the PPO algorithm documentation above. From 8d39963258b83ad9ca61b375d0969850fe975ba7 Mon Sep 17 00:00:00 2001 From: Harshul-18 Date: Fri, 9 Jan 2026 14:34:48 +0100 Subject: [PATCH 09/44] DONE: Added DDPG, PPO in multi-agent environment in /reinforcement_learning module. --- .../algorithms/base_algorithm.py | 1 + .../algorithms/maddpg.py | 614 ++++++++++++++++++ .../algorithms/mappo.py | 411 ++++++++++++ .../algorithms/matd3.py | 2 +- .../reinforcement_learning/learning_role.py | 6 + .../reinforcement_learning/learning_utils.py | 4 +- .../neural_network_architecture.py | 377 +++++++++-- .../reinforcement_learning/rollout_buffer.py | 261 ++++++++ 8 files changed, 1629 insertions(+), 47 deletions(-) create mode 100644 assume/reinforcement_learning/algorithms/maddpg.py create mode 100644 assume/reinforcement_learning/algorithms/mappo.py create mode 100644 assume/reinforcement_learning/rollout_buffer.py diff --git a/assume/reinforcement_learning/algorithms/base_algorithm.py b/assume/reinforcement_learning/algorithms/base_algorithm.py index 44c0f492f..9ab5b258b 100644 --- a/assume/reinforcement_learning/algorithms/base_algorithm.py +++ b/assume/reinforcement_learning/algorithms/base_algorithm.py @@ -89,3 +89,4 @@ def load_params(self, directory: str) -> None: """ Load learning params - abstract method to be implemented by the Learning Algorithm """ + pass diff --git a/assume/reinforcement_learning/algorithms/maddpg.py b/assume/reinforcement_learning/algorithms/maddpg.py new file mode 100644 index 000000000..a49ac7c83 --- /dev/null +++ b/assume/reinforcement_learning/algorithms/maddpg.py @@ -0,0 +1,614 @@ +# SPDX-FileCopyrightText: ASSUME Developers +# +# SPDX-License-Identifier: AGPL-3.0-or-later + +""" +MADDPG - Multi-Agent Deep Deterministic Policy Gradient + +This module 
implements the DDPG algorithm for multi-agent settings (MADDPG). + +DDPG vs TD3 Comparison: +----------------------- +| Feature | DDPG (this) | TD3 | +|-------------------|-----------------|------------------| +| Critics | 1 (single) | 2 (twin) | +| Policy Updates | Every step | Delayed (1:2) | +| Target Noise | No | Yes (smoothing) | +| Overestimation | Can occur | Reduced | +| Complexity | Simpler | More complex | + +MADDPG extends DDPG to multi-agent settings using: +- Centralized Training: Critic sees all agents' observations and actions +- Decentralized Execution: Each actor only uses its own observation +""" + +import json +import logging +import os + +import torch as th +from torch.nn import functional as F +from torch.optim import AdamW + +from assume.reinforcement_learning.algorithms.base_algorithm import RLAlgorithm +from assume.reinforcement_learning.learning_utils import ( + polyak_update, + transfer_weights, +) +from assume.reinforcement_learning.neural_network_architecture import CriticDDPG + +logger = logging.getLogger(__name__) + + +class DDPG(RLAlgorithm): + """ + Deep Deterministic Policy Gradient (DDPG) Algorithm. + + Extended to multi-agent settings (MADDPG) for electricity market simulations. 
+ + Key Features: + - Single critic network (vs twin critics in TD3) + - Updates actor every step (no policy delay) + - No target action smoothing noise + - Centralized training with decentralized execution + """ + + def __init__(self, learning_role): + """Initialize DDPG algorithm.""" + super().__init__(learning_role) + + # Gradient step counter + self.n_updates = 0 + + # Gradient clipping threshold + self.grad_clip_norm = 1.0 + + # ========================================================================= + # CHECKPOINT SAVING METHODS + # ========================================================================= + + def save_params(self, directory: str) -> None: + """Save all actor and critic network parameters to disk.""" + self.save_critic_params(directory=f"{directory}/critics") + self.save_actor_params(directory=f"{directory}/actors") + + def save_critic_params(self, directory: str) -> None: + """Save critic network parameters for all agents.""" + os.makedirs(directory, exist_ok=True) + + for u_id, strategy in self.learning_role.rl_strats.items(): + obj = { + "critic": strategy.critic.state_dict(), + "critic_target": strategy.target_critic.state_dict(), + "critic_optimizer": strategy.critic.optimizer.state_dict(), + } + path = f"{directory}/critic_{u_id}.pt" + th.save(obj, path) + + # Save unit ID order for weight transfer + u_id_list = [str(u) for u in self.learning_role.rl_strats.keys()] + mapping = {"u_id_order": u_id_list} + map_path = os.path.join(directory, "u_id_order.json") + with open(map_path, "w") as f: + json.dump(mapping, f, indent=2) + + def save_actor_params(self, directory: str) -> None: + """Save actor network parameters for all agents.""" + os.makedirs(directory, exist_ok=True) + + for u_id, strategy in self.learning_role.rl_strats.items(): + obj = { + "actor": strategy.actor.state_dict(), + "actor_target": strategy.actor_target.state_dict(), + "actor_optimizer": strategy.actor.optimizer.state_dict(), + } + path = 
f"{directory}/actor_{u_id}.pt" + th.save(obj, path) + + # ========================================================================= + # CHECKPOINT LOADING METHODS + # ========================================================================= + + def load_params(self, directory: str) -> None: + """Load all actor and critic parameters from disk.""" + self.load_critic_params(directory) + self.load_actor_params(directory) + + def load_critic_params(self, directory: str) -> None: + """Load critic parameters with support for agent count changes.""" + logger.info("Loading critic parameters...") + + if not os.path.exists(directory): + logger.warning( + "Specified directory does not exist. Using randomly initialized critics." + ) + return + + # Load saved unit ID order + map_path = os.path.join(directory, "critics", "u_id_order.json") + if os.path.exists(map_path): + with open(map_path) as f: + loaded_id_order = json.load(f).get("u_id_order", []) + else: + logger.warning("No u_id_order.json: assuming same order as current.") + loaded_id_order = [str(u) for u in self.learning_role.rl_strats.keys()] + + new_id_order = [str(u) for u in self.learning_role.rl_strats.keys()] + direct_load = loaded_id_order == new_id_order + + if direct_load: + logger.info("Agents order unchanged. Loading critic weights directly.") + else: + logger.info( + f"Agents mismatch: n_old={len(loaded_id_order)}, " + f"n_new={len(new_id_order)}. Transferring weights." 
+ ) + + for u_id, strategy in self.learning_role.rl_strats.items(): + critic_path = os.path.join(directory, "critics", f"critic_{u_id}.pt") + if not os.path.exists(critic_path): + logger.warning(f"No saved critic for {u_id}; skipping.") + continue + + try: + critic_params = th.load(critic_path, weights_only=True) + + for key in ("critic", "critic_target", "critic_optimizer"): + if key not in critic_params: + logger.warning(f"Missing {key} in critic params for {u_id}.") + continue + + if direct_load: + strategy.critic.load_state_dict(critic_params["critic"]) + strategy.target_critic.load_state_dict(critic_params["critic_target"]) + strategy.critic.optimizer.load_state_dict(critic_params["critic_optimizer"]) + else: + # Weight transfer for agent count changes + critic_weights = transfer_weights( + model=strategy.critic, + loaded_state=critic_params["critic"], + loaded_id_order=loaded_id_order, + new_id_order=new_id_order, + obs_base=strategy.obs_dim, + act_dim=strategy.act_dim, + unique_obs=strategy.unique_obs_dim, + ) + target_critic_weights = transfer_weights( + model=strategy.target_critic, + loaded_state=critic_params["critic_target"], + loaded_id_order=loaded_id_order, + new_id_order=new_id_order, + obs_base=strategy.obs_dim, + act_dim=strategy.act_dim, + unique_obs=strategy.unique_obs_dim, + ) + + if critic_weights is None or target_critic_weights is None: + logger.warning(f"Weights transfer failed for {u_id}.") + continue + + strategy.critic.load_state_dict(critic_weights) + strategy.target_critic.load_state_dict(target_critic_weights) + + except Exception as e: + logger.warning(f"Failed to load critic for {u_id}: {e}") + + def load_actor_params(self, directory: str) -> None: + """Load actor network parameters from disk.""" + logger.info("Loading actor parameters...") + + if not os.path.exists(directory): + logger.warning( + "Specified directory for actors does not exist! " + "Starting with randomly initialized values!" 
+ ) + return + + for u_id, strategy in self.learning_role.rl_strats.items(): + try: + actor_params = self.load_obj( + directory=f"{directory}/actors/actor_{str(u_id)}.pt" + ) + + strategy.actor.load_state_dict(actor_params["actor"]) + strategy.actor_target.load_state_dict(actor_params["actor_target"]) + strategy.actor.optimizer.load_state_dict(actor_params["actor_optimizer"]) + strategy.actor.loaded = True + + except Exception: + logger.warning(f"No actor values loaded for agent {u_id}") + + # ========================================================================= + # NETWORK INITIALIZATION + # ========================================================================= + + def initialize_policy(self, actors_and_critics: dict = None) -> None: + """ + Initialize actor and critic networks for all agents. + + Args: + actors_and_critics: Optional pre-existing networks to assign + """ + if actors_and_critics is None: + self.check_strategy_dimensions() + self.create_actors() + self.create_critics() + else: + for u_id, strategy in self.learning_role.rl_strats.items(): + strategy.actor = actors_and_critics["actors"][u_id] + strategy.actor_target = actors_and_critics["actor_targets"][u_id] + strategy.critic = actors_and_critics["critics"][u_id] + strategy.target_critic = actors_and_critics["target_critics"][u_id] + + self.obs_dim = actors_and_critics["obs_dim"] + self.act_dim = actors_and_critics["act_dim"] + self.unique_obs_dim = actors_and_critics["unique_obs_dim"] + + def check_strategy_dimensions(self) -> None: + """Validate that all agents have consistent dimensions.""" + obs_dim_list = [] + act_dim_list = [] + unique_obs_dim_list = [] + num_timeseries_obs_dim_list = [] + + for strategy in self.learning_role.rl_strats.values(): + obs_dim_list.append(strategy.obs_dim) + act_dim_list.append(strategy.act_dim) + unique_obs_dim_list.append(strategy.unique_obs_dim) + num_timeseries_obs_dim_list.append(strategy.num_timeseries_obs_dim) + + if len(set(obs_dim_list)) > 1: + raise 
ValueError( + f"All observation dimensions must be the same. " + f"Got: {obs_dim_list}" + ) + else: + self.obs_dim = obs_dim_list[0] + + if len(set(act_dim_list)) > 1: + raise ValueError( + f"All action dimensions must be the same. " + f"Got: {act_dim_list}" + ) + else: + self.act_dim = act_dim_list[0] + + if len(set(unique_obs_dim_list)) > 1: + raise ValueError( + f"All unique_obs_dim values must be the same. " + f"Got: {unique_obs_dim_list}" + ) + else: + self.unique_obs_dim = unique_obs_dim_list[0] + + if len(set(num_timeseries_obs_dim_list)) > 1: + raise ValueError( + f"All num_timeseries_obs_dim values must be the same. " + f"Got: {num_timeseries_obs_dim_list}" + ) + else: + self.num_timeseries_obs_dim = num_timeseries_obs_dim_list[0] + + def create_actors(self) -> None: + """Create actor (policy) networks for all agents.""" + for strategy in self.learning_role.rl_strats.values(): + # Create main actor network + strategy.actor = self.actor_architecture_class( + obs_dim=self.obs_dim, + act_dim=self.act_dim, + float_type=self.float_type, + unique_obs_dim=self.unique_obs_dim, + num_timeseries_obs_dim=self.num_timeseries_obs_dim, + ).to(self.device) + + # Create target actor network + strategy.actor_target = self.actor_architecture_class( + obs_dim=self.obs_dim, + act_dim=self.act_dim, + float_type=self.float_type, + unique_obs_dim=self.unique_obs_dim, + num_timeseries_obs_dim=self.num_timeseries_obs_dim, + ).to(self.device) + + # Initialize target with same weights + strategy.actor_target.load_state_dict(strategy.actor.state_dict()) + strategy.actor_target.train(mode=False) + + # Create optimizer + strategy.actor.optimizer = AdamW( + strategy.actor.parameters(), + lr=self.learning_role.calc_lr_from_progress(1), + ) + + strategy.actor.loaded = False + + def create_critics(self) -> None: + """ + Create critic (Q-function) networks for all agents. + + Key difference from TD3: Uses single critic instead of twin critics. 
+ """ + n_agents = len(self.learning_role.rl_strats) + + for strategy in self.learning_role.rl_strats.values(): + # Create main critic (single Q-network, not twin) + strategy.critic = CriticDDPG( + n_agents=n_agents, + obs_dim=self.obs_dim, + act_dim=self.act_dim, + unique_obs_dim=self.unique_obs_dim, + float_type=self.float_type, + ).to(self.device) + + # Create target critic + strategy.target_critic = CriticDDPG( + n_agents=n_agents, + obs_dim=self.obs_dim, + act_dim=self.act_dim, + unique_obs_dim=self.unique_obs_dim, + float_type=self.float_type, + ).to(self.device) + + # Initialize target with same weights + strategy.target_critic.load_state_dict(strategy.critic.state_dict()) + strategy.target_critic.train(mode=False) + + # Create optimizer + strategy.critic.optimizer = AdamW( + strategy.critic.parameters(), + lr=self.learning_role.calc_lr_from_progress(1), + ) + + def extract_policy(self) -> dict: + """Extract all actor and critic networks into a dictionary.""" + actors = {} + actor_targets = {} + critics = {} + target_critics = {} + + for u_id, strategy in self.learning_role.rl_strats.items(): + actors[u_id] = strategy.actor + actor_targets[u_id] = strategy.actor_target + critics[u_id] = strategy.critic + target_critics[u_id] = strategy.target_critic + + return { + "actors": actors, + "actor_targets": actor_targets, + "critics": critics, + "target_critics": target_critics, + "obs_dim": self.obs_dim, + "act_dim": self.act_dim, + "unique_obs_dim": self.unique_obs_dim, + } + + # ========================================================================= + # CORE TRAINING: POLICY UPDATE + # ========================================================================= + + def update_policy(self) -> None: + """ + Update actor and critic networks using the DDPG algorithm. + + Key differences from TD3: + 1. Uses single critic (no twin Q-learning) + 2. Updates actor every step (no policy delay) + 3. 
No target action smoothing noise + """ + logger.debug("Updating Policy (MADDPG/DDPG)") + + strategies = list(self.learning_role.rl_strats.values()) + n_rl_agents = len(strategies) + + # Initialize metrics storage + unit_params = [ + { + u_id: { + "actor_loss": None, + "actor_total_grad_norm": None, + "actor_max_grad_norm": None, + "critic_loss": None, + "critic_total_grad_norm": None, + "critic_max_grad_norm": None, + } + for u_id in self.learning_role.rl_strats.keys() + } + for _ in range(self.learning_config.gradient_steps) + ] + + # Update noise and learning rate schedules + progress_remaining = self.learning_role.get_progress_remaining() + updated_noise_decay = self.learning_role.calc_noise_from_progress(progress_remaining) + learning_rate = self.learning_role.calc_lr_from_progress(progress_remaining) + + for strategy in strategies: + self.update_learning_rate( + [strategy.critic.optimizer, strategy.actor.optimizer], + learning_rate=learning_rate, + ) + strategy.action_noise.update_noise_decay(updated_noise_decay) + + # Main gradient step loop + for step in range(self.learning_config.gradient_steps): + self.n_updates += 1 + + # Sample from replay buffer + transitions = self.learning_role.buffer.sample( + self.learning_config.batch_size + ) + + states, actions, next_states, rewards = ( + transitions.observations, + transitions.actions, + transitions.next_observations, + transitions.rewards, + ) + + # Compute target actions (no smoothing noise in DDPG) + with th.no_grad(): + next_actions = th.stack([ + strategy.actor_target(next_states[:, i, :]).clamp(-1, 1) + for i, strategy in enumerate(strategies) + ]) + next_actions = next_actions.transpose(0, 1).contiguous() + next_actions = next_actions.view(-1, n_rl_agents * self.act_dim) + + all_actions = actions.view(self.learning_config.batch_size, -1) + + # Precompute observation slices + unique_obs_from_others = states[ + :, :, self.obs_dim - self.unique_obs_dim : + ].reshape(self.learning_config.batch_size, 
n_rl_agents, -1) + + next_unique_obs_from_others = next_states[ + :, :, self.obs_dim - self.unique_obs_dim : + ].reshape(self.learning_config.batch_size, n_rl_agents, -1) + + # ================================================================= + # CRITIC UPDATE + # ================================================================= + for strategy in strategies: + strategy.critic.optimizer.zero_grad(set_to_none=True) + + total_critic_loss = 0.0 + + for i, strategy in enumerate(strategies): + critic = strategy.critic + critic_target = strategy.target_critic + + # Build centralized observation + other_unique_obs = th.cat( + (unique_obs_from_others[:, :i], unique_obs_from_others[:, i + 1 :]), + dim=1, + ) + other_next_unique_obs = th.cat( + (next_unique_obs_from_others[:, :i], next_unique_obs_from_others[:, i + 1 :]), + dim=1, + ) + + all_states = th.cat( + ( + states[:, i, :].reshape(self.learning_config.batch_size, -1), + other_unique_obs.reshape(self.learning_config.batch_size, -1), + ), + dim=1, + ) + all_next_states = th.cat( + ( + next_states[:, i, :].reshape(self.learning_config.batch_size, -1), + other_next_unique_obs.reshape(self.learning_config.batch_size, -1), + ), + dim=1, + ) + + # Compute target Q-value (single critic, no min) + with th.no_grad(): + next_q_value = critic_target(all_next_states, next_actions) + target_Q_value = ( + rewards[:, i].unsqueeze(1) + + self.learning_config.gamma * next_q_value + ) + + # Compute current Q-value + current_Q_value = critic(all_states, all_actions) + + # MSE loss (single critic) + critic_loss = F.mse_loss(current_Q_value, target_Q_value) + + unit_params[step][strategy.unit_id]["critic_loss"] = critic_loss.item() + total_critic_loss += critic_loss + + # Backward pass for critics + total_critic_loss.backward() + + for strategy in strategies: + parameters = list(strategy.critic.parameters()) + max_grad_norm = max(p.grad.norm() for p in parameters) + total_norm = th.nn.utils.clip_grad_norm_( + parameters, 
max_norm=self.grad_clip_norm + ) + strategy.critic.optimizer.step() + + unit_params[step][strategy.unit_id]["critic_total_grad_norm"] = total_norm + unit_params[step][strategy.unit_id]["critic_max_grad_norm"] = max_grad_norm + + # ================================================================= + # ACTOR UPDATE (every step, no delay in DDPG) + # ================================================================= + for strategy in strategies: + strategy.actor.optimizer.zero_grad(set_to_none=True) + + total_actor_loss = 0.0 + + for i, strategy in enumerate(strategies): + actor = strategy.actor + critic = strategy.critic + + state_i = states[:, i, :] + action_i = actor(state_i) + + other_unique_obs = th.cat( + (unique_obs_from_others[:, :i], unique_obs_from_others[:, i + 1 :]), + dim=1, + ) + all_states_i = th.cat( + ( + state_i.reshape(self.learning_config.batch_size, -1), + other_unique_obs.reshape(self.learning_config.batch_size, -1), + ), + dim=1, + ) + + all_actions_clone = actions.clone().detach() + all_actions_clone[:, i, :] = action_i + all_actions_clone = all_actions_clone.view( + self.learning_config.batch_size, -1 + ) + + # Actor loss: maximize Q-value + actor_loss = -critic(all_states_i, all_actions_clone).mean() + + unit_params[step][strategy.unit_id]["actor_loss"] = actor_loss.item() + total_actor_loss += actor_loss + + # Backward pass for actors + total_actor_loss.backward() + + for strategy in strategies: + parameters = list(strategy.actor.parameters()) + max_grad_norm = max(p.grad.norm() for p in parameters) + total_norm = th.nn.utils.clip_grad_norm_( + parameters, max_norm=self.grad_clip_norm + ) + strategy.actor.optimizer.step() + + unit_params[step][strategy.unit_id]["actor_total_grad_norm"] = total_norm + unit_params[step][strategy.unit_id]["actor_max_grad_norm"] = max_grad_norm + + # ================================================================= + # TARGET NETWORK UPDATES (Polyak averaging) + # 
================================================================= + all_critic_params = [] + all_target_critic_params = [] + all_actor_params = [] + all_target_actor_params = [] + + for strategy in strategies: + all_critic_params.extend(strategy.critic.parameters()) + all_target_critic_params.extend(strategy.target_critic.parameters()) + all_actor_params.extend(strategy.actor.parameters()) + all_target_actor_params.extend(strategy.actor_target.parameters()) + + polyak_update( + all_critic_params, + all_target_critic_params, + self.learning_config.tau, + ) + polyak_update( + all_actor_params, + all_target_actor_params, + self.learning_config.tau, + ) + + # Log metrics + self.learning_role.write_rl_grad_params_to_output(learning_rate, unit_params) diff --git a/assume/reinforcement_learning/algorithms/mappo.py b/assume/reinforcement_learning/algorithms/mappo.py new file mode 100644 index 000000000..b3f43c5d4 --- /dev/null +++ b/assume/reinforcement_learning/algorithms/mappo.py @@ -0,0 +1,411 @@ +# SPDX-FileCopyrightText: ASSUME Developers +# +# SPDX-License-Identifier: AGPL-3.0-or-later +import json +import logging +import os + +import numpy as np +import torch as th +from torch.nn import functional as F +from torch.optim import AdamW + +from assume.reinforcement_learning.algorithms.base_algorithm import RLAlgorithm +from assume.reinforcement_learning.learning_utils import polyak_update +from assume.reinforcement_learning.neural_network_architecture import ( + ActorPPO, + CriticPPO +) +from assume.reinforcement_learning.rollout_buffer import RolloutBuffer + +logger = logging.getLogger(__name__) + +class PPO(RLAlgorithm): + """ + Proximal Policy Optimization (PPO) Algorithm. 
+ """ + + def __init__( + self, + learning_role, + clip_range = 0.2, # Clipping parameter + clip_range_vf = None, + n_epochs = 10, # Number of epochs per update + entropy_coef = 0.01, # Entropy bonus coefficient + vf_coef = 0.5, # Value function loss coefficient + max_grad_norm = 0.5, # Gradient clipping + ): + """Initialize PPO algorithm.""" + super().__init__(learning_role) + + self.clip_range = clip_range + self.clip_range_vf = clip_range_vf + self.n_epochs = n_epochs + self.entropy_coef = entropy_coef + self.vf_coef = vf_coef + self.max_grad_norm = max_grad_norm + + # Update counter + self.n_updates = 0 + + def save_params(self, directory: str) -> None: + """Save all actor and critic network parameters to disk.""" + self.save_critic_params(directory=f"{directory}/critics") + self.save_actor_params(directory=f"{directory}/actors") + + def save_critic_params(self, directory: str) -> None: + """Save value network parameters for all agents.""" + os.makedirs(directory, exist_ok=True) + + for u_id, strategy in self.learning_role.rl_strats.items(): + obj = { + "critic": strategy.critic.state_dict(), + "critic_optimizer": strategy.critic.optimizer.state_dict(), + } + path = f"{directory}/critic_{u_id}.pt" + th.save(obj, path) + + # Save unit ID order + u_id_list = [str(u) for u in self.learning_role.rl_strats.keys()] + mapping = {"u_id_order": u_id_list} + map_path = os.path.join(directory, "u_id_order.json") + with open(map_path, "w") as f: + json.dump(mapping, f, indent=2) + + def save_actor_params(self, directory: str) -> None: + """Save actor network parameters for all agents.""" + os.makedirs(directory, exist_ok=True) + + for u_id, strategy in self.learning_role.rl_strats.items(): + obj = { + "actor": strategy.actor.state_dict(), + "actor_optimizer": strategy.actor.optimizer.state_dict(), + } + path = f"{directory}/actor_{u_id}.pt" + th.save(obj, path) + + def load_params(self, directory: str) -> None: + """Load all actor and critic parameters from disk.""" + 
self.load_critic_params(directory) + self.load_actor_params(directory) + + def load_critic_params(self, directory: str) -> None: + """Load critic parameters.""" + logger.info("Loading PPO critic parameters...") + + if not os.path.exists(directory): + logger.warning( + "Specified directory does not exist. Using randomly initialized critics." + ) + return + + for u_id, strategy in self.learning_role.rl_strats.items(): + critic_path = os.path.join(directory, "critics", f"critic_{u_id}.pt") + if not os.path.exists(critic_path): + logger.warning(f"No saved critic for {u_id}; skipping.") + continue + + try: + critic_params = th.load(critic_path, weights_only=True) + strategy.critic.load_state_dict(critic_params["critic"]) + strategy.critic.optimizer.load_state_dict(critic_params["critic_optimizer"]) + except Exception as e: + logger.warning(f"Failed to load critic for {u_id}: {e}") + + def load_actor_params(self, directory: str) -> None: + """Load actor network parameters from disk.""" + logger.info("Loading PPO actor parameters...") + + if not os.path.exists(directory): + logger.warning( + "Specified directory for actors does not exist! " + "Starting with randomly initialized values!" + ) + return + + for u_id, strategy in self.learning_role.rl_strats.items(): + try: + actor_params = self.load_obj( + directory=f"{directory}/actors/actor_{str(u_id)}.pt" + ) + + strategy.actor.load_state_dict(actor_params["actor"]) + strategy.actor.optimizer.load_state_dict(actor_params["actor_optimizer"]) + strategy.actor.loaded = True + + except Exception: + logger.warning(f"No actor values loaded for agent {u_id}") + + def initialize_policy(self, actors_and_critics: dict = None) -> None: + """ + Initialize actor and critic networks for all agents. 
+ + Args: + actors_and_critics: Optional pre-existing networks to assign + """ + if actors_and_critics is None: + self.check_strategy_dimensions() + self.create_actors() + self.create_critics() + else: + for u_id, strategy in self.learning_role.rl_strats.items(): + strategy.actor = actors_and_critics["actors"][u_id] + strategy.critic = actors_and_critics["critics"][u_id] + + self.obs_dim = actors_and_critics["obs_dim"] + self.act_dim = actors_and_critics["act_dim"] + self.unique_obs_dim = actors_and_critics["unique_obs_dim"] + + def check_strategy_dimensions(self) -> None: + """Validate that all agents have consistent dimensions.""" + foresight_list = [] + obs_dim_list = [] + act_dim_list = [] + unique_obs_dim_list = [] + num_timeseries_obs_dim_list = [] + + for strategy in self.learning_role.rl_strats.values(): + foresight_list.append(strategy.foresight) + obs_dim_list.append(strategy.obs_dim) + act_dim_list.append(strategy.act_dim) + unique_obs_dim_list.append(strategy.unique_obs_dim) + num_timeseries_obs_dim_list.append(strategy.num_timeseries_obs_dim) + + if len(set(foresight_list)) > 1: + raise ValueError( + f"All foresight values must be the same for all RL agents. THe defined learning strategies have the following foresight values: {foresight_list}" + ) + else: + self.foresight = foresight_list[0] + + if len(set(obs_dim_list)) > 1: + raise ValueError( + f"All observation dimensions must be the same. Got: {obs_dim_list}" + ) + else: + self.obs_dim = obs_dim_list[0] + + if len(set(act_dim_list)) > 1: + raise ValueError( + f"All action dimensions must be the same. Got: {act_dim_list}" + ) + else: + self.act_dim = act_dim_list[0] + + if len(set(unique_obs_dim_list)) > 1: + raise ValueError( + f"All unique_obs_dim values must be the same. Got: {unique_obs_dim_list}" + ) + else: + self.unique_obs_dim = unique_obs_dim_list[0] + + if len(set(num_timeseries_obs_dim_list)) > 1: + raise ValueError( + f"All num_timeseries_obs_dim values must be the same. 
" + f"Got: {num_timeseries_obs_dim_list}" + ) + else: + self.num_timeseries_obs_dim = num_timeseries_obs_dim_list[0] + + def create_actors(self) -> None: + """Create stochastic actor networks for all agents.""" + for strategy in self.learning_role.rl_strats.values(): + # Create PPO Actor + strategy.actor = ActorPPO( + obs_dim=self.obs_dim, + act_dim=self.act_dim, + float_type=self.float_type, + ).to(self.device) + + # Create Optimizer + strategy.actor.optimizer = AdamW( + strategy.actor.parameters(), + lr=self.learning_role.calc_lr_from_progress(1), + ) + + strategy.actor.loaded = False + + def create_critics(self) -> None: + """ + Create value networks for all agents. + """ + n_agents = len(self.learning_role.rl_strats) + + for strategy in self.learning_role.rl_strats.values(): + # Create value network + strategy.critic = CriticPPO( + n_agents=n_agents, + obs_dim=self.obs_dim, + unique_obs_dim=self.unique_obs_dim, + float_type=self.float_type, + ).to(self.device) + + # Create optimizer + strategy.critic.optimizer = AdamW( + strategy.critic.parameters(), + lr=self.learning_role.calc_lr_from_progress(1), + ) + + def extract_policy(self) -> dict: + """Extract all actor and critic networks into a dictionary.""" + actors = {} + critics = {} + + for u_id, strategy in self.learning_role.rl_strats.items(): + actors[u_id] = strategy.actor + critics[u_id] = strategy.critic + + return { + "actors": actors, + "critics": critics, + "obs_dim": self.obs_dim, + "act_dim": self.act_dim, + "unique_obs_dim": self.unique_obs_dim, + } + + def update_policy(self) -> None: + """ + Update actor and critic networks. 
+ """ + logger.debug("Updating Policy") + + strategies = list(self.learning_role.rl_strats.values()) + n_rl_agents = len(strategies) + + # Get rollout buffer + rollout_buffer = self.learning_role.rollout_buffer + + # Update learning rate + progress_remaining = self.learning_role.get_progress_remaining() + learning_rate = self.learning_role.calc_lr_from_progress(progress_remaining) + + for strategy in strategies: + for param_group in strategy.critic.optimizer.param_groups: + param_group["lr"] = learning_rate + for param_group in strategy.actor.optimizer.param_groups: + param_group["lr"] = learning_rate + + # Get last values for advantage computation + last_values = np.zeros(n_rl_agents) + dones = np.zeros(n_rl_agents) + + # Get the buffer size to index into the last stored state + buffer_size = rollout_buffer.pos if not rollout_buffer.full else rollout_buffer.buffer_size + + if buffer_size > 0: + # Get the last observation from the buffer + last_obs = rollout_buffer.observations[buffer_size-1] + last_dones = rollout_buffer.dones[buffer_size-1] + + with th.no_grad(): + for i, strategy in enumerate(strategies): + obs_tensor = th.as_tensor( + last_obs[i:i+1], + device = self.device, + dtype = self.float_type + ) + # Get value estimate from critic + last_values[i] = strategy.critic(obs_tensor).cpu().numpy().flatten()[0] + dones[i] = last_dones[i] + + # Compute advantages and returns + rollout_buffer.compute_returns_and_advantages(last_values, dones) + + # Initialize metrics storage + all_actor_losses = [] + all_critic_losses = [] + all_entropy_losses = [] + + for epoch in range(self.n_epochs): + for batch in rollout_buffer.get(self.learning_config.batch_size): + for i, strategy in enumerate(strategies): + actor = strategy.actor + critic = strategy.critic + + obs_i = batch.observations[:, i, :] + actions_i = batch.actions[:, i, :] + old_log_probs_i = batch.old_log_probs[:, i] + advantages_i = batch.advantages[:, i] + returns_i = batch.returns[:, i] + old_values_i = 
batch.old_values[:, i]
+
+                    advantages_i = (advantages_i - advantages_i.mean()) / (
+                        advantages_i.std() + 1e-8
+                    )
+
+                    log_probs, entropy = actor.evaluate_actions(
+                        obs_i,
+                        actions_i
+                    )
+                    values = critic(obs_i).flatten()
+
+                    # Importance sampling ratio
+                    ratio = th.exp(log_probs - old_log_probs_i)
+
+                    # Clipped surrogate objective
+                    policy_loss_1 = advantages_i * ratio
+                    policy_loss_2 = advantages_i * th.clamp(
+                        ratio, 1 - self.clip_range, 1 + self.clip_range
+                    )
+                    policy_loss = -th.min(policy_loss_1, policy_loss_2).mean()
+
+                    # Entropy loss
+                    entropy_loss = -self.entropy_coef * entropy.mean()
+
+                    if self.clip_range_vf is not None:
+                        # Clipped value function loss
+                        values_clipped = old_values_i + th.clamp(
+                            values - old_values_i,
+                            -self.clip_range_vf,
+                            self.clip_range_vf
+                        )
+                        value_loss_1 = F.mse_loss(values, returns_i)
+                        value_loss_2 = F.mse_loss(values_clipped, returns_i)
+                        value_loss = th.max(value_loss_1, value_loss_2)
+                    else:
+                        value_loss = F.mse_loss(values, returns_i)
+
+                    loss = policy_loss + entropy_loss + self.vf_coef * value_loss
+
+                    # Actor update
+                    actor.optimizer.zero_grad()
+                    critic.optimizer.zero_grad()
+                    loss.backward()
+
+                    # Gradient clipping
+                    th.nn.utils.clip_grad_norm_(
+                        actor.parameters(), self.max_grad_norm
+                    )
+                    th.nn.utils.clip_grad_norm_(
+                        critic.parameters(), self.max_grad_norm
+                    )
+
+                    actor.optimizer.step()
+                    critic.optimizer.step()
+
+                    # Store metrics
+                    all_actor_losses.append(policy_loss.item())
+                    all_critic_losses.append(value_loss.item())
+                    all_entropy_losses.append(entropy_loss.item())
+
+        self.n_updates += 1
+
+        # Log average metrics
+        if self.learning_role.tensor_board_logger:
+            self.learning_role.tensor_board_logger.log_scalar(
+                "ppo/actor_loss", np.mean(all_actor_losses), self.n_updates
+            )
+            self.learning_role.tensor_board_logger.log_scalar(
+                "ppo/critic_loss", np.mean(all_critic_losses), self.n_updates
+            )
+            self.learning_role.tensor_board_logger.log_scalar(
+                "ppo/entropy_loss", np.mean(all_entropy_losses), self.n_updates
+ ) + + # Clear rollout buffer + rollout_buffer.reset() + + logger.debug( + f"PPO update complete. Actor loss: {np.mean(all_actor_losses):.4f}, " + f"Value loss: {np.mean(all_critic_losses):.4f}" + ) \ No newline at end of file diff --git a/assume/reinforcement_learning/algorithms/matd3.py b/assume/reinforcement_learning/algorithms/matd3.py index 12d2a9a38..dbdd50e41 100644 --- a/assume/reinforcement_learning/algorithms/matd3.py +++ b/assume/reinforcement_learning/algorithms/matd3.py @@ -329,7 +329,7 @@ def create_actors(self) -> None: The created actor networks are associated with each unit strategy and stored as attributes. Note: - The observation dimension need to be the same, due to the centralized criic that all actors share. + The observation dimension need to be the same, due to the centralized critic that all actors share. If you have units with different observation dimensions. They need to have different critics and hence learning roles. """ diff --git a/assume/reinforcement_learning/learning_role.py b/assume/reinforcement_learning/learning_role.py index 9018db79f..ddf2aa622 100644 --- a/assume/reinforcement_learning/learning_role.py +++ b/assume/reinforcement_learning/learning_role.py @@ -19,6 +19,8 @@ ) from assume.reinforcement_learning.algorithms.base_algorithm import RLAlgorithm from assume.reinforcement_learning.algorithms.matd3 import TD3 +from assume.reinforcement_learning.algorithms.maddpg import DDPG +from assume.reinforcement_learning.algorithms.mappo import PPO from assume.reinforcement_learning.buffer import ReplayBuffer from assume.reinforcement_learning.learning_utils import ( linear_schedule_func, @@ -456,6 +458,10 @@ def create_learning_algorithm(self, algorithm: RLAlgorithm): """ if algorithm == "matd3": self.rl_algorithm = TD3(learning_role=self) + elif algorithm == "maddpg": + self.rl_algorithm = DDPG(learning_role=self) + elif algorithm == "mappo": + self.rl_algorithm = PPO(learning_role=self) else: logger.error(f"Learning algorithm 
{algorithm} not implemented!") diff --git a/assume/reinforcement_learning/learning_utils.py b/assume/reinforcement_learning/learning_utils.py index 3ec682a1c..268b0ed5b 100644 --- a/assume/reinforcement_learning/learning_utils.py +++ b/assume/reinforcement_learning/learning_utils.py @@ -248,7 +248,7 @@ def transfer_weights( ) -> dict | None: """ Transfer weights from loaded model to new model. Copy only those obs- and action-slices for matching IDs. - New IDs keep their original (random) weights. Function only works if the neural network architeczture remained stable besides the input layer, namely with the same hidden layers. + New IDs keep their original (random) weights. Function only works if the neural network architecture remained stable besides the input layer, namely with the same hidden layers. Args: model (th.nn.Module): The model to transfer weights to. @@ -259,7 +259,7 @@ def transfer_weights( act_dim (int): The action dimension size. unique_obs (int): The unique observation size per agent, smaller than obs_base as these include also shared observation values. - returns: + Returns: dict | None: The updated state dictionary with transferred weights, or None if architecture mismatch. """ diff --git a/assume/reinforcement_learning/neural_network_architecture.py b/assume/reinforcement_learning/neural_network_architecture.py index a173b4b5c..bfd1032c6 100644 --- a/assume/reinforcement_learning/neural_network_architecture.py +++ b/assume/reinforcement_learning/neural_network_architecture.py @@ -6,16 +6,20 @@ from torch import nn from torch.nn import functional as F +from typing import List, Tuple, Type, Optional, Union -class CriticTD3(nn.Module): - """Initialize parameters and build model. + +class Critic(nn.Module): + """ + Base Critic class handling architecture generation and initialization. 
Args: n_agents (int): Number of agents - obs_dim (int): Dimension of each state - act_dim (int): Dimension of each action + obs_dim (int): Dimension of observation per agent + act_dim: Dimension of action per agent + float_type: Data type for parameters + unique_obs_dim: Dimension of agent-specific observations """ - def __init__( self, n_agents: int, @@ -26,39 +30,41 @@ def __init__( ): super().__init__() + # Calculate total (centralized) dimensions self.obs_dim = obs_dim + unique_obs_dim * (n_agents - 1) self.act_dim = act_dim * n_agents - # Select proper architecture based on `n_agents` + self.float_type = float_type + + # Dynamic Architecture Definition + self.hidden_sizes = self._get_architecture(n_agents) + + def _get_architecture( + self, n_agents: int + ) -> List[int]: + """Returns hidden layer sizes based on the number of agents.""" if n_agents <= 20: hidden_sizes = [256, 128] # Shallow network for small `n_agents` elif n_agents <= 50: hidden_sizes = [512, 256, 128] # Medium network else: hidden_sizes = [1024, 512, 256, 128] # Deeper network for large `n_agents` + return hidden_sizes - # First Q-network (Q1) - self.q1_layers = self._build_q_network(hidden_sizes, float_type) - - # Second Q-network (Q2) for double Q-learning - self.q2_layers = self._build_q_network(hidden_sizes, float_type) - - # Initialize weights properly - self._init_weights() - - def _build_q_network(self, hidden_sizes, float_type): + def _build_q_network(self) -> nn.ModuleList: """ - Dynamically creates a Q-network given the chosen hidden layer sizes. + Dynamically create a Q-network given the chosen hidden layer sizes. 
""" layers = nn.ModuleList() input_dim = ( self.obs_dim + self.act_dim - ) # Input includes all observations and actions + ) # Input includes all observations and actions - for h in hidden_sizes: - layers.append(nn.Linear(input_dim, h, dtype=float_type)) + for h in self.hidden_sizes: + layers.append(nn.Linear(input_dim, h, dtype=self.float_type)) + layers.append(nn.ReLU()) input_dim = h - layers.append(nn.Linear(input_dim, 1, dtype=float_type)) # Output Q-value + layers.append(nn.Linear(input_dim, 1, dtype=self.float_type)) # Output Q-value return layers @@ -72,40 +78,163 @@ def init_layer(m): self.apply(init_layer) - def forward(self, obs, actions): + +class CriticTD3(Critic): + """Initialize parameters and build model. + + Args: + n_agents (int): Number of agents + obs_dim (int): Dimension of each state + act_dim (int): Dimension of each action + """ + def __init__( + self, + n_agents: int, + obs_dim: int, + act_dim: int, + float_type, + unique_obs_dim: int + ): + super().__init__( + n_agents, + obs_dim, + act_dim, + float_type, + unique_obs_dim + ) + + # First Q-network (Q1) + self.q1_layers = self._build_q_network() + + # Second Q-network (Q2) for double Q-learning + self.q2_layers = self._build_q_network() + + def forward( + self, + obs: th.Tensor, + actions: th.Tensor + ) -> Tuple[th.Tensor, th.Tensor]: """ Forward pass through both Q-networks. 
""" - xu = th.cat([obs, actions], dim=1) # Concatenate obs & actions + xu = th.cat([obs, actions], dim=1) # Concatenate obs & actions # Compute Q1 - x1 = xu - for layer in self.q1_layers[:-1]: # All hidden layers - x1 = F.relu(layer(x1)) - x1 = self.q1_layers[-1](x1) # Output layer (no activation) + x1 = nn.Sequential(*self.q1_layers)(xu) # Compute Q2 - x2 = xu - for layer in self.q2_layers[:-1]: # All hidden layers - x2 = F.relu(layer(x2)) - x2 = self.q2_layers[-1](x2) # Output layer (no activation) + x2 = nn.Sequential(*self.q2_layers)(xu) return x1, x2 - def q1_forward(self, obs, actions): + def q1_forward( + self, + obs: th.Tensor, + actions: th.Tensor + ) -> th.Tensor: """ Compute only Q1 (used during actor updates). """ x = th.cat([obs, actions], dim=1) - for layer in self.q1_layers[:-1]: # All hidden layers - x = F.relu(layer(x)) + x = nn.Sequential(*self.q1_layers)(x) + + return x - x = self.q1_layers[-1](x) # Output layer (no activation) + +class CriticDDPG(Critic): + """Initialize parameters and build model. + + Args: + n_agents (int): Number of agents + obs_dim (int): Dimension of observation per agent + act_dim: Dimension of action per agent + float_type: Data type for parameters + unique_obs_dim: Dimension of agent-specific observations + """ + def __init__( + self, + n_agents: int, + obs_dim: int, + act_dim: int, + float_type: th.dtype, + unique_obs_dim: int, + ): + super().__init__( + n_agents, + obs_dim, + act_dim, + float_type, + unique_obs_dim + ) + + # Q-network + self.q_layers = self._build_q_network() + + # Initialize weights properly + self._init_weights() + + def forward( + self, + obs: th.Tensor, + actions: th.Tensor + ) -> th.Tensor: + """Returns Q value.""" + xu = th.cat([obs, actions], dim=1) # Concatenate obs & actions + + # Compute Q + x = nn.Sequential(*self.layers)(xu) return x +class CriticPPO(Critic): + """Initialize parameters and build PPO value network. 
+ + Args: + n_agents (int): Number of agents + obs_dim (int): Dimension of observation per agent + float_type: Data type for parameters + unique_obs_dim: Dimension of agent-specific observations + """ + + def __init__( + self, + n_agents: int, + obs_dim: int, + float_type, + unique_obs_dim: int + ): + super().__init__( + n_agents=n_agents, + obs_dim=obs_dim, + act_dim=0, + float_type=float_type, + unique_obs_dim=unique_obs_dim + ) + + # V-network + self.v_layers = self._build_q_network() + + # Initialize weights properly + self._init_weights() + + def _init_weights(self) -> None: + """ + Apply Orthogonal initialization. + """ + def init_layer(m): + if isinstance(m, nn.Linear): + nn.init.orthogonal_(m.weight, gain=1.0) + nn.init.zeros_(m.bias) + + self.apply(init_layer) + + def forward(self, obs: th.Tensor) -> th.Tensor: + """Returns V value.""" + return nn.Sequential(*self.v_layers)(obs) + + class Actor(nn.Module): """ Parent class for actor networks. @@ -122,23 +251,25 @@ class Actor(nn.Module): "softsign": F.softsign, "tanh": th.tanh, "sigmoid": th.sigmoid, - "relu": F.relu, + "relu": F.relu } def __init__(self): super().__init__() - self.activation = "softsign" # or "tanh", "sigmoid", "relu" + self.activation = "softsign" # or "tanh", "sigmoid", "relu" if self.activation not in self.activation_function_limit: raise ValueError( f"Activation '{self.activation}' not supported! Supported: {list(self.activation_function_limit.keys())}" ) + self.min_output, self.max_output = self.activation_function_limit[ self.activation ] self.activation_function = self.activation_function_map.get(self.activation) + if self.activation_function is None: raise ValueError( f"Activation '{self.activation}' not implemented in forward pass!" ) @@ -147,12 +278,12 @@ def __init__(self): class MLPActor(Actor): """ - The neurnal network for the MLP actor. + The neural network for the MLP actor.
""" def __init__(self, obs_dim: int, act_dim: int, float_type, *args, **kwargs): super().__init__() - + self.FC1 = nn.Linear(obs_dim, 256, dtype=float_type) self.FC2 = nn.Linear(256, 128, dtype=float_type) self.FC3 = nn.Linear(128, act_dim, dtype=float_type) @@ -181,12 +312,12 @@ def forward(self, obs): class LSTMActor(Actor): """ - The LSTM recurrent neurnal network for the actor. + The LSTM recurrent neural network for the actor. Based on "Multi-Period and Multi-Spatial Equilibrium Analysis in Imperfect Electricity Markets" by Ye at al. (2019) - Note: the original source code was not available, therefore this implementation was derived from the published paper. + Note: the original source code was not available, therefore this implementation was derived from the published paper. Adjustments to resemble final layers from MLPActor: - dense layer 2 was omitted - single output layer with softsign activation function to output actions directly instead of two output layers for mean and stddev @@ -200,7 +331,7 @@ def __init__( unique_obs_dim: int, num_timeseries_obs_dim: int, *args, - **kwargs, + **kwargs ): super().__init__() self.float_type = float_type @@ -247,14 +378,14 @@ def forward(self, obs): outputs = [] for time_step in x1.split(1, dim=2): - time_step = time_step.reshape(-1, self.num_timeseries_obs_dim) + time_step = time_step.reshape(-1, self.num_timeseris_obs_dim) h_t, c_t = self.LSTM1(time_step, (h_t, c_t)) h_t2, c_t2 = self.LSTM2(h_t, (h_t2, c_t2)) outputs += [h_t2] outputs = th.cat(outputs, dim=1) x = th.cat((outputs, x2), dim=1) - + x = F.relu(self.FC1(x)) x = self.activation_function(self.FC2(x)) @@ -262,3 +393,161 @@ def forward(self, obs): x = x.squeeze(0) return x + + +class ActorPPO(nn.Module): + """ + PPO Stochastic Actor Network. 
+ + Key differences from MLPActor (DDPG): + - Outputs mean AND log_std for Gaussian policy + - Provides log_prob for importance sampling + - Used with clipped surrogate objective + """ + + def __init__( + self, + obs_dim: int, + act_dim: int, + float_type, + log_std_init: float = 0.0, + *args, + **kwargs, + ): + """ + Initialize stochastic actor. + + Args: + obs_dim: Observation dimension + act_dim: Action dimension + float_type: Data type for parameters + log_std_init: Initial log standard deviation + """ + super().__init__() + + self.act_dim = act_dim + self.float_type = float_type + + # Policy network (outputs mean) + self.FC1 = nn.Linear(obs_dim, 256, dtype=float_type) + self.FC2 = nn.Linear(256, 128, dtype=float_type) + self.mean_layer = nn.Linear(128, act_dim, dtype=float_type) + + # Learnable log standard deviation + self.log_std = nn.Parameter( + th.ones(act_dim, dtype=float_type) * log_std_init + ) + + self._init_weights() + + def _init_weights(self) -> None: + """Apply orthogonal initialization.""" + def init_layer(m): + if isinstance(m, nn.Linear): + nn.init.orthogonal_(m.weight, gain=0.01) + nn.init.zeros_(m.bias) + + # Initialize hidden layers with larger gain + nn.init.orthogonal_(self.FC1.weight, gain=1.0) + nn.init.orthogonal_(self.FC2.weight, gain=1.0) + nn.init.zeros_(self.FC1.bias) + nn.init.zeros_(self.FC2.bias) + + # Initialize output layer with small gain + nn.init.orthogonal_(self.mean_layer.weight, gain=0.01) + nn.init.zeros_(self.mean_layer.bias) + + def forward(self, obs: th.Tensor) -> tuple[th.Tensor, th.Tensor]: + """ + Forward pass: observation → (mean, log_std). 
+ + Args: + obs: Observations [batch, obs_dim] + + Returns: + Tuple of (action_mean, log_std) + """ + x = F.relu(self.FC1(obs)) + x = F.relu(self.FC2(x)) + mean = th.tanh(self.mean_layer(x)) # Bounded to [-1, 1] + + # Expand log_std to batch size + log_std = self.log_std.expand_as(mean) + + return mean, log_std + + def get_action_and_log_prob( + self, + obs: th.Tensor, + deterministic: bool = False, + ) -> tuple[th.Tensor, th.Tensor]: + """ + Sample action and compute log probability. + + Args: + obs: Observations + deterministic: If True, return mean action + + Returns: + Tuple of (action, log_prob) + """ + mean, log_std = self.forward(obs) + std = log_std.exp() + + if deterministic: + action = mean + else: + # Sample from Gaussian + noise = th.randn_like(mean) + action = mean + std * noise + + # Clamp action to valid range + action = th.clamp(action, -1.0, 1.0) + + # Compute log probability + log_prob = self._compute_log_prob(action, mean, std) + + return action, log_prob + + def evaluate_actions( + self, + obs: th.Tensor, + actions: th.Tensor, + ) -> tuple[th.Tensor, th.Tensor, th.Tensor]: + """ + Evaluate log probability and entropy for given actions. + + Used during PPO update to compute importance ratio. 
+ + Args: + obs: Observations + actions: Actions to evaluate + + Returns: + Tuple of (log_prob, entropy, values) + """ + mean, log_std = self.forward(obs) + std = log_std.exp() + + # Log probability + log_prob = self._compute_log_prob(actions, mean, std) + + # Entropy for exploration bonus + entropy = 0.5 * (1.0 + th.log(2 * th.pi * std.pow(2))).sum(dim=-1) + + return log_prob, entropy + + def _compute_log_prob( + self, + actions: th.Tensor, + mean: th.Tensor, + std: th.Tensor, + ) -> th.Tensor: + """Compute log probability of actions under Gaussian distribution.""" + var = std.pow(2) + log_prob = -0.5 * ( + ((actions - mean).pow(2) / var) + + 2 * th.log(std) + + th.log(th.tensor(2 * th.pi)) + ) + return log_prob.sum(dim=-1) \ No newline at end of file diff --git a/assume/reinforcement_learning/rollout_buffer.py b/assume/reinforcement_learning/rollout_buffer.py new file mode 100644 index 000000000..a248584f8 --- /dev/null +++ b/assume/reinforcement_learning/rollout_buffer.py @@ -0,0 +1,261 @@ +# SPDX-FileCopyrightText: ASSUME Developers +# +# SPDX-License-Identifier: AGPL-3.0-or-later + +""" +ROLLOUT BUFFER - On-Policy Experience Storage for PPO + +Unlike the replay buffer (off-policy), the rollout buffer: +1. Stores complete trajectories from current policy +2. Computes advantages using GAE (Generalized Advantage Estimation) +3. Is cleared after each policy update (single-use data) +""" + +import numpy as np +import torch as th +from typing import NamedTuple, Generator + + +class RolloutBufferSamples(NamedTuple): + """Container for rollout buffer samples.""" + observations: th.Tensor + actions: th.Tensor + old_values: th.Tensor + old_log_probs: th.Tensor + advantages: th.Tensor + returns: th.Tensor + + +class RolloutBuffer: + """ + On-policy rollout buffer for PPO algorithm. + + Stores trajectories from the current policy and computes + GAE-based advantages for policy optimization. 
+ + Key differences from ReplayBuffer: + - Single-use: data is discarded after update + - Stores log_probs for importance sampling + - Stores values for advantage computation + - Computes advantages and returns before sampling + """ + + def __init__( + self, + buffer_size: int, + obs_dim: int, + act_dim: int, + n_rl_units: int, + device: str | th.device, + float_type: th.dtype, + gamma: float = 0.99, + gae_lambda: float = 0.95, + ): + """ + Initialize rollout buffer. + + Args: + buffer_size: Maximum number of transitions per rollout + obs_dim: Observation dimension per agent + act_dim: Action dimension per agent + n_rl_units: Number of RL agents + device: Torch device (cpu/cuda) + float_type: Data type for tensors + gamma: Discount factor for returns + gae_lambda: Lambda for GAE computation + """ + self.buffer_size = buffer_size + self.obs_dim = obs_dim + self.act_dim = act_dim + self.n_rl_units = n_rl_units + self.device = device + self.float_type = float_type + self.gamma = gamma + self.gae_lambda = gae_lambda + + # Current position and full flag + self.pos = 0 + self.full = False + self.generator_ready = False + + # Allocate buffers + self.reset() + + def reset(self) -> None: + """Clear the buffer and allocate new storage.""" + self.observations = np.zeros( + (self.buffer_size, self.n_rl_units, self.obs_dim), + dtype=np.float32, + ) + self.actions = np.zeros( + (self.buffer_size, self.n_rl_units, self.act_dim), + dtype=np.float32, + ) + self.rewards = np.zeros( + (self.buffer_size, self.n_rl_units), + dtype=np.float32, + ) + self.values = np.zeros( + (self.buffer_size, self.n_rl_units), + dtype=np.float32, + ) + self.log_probs = np.zeros( + (self.buffer_size, self.n_rl_units), + dtype=np.float32, + ) + self.dones = np.zeros( + (self.buffer_size, self.n_rl_units), + dtype=np.float32, + ) + + # Computed after rollout + self.advantages = np.zeros( + (self.buffer_size, self.n_rl_units), + dtype=np.float32, + ) + self.returns = np.zeros( + (self.buffer_size, 
self.n_rl_units), + dtype=np.float32, + ) + + self.pos = 0 + self.full = False + self.generator_ready = False + + def add( + self, + obs: np.ndarray, + action: np.ndarray, + reward: np.ndarray, + done: np.ndarray, + value: np.ndarray, + log_prob: np.ndarray, + ) -> None: + """ + Add a transition to the buffer. + + Args: + obs: Observations [n_agents, obs_dim] + action: Actions taken [n_agents, act_dim] + reward: Rewards received [n_agents] + done: Episode done flags [n_agents] + value: Value estimates [n_agents] + log_prob: Log probabilities of actions [n_agents] + """ + if self.pos >= self.buffer_size: + self.full = True + return + + self.observations[self.pos] = np.array(obs).copy() + self.actions[self.pos] = np.array(action).copy() + self.rewards[self.pos] = np.array(reward).copy() + self.dones[self.pos] = np.array(done).copy() + self.values[self.pos] = np.array(value).copy() + self.log_probs[self.pos] = np.array(log_prob).copy() + + self.pos += 1 + + def compute_returns_and_advantages( + self, + last_values: np.ndarray, + dones: np.ndarray, + ) -> None: + """ + Compute GAE advantages and returns. + + Uses Generalized Advantage Estimation (GAE) for lower variance + advantage estimates. 
+ + Args: + last_values: Value estimates for the last state [n_agents] + dones: Done flags for the last state [n_agents] + """ + last_values = np.array(last_values).flatten() + dones = np.array(dones).flatten() + + # GAE computation + last_gae_lam = np.zeros(self.n_rl_units, dtype=np.float32) + buffer_size = self.pos if not self.full else self.buffer_size + + for step in reversed(range(buffer_size)): + if step == buffer_size - 1: + next_non_terminal = 1.0 - dones + next_values = last_values + else: + next_non_terminal = 1.0 - self.dones[step + 1] + next_values = self.values[step + 1] + + # TD error + delta = ( + self.rewards[step] + + self.gamma * next_values * next_non_terminal + - self.values[step] + ) + + # GAE advantage + last_gae_lam = ( + delta + + self.gamma * self.gae_lambda * next_non_terminal * last_gae_lam + ) + self.advantages[step] = last_gae_lam + + # Returns = advantages + values + self.returns = self.advantages + self.values + self.generator_ready = True + + def get( + self, + batch_size: int | None = None, + ) -> Generator[RolloutBufferSamples, None, None]: + """ + Generate batches of samples for training. + + Args: + batch_size: Size of each batch. If None, return all data. + + Yields: + RolloutBufferSamples containing observation, action, etc. + """ + if not self.generator_ready: + raise ValueError( + "Must call compute_returns_and_advantages before sampling!" 
+ ) + + buffer_size = self.pos if not self.full else self.buffer_size + indices = np.random.permutation(buffer_size) + + if batch_size is None: + batch_size = buffer_size + + start_idx = 0 + while start_idx < buffer_size: + batch_indices = indices[start_idx : start_idx + batch_size] + yield self._get_samples(batch_indices) + start_idx += batch_size + + def _get_samples(self, indices: np.ndarray) -> RolloutBufferSamples: + """Convert numpy arrays to torch tensors for given indices.""" + return RolloutBufferSamples( + observations=th.as_tensor( + self.observations[indices], device=self.device, dtype=self.float_type + ), + actions=th.as_tensor( + self.actions[indices], device=self.device, dtype=self.float_type + ), + old_values=th.as_tensor( + self.values[indices], device=self.device, dtype=self.float_type + ), + old_log_probs=th.as_tensor( + self.log_probs[indices], device=self.device, dtype=self.float_type + ), + advantages=th.as_tensor( + self.advantages[indices], device=self.device, dtype=self.float_type + ), + returns=th.as_tensor( + self.returns[indices], device=self.device, dtype=self.float_type + ), + ) + + def size(self) -> int: + """Return current number of stored transitions.""" + return self.buffer_size if self.full else self.pos \ No newline at end of file From 7cc7035edb1677defc80a702f93ceefbba2aae63 Mon Sep 17 00:00:00 2001 From: Harshul-18 Date: Fri, 9 Jan 2026 14:51:30 +0100 Subject: [PATCH 10/44] DONE: Added DDPG, PPO in multi-agent environment in /reinforcement_learning module. 
--- .../reinforcement_learning/rollout_buffer.py | 51 +------------------ 1 file changed, 1 insertion(+), 50 deletions(-) diff --git a/assume/reinforcement_learning/rollout_buffer.py b/assume/reinforcement_learning/rollout_buffer.py index a248584f8..630dff80e 100644 --- a/assume/reinforcement_learning/rollout_buffer.py +++ b/assume/reinforcement_learning/rollout_buffer.py @@ -2,15 +2,6 @@ # # SPDX-License-Identifier: AGPL-3.0-or-later -""" -ROLLOUT BUFFER - On-Policy Experience Storage for PPO - -Unlike the replay buffer (off-policy), the rollout buffer: -1. Stores complete trajectories from current policy -2. Computes advantages using GAE (Generalized Advantage Estimation) -3. Is cleared after each policy update (single-use data) -""" - import numpy as np import torch as th from typing import NamedTuple, Generator @@ -29,15 +20,6 @@ class RolloutBufferSamples(NamedTuple): class RolloutBuffer: """ On-policy rollout buffer for PPO algorithm. - - Stores trajectories from the current policy and computes - GAE-based advantages for policy optimization. - - Key differences from ReplayBuffer: - - Single-use: data is discarded after update - - Stores log_probs for importance sampling - - Stores values for advantage computation - - Computes advantages and returns before sampling """ def __init__( @@ -53,16 +35,6 @@ def __init__( ): """ Initialize rollout buffer. - - Args: - buffer_size: Maximum number of transitions per rollout - obs_dim: Observation dimension per agent - act_dim: Action dimension per agent - n_rl_units: Number of RL agents - device: Torch device (cpu/cuda) - float_type: Data type for tensors - gamma: Discount factor for returns - gae_lambda: Lambda for GAE computation """ self.buffer_size = buffer_size self.obs_dim = obs_dim @@ -133,14 +105,6 @@ def add( ) -> None: """ Add a transition to the buffer. 
- - Args: - obs: Observations [n_agents, obs_dim] - action: Actions taken [n_agents, act_dim] - reward: Rewards received [n_agents] - done: Episode done flags [n_agents] - value: Value estimates [n_agents] - log_prob: Log probabilities of actions [n_agents] """ if self.pos >= self.buffer_size: self.full = True @@ -162,13 +126,6 @@ def compute_returns_and_advantages( ) -> None: """ Compute GAE advantages and returns. - - Uses Generalized Advantage Estimation (GAE) for lower variance - advantage estimates. - - Args: - last_values: Value estimates for the last state [n_agents] - dones: Done flags for the last state [n_agents] """ last_values = np.array(last_values).flatten() dones = np.array(dones).flatten() @@ -209,16 +166,10 @@ def get( ) -> Generator[RolloutBufferSamples, None, None]: """ Generate batches of samples for training. - - Args: - batch_size: Size of each batch. If None, return all data. - - Yields: - RolloutBufferSamples containing observation, action, etc. """ if not self.generator_ready: raise ValueError( - "Must call compute_returns_and_advantages before sampling!" + "Must call compute_returns_and_advantages before sampling." 
) buffer_size = self.pos if not self.full else self.buffer_size From fc2f6e0022342a657ac0f77d9490a1df5b74c710 Mon Sep 17 00:00:00 2001 From: Harshul-18 Date: Mon, 12 Jan 2026 16:10:49 +0100 Subject: [PATCH 11/44] UPDATED ppo-input-pipeline, code-documentation DELETED rollout_buffer.py ADDED RolloutBuffer-in-buffer.py --- assume/common/base.py | 7 + .../algorithms/mappo.py | 179 +++++++++--- assume/reinforcement_learning/buffer.py | 254 +++++++++++++++++- .../reinforcement_learning/learning_role.py | 129 ++++++++- .../neural_network_architecture.py | 82 +++--- .../reinforcement_learning/rollout_buffer.py | 212 --------------- assume/strategies/learning_strategies.py | 61 ++++- 7 files changed, 633 insertions(+), 291 deletions(-) delete mode 100644 assume/reinforcement_learning/rollout_buffer.py diff --git a/assume/common/base.py b/assume/common/base.py index 33fb9d28a..32abc38c1 100644 --- a/assume/common/base.py +++ b/assume/common/base.py @@ -865,6 +865,13 @@ class LearningConfig: target_policy_noise: float = 0.2 target_noise_clip: float = 0.5 + ppo_clip_range: float | None = 0.1 + ppo_clip_range_vf: float | None = None + ppo_n_epochs: int = 10 + ppo_entropy_coef: float = 0.01 + ppo_vf_coef: float = 0.5 + ppo_gae_lambda: float = 0.95 + def __post_init__(self): """Calculate defaults that depend on other fields and validate inputs.""" if self.early_stopping_steps is None: diff --git a/assume/reinforcement_learning/algorithms/mappo.py b/assume/reinforcement_learning/algorithms/mappo.py index b3f43c5d4..f3fa7865a 100644 --- a/assume/reinforcement_learning/algorithms/mappo.py +++ b/assume/reinforcement_learning/algorithms/mappo.py @@ -28,21 +28,23 @@ class PPO(RLAlgorithm): def __init__( self, learning_role, - clip_range = 0.2, # Clipping parameter - clip_range_vf = None, - n_epochs = 10, # Number of epochs per update - entropy_coef = 0.01, # Entropy bonus coefficient - vf_coef = 0.5, # Value function loss coefficient - max_grad_norm = 0.5, # Gradient clipping + 
clip_range = 0.1, # Epsilon clipping constant preventing the policy from changing too much in a single update. + clip_range_vf = 0.1, # preventing the value function from changing too much from previous estimates + n_epochs = 30, # sample efficiency + entropy_coef = 0.02, # encourages exploration by rewarding "randomness" + vf_coef = 1.0, # balances the importance of training the Critic and training the Actor + max_grad_norm = 0.5, # Gradient clipping ): """Initialize PPO algorithm.""" super().__init__(learning_role) - self.clip_range = clip_range - self.clip_range_vf = clip_range_vf - self.n_epochs = n_epochs - self.entropy_coef = entropy_coef - self.vf_coef = vf_coef + config = self.learning_config + + self.clip_range = clip_range if clip_range is not None else getattr(config, 'ppo_clip_range', 0.2) + self.clip_range_vf = clip_range_vf if clip_range_vf is not None else getattr(config, 'ppo_clip_range_vf', None) + self.n_epochs = n_epochs if n_epochs is not None else getattr(config, 'ppo_n_epochs', 10) + self.entropy_coef = entropy_coef if entropy_coef is not None else getattr(config, 'ppo_entropy_coef', 0.01) + self.vf_coef = vf_coef if vf_coef is not None else getattr(config, 'ppo_vf_coef', 0.5) self.max_grad_norm = max_grad_norm # Update counter @@ -274,6 +276,21 @@ def update_policy(self) -> None: # Get rollout buffer rollout_buffer = self.learning_role.rollout_buffer + + # Check if rollout buffer has data + if rollout_buffer is None or rollout_buffer.pos == 0: + logger.debug("Rollout buffer is empty, skipping policy update") + return + + # Accumulate data if we don't have enough for a full batch + # This decouples train_freq from the required rollout length + if rollout_buffer.pos < self.learning_role.learning_config.batch_size: + logger.debug( + f"Rollout buffer has {rollout_buffer.pos} samples, " + f"waiting for {self.learning_role.learning_config.batch_size} (batch_size). " + "Skipping update to accumulate more on-policy data." 
+ ) + return # Update learning rate progress_remaining = self.learning_role.get_progress_remaining() @@ -293,16 +310,37 @@ def update_policy(self) -> None: buffer_size = rollout_buffer.pos if not rollout_buffer.full else rollout_buffer.buffer_size if buffer_size > 0: - # Get the last observation from the buffer - last_obs = rollout_buffer.observations[buffer_size-1] - last_dones = rollout_buffer.dones[buffer_size-1] + # Use the LAST observation as the bootstrap for the REST of the buffer. + # We sacrifice the last step (pos-1) to serve as s_{t+1} for the step before it. + # This ensures V(s_{t+1}) is calculating using the REAL next state, not self-referential. + + last_idx = buffer_size - 1 + last_obs = rollout_buffer.observations[last_idx] + last_dones = rollout_buffer.dones[last_idx] + + # Reduce buffer size by 1 so as to not train on the bootstrap step + rollout_buffer.pos -= 1 + if rollout_buffer.full: + rollout_buffer.full = False # If it was full, it's not anymore + + # Prepare unique observations for centralized critic + last_unique_obs = last_obs[:, self.obs_dim - self.unique_obs_dim :] with th.no_grad(): for i, strategy in enumerate(strategies): + # Construct centralized observation + obs_i = last_obs[i : i + 1] + other_unique = np.concatenate( + (last_unique_obs[:i], last_unique_obs[i + 1 :]), axis=0 + ) + centralized_obs = np.concatenate( + (obs_i, other_unique.reshape(1, -1)), axis=1 + ) + obs_tensor = th.as_tensor( - last_obs[i:i+1], - device = self.device, - dtype = self.float_type + centralized_obs, + device=self.device, + dtype=self.float_type, ) # Get value estimate from critic last_values[i] = strategy.critic(obs_tensor).cpu().numpy().flatten()[0] @@ -315,14 +353,54 @@ def update_policy(self) -> None: all_actor_losses = [] all_critic_losses = [] all_entropy_losses = [] + + # Initialize unit_params for gradient logging + # Use an empty list that will be dynamically extended + unit_params = [] + step_count = 0 + + # Helper to create a new step entry 
+ def create_step_entry(): + return { + u_id: { + "actor_loss": None, + "actor_total_grad_norm": None, + "actor_max_grad_norm": None, + "critic_loss": None, + "critic_total_grad_norm": None, + "critic_max_grad_norm": None, + } + for u_id in self.learning_role.rl_strats.keys() + } for epoch in range(self.n_epochs): for batch in rollout_buffer.get(self.learning_config.batch_size): + current_batch_size = batch.observations.shape[0] + + # Precompute unique observation parts for centralized critic + unique_obs_from_others = batch.observations[ + :, :, self.obs_dim - self.unique_obs_dim : + ].reshape(current_batch_size, n_rl_agents, -1) + for i, strategy in enumerate(strategies): actor = strategy.actor critic = strategy.critic obs_i = batch.observations[:, i, :] + + # Construct centralized state + other_unique_obs = th.cat( + (unique_obs_from_others[:, :i], unique_obs_from_others[:, i + 1 :]), + dim=1, + ) + all_states = th.cat( + ( + obs_i.reshape(current_batch_size, -1), + other_unique_obs.reshape(current_batch_size, -1), + ), + dim=1, + ) + actions_i = batch.actions[:, i, :] old_log_probs_i = batch.old_log_probs[:, i] advantages_i = batch.advantages[:, i] @@ -337,7 +415,7 @@ def update_policy(self) -> None: obs_i, actions_i ) - values = critic(obs_i).flatten() + values = critic(all_states).flatten() # Importance sampling ratio ratio = th.exp(log_probs - old_log_probs_i) @@ -347,12 +425,12 @@ def update_policy(self) -> None: policy_loss_2 = advantages_i * th.clamp( ratio, 1 - self.clip_range, 1 + self.clip_range ) - policy_loss = -th.min(policy_loss_1, policy_loss_2) + policy_loss = -th.min(policy_loss_1, policy_loss_2).mean() # Entropy loss entropy_loss = -self.entropy_coef * entropy.mean() - if self.clip_rnage_vf is not None: + if self.clip_range_vf is not None: # Clipped value function loss values_clipped = old_values_i + th.clamp( values - old_values_i, @@ -372,11 +450,24 @@ def update_policy(self) -> None: critic.optimizer.zero_grad() loss.backward() + # Calculate 
gradient norms BEFORE clipping + actor_params = list(actor.parameters()) + critic_params = list(critic.parameters()) + + actor_max_grad_norm = max( + (p.grad.norm().item() for p in actor_params if p.grad is not None), + default=0.0 + ) + critic_max_grad_norm = max( + (p.grad.norm().item() for p in critic_params if p.grad is not None), + default=0.0 + ) + # Gradient clipping - th.nn.utils.clip_grad_norm_( + actor_total_grad_norm = th.nn.utils.clip_grad_norm_( actor.parameters(), self.max_grad_norm ) - th.nn.utils.clip_grad_norm_( + critic_total_grad_norm = th.nn.utils.clip_grad_norm_( critic.parameters(), self.max_grad_norm ) @@ -387,20 +478,44 @@ def update_policy(self) -> None: all_actor_losses.append(policy_loss.item()) all_critic_losses.append(value_loss.item()) all_entropy_losses.append(entropy_loss.item()) + + # Ensure we have an entry for this step + if step_count >= len(unit_params): + unit_params.append(create_step_entry()) + + # Store per-unit gradient params for this step + unit_params[step_count][strategy.unit_id]["actor_loss"] = policy_loss.item() + unit_params[step_count][strategy.unit_id]["critic_loss"] = value_loss.item() + unit_params[step_count][strategy.unit_id]["actor_total_grad_norm"] = actor_total_grad_norm.item() if isinstance(actor_total_grad_norm, th.Tensor) else actor_total_grad_norm + unit_params[step_count][strategy.unit_id]["actor_max_grad_norm"] = actor_max_grad_norm + unit_params[step_count][strategy.unit_id]["critic_total_grad_norm"] = critic_total_grad_norm.item() if isinstance(critic_total_grad_norm, th.Tensor) else critic_total_grad_norm + unit_params[step_count][strategy.unit_id]["critic_max_grad_norm"] = critic_max_grad_norm + + step_count += 1 self.n_updates += 1 # Log average metrics - if self.learning_role.tensor_board_logger: - self.learning_role.tensor_board_logger.log_scalar( - "ppo/actor_loss", np.mean(all_actor_losses), self.n_updates - ) - self.learning_role.tensor_board_logger.log_scalar( - "ppo/critic_loss", 
np.mean(all_critic_losses), self.n_updates - ) - self.learning_role.tensor_board_logger.log_scalar( - "ppo/entropy_loss", np.mean(all_entropy_losses), self.n_updates - ) + # Log average metrics + # if self.learning_role.tensor_board_logger: + # self.learning_role.tensor_board_logger.log_scalar( + # "ppo/actor_loss", np.mean(all_actor_losses), self.n_updates + # ) + # self.learning_role.tensor_board_logger.log_scalar( + # "ppo/critic_loss", np.mean(all_critic_losses), self.n_updates + # ) + # self.learning_role.tensor_board_logger.log_scalar( + # "ppo/entropy_loss", np.mean(all_entropy_losses), self.n_updates + # ) + # if all_actor_losses: + # logger.info( + # f"PPO Update {self.n_updates} - Actor loss: {np.mean(all_actor_losses):.4f}, " + # f"Critic loss: {np.mean(all_critic_losses):.4f}, " + # f"Entropy loss: {np.mean(all_entropy_losses):.4f}" + # ) + + # Write gradient params to output + self.learning_role.write_rl_grad_params_to_output(learning_rate, unit_params) # Clear rollout buffer rollout_buffer.reset() diff --git a/assume/reinforcement_learning/buffer.py b/assume/reinforcement_learning/buffer.py index 534310b6d..d021452a4 100644 --- a/assume/reinforcement_learning/buffer.py +++ b/assume/reinforcement_learning/buffer.py @@ -3,7 +3,7 @@ # SPDX-License-Identifier: AGPL-3.0-or-later import warnings -from typing import NamedTuple +from typing import NamedTuple, Generator import numpy as np import torch as th @@ -172,3 +172,255 @@ def sample(self, batch_size: int) -> ReplayBufferSamples: ) return ReplayBufferSamples(*tuple(map(self.to_torch, data))) + +class RolloutBufferSamples(NamedTuple): + """ + Container for roll buffer samples. It holds one batch of training samples + from PPO's rollout buffer. 
+ """ + observations: th.Tensor # states/observations the agent saw + actions: th.Tensor # actions the agent took + old_values: th.Tensor # critic's value estimates + old_log_probs: th.Tensor # log_probability of taking each action + advantages: th.Tensor # generalized advantage estimates + returns: th.Tensor # expected returns + +class RolloutBuffer: + """ + On-policy rollout buffer for PPO algorithm. This is different from TD3/DDPG which keep old data in a replay buffer. The buffer stores data for all the agents together. + + buffer_size (int): maximum number of transitions the buffer can store before training. + obs_dim (int): dimension of the observation space. + act_dim (int): dimension of the action space. + n_rl_units (int): number of RL agents in the multi-agent system. + device (str | th.device): specifies the device for training. + float_type (th.dtype): precision of floating-point numbers. + gamma (float): discount factor for defining how much to value future rewards. + gae_lambda (float): GAE (Generalized Advantage Estimationn) smoothing parameter. 
+ """ + + def __init__( + self, + buffer_size: int, + obs_dim: int, + act_dim: int, + n_rl_units: int, + device: str | th.device, + float_type: th.dtype, + gamma: float = 0.99, + gae_lambda: float = 0.98 + ): + """Initialize the rollout buffer.""" + self.buffer_size = buffer_size + self.obs_dim = obs_dim + self.act_dim = act_dim + self.n_rl_units = n_rl_units + self.device = device + self.float_type = float_type + self.gamma = gamma + self.gae_lambda = gae_lambda + + # Current position and full flag + self.pos = 0 + self.full = False + self.generator_ready = False + + # Allocate buffers + self.reset() + + def reset(self) -> None: + """Clear the buffer and allocate new storage.""" + self.observations = np.zeros( + ( + self.buffer_size, + self.n_rl_units, + self.obs_dim + ), + dtype = np.float32 + ) + self.actions = np.zeros( + ( + self.buffer_size, + self.n_rl_units, + self.act_dim + ), + dtype = np.float32 + ) + self.rewards = np.zeros( + ( + self.buffer_size, + self.n_rl_units + ), + dtype = np.float32 + ) + self.values = np.zeros( + ( + self.buffer_size, + self.n_rl_units + ), + dtype = np.float32 + ) + self.log_probs = np.zeros( + ( + self.buffer_size, + self.n_rl_units + ), + dtype = np.float32 + ) + self.dones = np.zeros( + ( + self.buffer_size, + self.n_rl_units + ), + dtype = np.float32 + ) + + # Computed after rollout + self.advantages = np.zeros( + ( + self.buffer_size, + self.n_rl_units + ), + dtype = np.float32 + ) + self.returns = np.zeros( + ( + self.buffer_size, + self.n_rl_units + ), + dtype = np.float32 + ) + + self.pos = 0 + self.full = False + self.generator_ready = False + + def add( + self, + obs: np.ndarray, + action: np.ndarray, + reward: np.ndarray, + done: np.ndarray, + value: np.ndarray, + log_prob: np.ndarray + ) -> None: + """Add a transition to the buffer.""" + if self.pos >= self.buffer_size: + self.full = True + return + + self.observations[self.pos] = np.array(obs).copy() + self.actions[self.pos] = np.array(action).copy() + 
self.rewards[self.pos] = np.array(reward).flatten().copy() + self.dones[self.pos] = np.array(done).flatten().copy() + self.values[self.pos] = np.array(value).flatten().copy() + self.log_probs[self.pos] = np.array(log_prob).flatten().copy() + # flattening the rewards, dones, values, log_probs array to (n_units,) size + + self.pos += 1 + + def compute_returns_and_advantages( + self, + last_values: np.ndarray, + dones: np.ndarray + ) -> None: + """Compute GAE advantages and returns.""" + # taking the final value estimates and episode-end flags, + # and making them flat arrays providing one number per agent. + last_values = np.array(last_values).flatten() + dones = np.array(dones).flatten() + + # GAE computation + # starting with running total of zero for each agent. + last_gae_lam = np.zeros(self.n_rl_units, dtype=np.float32) + buffer_size = self.pos if not self.full else self.buffer_size + + # backward loop + for step in reversed(range(buffer_size)): + if step == buffer_size - 1: + # if at the last step, use the last_vlaues given as input + next_non_terminal = 1.0 - dones + next_values = last_values + else: + # for all the other steps, get the next value and next episode flag. + next_non_terminal = 1.0 - self.dones[step + 1] + next_values = self.values[step + 1] + + # TD error + delta = ( + self.rewards[step] + + self.gamma * next_values * next_non_terminal + - self.values[step] + ) + + # GAE advantage + last_gae_lam = ( + delta + + self.gamma * self.gae_lambda * next_non_terminal * last_gae_lam + ) + self.advantages[step] = last_gae_lam + + # Returns = advantages + values + self.returns = self.advantages + self.values + self.generator_ready = True + + def get( + self, + batch_size: int | None = None + ) -> Generator[RolloutBufferSamples, None, None]: + """Generate batches of samples for training.""" + if not self.generator_ready: + raise ValueError( + "Must call compute_returns_and_advantages before sampling." 
+ ) + + buffer_size = self.pos if not self.full else self.buffer_size + indices = np.random.permutation(buffer_size) + + if batch_size is None: + batch_size = buffer_size + + start_idx = 0 + while start_idx < buffer_size: + batch_indices = indices[start_idx : start_idx + batch_size] + yield self._get_samples(batch_indices) + start_idx += batch_size + + def _get_samples(self, indices: np.ndarray) -> RolloutBufferSamples: + """Convert numpy arrays to torch tensors for given indices.""" + return RolloutBufferSamples( + observations = th.as_tensor( + self.observations[indices], + device = self.device, + dtype = self.float_type + ), + actions = th.as_tensor( + self.actions[indices], + device = self.device, + dtype = self.float_type + ), + old_values = th.as_tensor( + self.values[indices], + device = self.device, + dtype = self.float_type + ), + old_log_probs = th.as_tensor( + self.log_probs[indices], + device = self.device, + dtype = self.float_type + ), + advantages = th.as_tensor( + self.advantages[indices], + device = self.device, + dtype = self.float_type + ), + returns = th.as_tensor( + self.returns[indices], + device = self.device, + dtype = self.float_type + ) + ) + + def size(self) -> int: + """Return current number of stored transitions.""" + return self.buffer_size if self.full else self.pos diff --git a/assume/reinforcement_learning/learning_role.py b/assume/reinforcement_learning/learning_role.py index ddf2aa622..446b77ad6 100644 --- a/assume/reinforcement_learning/learning_role.py +++ b/assume/reinforcement_learning/learning_role.py @@ -7,6 +7,7 @@ from datetime import datetime from pathlib import Path +import numpy as np import pandas as pd import torch as th from mango import Role @@ -21,7 +22,10 @@ from assume.reinforcement_learning.algorithms.matd3 import TD3 from assume.reinforcement_learning.algorithms.maddpg import DDPG from assume.reinforcement_learning.algorithms.mappo import PPO -from assume.reinforcement_learning.buffer import ReplayBuffer +from 
assume.reinforcement_learning.buffer import ( + ReplayBuffer, + RolloutBuffer +) from assume.reinforcement_learning.learning_utils import ( linear_schedule_func, transform_buffer_data, @@ -54,6 +58,7 @@ def __init__( # how many learning roles do exist and how are they named self.buffer: ReplayBuffer = None + self.rollout_buffer: RolloutBuffer = None # For on-policy algorithms (PPO) self.episodes_done = 0 self.rl_strats: dict[int, LearningStrategy] = {} self.learning_config = learning_config @@ -123,6 +128,10 @@ def __init__( self.all_rewards = defaultdict(lambda: defaultdict(list)) self.all_regrets = defaultdict(lambda: defaultdict(list)) self.all_profits = defaultdict(lambda: defaultdict(list)) + # PPO algorithm specific caches for on-policy learning + self.all_values = defaultdict(lambda: defaultdict(list)) + self.all_log_probs = defaultdict(lambda: defaultdict(list)) + self.all_dones = defaultdict(lambda: defaultdict(list)) def on_ready(self): """ @@ -237,6 +246,10 @@ async def store_to_buffer_and_update(self) -> None: current_noises = self.all_noises current_regrets = self.all_regrets current_profits = self.all_profits + # PPO specific caches + current_values = self.all_values + current_log_probs = self.all_log_probs + current_dones = self.all_dones # Reset cache dicts immediately with new defaultdicts self.all_obs = defaultdict(lambda: defaultdict(list)) @@ -245,6 +258,10 @@ async def store_to_buffer_and_update(self) -> None: self.all_noises = defaultdict(lambda: defaultdict(list)) self.all_regrets = defaultdict(lambda: defaultdict(list)) self.all_profits = defaultdict(lambda: defaultdict(list)) + # PPO specific resets + self.all_values = defaultdict(lambda: defaultdict(list)) + self.all_log_probs = defaultdict(lambda: defaultdict(list)) + self.all_dones = defaultdict(lambda: defaultdict(list)) # Get timestamps from cache we took all_timestamps = sorted(current_obs.keys()) @@ -260,6 +277,9 @@ async def store_to_buffer_and_update(self) -> None: "noises": {t: 
current_noises[t] for t in timestamps_to_process}, "regret": {t: current_regrets[t] for t in timestamps_to_process}, "profit": {t: current_profits[t] for t in timestamps_to_process}, + "values": {t: current_values[t] for t in timestamps_to_process}, + "log_probs": {t: current_log_probs[t] for t in timestamps_to_process}, + "dones": {t: current_dones[t] for t in timestamps_to_process} } # write data to output agent @@ -292,12 +312,84 @@ async def _store_to_buffer_and_update_sync(self, cache, device) -> None: ) return - # rewrite dict so that obs.shape == (n_rl_units, obs_dim) and sorted by keys and store in buffer - self.buffer.add( - obs=transform_buffer_data(cache["obs"], device), - actions=transform_buffer_data(cache["actions"], device), - reward=transform_buffer_data(cache["rewards"], device), - ) + # check which buffer type to use based on algorithm + if self.learning_config.algorithm == "mappo": + # for PPO use on-policy RolloutBuffer + # Add each transition to the rollout buffer + for timestamp in sorted(cache["obs"].keys()): + obs_data = transform_buffer_data( + { + timestamp: cache["obs"][timestamp] + }, + device + ) + actions_data = transform_buffer_data( + { + timestamp: cache["actions"][timestamp] + }, + device + ) + rewards_data = transform_buffer_data( + { + timestamp: cache["rewards"][timestamp] + }, + device + ) + + if cache["values"].get(timestamp): + values_data = transform_buffer_data( + { + timestamp: cache["values"][timestamp] + }, + device + ) + else: + values_data = np.zeros(len(self.values_data)) + + if cache["log_probs"].get(timestamp): + log_probs_data = transform_buffer_data( + { + timestamp: cache["log_probs"][timestamp] + }, + device + ) + else: + log_probs_data = np.zeros(len(self.log_probs_data)) + + if cache["dones"].get(timestamp): + dones_data = transform_buffer_data( + { + timestamp: cache["dones"][timestamp] + }, + device + ) + else: + dones_data = np.zeros(len(self.rl_strats)) + + # Helper to convert to numpy + def 
to_numpy(data): + if isinstance(data, th.Tensor): + return data.cpu().numpy() + return np.array(data) + + # Add to rollout buffer + if self.rollout_buffer is not None: + self.rollout_buffer.add( + obs = to_numpy(obs_data), + action = to_numpy(actions_data), + reward = to_numpy(rewards_data), + done = to_numpy(dones_data), + value = to_numpy(values_data), + log_prob = to_numpy(log_probs_data) + ) + else: + # for TD3/DDPG use off-policy ReplayBuffer + # rewrite dict so that obs.shape == (n_rl_units, obs_dim) and sorted by keys and store in buffer + self.buffer.add( + obs = transform_buffer_data(cache["obs"], device), + actions = transform_buffer_data(cache["actions"], device), + reward = transform_buffer_data(cache["rewards"], device), + ) if ( self.episodes_done @@ -350,6 +442,27 @@ def add_reward_to_cache(self, unit_id, start, reward, regret, profit) -> None: self.all_regrets[start][unit_id].append(regret) self.all_profits[start][unit_id].append(profit) + def add_ppo_data_to_cache( + self, + unit_id, + start, + value, + log_prob, + done=False + ) -> None: + """ + Add PPO specific data to the cache dict, per unit_id. + + Args: + unit_id (str): The id of the unit. + value (float): The value estimate V(s) from the critic. + log_prob (float): The log probability of the action. + done (bool): Whether a terminal state or not. + """ + self.all_values[start][unit_id].append(value) + self.all_log_probs[start][unit_id].append(log_prob) + self.all_dones[start][unit_id].append(float(done)) + def load_inter_episodic_data(self, inter_episodic_data): """ Load the inter-episodic data from the dict stored across simulation runs. 
@@ -364,6 +477,7 @@ def load_inter_episodic_data(self, inter_episodic_data): self.rl_eval = inter_episodic_data["all_eval"] self.avg_rewards = inter_episodic_data["avg_all_eval"] self.buffer = inter_episodic_data["buffer"] + self.rollout_buffer = inter_episodic_data["rollout_buffer"] self.initialize_policy(inter_episodic_data["actors_and_critics"]) @@ -393,6 +507,7 @@ def get_inter_episodic_data(self): "all_eval": self.rl_eval, "avg_all_eval": self.avg_rewards, "buffer": self.buffer, + "rollout_buffer": self.rollout_buffer, "actors_and_critics": self.rl_algorithm.extract_policy(), } diff --git a/assume/reinforcement_learning/neural_network_architecture.py b/assume/reinforcement_learning/neural_network_architecture.py index bfd1032c6..87c1cb2a8 100644 --- a/assume/reinforcement_learning/neural_network_architecture.py +++ b/assume/reinforcement_learning/neural_network_architecture.py @@ -183,7 +183,7 @@ def forward( xu = th.cat([obs, actions], dim=1) # Concatenate obs & actions # Compute Q - x = nn.Sequential(*self.layers)(xu) + x = nn.Sequential(*self.q_layers)(xu) return x @@ -232,7 +232,10 @@ def init_layer(m): def forward(self, obs: th.Tensor) -> th.Tensor: """Returns V value.""" - return self.v_net(obs) + x = obs + for layer in self.v_layers: + x = layer(x) + return x class Actor(nn.Module): @@ -396,15 +399,20 @@ def forward(self, obs): class ActorPPO(nn.Module): - """ - PPO Stochastic Actor Network. - - Key differences from MLPActor (DDPG): - - Outputs mean AND log_std for Gaussian policy - - Provides log_prob for importance sampling - - Used with clipped surrogate objective - """ + activation_function_limit = { + "softsign": (-1, 1), + "tanh": (-1, 1), + "sigmoid": (0, 1), + "relu": (0, float("inf")), + } + activation_function_map = { + "softsign": F.softsign, + "tanh": th.tanh, + "sigmoid": th.sigmoid, + "relu": F.relu + } + def __init__( self, obs_dim: int, @@ -414,20 +422,22 @@ def __init__( *args, **kwargs, ): - """ - Initialize stochastic actor. 
- - Args: - obs_dim: Observation dimension - act_dim: Action dimension - float_type: Data type for parameters - log_std_init: Initial log standard deviation - """ super().__init__() self.act_dim = act_dim self.float_type = float_type + self.activation = "softsign" # or "tanh", "sigmoid", "relu" + + if self.activation not in self.activation_function_limit: + raise ValueError( + f"Activation '{self.activation}' not supported! Supported: {list(self.activation_function_limit.keys())}" + ) + + self.min_output, self.max_output = self.activation_function_limit[ + self.activation + ] + # Policy network (outputs mean) self.FC1 = nn.Linear(obs_dim, 256, dtype=float_type) self.FC2 = nn.Linear(256, 128, dtype=float_type) @@ -457,21 +467,31 @@ def init_layer(m): nn.init.orthogonal_(self.mean_layer.weight, gain=0.01) nn.init.zeros_(self.mean_layer.bias) - def forward(self, obs: th.Tensor) -> tuple[th.Tensor, th.Tensor]: - """ - Forward pass: observation → (mean, log_std). + def forward(self, obs: th.Tensor, deterministic: bool = False) -> th.Tensor: + """Forward pass""" + x = F.relu(self.FC1(obs)) + x = F.relu(self.FC2(x)) + mean = th.tanh(self.mean_layer(x)) # Bounded to [-1, 1] - Args: - obs: Observations [batch, obs_dim] - - Returns: - Tuple of (action_mean, log_std) + if deterministic: + return mean + + # Sample from Gaussian during training + log_std = self.log_std.expand_as(mean) + std = log_std.exp() + noise = th.randn_like(mean) + action = mean + std * noise + + # Clamp to valid range + return th.clamp(action, -1.0, 1.0) + + def get_distribution(self, obs: th.Tensor) -> tuple[th.Tensor, th.Tensor]: + """ + Get the policy distribution parameters. 
""" x = F.relu(self.FC1(obs)) x = F.relu(self.FC2(x)) mean = th.tanh(self.mean_layer(x)) # Bounded to [-1, 1] - - # Expand log_std to batch size log_std = self.log_std.expand_as(mean) return mean, log_std @@ -491,7 +511,7 @@ def get_action_and_log_prob( Returns: Tuple of (action, log_prob) """ - mean, log_std = self.forward(obs) + mean, log_std = self.get_distribution(obs) std = log_std.exp() if deterministic: @@ -526,7 +546,7 @@ def evaluate_actions( Returns: Tuple of (log_prob, entropy, values) """ - mean, log_std = self.forward(obs) + mean, log_std = self.get_distribution(obs) std = log_std.exp() # Log probability diff --git a/assume/reinforcement_learning/rollout_buffer.py b/assume/reinforcement_learning/rollout_buffer.py deleted file mode 100644 index 630dff80e..000000000 --- a/assume/reinforcement_learning/rollout_buffer.py +++ /dev/null @@ -1,212 +0,0 @@ -# SPDX-FileCopyrightText: ASSUME Developers -# -# SPDX-License-Identifier: AGPL-3.0-or-later - -import numpy as np -import torch as th -from typing import NamedTuple, Generator - - -class RolloutBufferSamples(NamedTuple): - """Container for rollout buffer samples.""" - observations: th.Tensor - actions: th.Tensor - old_values: th.Tensor - old_log_probs: th.Tensor - advantages: th.Tensor - returns: th.Tensor - - -class RolloutBuffer: - """ - On-policy rollout buffer for PPO algorithm. - """ - - def __init__( - self, - buffer_size: int, - obs_dim: int, - act_dim: int, - n_rl_units: int, - device: str | th.device, - float_type: th.dtype, - gamma: float = 0.99, - gae_lambda: float = 0.95, - ): - """ - Initialize rollout buffer. 
- """ - self.buffer_size = buffer_size - self.obs_dim = obs_dim - self.act_dim = act_dim - self.n_rl_units = n_rl_units - self.device = device - self.float_type = float_type - self.gamma = gamma - self.gae_lambda = gae_lambda - - # Current position and full flag - self.pos = 0 - self.full = False - self.generator_ready = False - - # Allocate buffers - self.reset() - - def reset(self) -> None: - """Clear the buffer and allocate new storage.""" - self.observations = np.zeros( - (self.buffer_size, self.n_rl_units, self.obs_dim), - dtype=np.float32, - ) - self.actions = np.zeros( - (self.buffer_size, self.n_rl_units, self.act_dim), - dtype=np.float32, - ) - self.rewards = np.zeros( - (self.buffer_size, self.n_rl_units), - dtype=np.float32, - ) - self.values = np.zeros( - (self.buffer_size, self.n_rl_units), - dtype=np.float32, - ) - self.log_probs = np.zeros( - (self.buffer_size, self.n_rl_units), - dtype=np.float32, - ) - self.dones = np.zeros( - (self.buffer_size, self.n_rl_units), - dtype=np.float32, - ) - - # Computed after rollout - self.advantages = np.zeros( - (self.buffer_size, self.n_rl_units), - dtype=np.float32, - ) - self.returns = np.zeros( - (self.buffer_size, self.n_rl_units), - dtype=np.float32, - ) - - self.pos = 0 - self.full = False - self.generator_ready = False - - def add( - self, - obs: np.ndarray, - action: np.ndarray, - reward: np.ndarray, - done: np.ndarray, - value: np.ndarray, - log_prob: np.ndarray, - ) -> None: - """ - Add a transition to the buffer. 
- """ - if self.pos >= self.buffer_size: - self.full = True - return - - self.observations[self.pos] = np.array(obs).copy() - self.actions[self.pos] = np.array(action).copy() - self.rewards[self.pos] = np.array(reward).copy() - self.dones[self.pos] = np.array(done).copy() - self.values[self.pos] = np.array(value).copy() - self.log_probs[self.pos] = np.array(log_prob).copy() - - self.pos += 1 - - def compute_returns_and_advantages( - self, - last_values: np.ndarray, - dones: np.ndarray, - ) -> None: - """ - Compute GAE advantages and returns. - """ - last_values = np.array(last_values).flatten() - dones = np.array(dones).flatten() - - # GAE computation - last_gae_lam = np.zeros(self.n_rl_units, dtype=np.float32) - buffer_size = self.pos if not self.full else self.buffer_size - - for step in reversed(range(buffer_size)): - if step == buffer_size - 1: - next_non_terminal = 1.0 - dones - next_values = last_values - else: - next_non_terminal = 1.0 - self.dones[step + 1] - next_values = self.values[step + 1] - - # TD error - delta = ( - self.rewards[step] - + self.gamma * next_values * next_non_terminal - - self.values[step] - ) - - # GAE advantage - last_gae_lam = ( - delta - + self.gamma * self.gae_lambda * next_non_terminal * last_gae_lam - ) - self.advantages[step] = last_gae_lam - - # Returns = advantages + values - self.returns = self.advantages + self.values - self.generator_ready = True - - def get( - self, - batch_size: int | None = None, - ) -> Generator[RolloutBufferSamples, None, None]: - """ - Generate batches of samples for training. - """ - if not self.generator_ready: - raise ValueError( - "Must call compute_returns_and_advantages before sampling." 
- ) - - buffer_size = self.pos if not self.full else self.buffer_size - indices = np.random.permutation(buffer_size) - - if batch_size is None: - batch_size = buffer_size - - start_idx = 0 - while start_idx < buffer_size: - batch_indices = indices[start_idx : start_idx + batch_size] - yield self._get_samples(batch_indices) - start_idx += batch_size - - def _get_samples(self, indices: np.ndarray) -> RolloutBufferSamples: - """Convert numpy arrays to torch tensors for given indices.""" - return RolloutBufferSamples( - observations=th.as_tensor( - self.observations[indices], device=self.device, dtype=self.float_type - ), - actions=th.as_tensor( - self.actions[indices], device=self.device, dtype=self.float_type - ), - old_values=th.as_tensor( - self.values[indices], device=self.device, dtype=self.float_type - ), - old_log_probs=th.as_tensor( - self.log_probs[indices], device=self.device, dtype=self.float_type - ), - advantages=th.as_tensor( - self.advantages[indices], device=self.device, dtype=self.float_type - ), - returns=th.as_tensor( - self.returns[indices], device=self.device, dtype=self.float_type - ), - ) - - def size(self) -> int: - """Return current number of stored transitions.""" - return self.buffer_size if self.full else self.pos \ No newline at end of file diff --git a/assume/strategies/learning_strategies.py b/assume/strategies/learning_strategies.py index a55db0cb0..c15c56031 100644 --- a/assume/strategies/learning_strategies.py +++ b/assume/strategies/learning_strategies.py @@ -262,6 +262,7 @@ def get_actions(self, next_observation): ----- In learning mode, actions incorporate noise for exploration. Initial exploration relies solely on noise to cover the action space broadly. + For PPO, we also store log_prob and value estimates for later use. 
""" # distinction whether we are in learning mode or not to handle exploration realised with noise @@ -283,15 +284,37 @@ def get_actions(self, next_observation): # ============================================================================= # only use noise as the action to enforce exploration curr_action = noise + + # For PPO, store dummy log_prob and value during initial exploration + if self.algorithm == "mappo": + self._last_log_prob = th.tensor(0.0, device=self.device) + self._last_value = th.tensor(0.0, device=self.device) else: - # if we are not in the initial exploration phase we chose the action with the actor neural net - # and add noise to the action - curr_action = self.actor(next_observation).detach() - noise = self.action_noise.noise( - device=self.device, dtype=self.float_type - ) - curr_action += noise + # Check if we're using PPO algorithm + if self.algorithm == "mappo": + # PPO: use get_action_and_log_prob for proper stochastic sampling + curr_action, log_prob = self.actor.get_action_and_log_prob(next_observation.unsqueeze(0)) + curr_action = curr_action.squeeze(0).detach() + self._last_log_prob = log_prob.squeeze(0).detach() + + # Get value estimate from critic (if available) + if hasattr(self.learning_role, 'critics') and self.unit_id in self.learning_role.critics: + critic = self.learning_role.critics[self.unit_id] + self._last_value = critic(next_observation.unsqueeze(0)).squeeze().detach() + else: + self._last_value = th.tensor(0.0, device=self.device) + + # PPO uses stochastic policy, no external noise needed + noise = th.zeros_like(curr_action, dtype=self.float_type) + else: + # TD3/DDPG: if we are not in the initial exploration phase we chose the action with the actor neural net + # and add noise to the action + curr_action = self.actor(next_observation).detach() + noise = self.action_noise.noise( + device=self.device, dtype=self.float_type + ) + curr_action += noise # make sure that noise adding does not exceed the actual output of the 
NN as it pushes results in a direction that actor can't even reach curr_action = th.clamp( @@ -299,7 +322,11 @@ def get_actions(self, next_observation): ) else: # if we are not in learning mode we just use the actor neural net to get the action without adding noise - curr_action = self.actor(next_observation).detach() + if self.algorithm == "mappo": + # For PPO evaluation, use deterministic action (mean) + curr_action = self.actor(next_observation, deterministic=True).detach() + else: + curr_action = self.actor(next_observation).detach() # noise is an tensor with zeros, because we are not in learning mode noise = th.zeros_like(curr_action, dtype=self.float_type) @@ -493,6 +520,15 @@ def calculate_bids( if self.learning_mode: self.learning_role.add_actions_to_cache(self.unit_id, start, actions, noise) + # For PPO, also cache value estimates and log probabilities + if self.algorithm == "mappo" and hasattr(self, '_last_log_prob'): + self.learning_role.add_ppo_data_to_cache( + self.unit_id, + start, + getattr(self, '_last_value', 0.0), + self._last_log_prob.item() if hasattr(self._last_log_prob, 'item') else self._last_log_prob, + done=False + ) return bids @@ -795,6 +831,15 @@ def calculate_bids( if self.learning_mode: self.learning_role.add_actions_to_cache(self.unit_id, start, actions, noise) + # For PPO, also cache value estimates and log probabilities + if self.algorithm == "mappo" and hasattr(self, '_last_log_prob'): + self.learning_role.add_ppo_data_to_cache( + self.unit_id, + start, + getattr(self, '_last_value', 0.0), + self._last_log_prob.item() if hasattr(self._last_log_prob, 'item') else self._last_log_prob, + done=False + ) return bids From 78d80331f1640496ec47fc08c51a054d7f821bd0 Mon Sep 17 00:00:00 2001 From: Harshul-18 Date: Wed, 14 Jan 2026 04:00:06 +0100 Subject: [PATCH 12/44] FIX: initial values_data assignment --- assume/reinforcement_learning/learning_role.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git 
a/assume/reinforcement_learning/learning_role.py b/assume/reinforcement_learning/learning_role.py index 446b77ad6..3e2c463f8 100644 --- a/assume/reinforcement_learning/learning_role.py +++ b/assume/reinforcement_learning/learning_role.py @@ -344,7 +344,7 @@ async def _store_to_buffer_and_update_sync(self, cache, device) -> None: device ) else: - values_data = np.zeros(len(self.values_data)) + values_data = np.zeros(len(self.rl_strats)) if cache["log_probs"].get(timestamp): log_probs_data = transform_buffer_data( @@ -354,7 +354,7 @@ async def _store_to_buffer_and_update_sync(self, cache, device) -> None: device ) else: - log_probs_data = np.zeros(len(self.log_probs_data)) + log_probs_data = np.zeros(len(self.rl_strats)) if cache["dones"].get(timestamp): dones_data = transform_buffer_data( From 36292ac1a2eac978659ceff19741237abfe363e3 Mon Sep 17 00:00:00 2001 From: Harshul-18 Date: Fri, 13 Feb 2026 11:41:18 +0100 Subject: [PATCH 13/44] updated the structure and segmented the parameters for off-policy and on-policy algorithms --- assume/common/base.py | 144 ++++-- .../algorithms/base_algorithm.py | 431 +++++++++++++++++- .../algorithms/maddpg.py | 408 ++--------------- .../algorithms/mappo.py | 360 ++++++--------- .../algorithms/matd3.py | 423 +---------------- assume/reinforcement_learning/buffer.py | 81 +++- .../reinforcement_learning/learning_role.py | 73 ++- .../reinforcement_learning/learning_utils.py | 16 + .../neural_network_architecture.py | 207 ++++++--- assume/strategies/learning_strategies.py | 6 +- 10 files changed, 984 insertions(+), 1165 deletions(-) diff --git a/assume/common/base.py b/assume/common/base.py index 32abc38c1..fdc1a042c 100644 --- a/assume/common/base.py +++ b/assume/common/base.py @@ -4,7 +4,7 @@ import logging from collections import defaultdict -from dataclasses import dataclass +from dataclasses import dataclass, field from datetime import datetime, timedelta import numpy as np @@ -752,6 +752,89 @@ def remove_empty_bids(self, bids: 
list) -> list: return cleaned_bids +@dataclass +class AlgorithmConfig: + """ + Base configuration for algorithm-specific parameters. + + Parameters: + actor_architecture (str): The architecture of the neural networks used for the actors. Options include + "mlp" (Multi-Layer Perceptron) and "lstm" (Long Short-Term Memory). Default is "mlp". + batch_size (int): The batch size of experiences sampled from the replay buffer for each training update. + Larger batches provide more stable gradients but require more memory. Default is 128. + gamma (float): The discount factor for future rewards, ranging from 0 to 1. Default is 0.99. + train_freq (str): Defines the frequency at which networks are updated. Default is "24h". + """ + + # actor_architecture: str = "mlp" + batch_size: int = 128 + gamma: float = 0.99 + train_freq: str = "24h" + + +@dataclass +class OffPolicyConfig(AlgorithmConfig): + """ + Configuration for off-policy algorithms (MATD3/MADDPG) hyperparameters. + + These parameters control the off-policy actor-critic algorithm behavior such as delayed policy updates, + target network updates, and exploration noise. + + Parameters: + episodes_collecting_initial_experience (int): The number of episodes at the start during which random + actions are chosen instead of using the actor network. Default is 5. + gradient_steps (int): The number of gradient descent steps performed during each training update. Default is 100. + noise_dt (int): The time step parameter for the Ornstein-Uhlenbeck process. Default is 1. + noise_scale (int): The scale factor multiplied by the noise drawn from the distribution. Default is 1. + noise_sigma (float): The standard deviation of the noise distribution for exploration. Default is 0.1. + action_noise_schedule (str | None): Which action noise decay schedule to use. Default is None. + policy_delay (int): The frequency (in gradient steps) at which the actor policy is updated. Default is 2. 
+ tau (float): The soft update coefficient for updating target networks. Default is 0.005. + target_policy_noise (float): The standard deviation of noise added to target policy actions. Default is 0.2. + target_noise_clip (float): The maximum absolute value for clipping the target policy noise. Default is 0.5. + replay_buffer_size (int): The maximum number of transitions stored in the replay buffer. Default is 50000. + """ + + episodes_collecting_initial_experience: int = 5 + gradient_steps: int = 100 + noise_dt: int = 1 + noise_scale: int = 1 + noise_sigma: float = 0.1 + actor_architecture: str = "mlp" + action_noise_schedule: str | None = None + policy_delay: int = 2 + tau: float = 0.005 + target_policy_noise: float = 0.2 + target_noise_clip: float = 0.5 + replay_buffer_size: int = 50000 + + +@dataclass +class OnPolicyConfig(AlgorithmConfig): + """ + Configuration for on-policy algorithms (PPO/MAPPO) hyperparameters. + + These parameters control the PPO algorithm behavior such as clipping ranges, + number of optimization epochs, and loss coefficients. + + Parameters: + clip_ratio (float): The clipping ratio for the PPO surrogate objective. Default is 0.1. + entropy_coef (float): Coefficient for entropy term in loss. Default is 0.01. + gae_lambda (float): Lambda parameter for Generalized Advantage Estimation (GAE). Default is 0.95. + max_grad_norm (float): Maximum gradient norm for clipping. Default is 0.5. + vf_coef (float): Coefficient for value function term in loss. Default is 0.5. + n_epochs (int): Number of optimization epochs per rollout. Default is 10. + """ + + clip_ratio: float = 0.1 + entropy_coef: float = 0.01 + gae_lambda: float = 0.95 + max_grad_norm: float = 0.5 + vf_coef: float = 0.5 + n_epochs: int = 10 + actor_architecture: str = "mlp" + + @dataclass class LearningConfig: """ @@ -803,30 +886,17 @@ class LearningConfig: early stopping. If the reward improvement is less than this threshold over early_stopping_steps, training is terminated early. 
Default is 0.05. - algorithm (str): Specifies which reinforcement learning algorithm to use. Currently, only "matd3" - (Multi-Agent Twin Delayed Deep Deterministic Policy Gradient) is implemented. Default is "matd3". + algorithm (str): Specifies which reinforcement learning algorithm to use. Options include "matd3" + (Multi-Agent Twin Delayed Deep Deterministic Policy Gradient), "maddpg", and "mappo". Default is "matd3". replay_buffer_size (int): The maximum number of transitions stored in the replay buffer for experience replay. Larger buffers allow for more diverse training samples. Default is 500000. gamma (float): The discount factor for future rewards, ranging from 0 to 1. Higher values give more weight to long-term rewards in decision-making. Default is 0.99. actor_architecture (str): The architecture of the neural networks used for the actors. Options include "mlp" (Multi-Layer Perceptron) and "lstm" (Long Short-Term Memory). Default is "mlp". - policy_delay (int): The frequency (in gradient steps) at which the actor policy is updated. - TD3 updates the critic more frequently than the actor to stabilize training. Default is 2. - noise_sigma (float): The standard deviation of the Ornstein-Uhlenbeck or Gaussian noise distribution - used to generate exploration noise added to actions. Default is 0.1. - noise_scale (int): The scale factor multiplied by the noise drawn from the distribution. - Larger values increase exploration. Default is 1. - noise_dt (int): The time step parameter for the Ornstein-Uhlenbeck process, which determines how - quickly the noise decays over time. Used for noise scheduling. Default is 1. - action_noise_schedule (str | None): Which action noise decay schedule to use. Currently only "linear" - decay is available, which linearly decreases exploration noise over training. Default is "linear". - tau (float): The soft update coefficient for updating target networks. Controls how slowly target - networks track the main networks. 
Smaller values mean slower updates. Default is 0.005. - target_policy_noise (float): The standard deviation of noise added to target policy actions during - critic updates. This smoothing helps prevent overfitting to narrow policy peaks. Default is 0.2. - target_noise_clip (float): The maximum absolute value for clipping the target policy noise. - Prevents the noise from being too large. Default is 0.5. + + off_policy (OffPolicyConfig): Nested configuration for off-policy algorithms (MATD3/MADDPG) hyperparameters. + on_policy (OnPolicyConfig): Nested configuration for on-policy algorithms (PPO/MAPPO) hyperparameters. """ @@ -856,24 +926,32 @@ class LearningConfig: replay_buffer_size: int = 50000 gamma: float = 0.99 actor_architecture: str = "mlp" - policy_delay: int = 2 - noise_sigma: float = 0.1 - noise_scale: int = 1 - noise_dt: int = 1 - action_noise_schedule: str | None = None - tau: float = 0.005 - target_policy_noise: float = 0.2 - target_noise_clip: float = 0.5 - ppo_clip_range: float | None = 0.1 - ppo_clip_range_vf: float | None = None - ppo_n_epochs: int = 10 - ppo_entropy_coef: float = 0.01 - ppo_vf_coef: float = 0.5 - ppo_gae_lambda: float = 0.95 + # Nested algorithm configurations + off_policy: OffPolicyConfig = field(default_factory=OffPolicyConfig) + on_policy: OnPolicyConfig = field(default_factory=OnPolicyConfig) def __post_init__(self): """Calculate defaults that depend on other fields and validate inputs.""" + # Convert nested dicts to dataclass instances if necessary + if isinstance(self.off_policy, dict): + self.off_policy = OffPolicyConfig(**self.off_policy) + if isinstance(self.on_policy, dict): + self.on_policy = OnPolicyConfig(**self.on_policy) + + for config in [self.off_policy, self.on_policy]: + # config.actor_architecture = self.actor_architecture + if config: + config.batch_size = self.batch_size + config.gamma = self.gamma + config.train_freq = self.train_freq + + self.off_policy.actor_architecture = self.actor_architecture + 
self.on_policy.actor_architecture = self.actor_architecture + self.off_policy.episodes_collecting_initial_experience = self.episodes_collecting_initial_experience + self.off_policy.gradient_steps = self.gradient_steps + self.off_policy.replay_buffer_size = self.replay_buffer_size + if self.early_stopping_steps is None: self.early_stopping_steps = int( self.training_episodes / self.validation_episodes_interval + 1 diff --git a/assume/reinforcement_learning/algorithms/base_algorithm.py b/assume/reinforcement_learning/algorithms/base_algorithm.py index 9ab5b258b..cf8a76d27 100644 --- a/assume/reinforcement_learning/algorithms/base_algorithm.py +++ b/assume/reinforcement_learning/algorithms/base_algorithm.py @@ -1,12 +1,17 @@ # SPDX-FileCopyrightText: ASSUME Developers # # SPDX-License-Identifier: AGPL-3.0-or-later - +import json import logging +import os import torch as th +from torch.optim import AdamW from assume.reinforcement_learning.algorithms import actor_architecture_aliases +from assume.reinforcement_learning.learning_utils import ( + transfer_weights, +) logger = logging.getLogger(__name__) @@ -89,4 +94,426 @@ def load_params(self, directory: str) -> None: """ Load learning params - abstract method to be implemented by the Learning Algorithm """ - pass + + +class A2CAlgorithm(RLAlgorithm): + """ + The base A2C model class for actor-critic algorithms. + Provides shared save/load/initialize functionality for MATD3, MADDPG, and MAPPO. + + Args: + learning_role (Learning Role object): Learning object + + Attributes: + uses_target_networks (bool): Whether this algorithm uses target networks. + TD3 and DDPG use target networks (True), PPO does not (False). + """ + + # Class attribute - subclasses can override + uses_target_networks: bool = True + + def __init__( + self, + learning_role, + ): + super().__init__(learning_role) + + def save_params(self, directory): + """ + Save the parameters of both actor and critic networks. 
+ + Args: + directory (str): The base directory for saving the parameters. + """ + self.save_critic_params(directory=f"{directory}/critics") + self.save_actor_params(directory=f"{directory}/actors") + + def save_critic_params(self, directory): + """ + Save the parameters of critic networks. + + Args: + directory (str): The base directory for saving the parameters. + """ + os.makedirs(directory, exist_ok=True) + for u_id, strategy in self.learning_role.rl_strats.items(): + obj = { + "critic": strategy.critics.state_dict(), + "critic_optimizer": strategy.critics.optimizer.state_dict(), + } + # Only save target critic if this algorithm uses target networks + if self.uses_target_networks: + obj["critic_target"] = strategy.target_critics.state_dict() + + path = f"{directory}/critic_{u_id}.pt" + th.save(obj, path) + + # record the exact order of u_ids and save it with critics to ensure that the same order is used when loading the parameters + u_id_list = [str(u) for u in self.learning_role.rl_strats.keys()] + mapping = {"u_id_order": u_id_list} + map_path = os.path.join(directory, "u_id_order.json") + with open(map_path, "w") as f: + json.dump(mapping, f, indent=2) + + def save_actor_params(self, directory): + """ + Save the parameters of actor networks. + + Args: + directory (str): The base directory for saving the parameters. + """ + os.makedirs(directory, exist_ok=True) + for u_id, strategy in self.learning_role.rl_strats.items(): + obj = { + "actor": strategy.actor.state_dict(), + "actor_optimizer": strategy.actor.optimizer.state_dict(), + } + # Only save target actor if this algorithm uses target networks + if self.uses_target_networks: + obj["actor_target"] = strategy.actor_target.state_dict() + + path = f"{directory}/actor_{u_id}.pt" + th.save(obj, path) + + def load_params(self, directory: str) -> None: + """ + Load the parameters of both actor and critic networks. + + Args: + directory (str): The directory from which the parameters should be loaded. 
+ """ + self.load_critic_params(directory) + self.load_actor_params(directory) + + def load_critic_params(self, directory: str) -> None: + """ + Load critic, target_critic, and optimizer states for each agent strategy. + If agent count differs between saved and current model, performs weight transfer for both networks. + + Args: + directory (str): The directory from which the parameters should be loaded. + """ + logger.info("Loading critic parameters...") + + if not os.path.exists(directory): + logger.warning( + "Specified directory does not exist. Using randomly initialized critics." + ) + return + + map_path = os.path.join(directory, "critics", "u_id_order.json") + if os.path.exists(map_path): + # read the saved order of u_ids from critics save directory + with open(map_path) as f: + loaded_id_order = json.load(f).get("u_id_order", []) + else: + logger.warning("No u_id_order.json: assuming same order as current.") + loaded_id_order = [str(u) for u in self.learning_role.rl_strats.keys()] + + new_id_order = [str(u) for u in self.learning_role.rl_strats.keys()] + direct_load = loaded_id_order == new_id_order + + if direct_load: + logger.info("Agents order unchanged. Loading critic weights directly.") + else: + logger.info( + f"Agents length and/or order mismatch: n_old={len(loaded_id_order)}, n_new={len(new_id_order)}. Transferring weights for critics and target critics." 
+ ) + + for u_id, strategy in self.learning_role.rl_strats.items(): + critic_path = os.path.join(directory, "critics", f"critic_{u_id}.pt") + if not os.path.exists(critic_path): + logger.warning(f"No saved critic for {u_id}; skipping.") + continue + + try: + critic_params = th.load(critic_path, weights_only=True) + + # Required keys depend on whether algorithm uses target networks + required_keys = ["critic", "critic_optimizer"] + if self.uses_target_networks: + required_keys.append("critic_target") + + for key in required_keys: + if key not in critic_params: + logger.warning( + f"Missing {key} in critic params for {u_id}; skipping." + ) + continue + + if direct_load: + strategy.critics.load_state_dict(critic_params["critic"]) + strategy.critics.optimizer.load_state_dict( + critic_params["critic_optimizer"] + ) + # Only load target critic if this algorithm uses target networks + if self.uses_target_networks and "critic_target" in critic_params: + strategy.target_critics.load_state_dict( + critic_params["critic_target"] + ) + logger.debug(f"Loaded critic for {u_id} directly.") + else: + critic_weights = transfer_weights( + model=strategy.critics, + loaded_state=critic_params["critic"], + loaded_id_order=loaded_id_order, + new_id_order=new_id_order, + obs_base=strategy.obs_dim, + act_dim=strategy.act_dim, + unique_obs=strategy.unique_obs_dim, + ) + + strategy.critics.load_state_dict(critic_weights) + + # Only transfer target critic weights if this algorithm uses target networks + if self.uses_target_networks and "critic_target" in critic_params: + target_critic_weights = transfer_weights( + model=strategy.target_critics, + loaded_state=critic_params["critic_target"], + loaded_id_order=loaded_id_order, + new_id_order=new_id_order, + obs_base=strategy.obs_dim, + act_dim=strategy.act_dim, + unique_obs=strategy.unique_obs_dim, + ) + if target_critic_weights is not None: + strategy.target_critics.load_state_dict(target_critic_weights) + + logger.debug(f"Critic weights 
transferred for {u_id}.") + + except Exception as e: + logger.warning(f"Failed to load critic for {u_id}: {e}") + + def load_actor_params(self, directory: str) -> None: + """ + Load the parameters of actor networks from a specified directory. + + Args: + directory (str): The directory from which the parameters should be loaded. + """ + logger.info("Loading actor parameters...") + if not os.path.exists(directory): + logger.warning( + "Specified directory for loading the actors does not exist! Starting with randomly initialized values!" + ) + return + + for u_id, strategy in self.learning_role.rl_strats.items(): + try: + actor_params = self.load_obj( + directory=f"{directory}/actors/actor_{str(u_id)}.pt" + ) + strategy.actor.load_state_dict(actor_params["actor"]) + strategy.actor.optimizer.load_state_dict( + actor_params["actor_optimizer"] + ) + + # Only load target actor if this algorithm uses target networks + if self.uses_target_networks and "actor_target" in actor_params: + strategy.actor_target.load_state_dict(actor_params["actor_target"]) + + # add a tag to the strategy to indicate that the actor was loaded + strategy.actor.loaded = True + except Exception: + logger.warning(f"No actor values loaded for agent {u_id}") + + def initialize_policy(self, actors_and_critics: dict = None) -> None: + """ + Create actor and critic networks for reinforcement learning. + + If `actors_and_critics` is None, this method creates new actor and critic networks. + If `actors_and_critics` is provided, it assigns existing networks to the respective attributes. + + Args: + actors_and_critics (dict): The actor and critic networks to be assigned. 
+ + """ + if actors_and_critics is None: + self.check_strategy_dimensions() + self.create_actors() + self.create_critics() + + else: + for u_id, strategy in self.learning_role.rl_strats.items(): + strategy.actor = actors_and_critics["actors"][u_id] + strategy.critics = actors_and_critics["critics"][u_id] + + if self.uses_target_networks: + strategy.actor_target = actors_and_critics["actor_targets"][u_id] + strategy.target_critics = actors_and_critics["target_critics"][u_id] + + self.obs_dim = actors_and_critics["obs_dim"] + self.act_dim = actors_and_critics["act_dim"] + self.unique_obs_dim = actors_and_critics["unique_obs_dim"] + + def check_strategy_dimensions(self) -> None: + """ + Iterate over all learning strategies and check if the dimensions of observations and actions are the same. + Also check if the unique observation dimensions are the same. If not, raise a ValueError. + This is important for centralized critic algorithms, as it uses a centralized critic that requires consistent dimensions across all agents. + """ + foresight_list = [] + obs_dim_list = [] + act_dim_list = [] + unique_obs_dim_list = [] + num_timeseries_obs_dim_list = [] + + for strategy in self.learning_role.rl_strats.values(): + foresight_list.append(strategy.foresight) + obs_dim_list.append(strategy.obs_dim) + act_dim_list.append(strategy.act_dim) + unique_obs_dim_list.append(strategy.unique_obs_dim) + num_timeseries_obs_dim_list.append(strategy.num_timeseries_obs_dim) + + if len(set(foresight_list)) > 1: + raise ValueError( + f"All foresight values must be the same for all RL agents. The defined learning strategies have the following foresight values: {foresight_list}" + ) + else: + self.foresight = foresight_list[0] + + if len(set(act_dim_list)) > 1: + raise ValueError( + f"All action dimensions must be the same for all RL agents. 
The defined learning strategies have the following action dimensions: {act_dim_list}" + ) + else: + self.act_dim = act_dim_list[0] + + if len(set(unique_obs_dim_list)) > 1: + raise ValueError( + f"All unique_obs_dim values must be the same for all RL agents. The defined learning strategies have the following unique_obs_dim values: {unique_obs_dim_list}" + ) + else: + self.unique_obs_dim = unique_obs_dim_list[0] + + if len(set(num_timeseries_obs_dim_list)) > 1: + raise ValueError( + f"All num_timeseries_obs_dim values must be the same for all RL agents. The defined learning strategies have the following num_timeseries_obs_dim values: {num_timeseries_obs_dim_list}" + ) + else: + self.num_timeseries_obs_dim = num_timeseries_obs_dim_list[0] + + # Check last, as other cases should fail before! + if len(set(obs_dim_list)) > 1: + raise ValueError( + f"All observation dimensions must be the same for all RL agents. The defined learning strategies have the following observation dimensions: {obs_dim_list}" + ) + else: + self.obs_dim = obs_dim_list[0] + + def create_actors(self) -> None: + """ + Create actor networks for reinforcement learning for each unit strategy. + + This method initializes actor networks and their corresponding target networks for each unit strategy. + The actors are designed to map observations to action probabilities in a reinforcement learning setting. + + Note: + The observation dimension need to be the same, due to the centralized critic that all actors share. + If you have units with different observation dimensions. They need to have different critics and hence learning roles. 
+ + """ + + for strategy in self.learning_role.rl_strats.values(): + strategy.actor = self.actor_architecture_class( + obs_dim=self.obs_dim, + act_dim=self.act_dim, + float_type=self.float_type, + unique_obs_dim=self.unique_obs_dim, + num_timeseries_obs_dim=self.num_timeseries_obs_dim, + ).to(self.device) + + strategy.actor_target = self.actor_architecture_class( + obs_dim=self.obs_dim, + act_dim=self.act_dim, + float_type=self.float_type, + unique_obs_dim=self.unique_obs_dim, + num_timeseries_obs_dim=self.num_timeseries_obs_dim, + ).to(self.device) + + strategy.actor_target.load_state_dict(strategy.actor.state_dict()) + strategy.actor_target.train(mode=False) + + strategy.actor.optimizer = AdamW( + strategy.actor.parameters(), + lr=self.learning_role.calc_lr_from_progress( + 1 + ), # 1=100% of simulation remaining, uses learning_rate from config as starting point + ) + + strategy.actor.loaded = False + + def create_critics(self) -> None: + """ + Create critic networks for reinforcement learning. + + This method initializes critic networks for each agent in the reinforcement learning setup. + + Note: + The observation dimension need to be the same, due to the centralized critic that all actors share. + If you have units with different observation dimensions. They need to have different critics and hence learning roles. 
+ """ + n_agents = len(self.learning_role.rl_strats) + + for strategy in self.learning_role.rl_strats.values(): + strategy.critics = self.critic_architecture_class( + n_agents=n_agents, + obs_dim=self.obs_dim, + act_dim=self.act_dim, + unique_obs_dim=self.unique_obs_dim, + float_type=self.float_type, + ).to(self.device) + + strategy.target_critics = self.critic_architecture_class( + n_agents=n_agents, + obs_dim=self.obs_dim, + act_dim=self.act_dim, + unique_obs_dim=self.unique_obs_dim, + float_type=self.float_type, + ).to(self.device) + + strategy.target_critics.load_state_dict(strategy.critics.state_dict()) + strategy.target_critics.train(mode=False) + + strategy.critics.optimizer = AdamW( + strategy.critics.parameters(), + lr=self.learning_role.calc_lr_from_progress( + 1 + ), # 1 = 100% of simulation remaining, uses learning_rate from config as starting point + ) + + def extract_policy(self) -> dict: + """ + Extract actor and critic networks. + + This method extracts the actor and critic networks associated with each learning strategy and organizes them into a + dictionary structure. The extracted networks include actors, actor_targets, critics, and target_critics. The resulting + dictionary is typically used for saving and sharing these networks. + + Returns: + dict: The extracted actor and critic networks. 
+ """ + actors = {} + actor_targets = {} + + critics = {} + target_critics = {} + + for u_id, strategy in self.learning_role.rl_strats.items(): + actors[u_id] = strategy.actor + actor_targets[u_id] = strategy.actor_target + + critics[u_id] = strategy.critics + target_critics[u_id] = strategy.target_critics + + actors_and_critics = { + "actors": actors, + "actor_targets": actor_targets, + "critics": critics, + "target_critics": target_critics, + "obs_dim": self.obs_dim, + "act_dim": self.act_dim, + "unique_obs_dim": self.unique_obs_dim, + } + + return actors_and_critics diff --git a/assume/reinforcement_learning/algorithms/maddpg.py b/assume/reinforcement_learning/algorithms/maddpg.py index a49ac7c83..24517527d 100644 --- a/assume/reinforcement_learning/algorithms/maddpg.py +++ b/assume/reinforcement_learning/algorithms/maddpg.py @@ -2,59 +2,43 @@ # # SPDX-License-Identifier: AGPL-3.0-or-later -""" -MADDPG - Multi-Agent Deep Deterministic Policy Gradient - -This module implements the DDPG algorithm for multi-agent settings (MADDPG). 
- -DDPG vs TD3 Comparison: ------------------------ -| Feature | DDPG (this) | TD3 | -|-------------------|-----------------|------------------| -| Critics | 1 (single) | 2 (twin) | -| Policy Updates | Every step | Delayed (1:2) | -| Target Noise | No | Yes (smoothing) | -| Overestimation | Can occur | Reduced | -| Complexity | Simpler | More complex | - -MADDPG extends DDPG to multi-agent settings using: -- Centralized Training: Critic sees all agents' observations and actions -- Decentralized Execution: Each actor only uses its own observation -""" - -import json import logging -import os import torch as th from torch.nn import functional as F -from torch.optim import AdamW -from assume.reinforcement_learning.algorithms.base_algorithm import RLAlgorithm +from assume.reinforcement_learning.algorithms.base_algorithm import A2CAlgorithm from assume.reinforcement_learning.learning_utils import ( polyak_update, - transfer_weights, ) from assume.reinforcement_learning.neural_network_architecture import CriticDDPG logger = logging.getLogger(__name__) -class DDPG(RLAlgorithm): +class DDPG(A2CAlgorithm): """ Deep Deterministic Policy Gradient (DDPG) Algorithm. - - Extended to multi-agent settings (MADDPG) for electricity market simulations. - - Key Features: - - Single critic network (vs twin critics in TD3) - - Updates actor every step (no policy delay) - - No target action smoothing noise - - Centralized training with decentralized execution + + An off-policy actor-critic algorithm using deterministic policy gradients. + It uses a single critic network and updates the actor at every training step. + Target networks are updated using Polyak averaging to stabilize the learning + process. It is designed for environments with continuous action + spaces by combining the benefits of Q-learning and policy gradients. It + utilizes a replay buffer to break correlations between consecutive samples + and improve sample efficiency. 
+ + Args: + learning_role (LearningRole): The central learning role managing the agents and buffer. """ def __init__(self, learning_role): - """Initialize DDPG algorithm.""" + """ + Initialize DDPG algorithm. + + Args: + learning_role (LearningRole): The learning role object. + """ super().__init__(learning_role) # Gradient step counter @@ -63,337 +47,15 @@ def __init__(self, learning_role): # Gradient clipping threshold self.grad_clip_norm = 1.0 - # ========================================================================= - # CHECKPOINT SAVING METHODS - # ========================================================================= - - def save_params(self, directory: str) -> None: - """Save all actor and critic network parameters to disk.""" - self.save_critic_params(directory=f"{directory}/critics") - self.save_actor_params(directory=f"{directory}/actors") - - def save_critic_params(self, directory: str) -> None: - """Save critic network parameters for all agents.""" - os.makedirs(directory, exist_ok=True) - - for u_id, strategy in self.learning_role.rl_strats.items(): - obj = { - "critic": strategy.critic.state_dict(), - "critic_target": strategy.target_critic.state_dict(), - "critic_optimizer": strategy.critic.optimizer.state_dict(), - } - path = f"{directory}/critic_{u_id}.pt" - th.save(obj, path) - - # Save unit ID order for weight transfer - u_id_list = [str(u) for u in self.learning_role.rl_strats.keys()] - mapping = {"u_id_order": u_id_list} - map_path = os.path.join(directory, "u_id_order.json") - with open(map_path, "w") as f: - json.dump(mapping, f, indent=2) - - def save_actor_params(self, directory: str) -> None: - """Save actor network parameters for all agents.""" - os.makedirs(directory, exist_ok=True) - - for u_id, strategy in self.learning_role.rl_strats.items(): - obj = { - "actor": strategy.actor.state_dict(), - "actor_target": strategy.actor_target.state_dict(), - "actor_optimizer": strategy.actor.optimizer.state_dict(), - } - path = 
f"{directory}/actor_{u_id}.pt" - th.save(obj, path) - - # ========================================================================= - # CHECKPOINT LOADING METHODS - # ========================================================================= - - def load_params(self, directory: str) -> None: - """Load all actor and critic parameters from disk.""" - self.load_critic_params(directory) - self.load_actor_params(directory) - - def load_critic_params(self, directory: str) -> None: - """Load critic parameters with support for agent count changes.""" - logger.info("Loading critic parameters...") - - if not os.path.exists(directory): - logger.warning( - "Specified directory does not exist. Using randomly initialized critics." - ) - return - - # Load saved unit ID order - map_path = os.path.join(directory, "critics", "u_id_order.json") - if os.path.exists(map_path): - with open(map_path) as f: - loaded_id_order = json.load(f).get("u_id_order", []) - else: - logger.warning("No u_id_order.json: assuming same order as current.") - loaded_id_order = [str(u) for u in self.learning_role.rl_strats.keys()] - - new_id_order = [str(u) for u in self.learning_role.rl_strats.keys()] - direct_load = loaded_id_order == new_id_order - - if direct_load: - logger.info("Agents order unchanged. Loading critic weights directly.") - else: - logger.info( - f"Agents mismatch: n_old={len(loaded_id_order)}, " - f"n_new={len(new_id_order)}. Transferring weights." 
- ) - - for u_id, strategy in self.learning_role.rl_strats.items(): - critic_path = os.path.join(directory, "critics", f"critic_{u_id}.pt") - if not os.path.exists(critic_path): - logger.warning(f"No saved critic for {u_id}; skipping.") - continue - - try: - critic_params = th.load(critic_path, weights_only=True) - - for key in ("critic", "critic_target", "critic_optimizer"): - if key not in critic_params: - logger.warning(f"Missing {key} in critic params for {u_id}.") - continue - - if direct_load: - strategy.critic.load_state_dict(critic_params["critic"]) - strategy.target_critic.load_state_dict(critic_params["critic_target"]) - strategy.critic.optimizer.load_state_dict(critic_params["critic_optimizer"]) - else: - # Weight transfer for agent count changes - critic_weights = transfer_weights( - model=strategy.critic, - loaded_state=critic_params["critic"], - loaded_id_order=loaded_id_order, - new_id_order=new_id_order, - obs_base=strategy.obs_dim, - act_dim=strategy.act_dim, - unique_obs=strategy.unique_obs_dim, - ) - target_critic_weights = transfer_weights( - model=strategy.target_critic, - loaded_state=critic_params["critic_target"], - loaded_id_order=loaded_id_order, - new_id_order=new_id_order, - obs_base=strategy.obs_dim, - act_dim=strategy.act_dim, - unique_obs=strategy.unique_obs_dim, - ) - - if critic_weights is None or target_critic_weights is None: - logger.warning(f"Weights transfer failed for {u_id}.") - continue - - strategy.critic.load_state_dict(critic_weights) - strategy.target_critic.load_state_dict(target_critic_weights) - - except Exception as e: - logger.warning(f"Failed to load critic for {u_id}: {e}") - - def load_actor_params(self, directory: str) -> None: - """Load actor network parameters from disk.""" - logger.info("Loading actor parameters...") - - if not os.path.exists(directory): - logger.warning( - "Specified directory for actors does not exist! " - "Starting with randomly initialized values!" 
- ) - return - - for u_id, strategy in self.learning_role.rl_strats.items(): - try: - actor_params = self.load_obj( - directory=f"{directory}/actors/actor_{str(u_id)}.pt" - ) - - strategy.actor.load_state_dict(actor_params["actor"]) - strategy.actor_target.load_state_dict(actor_params["actor_target"]) - strategy.actor.optimizer.load_state_dict(actor_params["actor_optimizer"]) - strategy.actor.loaded = True - - except Exception: - logger.warning(f"No actor values loaded for agent {u_id}") - - # ========================================================================= - # NETWORK INITIALIZATION - # ========================================================================= - - def initialize_policy(self, actors_and_critics: dict = None) -> None: - """ - Initialize actor and critic networks for all agents. - - Args: - actors_and_critics: Optional pre-existing networks to assign - """ - if actors_and_critics is None: - self.check_strategy_dimensions() - self.create_actors() - self.create_critics() - else: - for u_id, strategy in self.learning_role.rl_strats.items(): - strategy.actor = actors_and_critics["actors"][u_id] - strategy.actor_target = actors_and_critics["actor_targets"][u_id] - strategy.critic = actors_and_critics["critics"][u_id] - strategy.target_critic = actors_and_critics["target_critics"][u_id] - - self.obs_dim = actors_and_critics["obs_dim"] - self.act_dim = actors_and_critics["act_dim"] - self.unique_obs_dim = actors_and_critics["unique_obs_dim"] - - def check_strategy_dimensions(self) -> None: - """Validate that all agents have consistent dimensions.""" - obs_dim_list = [] - act_dim_list = [] - unique_obs_dim_list = [] - num_timeseries_obs_dim_list = [] - - for strategy in self.learning_role.rl_strats.values(): - obs_dim_list.append(strategy.obs_dim) - act_dim_list.append(strategy.act_dim) - unique_obs_dim_list.append(strategy.unique_obs_dim) - num_timeseries_obs_dim_list.append(strategy.num_timeseries_obs_dim) - - if len(set(obs_dim_list)) > 1: - raise 
ValueError( - f"All observation dimensions must be the same. " - f"Got: {obs_dim_list}" - ) - else: - self.obs_dim = obs_dim_list[0] - - if len(set(act_dim_list)) > 1: - raise ValueError( - f"All action dimensions must be the same. " - f"Got: {act_dim_list}" - ) - else: - self.act_dim = act_dim_list[0] - - if len(set(unique_obs_dim_list)) > 1: - raise ValueError( - f"All unique_obs_dim values must be the same. " - f"Got: {unique_obs_dim_list}" - ) - else: - self.unique_obs_dim = unique_obs_dim_list[0] - - if len(set(num_timeseries_obs_dim_list)) > 1: - raise ValueError( - f"All num_timeseries_obs_dim values must be the same. " - f"Got: {num_timeseries_obs_dim_list}" - ) - else: - self.num_timeseries_obs_dim = num_timeseries_obs_dim_list[0] - - def create_actors(self) -> None: - """Create actor (policy) networks for all agents.""" - for strategy in self.learning_role.rl_strats.values(): - # Create main actor network - strategy.actor = self.actor_architecture_class( - obs_dim=self.obs_dim, - act_dim=self.act_dim, - float_type=self.float_type, - unique_obs_dim=self.unique_obs_dim, - num_timeseries_obs_dim=self.num_timeseries_obs_dim, - ).to(self.device) - - # Create target actor network - strategy.actor_target = self.actor_architecture_class( - obs_dim=self.obs_dim, - act_dim=self.act_dim, - float_type=self.float_type, - unique_obs_dim=self.unique_obs_dim, - num_timeseries_obs_dim=self.num_timeseries_obs_dim, - ).to(self.device) - - # Initialize target with same weights - strategy.actor_target.load_state_dict(strategy.actor.state_dict()) - strategy.actor_target.train(mode=False) - - # Create optimizer - strategy.actor.optimizer = AdamW( - strategy.actor.parameters(), - lr=self.learning_role.calc_lr_from_progress(1), - ) - - strategy.actor.loaded = False - - def create_critics(self) -> None: - """ - Create critic (Q-function) networks for all agents. - - Key difference from TD3: Uses single critic instead of twin critics. 
- """ - n_agents = len(self.learning_role.rl_strats) - - for strategy in self.learning_role.rl_strats.values(): - # Create main critic (single Q-network, not twin) - strategy.critic = CriticDDPG( - n_agents=n_agents, - obs_dim=self.obs_dim, - act_dim=self.act_dim, - unique_obs_dim=self.unique_obs_dim, - float_type=self.float_type, - ).to(self.device) - - # Create target critic - strategy.target_critic = CriticDDPG( - n_agents=n_agents, - obs_dim=self.obs_dim, - act_dim=self.act_dim, - unique_obs_dim=self.unique_obs_dim, - float_type=self.float_type, - ).to(self.device) - - # Initialize target with same weights - strategy.target_critic.load_state_dict(strategy.critic.state_dict()) - strategy.target_critic.train(mode=False) - - # Create optimizer - strategy.critic.optimizer = AdamW( - strategy.critic.parameters(), - lr=self.learning_role.calc_lr_from_progress(1), - ) - - def extract_policy(self) -> dict: - """Extract all actor and critic networks into a dictionary.""" - actors = {} - actor_targets = {} - critics = {} - target_critics = {} - - for u_id, strategy in self.learning_role.rl_strats.items(): - actors[u_id] = strategy.actor - actor_targets[u_id] = strategy.actor_target - critics[u_id] = strategy.critic - target_critics[u_id] = strategy.target_critic - - return { - "actors": actors, - "actor_targets": actor_targets, - "critics": critics, - "target_critics": target_critics, - "obs_dim": self.obs_dim, - "act_dim": self.act_dim, - "unique_obs_dim": self.unique_obs_dim, - } - - # ========================================================================= - # CORE TRAINING: POLICY UPDATE - # ========================================================================= + # Define the critic architecture class for DDPG (single critic) + self.critic_architecture_class = CriticDDPG def update_policy(self) -> None: """ Update actor and critic networks using the DDPG algorithm. - - Key differences from TD3: - 1. Uses single critic (no twin Q-learning) - 2. 
Updates actor every step (no policy delay) - 3. No target action smoothing noise + Performs sampling from replay buffer. Updates the critic (MSE Loss). + Updates the Actor (Policy Gradient). Updates the target networks using + polyak update. """ logger.debug("Updating Policy (MADDPG/DDPG)") @@ -423,7 +85,7 @@ def update_policy(self) -> None: for strategy in strategies: self.update_learning_rate( - [strategy.critic.optimizer, strategy.actor.optimizer], + [strategy.critics.optimizer, strategy.actor.optimizer], learning_rate=learning_rate, ) strategy.action_noise.update_noise_decay(updated_noise_decay) @@ -468,13 +130,13 @@ def update_policy(self) -> None: # CRITIC UPDATE # ================================================================= for strategy in strategies: - strategy.critic.optimizer.zero_grad(set_to_none=True) + strategy.critics.optimizer.zero_grad(set_to_none=True) total_critic_loss = 0.0 for i, strategy in enumerate(strategies): - critic = strategy.critic - critic_target = strategy.target_critic + critic = strategy.critics + critic_target = strategy.target_critics # Build centralized observation other_unique_obs = th.cat( @@ -522,12 +184,12 @@ def update_policy(self) -> None: total_critic_loss.backward() for strategy in strategies: - parameters = list(strategy.critic.parameters()) + parameters = list(strategy.critics.parameters()) max_grad_norm = max(p.grad.norm() for p in parameters) total_norm = th.nn.utils.clip_grad_norm_( parameters, max_norm=self.grad_clip_norm ) - strategy.critic.optimizer.step() + strategy.critics.optimizer.step() unit_params[step][strategy.unit_id]["critic_total_grad_norm"] = total_norm unit_params[step][strategy.unit_id]["critic_max_grad_norm"] = max_grad_norm @@ -542,7 +204,7 @@ def update_policy(self) -> None: for i, strategy in enumerate(strategies): actor = strategy.actor - critic = strategy.critic + critic = strategy.critics state_i = states[:, i, :] action_i = actor(state_i) @@ -594,20 +256,20 @@ def update_policy(self) -> 
None: all_target_actor_params = [] for strategy in strategies: - all_critic_params.extend(strategy.critic.parameters()) - all_target_critic_params.extend(strategy.target_critic.parameters()) + all_critic_params.extend(strategy.critics.parameters()) + all_target_critic_params.extend(strategy.target_critics.parameters()) all_actor_params.extend(strategy.actor.parameters()) all_target_actor_params.extend(strategy.actor_target.parameters()) polyak_update( all_critic_params, all_target_critic_params, - self.learning_config.tau, + self.learning_config.off_policy.tau, ) polyak_update( all_actor_params, all_target_actor_params, - self.learning_config.tau, + self.learning_config.off_policy.tau, ) # Log metrics diff --git a/assume/reinforcement_learning/algorithms/mappo.py b/assume/reinforcement_learning/algorithms/mappo.py index f3fa7865a..da7952ed1 100644 --- a/assume/reinforcement_learning/algorithms/mappo.py +++ b/assume/reinforcement_learning/algorithms/mappo.py @@ -1,6 +1,7 @@ # SPDX-FileCopyrightText: ASSUME Developers # # SPDX-License-Identifier: AGPL-3.0-or-later + import json import logging import os @@ -10,214 +11,114 @@ from torch.nn import functional as F from torch.optim import AdamW -from assume.reinforcement_learning.algorithms.base_algorithm import RLAlgorithm -from assume.reinforcement_learning.learning_utils import polyak_update +from assume.reinforcement_learning.algorithms.base_algorithm import A2CAlgorithm from assume.reinforcement_learning.neural_network_architecture import ( ActorPPO, - CriticPPO + CriticPPO, + LSTMActorPPO, ) -from assume.reinforcement_learning.rollout_buffer import RolloutBuffer logger = logging.getLogger(__name__) -class PPO(RLAlgorithm): + +class PPO(A2CAlgorithm): """ Proximal Policy Optimization (PPO) Algorithm. + + A policy gradient method that alternates between + sampling data through interaction with the environment, + and optimizing a surrogate objective function using + stochastic gradient ascent. 
It is an on-policy algorithm. + + Args: + learning_role (LearningRole): The central learning role. + clip_range (float): Clipping parameter epsilon. + clip_range_vf (float, optional): Clipping parameter for the value function. + If None, value function is not clipped. + n_epochs (int): Number of epochs to optimize the surrogate loss per update. + entropy_coef (float): Entropy coefficient for the loss calculation. + vf_coef (float): Value function coefficient for the loss calculation. + max_grad_norm (float): The maximum value for the gradient clipping. """ def __init__( - self, + self, learning_role, - clip_range = 0.1, # Epsilon clipping constant preventing the policy from changing too much in a single update. - clip_range_vf = 0.1, # preventing the value function from changing too much from previous estimates - n_epochs = 30, # sample efficiency - entropy_coef = 0.02, # encourages exploration by rewarding "randomness" - vf_coef = 1.0, # balances the importance of training the Critic and training the Actor - max_grad_norm = 0.5, # Gradient clipping + clip_range=0.1, + clip_range_vf=0.1, + n_epochs=30, + entropy_coef=0.02, + vf_coef=1.0, + max_grad_norm=0.5, ): - """Initialize PPO algorithm.""" + """ + Initialize PPO algorithm with specific hyperparameters. + + Args: + learning_role (LearningRole): The learning role object. + clip_range (float, optional): The epsilon parameter for PPO clipping. + clip_range_vf (float, optional): The epsilon parameter for value function clipping. + n_epochs (int, optional): Number of optimization epochs per rollout. + entropy_coef (float, optional): Coefficient for entropy term in loss. + vf_coef (float, optional): Coefficient for value function term in loss. + max_grad_norm (float, optional): Maximum gradient norm for clipping. 
+ """ super().__init__(learning_role) + # Set PPO-specific architecture classes + self.actor_architecture_class = ActorPPO + self.critic_architecture_class = CriticPPO + config = self.learning_config - - self.clip_range = clip_range if clip_range is not None else getattr(config, 'ppo_clip_range', 0.2) - self.clip_range_vf = clip_range_vf if clip_range_vf is not None else getattr(config, 'ppo_clip_range_vf', None) - self.n_epochs = n_epochs if n_epochs is not None else getattr(config, 'ppo_n_epochs', 10) - self.entropy_coef = entropy_coef if entropy_coef is not None else getattr(config, 'ppo_entropy_coef', 0.01) - self.vf_coef = vf_coef if vf_coef is not None else getattr(config, 'ppo_vf_coef', 0.5) - self.max_grad_norm = max_grad_norm + ppo_config = getattr(config, "ppo", None) + + # Use PPO-specific config if available, otherwise use defaults + self.clip_range = clip_range if clip_range is not None else getattr(ppo_config, "clip_ratio", 0.2) + self.clip_range_vf = clip_range_vf if clip_range_vf is not None else getattr(ppo_config, "clip_range_vf", None) + self.n_epochs = n_epochs if n_epochs is not None else getattr(ppo_config, "n_epochs", 10) + self.entropy_coef = entropy_coef if entropy_coef is not None else getattr(ppo_config, "entropy_coef", 0.01) + self.vf_coef = vf_coef if vf_coef is not None else getattr(ppo_config, "vf_coef", 0.5) + self.max_grad_norm = max_grad_norm if max_grad_norm is not None else getattr(ppo_config, "max_grad_norm", 0.5) # Update counter self.n_updates = 0 - def save_params(self, directory: str) -> None: - """Save all actor and critic network parameters to disk.""" - self.save_critic_params(directory=f"{directory}/critics") - self.save_actor_params(directory=f"{directory}/actors") - - def save_critic_params(self, directory: str) -> None: - """Save value network parameters for all agents.""" - os.makedirs(directory, exist_ok=True) - - for u_id, strategy in self.learning_role.rl_strats.items(): - obj = { - "critic": 
strategy.critic.state_dict(), - "critic_optimizer": strategy.critic.optimizer.state_dict(), - } - path = f"{directory}/critic_{u_id}.pt" - th.save(obj, path) - - # Save unit ID order - u_id_list = [str(u) for u in self.learning_role.rl_strats.keys()] - mapping = {"u_id_order": u_id_list} - map_path = os.path.join(directory, "u_id_order.json") - with open(map_path, "w") as f: - json.dump(mapping, f, indent=2) - - def save_actor_params(self, directory: str) -> None: - """Save actor network parameters for all agents.""" - os.makedirs(directory, exist_ok=True) - - for u_id, strategy in self.learning_role.rl_strats.items(): - obj = { - "actor": strategy.actor.state_dict(), - "actor_optimizer": strategy.actor.optimizer.state_dict(), - } - path = f"{directory}/actor_{u_id}.pt" - th.save(obj, path) + # ========================================================================= + # CHECKPOINT SAVING METHODS + # ========================================================================= - def load_params(self, directory: str) -> None: - """Load all actor and critic parameters from disk.""" - self.load_critic_params(directory) - self.load_actor_params(directory) + uses_target_networks: bool = False - def load_critic_params(self, directory: str) -> None: - """Load critic parameters.""" - logger.info("Loading PPO critic parameters...") + # Note: save_params, save_critic_params, save_actor_params, load_params, + # load_critic_params, load_actor_params, initialize_policy are inherited from A2CAlgorithm - if not os.path.exists(directory): - logger.warning( - "Specified directory does not exist. Using randomly initialized critics." 
- ) - return - for u_id, strategy in self.learning_role.rl_strats.items(): - critic_path = os.path.join(directory, "critics", f"critic_{u_id}.pt") - if not os.path.exists(critic_path): - logger.warning(f"No saved critic for {u_id}; skipping.") - continue - - try: - critic_params = th.load(critic_path, weights_only=True) - strategy.critic.load_state_dict(critic_params["critic"]) - strategy.critic.optimizer.load_state_dict(critic_params["critic_optimizer"]) - except Exception as e: - logger.warning(f"Failed to load critic for {u_id}: {e}") - - def load_actor_params(self, directory: str) -> None: - """Load actor network parameters from disk.""" - logger.info("Loading PPO actor parameters...") - - if not os.path.exists(directory): - logger.warning( - "Specified directory for actors does not exist! " - "Starting with randomly initialized values!" - ) - return - for u_id, strategy in self.learning_role.rl_strats.items(): - try: - actor_params = self.load_obj( - directory=f"{directory}/actors/actor_{str(u_id)}.pt" - ) - - strategy.actor.load_state_dict(actor_params["actor"]) - strategy.actor.optimizer.load_state_dict(actor_params["actor_optimizer"]) - strategy.actor.loaded = True - - except Exception: - logger.warning(f"No actor values loaded for agent {u_id}") - - def initialize_policy(self, actors_and_critics: dict = None) -> None: + def create_actors(self) -> None: """ - Initialize actor and critic networks for all agents. - - Args: - actors_and_critics: Optional pre-existing networks to assign + Creates stochastic actor networks for all agents. + Initializes the ActorPPO network and its optimizer for each agent strategy. 
""" - if actors_and_critics is None: - self.check_strategy_dimensions() - self.create_actors() - self.create_critics() - else: - for u_id, strategy in self.learning_role.rl_strats.items(): - strategy.actor = actors_and_critics["actors"][u_id] - strategy.critic = actors_and_critics["critics"][u_id] - - self.obs_dim = actors_and_critics["obs_dim"] - self.act_dim = actors_and_critics["act_dim"] - self.unique_obs_dim = actors_and_critics["unique_obs_dim"] - - def check_strategy_dimensions(self) -> None: - """Validate that all agents have consistent dimensions.""" - foresight_list = [] - obs_dim_list = [] - act_dim_list = [] - unique_obs_dim_list = [] - num_timeseries_obs_dim_list = [] - - for strategy in self.learning_role.rl_strats.values(): - foresight_list.append(strategy.foresight) - obs_dim_list.append(strategy.obs_dim) - act_dim_list.append(strategy.act_dim) - unique_obs_dim_list.append(strategy.unique_obs_dim) - num_timeseries_obs_dim_list.append(strategy.num_timeseries_obs_dim) - - if len(set(foresight_list)) > 1: - raise ValueError( - f"All foresight values must be the same for all RL agents. THe defined learning strategies have the following foresight values: {foresight_list}" - ) - else: - self.foresight = foresight_list[0] - - if len(set(obs_dim_list)) > 1: - raise ValueError( - f"All observation dimensions must be the same. Got: {obs_dim_list}" - ) - else: - self.obs_dim = obs_dim_list[0] - - if len(set(act_dim_list)) > 1: - raise ValueError( - f"All action dimensions must be the same. Got: {act_dim_list}" - ) - else: - self.act_dim = act_dim_list[0] - - if len(set(unique_obs_dim_list)) > 1: - raise ValueError( - f"All unique_obs_dim values must be the same. Got: {unique_obs_dim_list}" - ) - else: - self.unique_obs_dim = unique_obs_dim_list[0] - - if len(set(num_timeseries_obs_dim_list)) > 1: - raise ValueError( - f"All num_timeseries_obs_dim values must be the same. 
" - f"Got: {num_timeseries_obs_dim_list}" - ) - else: - self.num_timeseries_obs_dim = num_timeseries_obs_dim_list[0] + config = self.learning_config + ppo_config = getattr(config, "ppo", None) + actor_architecture = getattr(ppo_config, "actor_architecture", "mlp") - def create_actors(self) -> None: - """Create stochastic actor networks for all agents.""" for strategy in self.learning_role.rl_strats.values(): # Create PPO Actor - strategy.actor = ActorPPO( - obs_dim=self.obs_dim, - act_dim=self.act_dim, - float_type=self.float_type, - ).to(self.device) + if actor_architecture == "lstm": + strategy.actor = LSTMActorPPO( + obs_dim=self.obs_dim, + act_dim=self.act_dim, + float_type=self.float_type, + unique_obs_dim=self.unique_obs_dim, + num_timeseries_obs_dim=strategy.num_timeseries_obs_dim, + ).to(self.device) + else: + strategy.actor = ActorPPO( + obs_dim=self.obs_dim, + act_dim=self.act_dim, + float_type=self.float_type, + ).to(self.device) # Create Optimizer strategy.actor.optimizer = AdamW( @@ -229,13 +130,14 @@ def create_actors(self) -> None: def create_critics(self) -> None: """ - Create value networks for all agents. + Creates value networks for all agents. + Initializes the CriticPPO network (Centralized Critic) and its optimizer. """ n_agents = len(self.learning_role.rl_strats) for strategy in self.learning_role.rl_strats.values(): # Create value network - strategy.critic = CriticPPO( + strategy.critics = CriticPPO( n_agents=n_agents, obs_dim=self.obs_dim, unique_obs_dim=self.unique_obs_dim, @@ -243,19 +145,24 @@ def create_critics(self) -> None: ).to(self.device) # Create optimizer - strategy.critic.optimizer = AdamW( - strategy.critic.parameters(), + strategy.critics.optimizer = AdamW( + strategy.critics.parameters(), lr=self.learning_role.calc_lr_from_progress(1), ) def extract_policy(self) -> dict: - """Extract all actor and critic networks into a dictionary.""" + """ + Extract all actor and critic networks into a dictionary. 
+ + Returns: + dict: Dictionary with keys 'actors', 'critics', and dimension information. + """ actors = {} critics = {} for u_id, strategy in self.learning_role.rl_strats.items(): actors[u_id] = strategy.actor - critics[u_id] = strategy.critic + critics[u_id] = strategy.critics return { "actors": actors, @@ -265,9 +172,20 @@ def extract_policy(self) -> dict: "unique_obs_dim": self.unique_obs_dim, } + # ========================================================================= + # CORE TRAINING: POLICY UPDATE + # ========================================================================= + def update_policy(self) -> None: """ - Update actor and critic networks. + Update actor and critic networks using proximal policy optimization (PPO). + Checks if enough data is collected (batch_size). + Computes Generalized Advantage Estimation (GAE) and Returns using the last value estimate. + Updates the Actor and Critic networks over multiple epochs (n_epochs) using mini-batches. + Calculates the surrogate objective with clipping (clip_range). + Calculates value function loss (MSE) and entropy bonus. + Logs metrics and gradients. + Clears the on-policy buffer after the update. 
""" logger.debug("Updating Policy") @@ -276,7 +194,7 @@ def update_policy(self) -> None: # Get rollout buffer rollout_buffer = self.learning_role.rollout_buffer - + # Check if rollout buffer has data if rollout_buffer is None or rollout_buffer.pos == 0: logger.debug("Rollout buffer is empty, skipping policy update") @@ -297,7 +215,7 @@ def update_policy(self) -> None: learning_rate = self.learning_role.calc_lr_from_progress(progress_remaining) for strategy in strategies: - for param_group in strategy.critic.optimizer.param_groups: + for param_group in strategy.critics.optimizer.param_groups: param_group["lr"] = learning_rate for param_group in strategy.actor.optimizer.param_groups: param_group["lr"] = learning_rate @@ -313,16 +231,16 @@ def update_policy(self) -> None: # Use the LAST observation as the bootstrap for the REST of the buffer. # We sacrifice the last step (pos-1) to serve as s_{t+1} for the step before it. # This ensures V(s_{t+1}) is calculating using the REAL next state, not self-referential. 
- + last_idx = buffer_size - 1 last_obs = rollout_buffer.observations[last_idx] last_dones = rollout_buffer.dones[last_idx] - + # Reduce buffer size by 1 so as to not train on the bootstrap step rollout_buffer.pos -= 1 if rollout_buffer.full: - rollout_buffer.full = False # If it was full, it's not anymore - + rollout_buffer.full = False # If it was full, it's not anymore + # Prepare unique observations for centralized critic last_unique_obs = last_obs[:, self.obs_dim - self.unique_obs_dim :] @@ -343,7 +261,7 @@ def update_policy(self) -> None: dtype=self.float_type, ) # Get value estimate from critic - last_values[i] = strategy.critic(obs_tensor).cpu().numpy().flatten()[0] + last_values[i] = strategy.critics(obs_tensor).cpu().numpy().flatten()[0] dones[i] = last_dones[i] # Compute advantages and returns @@ -353,12 +271,12 @@ def update_policy(self) -> None: all_actor_losses = [] all_critic_losses = [] all_entropy_losses = [] - + # Initialize unit_params for gradient logging # Use an empty list that will be dynamically extended unit_params = [] step_count = 0 - + # Helper to create a new step entry def create_step_entry(): return { @@ -384,10 +302,10 @@ def create_step_entry(): for i, strategy in enumerate(strategies): actor = strategy.actor - critic = strategy.critic + critic = strategy.critics obs_i = batch.observations[:, i, :] - + # Construct centralized state other_unique_obs = th.cat( (unique_obs_from_others[:, :i], unique_obs_from_others[:, i + 1 :]), @@ -411,10 +329,7 @@ def create_step_entry(): advantages_i.std() + 1e-8 ) - log_probs, entropy = actor.evaluate_actions( - obs_i, - actions_i - ) + log_probs, entropy = actor.evaluate_actions(obs_i, actions_i) values = critic(all_states).flatten() # Importance sampling ratio @@ -435,7 +350,7 @@ def create_step_entry(): values_clipped = old_values_i + th.clamp( values - old_values_i, -self.clip_range_vf, - self.clip_range_vf + self.clip_range_vf, ) value_loss_1 = F.mse_loss(values, returns_i) value_loss_2 = 
F.mse_loss(values_clipped, returns_i) @@ -453,14 +368,14 @@ def create_step_entry(): # Calculate gradient norms BEFORE clipping actor_params = list(actor.parameters()) critic_params = list(critic.parameters()) - + actor_max_grad_norm = max( (p.grad.norm().item() for p in actor_params if p.grad is not None), - default=0.0 + default=0.0, ) critic_max_grad_norm = max( (p.grad.norm().item() for p in critic_params if p.grad is not None), - default=0.0 + default=0.0, ) # Gradient clipping @@ -478,42 +393,31 @@ def create_step_entry(): all_actor_losses.append(policy_loss.item()) all_critic_losses.append(value_loss.item()) all_entropy_losses.append(entropy_loss.item()) - + # Ensure we have an entry for this step if step_count >= len(unit_params): unit_params.append(create_step_entry()) - + # Store per-unit gradient params for this step unit_params[step_count][strategy.unit_id]["actor_loss"] = policy_loss.item() unit_params[step_count][strategy.unit_id]["critic_loss"] = value_loss.item() - unit_params[step_count][strategy.unit_id]["actor_total_grad_norm"] = actor_total_grad_norm.item() if isinstance(actor_total_grad_norm, th.Tensor) else actor_total_grad_norm + unit_params[step_count][strategy.unit_id]["actor_total_grad_norm"] = ( + actor_total_grad_norm.item() + if isinstance(actor_total_grad_norm, th.Tensor) + else actor_total_grad_norm + ) unit_params[step_count][strategy.unit_id]["actor_max_grad_norm"] = actor_max_grad_norm - unit_params[step_count][strategy.unit_id]["critic_total_grad_norm"] = critic_total_grad_norm.item() if isinstance(critic_total_grad_norm, th.Tensor) else critic_total_grad_norm + unit_params[step_count][strategy.unit_id]["critic_total_grad_norm"] = ( + critic_total_grad_norm.item() + if isinstance(critic_total_grad_norm, th.Tensor) + else critic_total_grad_norm + ) unit_params[step_count][strategy.unit_id]["critic_max_grad_norm"] = critic_max_grad_norm - + step_count += 1 self.n_updates += 1 - # Log average metrics - # Log average metrics - # if 
self.learning_role.tensor_board_logger: - # self.learning_role.tensor_board_logger.log_scalar( - # "ppo/actor_loss", np.mean(all_actor_losses), self.n_updates - # ) - # self.learning_role.tensor_board_logger.log_scalar( - # "ppo/critic_loss", np.mean(all_critic_losses), self.n_updates - # ) - # self.learning_role.tensor_board_logger.log_scalar( - # "ppo/entropy_loss", np.mean(all_entropy_losses), self.n_updates - # ) - # if all_actor_losses: - # logger.info( - # f"PPO Update {self.n_updates} - Actor loss: {np.mean(all_actor_losses):.4f}, " - # f"Critic loss: {np.mean(all_critic_losses):.4f}, " - # f"Entropy loss: {np.mean(all_entropy_losses):.4f}" - # ) - # Write gradient params to output self.learning_role.write_rl_grad_params_to_output(learning_rate, unit_params) diff --git a/assume/reinforcement_learning/algorithms/matd3.py b/assume/reinforcement_learning/algorithms/matd3.py index dbdd50e41..4280df98f 100644 --- a/assume/reinforcement_learning/algorithms/matd3.py +++ b/assume/reinforcement_learning/algorithms/matd3.py @@ -2,25 +2,21 @@ # # SPDX-License-Identifier: AGPL-3.0-or-later -import json import logging -import os import torch as th from torch.nn import functional as F -from torch.optim import AdamW -from assume.reinforcement_learning.algorithms.base_algorithm import RLAlgorithm +from assume.reinforcement_learning.algorithms.base_algorithm import A2CAlgorithm from assume.reinforcement_learning.learning_utils import ( polyak_update, - transfer_weights, ) from assume.reinforcement_learning.neural_network_architecture import CriticTD3 logger = logging.getLogger(__name__) -class TD3(RLAlgorithm): +class TD3(A2CAlgorithm): """ Twin Delayed Deep Deterministic Policy Gradients (TD3). Addressing Function Approximation Error in Actor-Critic Methods. 
@@ -38,405 +34,8 @@ def __init__(self, learning_role): self.n_updates = 0 self.grad_clip_norm = 1.0 - def save_params(self, directory): - """ - This method saves the parameters of both the actor and critic networks associated with the learning role. It organizes the - saved parameters into separate directories for critics and actors within the specified base directory. - - Args: - directory (str): The base directory for saving the parameters. - """ - self.save_critic_params(directory=f"{directory}/critics") - self.save_actor_params(directory=f"{directory}/actors") - - def save_critic_params(self, directory): - """ - Save the parameters of critic networks. - - This method saves the parameters of the critic networks, including the critic's state_dict, critic_target's state_dict, - and the critic's optimizer state_dict. It organizes the saved parameters into a directory structure specific to the critic - associated with each learning strategy. - - Args: - directory (str): The base directory for saving the parameters. - """ - os.makedirs(directory, exist_ok=True) - for u_id, strategy in self.learning_role.rl_strats.items(): - obj = { - "critic": strategy.critics.state_dict(), - "critic_target": strategy.target_critics.state_dict(), - "critic_optimizer": strategy.critics.optimizer.state_dict(), - } - path = f"{directory}/critic_{u_id}.pt" - th.save(obj, path) - - # record the exact order of u_ids and save it with critics to ensure that the same order is used when loading the parameters - u_id_list = [str(u) for u in self.learning_role.rl_strats.keys()] - mapping = {"u_id_order": u_id_list} - map_path = os.path.join(directory, "u_id_order.json") - with open(map_path, "w") as f: - json.dump(mapping, f, indent=2) - - def save_actor_params(self, directory): - """ - Save the parameters of actor networks. - - This method saves the parameters of the actor networks, including the actor's state_dict, actor_target's state_dict, and - the actor's optimizer state_dict. 
It organizes the saved parameters into a directory structure specific to the actor - associated with each learning strategy. - - Args: - directory (str): The base directory for saving the parameters. - """ - os.makedirs(directory, exist_ok=True) - for u_id, strategy in self.learning_role.rl_strats.items(): - obj = { - "actor": strategy.actor.state_dict(), - "actor_target": strategy.actor_target.state_dict(), - "actor_optimizer": strategy.actor.optimizer.state_dict(), - } - path = f"{directory}/actor_{u_id}.pt" - th.save(obj, path) - - def load_params(self, directory: str) -> None: - """ - Load the parameters of both actor and critic networks. - - This method loads the parameters of both the actor and critic networks associated with the learning role from the specified - directory. It uses the `load_critic_params` and `load_actor_params` methods to load the respective parameters. - - Args: - directory (str): The directory from which the parameters should be loaded. - """ - self.load_critic_params(directory) - self.load_actor_params(directory) - - def load_critic_params(self, directory: str) -> None: - """ - Load critic, target_critic, and optimizer states for each agent strategy. - If agent count differs between saved and current model, performs weight transfer for both networks. - Args: - directory (str): The directory from which the parameters should be loaded. - """ - logger.info("Loading critic parameters...") - - if not os.path.exists(directory): - logger.warning( - "Specified directory does not exist. Using randomly initialized critics." 
- ) - return - - map_path = os.path.join(directory, "critics", "u_id_order.json") - if os.path.exists(map_path): - # read the saved order of u_ids from critics save directory - with open(map_path) as f: - loaded_id_order = json.load(f).get("u_id_order", []) - else: - logger.warning("No u_id_order.json: assuming same order as current.") - loaded_id_order = [str(u) for u in self.learning_role.rl_strats.keys()] - - new_id_order = [str(u) for u in self.learning_role.rl_strats.keys()] - direct_load = loaded_id_order == new_id_order - - if direct_load: - logger.info("Agents order unchanged. Loading critic weights directly.") - else: - logger.info( - f"Agents length and/or order mismatch: n_old={len(loaded_id_order)}, n_new={len(new_id_order)}. Transferring weights for critics and target critics." - ) - - for u_id, strategy in self.learning_role.rl_strats.items(): - critic_path = os.path.join(directory, "critics", f"critic_{u_id}.pt") - if not os.path.exists(critic_path): - logger.warning(f"No saved critic for {u_id}; skipping.") - continue - - try: - critic_params = th.load(critic_path, weights_only=True) - for key in ("critic", "critic_target", "critic_optimizer"): - if key not in critic_params: - logger.warning( - f"Missing {key} in critic params for {u_id}; skipping." 
- ) - continue - - if direct_load: - strategy.critics.load_state_dict(critic_params["critic"]) - strategy.target_critics.load_state_dict( - critic_params["critic_target"] - ) - strategy.critics.optimizer.load_state_dict( - critic_params["critic_optimizer"] - ) - logger.debug(f"Loaded critic for {u_id} directly.") - else: - critic_weights = transfer_weights( - model=strategy.critics, - loaded_state=critic_params["critic"], - loaded_id_order=loaded_id_order, - new_id_order=new_id_order, - obs_base=strategy.obs_dim, - act_dim=strategy.act_dim, - unique_obs=strategy.unique_obs_dim, - ) - target_critic_weights = transfer_weights( - model=strategy.target_critics, - loaded_state=critic_params["critic_target"], - loaded_id_order=loaded_id_order, - new_id_order=new_id_order, - obs_base=strategy.obs_dim, - act_dim=strategy.act_dim, - unique_obs=strategy.unique_obs_dim, - ) - - if critic_weights is None or target_critic_weights is None: - logger.warning( - f"Critic weights transfer failed for {u_id}; skipping." - ) - continue - - strategy.critics.load_state_dict(critic_weights) - strategy.target_critics.load_state_dict(target_critic_weights) - logger.debug(f"Critic weights transferred for {u_id}.") - - except Exception as e: - logger.warning(f"Failed to load critic for {u_id}: {e}") - - def load_actor_params(self, directory: str) -> None: - """ - Load the parameters of actor networks from a specified directory. - - This method loads the parameters of actor networks, including the actor's state_dict, actor_target's state_dict, and - the actor's optimizer state_dict, from the specified directory. It iterates through the learning strategies associated - with the learning role, loads the respective parameters, and updates the actor and target actor networks accordingly. - - Args: - directory (str): The directory from which the parameters should be loaded. 
- """ - logger.info("Loading actor parameters...") - if not os.path.exists(directory): - logger.warning( - "Specified directory for loading the actors does not exist! Starting with randomly initialized values!" - ) - return - - for u_id, strategy in self.learning_role.rl_strats.items(): - try: - actor_params = self.load_obj( - directory=f"{directory}/actors/actor_{str(u_id)}.pt" - ) - strategy.actor.load_state_dict(actor_params["actor"]) - strategy.actor_target.load_state_dict(actor_params["actor_target"]) - strategy.actor.optimizer.load_state_dict( - actor_params["actor_optimizer"] - ) - - # add a tag to the strategy to indicate that the actor was loaded - strategy.actor.loaded = True - except Exception: - logger.warning(f"No actor values loaded for agent {u_id}") - - def initialize_policy(self, actors_and_critics: dict = None) -> None: - """ - Create actor and critic networks for reinforcement learning. - - If `actors_and_critics` is None, this method creates new actor and critic networks. - If `actors_and_critics` is provided, it assigns existing networks to the respective attributes. - - Args: - actors_and_critics (dict): The actor and critic networks to be assigned. - - """ - if actors_and_critics is None: - self.check_strategy_dimensions() - self.create_actors() - self.create_critics() - - else: - for u_id, strategy in self.learning_role.rl_strats.items(): - strategy.actor = actors_and_critics["actors"][u_id] - strategy.actor_target = actors_and_critics["actor_targets"][u_id] - - strategy.critics = actors_and_critics["critics"][u_id] - strategy.target_critics = actors_and_critics["target_critics"][u_id] - - self.obs_dim = actors_and_critics["obs_dim"] - self.act_dim = actors_and_critics["act_dim"] - self.unique_obs_dim = actors_and_critics["unique_obs_dim"] - - def check_strategy_dimensions(self) -> None: - """ - Iterate over all learning strategies and check if the dimensions of observations and actions are the same. 
- Also check if the unique observation dimensions are the same. If not, raise a ValueError. - This is important for the TD3 algorithm, as it uses a centralized critic that requires consistent dimensions across all agents. - """ - foresight_list = [] - obs_dim_list = [] - act_dim_list = [] - unique_obs_dim_list = [] - num_timeseries_obs_dim_list = [] - - for strategy in self.learning_role.rl_strats.values(): - foresight_list.append(strategy.foresight) - obs_dim_list.append(strategy.obs_dim) - act_dim_list.append(strategy.act_dim) - unique_obs_dim_list.append(strategy.unique_obs_dim) - num_timeseries_obs_dim_list.append(strategy.num_timeseries_obs_dim) - - if len(set(foresight_list)) > 1: - raise ValueError( - f"All foresight values must be the same for all RL agents. The defined learning strategies have the following foresight values: {foresight_list}" - ) - else: - self.foresight = foresight_list[0] - - if len(set(act_dim_list)) > 1: - raise ValueError( - f"All action dimensions must be the same for all RL agents. The defined learning strategies have the following action dimensions: {act_dim_list}" - ) - else: - self.act_dim = act_dim_list[0] - - if len(set(unique_obs_dim_list)) > 1: - raise ValueError( - f"All unique_obs_dim values must be the same for all RL agents. The defined learning strategies have the following unique_obs_dim values: {unique_obs_dim_list}" - ) - else: - self.unique_obs_dim = unique_obs_dim_list[0] - - if len(set(num_timeseries_obs_dim_list)) > 1: - raise ValueError( - f"All num_timeseries_obs_dim values must be the same for all RL agents. The defined learning strategies have the following num_timeseries_obs_dim values: {num_timeseries_obs_dim_list}" - ) - else: - self.num_timeseries_obs_dim = num_timeseries_obs_dim_list[0] - - # Check last, as other cases should fail before! - if len(set(obs_dim_list)) > 1: - raise ValueError( - f"All observation dimensions must be the same for all RL agents. 
The defined learning strategies have the following observation dimensions: {obs_dim_list}" - ) - else: - self.obs_dim = obs_dim_list[0] - - def create_actors(self) -> None: - """ - Create actor networks for reinforcement learning for each unit strategy. - - This method initializes actor networks and their corresponding target networks for each unit strategy. - The actors are designed to map observations to action probabilities in a reinforcement learning setting. - - The created actor networks are associated with each unit strategy and stored as attributes. - - Note: - The observation dimension need to be the same, due to the centralized critic that all actors share. - If you have units with different observation dimensions. They need to have different critics and hence learning roles. - - """ - - for strategy in self.learning_role.rl_strats.values(): - strategy.actor = self.actor_architecture_class( - obs_dim=self.obs_dim, - act_dim=self.act_dim, - float_type=self.float_type, - unique_obs_dim=self.unique_obs_dim, - num_timeseries_obs_dim=self.num_timeseries_obs_dim, - ).to(self.device) - - strategy.actor_target = self.actor_architecture_class( - obs_dim=self.obs_dim, - act_dim=self.act_dim, - float_type=self.float_type, - unique_obs_dim=self.unique_obs_dim, - num_timeseries_obs_dim=self.num_timeseries_obs_dim, - ).to(self.device) - - strategy.actor_target.load_state_dict(strategy.actor.state_dict()) - strategy.actor_target.train(mode=False) - - strategy.actor.optimizer = AdamW( - strategy.actor.parameters(), - lr=self.learning_role.calc_lr_from_progress( - 1 - ), # 1=100% of simulation remaining, uses learning_rate from config as starting point - ) - - strategy.actor.loaded = False - - def create_critics(self) -> None: - """ - Create critic networks for reinforcement learning. - - This method initializes critic networks for each agent in the reinforcement learning setup. 
- - Note: - The observation dimension need to be the same, due to the centralized criic that all actors share. - If you have units with different observation dimensions. They need to have different critics and hence learning roles. - """ - n_agents = len(self.learning_role.rl_strats) - - for strategy in self.learning_role.rl_strats.values(): - strategy.critics = CriticTD3( - n_agents=n_agents, - obs_dim=self.obs_dim, - act_dim=self.act_dim, - unique_obs_dim=self.unique_obs_dim, - float_type=self.float_type, - ).to(self.device) - - strategy.target_critics = CriticTD3( - n_agents=n_agents, - obs_dim=self.obs_dim, - act_dim=self.act_dim, - unique_obs_dim=self.unique_obs_dim, - float_type=self.float_type, - ).to(self.device) - - strategy.target_critics.load_state_dict(strategy.critics.state_dict()) - strategy.target_critics.train(mode=False) - - strategy.critics.optimizer = AdamW( - strategy.critics.parameters(), - lr=self.learning_role.calc_lr_from_progress( - 1 - ), # 1 = 100% of simulation remaining, uses learning_rate from config as starting point - ) - - def extract_policy(self) -> dict: - """ - Extract actor and critic networks. - - This method extracts the actor and critic networks associated with each learning strategy and organizes them into a - dictionary structure. The extracted networks include actors, actor_targets, critics, and target_critics. The resulting - dictionary is typically used for saving and sharing these networks. - - Returns: - dict: The extracted actor and critic networks. 
- """ - actors = {} - actor_targets = {} - - critics = {} - target_critics = {} - - for u_id, strategy in self.learning_role.rl_strats.items(): - actors[u_id] = strategy.actor - actor_targets[u_id] = strategy.actor_target - - critics[u_id] = strategy.critics - target_critics[u_id] = strategy.target_critics - - actors_and_critics = { - "actors": actors, - "actor_targets": actor_targets, - "critics": critics, - "target_critics": target_critics, - "obs_dim": self.obs_dim, - "act_dim": self.act_dim, - "unique_obs_dim": self.unique_obs_dim, - } - - return actors_and_critics + # Define the critic architecture class for TD3 + self.critic_architecture_class = CriticTD3 def update_policy(self): """ @@ -457,7 +56,7 @@ def update_policy(self): """ - logger.debug("Updating Policy") + logger.debug("Updating Policy (TD3)") # Stack strategies for easier access strategies = list(self.learning_role.rl_strats.values()) @@ -514,11 +113,11 @@ def update_policy(self): with th.no_grad(): # Select action according to policy and add clipped noise noise = ( - th.randn_like(actions) * self.learning_config.target_policy_noise + th.randn_like(actions) * self.learning_config.off_policy.target_policy_noise ) noise = noise.clamp( - -self.learning_config.target_noise_clip, - self.learning_config.target_noise_clip, + -self.learning_config.off_policy.target_noise_clip, + self.learning_config.off_policy.target_noise_clip, ) # Select next actions for all agents @@ -643,7 +242,7 @@ def update_policy(self): ###################################################################### # ACTOR UPDATE (DELAYED): Accumulate losses for all agents in one pass ###################################################################### - if self.n_updates % self.learning_config.policy_delay == 0: + if self.n_updates % self.learning_config.off_policy.policy_delay == 0: # Zero-grad for all actors first for strategy in strategies: strategy.actor.optimizer.zero_grad(set_to_none=True) @@ -743,10 +342,10 @@ def 
update_policy(self): polyak_update( all_critic_params, all_target_critic_params, - self.learning_config.tau, + self.learning_config.off_policy.tau, ) polyak_update( - all_actor_params, all_target_actor_params, self.learning_config.tau + all_actor_params, all_target_actor_params, self.learning_config.off_policy.tau ) self.learning_role.write_rl_grad_params_to_output(learning_rate, unit_params) diff --git a/assume/reinforcement_learning/buffer.py b/assume/reinforcement_learning/buffer.py index d021452a4..b084020da 100644 --- a/assume/reinforcement_learning/buffer.py +++ b/assume/reinforcement_learning/buffer.py @@ -187,16 +187,23 @@ class RolloutBufferSamples(NamedTuple): class RolloutBuffer: """ - On-policy rollout buffer for PPO algorithm. This is different from TD3/DDPG which keep old data in a replay buffer. The buffer stores data for all the agents together. - - buffer_size (int): maximum number of transitions the buffer can store before training. - obs_dim (int): dimension of the observation space. - act_dim (int): dimension of the action space. - n_rl_units (int): number of RL agents in the multi-agent system. - device (str | th.device): specifies the device for training. - float_type (th.dtype): precision of floating-point numbers. - gamma (float): discount factor for defining how much to value future rewards. - gae_lambda (float): GAE (Generalized Advantage Estimationn) smoothing parameter. + Rollout buffer is used in on-policy algorithms like PPO. + + It corresponds to the transitions collected using the current policy. + This experience is discarded after the policy is updated. + In order to use PPO, the current observations are needed to be stored. + the observations include actions, rewards, values, log probabilities and done for each action. 
+ + Args: + buffer_size (int): Max number of elements allowed in the buffer + obs_dim (int): Dimension of the observation space + act_dim (int): Dimension of the action space + n_rl_units (int): Number of RL agents + device (str | th.device): PyTorch device config + float_type (th.dtype): Data type for floating point numbers + gamma (float): Discount factor + gae_lambda (float): bias-variance trade-off factor for Generalized Advantage Estimator + """ def __init__( @@ -229,7 +236,10 @@ def __init__( self.reset() def reset(self) -> None: - """Clear the buffer and allocate new storage.""" + """ + Reset the rollout buffer. + Clearing the buffer and allocating new storage. + """ self.observations = np.zeros( ( self.buffer_size, @@ -304,7 +314,17 @@ def add( value: np.ndarray, log_prob: np.ndarray ) -> None: - """Add a transition to the buffer.""" + """ + Add a transition to the buffer. + + Args: + obs (np.ndarray): Observation of the agents + action (np.ndarray): Action taken by the agents + reward (np.ndarray): Reward obtained + done (np.ndarray): Whether the episode ended + value (np.ndarray): Value estimate from the critic + log_prob (np.ndarray): Log probability of the action + """ if self.pos >= self.buffer_size: self.full = True return @@ -324,7 +344,14 @@ def compute_returns_and_advantages( last_values: np.ndarray, dones: np.ndarray ) -> None: - """Compute GAE advantages and returns.""" + """ + Uses Generalized Advantage Estimation to compute the advantage. + To obtain the lambda-return, the advantage is added to the value estimate. + + Args: + last_values (np.ndarray): value estimation for the last step + dones (np.ndarray): whether the last step was terminal + """ # taking the final value estimates and episode-end flags, # and making them flat arrays providing one number per agent. 
last_values = np.array(last_values).flatten() @@ -368,7 +395,15 @@ def get( self, batch_size: int | None = None ) -> Generator[RolloutBufferSamples, None, None]: - """Generate batches of samples for training.""" + """ + Generator for generating batches of transition samples for training. + + Args: + batch_size (int | None): Number of samples to be accessed per batch. + + Yields: + Generator[RolloutBufferSamples]: A generator yielding RolloutBufferSamples + """ if not self.generator_ready: raise ValueError( "Must call compute_returns_and_advantages before sampling." @@ -387,7 +422,16 @@ def get( start_idx += batch_size def _get_samples(self, indices: np.ndarray) -> RolloutBufferSamples: - """Convert numpy arrays to torch tensors for given indices.""" + """ + Helper function to sample data from the buffer. + Converts numpy arrays to torch tensors for given indices. + + Args: + indices (np.ndarray): Indices of the samples to retrieve. + + Returns: + RolloutBufferSamples: The batch of samples converted to PyTorch tensors. + """ return RolloutBufferSamples( observations = th.as_tensor( self.observations[indices], @@ -422,5 +466,10 @@ def _get_samples(self, indices: np.ndarray) -> RolloutBufferSamples: ) def size(self) -> int: - """Return current number of stored transitions.""" + """ + Return the current number of stored transitions. + + Returns: + int: The size of the buffer. 
+ """ return self.buffer_size if self.full else self.pos diff --git a/assume/reinforcement_learning/learning_role.py b/assume/reinforcement_learning/learning_role.py index 3e2c463f8..8a06f38ef 100644 --- a/assume/reinforcement_learning/learning_role.py +++ b/assume/reinforcement_learning/learning_role.py @@ -98,12 +98,12 @@ def __init__( lambda x: self.learning_config.learning_rate ) - if self.learning_config.action_noise_schedule == "linear": + if self.learning_config.off_policy.action_noise_schedule == "linear": self.calc_noise_from_progress = linear_schedule_func( - self.learning_config.noise_dt + self.learning_config.off_policy.noise_dt ) else: - self.calc_noise_from_progress = lambda x: self.learning_config.noise_dt + self.calc_noise_from_progress = lambda x: self.learning_config.off_policy.noise_dt self.eval_episodes_done = 0 @@ -336,51 +336,36 @@ async def _store_to_buffer_and_update_sync(self, cache, device) -> None: device ) - if cache["values"].get(timestamp): - values_data = transform_buffer_data( - { - timestamp: cache["values"][timestamp] - }, - device - ) - else: - values_data = np.zeros(len(self.rl_strats)) + values_data = transform_buffer_data( + { + timestamp: cache["values"][timestamp] + }, + device + ) - if cache["log_probs"].get(timestamp): - log_probs_data = transform_buffer_data( - { - timestamp: cache["log_probs"][timestamp] - }, - device - ) - else: - log_probs_data = np.zeros(len(self.rl_strats)) - - if cache["dones"].get(timestamp): - dones_data = transform_buffer_data( - { - timestamp: cache["dones"][timestamp] - }, - device - ) - else: - dones_data = np.zeros(len(self.rl_strats)) + log_probs_data = transform_buffer_data( + { + timestamp: cache["log_probs"][timestamp] + }, + device + ) - # Helper to convert to numpy - def to_numpy(data): - if isinstance(data, th.Tensor): - return data.cpu().numpy() - return np.array(data) + dones_data = transform_buffer_data( + { + timestamp: cache["dones"][timestamp] + }, + device + ) # Add to rollout 
buffer if self.rollout_buffer is not None: self.rollout_buffer.add( - obs = to_numpy(obs_data), - action = to_numpy(actions_data), - reward = to_numpy(rewards_data), - done = to_numpy(dones_data), - value = to_numpy(values_data), - log_prob = to_numpy(log_probs_data) + obs = obs_data, + action = actions_data, + reward = rewards_data, + done = dones_data, + value = values_data, + log_prob = log_probs_data ) else: # for TD3/DDPG use off-policy ReplayBuffer @@ -683,10 +668,10 @@ def compare_and_save_policies(self, metrics: dict) -> bool: ) if ( self.learning_config.learning_rate_schedule - or self.learning_config.action_noise_schedule + or self.learning_config.off_policy.action_noise_schedule ) is not None: logger.info( - f"Learning rate schedule ({self.learning_config.learning_rate_schedule}) or action noise schedule ({self.learning_config.action_noise_schedule}) were scheduled to decay, further learning improvement can be possible. End value of schedule may not have been reached." + f"Learning rate schedule ({self.learning_config.learning_rate_schedule}) or action noise schedule ({self.learning_config.off_policy.action_noise_schedule}) were scheduled to decay, further learning improvement can be possible. End value of schedule may not have been reached." 
) self.rl_algorithm.save_params( diff --git a/assume/reinforcement_learning/learning_utils.py b/assume/reinforcement_learning/learning_utils.py index 268b0ed5b..8d72f4aeb 100644 --- a/assume/reinforcement_learning/learning_utils.py +++ b/assume/reinforcement_learning/learning_utils.py @@ -26,6 +26,22 @@ class ObsActRew(TypedDict): Schedule = Callable[[float], float] +class ActivationLimits(TypedDict): + """Output limits for activation functions.""" + + min: float + max: float + func: Callable[[th.Tensor], th.Tensor] + + +activation_function_limit: dict[str, ActivationLimits] = { + "tanh": {"min": -1, "max": 1, "func": th.tanh}, + "sigmoid": {"min": 0, "max": 1, "func": th.sigmoid}, + "relu": {"min": 0, "max": float("inf"), "func": th.nn.functional.relu}, + "softsign": {"min": -1, "max": 1, "func": th.nn.functional.softsign}, +} + + # Ornstein-Uhlenbeck Noise # from https://github.com/songrotek/DDPG/blob/master/ou_noise.py class OUNoise: diff --git a/assume/reinforcement_learning/neural_network_architecture.py b/assume/reinforcement_learning/neural_network_architecture.py index 87c1cb2a8..59e8e80e0 100644 --- a/assume/reinforcement_learning/neural_network_architecture.py +++ b/assume/reinforcement_learning/neural_network_architecture.py @@ -8,6 +8,8 @@ from typing import List, Tuple, Type, Optional, Union +from assume.reinforcement_learning.learning_utils import activation_function_limit + class Critic(nn.Module): """ @@ -243,40 +245,19 @@ class Actor(nn.Module): Parent class for actor networks. 
""" - activation_function_limit = { - "softsign": (-1, 1), - "tanh": (-1, 1), - "sigmoid": (0, 1), - "relu": (0, float("inf")), - } - - activation_function_map = { - "softsign": F.softsign, - "tanh": th.tanh, - "sigmoid": th.sigmoid, - "relu": F.relu - } - def __init__(self): super().__init__() - self.activation = "softsign" # or "tanh", "sigmoid", "relu" + self.activation = "softsign" # or "tanh", "sigmoid", "relu" - if self.activation not in self.activation_function_limit: + if self.activation not in activation_function_limit: raise ValueError( - f"Activation '{self.activation}' not supported! Supported: {list(self.activation_function_limit.keys())}" + f"Activation '{self.activation}' not supported! Supported: {list(activation_function_limit.keys())}" ) - - self.min_output, self.max_output = self.activation_function_limit[ - self.activation - ] - self.activation_function = self.activation_function_map.get(self.activation) - - if self.activation_function is None: - raise ValueError( - f"Activation '{self.activation}' not implemented in forward pass!" - ) + self.min_output = activation_function_limit[self.activation]["min"] + self.max_output = activation_function_limit[self.activation]["max"] + self.activation_function = activation_function_limit[self.activation]["func"] class MLPActor(Actor): @@ -399,20 +380,10 @@ def forward(self, obs): class ActorPPO(nn.Module): - activation_function_limit = { - "softsign": (-1, 1), - "tanh": (-1, 1), - "sigmoid": (0, 1), - "relu": (0, float("inf")), - } - - activation_function_map = { - "softsign": F.softsign, - "tanh": th.tanh, - "sigmoid": th.sigmoid, - "relu": F.relu - } - + """ + PPO Actor network with stochastic policy (Gaussian). 
+ """ + def __init__( self, obs_dim: int, @@ -427,16 +398,15 @@ def __init__( self.act_dim = act_dim self.float_type = float_type - self.activation = "softsign" # or "tanh", "sigmoid", "relu" + self.activation = "softsign" # or "tanh", "sigmoid", "relu" - if self.activation not in self.activation_function_limit: + if self.activation not in activation_function_limit: raise ValueError( - f"Activation '{self.activation}' not supported! Supported: {list(self.activation_function_limit.keys())}" + f"Activation '{self.activation}' not supported! Supported: {list(activation_function_limit.keys())}" ) - - self.min_output, self.max_output = self.activation_function_limit[ - self.activation - ] + + self.min_output = activation_function_limit[self.activation]["min"] + self.max_output = activation_function_limit[self.activation]["max"] # Policy network (outputs mean) self.FC1 = nn.Linear(obs_dim, 256, dtype=float_type) @@ -564,10 +534,139 @@ def _compute_log_prob( std: th.Tensor, ) -> th.Tensor: """Compute log probability of actions under Gaussian distribution.""" - var = std.pow(2) - log_prob = -0.5 * ( - ((actions - mean).pow(2) / var) - + 2 * th.log(std) - + th.log(th.tensor(2 * th.pi)) + distribution = th.distributions.Normal(mean, std) + return distribution.log_prob(actions).sum(dim=-1) + + +class LSTMActorPPO(ActorPPO): + """ + PPO Actor network with LSTM architecture and stochastic policy (Gaussian). 
+ """ + + def __init__( + self, + obs_dim: int, + act_dim: int, + float_type, + unique_obs_dim: int, + num_timeseries_obs_dim: int, + log_std_init: float = 0.0, + *args, + **kwargs, + ): + # Initialize ActorPPO params + nn.Module.__init__(self) # Don't call ActorPPO.__init__ to avoid FC creation + + self.act_dim = act_dim + self.float_type = float_type + self.unique_obs_dim = unique_obs_dim + self.num_timeseries_obs_dim = num_timeseries_obs_dim + + self.activation = "softsign" + self.activation_function = activation_function_limit[self.activation]["func"] + + # Compute timeseries length for LSTM + try: + self.timeseries_len = int( + (obs_dim - unique_obs_dim) / num_timeseries_obs_dim + ) + except Exception as e: + raise ValueError( + f"Using LSTM but not providing correctly shaped timeseries: Expected integer as unique timeseries length, got {(obs_dim - unique_obs_dim) / num_timeseries_obs_dim} instead." + ) from e + + # LSTM Layers + self.LSTM1 = nn.LSTMCell(num_timeseries_obs_dim, 8, dtype=float_type) + self.LSTM2 = nn.LSTMCell(8, 16, dtype=float_type) + + # Fully Connected Layers + self.FC1 = nn.Linear(self.timeseries_len * 16 + unique_obs_dim, 128, dtype=float_type) + self.mean_layer = nn.Linear(128, act_dim, dtype=float_type) + + # Learnable log standard deviation + self.log_std = nn.Parameter( + th.ones(act_dim, dtype=float_type) * log_std_init + ) + + self._init_weights() + + def _init_weights(self) -> None: + """Apply orthogonal initialization.""" + def init_layer(m): + if isinstance(m, nn.Linear): + nn.init.orthogonal_(m.weight, gain=1.0) + nn.init.zeros_(m.bias) + elif isinstance(m, nn.LSTMCell): + nn.init.orthogonal_(m.weight_ih, gain=1.0) + nn.init.orthogonal_(m.weight_hh, gain=1.0) + nn.init.zeros_(m.bias_ih) + nn.init.zeros_(m.bias_hh) + + self.apply(init_layer) + + # Initialize output layer with small gain + nn.init.orthogonal_(self.mean_layer.weight, gain=0.01) + nn.init.zeros_(self.mean_layer.bias) + + def forward(self, obs: th.Tensor, 
deterministic: bool = False) -> th.Tensor: + """Forward pass""" + if obs.dim() not in (1, 2): + raise ValueError( + f"LSTMCell: Expected input to be 1D or 2D, got {obs.dim()}D instead" + ) + + is_batched = obs.dim() == 2 + if not is_batched: + obs = obs.unsqueeze(0) + + # Split observation into time series and stationary parts + x1, x2 = obs.split( + [obs.shape[1] - self.unique_obs_dim, self.unique_obs_dim], dim=1 ) - return log_prob.sum(dim=-1) \ No newline at end of file + x1 = x1.reshape(-1, self.num_timeseries_obs_dim, self.timeseries_len) + + # Initial hidden states + batch_size = x1.size(0) + h_t = th.zeros(batch_size, 8, dtype=self.float_type, device=obs.device) + c_t = th.zeros(batch_size, 8, dtype=self.float_type, device=obs.device) + + h_t2 = th.zeros(batch_size, 16, dtype=self.float_type, device=obs.device) + c_t2 = th.zeros(batch_size, 16, dtype=self.float_type, device=obs.device) + + outputs = [] + + # LSTM Loop + for time_step in x1.split(1, dim=2): + # x1 is (Batch, Features, Time) -> split on Time dim=2 + # time_step is (Batch, Features, 1) -> reshape to (Batch, Features) + time_step = time_step.reshape(batch_size, self.num_timeseries_obs_dim) + h_t, c_t = self.LSTM1(time_step, (h_t, c_t)) + h_t2, c_t2 = self.LSTM2(h_t, (h_t2, c_t2)) + outputs.append(h_t2) + + # Concatenate LSTM outputs + outputs = th.cat(outputs, dim=1) + + # Concatenate with stationary observations + x = th.cat((outputs, x2), dim=1) + + # FC Layers + x = F.relu(self.FC1(x)) + mean = th.tanh(self.mean_layer(x)) # Bounded to [-1, 1] + + if not is_batched: + mean = mean.squeeze(0) + + if deterministic: + return mean + + # Sample from Gaussian during training + log_std = self.log_std.expand_as(mean) + std = log_std.exp() # Ensure positive + # Add small epsilon for numerical stability + std = std + 1e-6 + noise = th.randn_like(mean) + action = mean + std * noise + + # Clamp to valid range + return th.clamp(action, -1.0, 1.0) \ No newline at end of file diff --git 
a/assume/strategies/learning_strategies.py b/assume/strategies/learning_strategies.py index c15c56031..a07d03698 100644 --- a/assume/strategies/learning_strategies.py +++ b/assume/strategies/learning_strategies.py @@ -74,10 +74,10 @@ def __init__(self, *args, **kwargs): self.action_noise = NormalActionNoise( mu=0.0, - sigma=self.learning_config.noise_sigma, + sigma=self.learning_config.off_policy.noise_sigma, action_dimension=self.act_dim, - scale=self.learning_config.noise_scale, - dt=self.learning_config.noise_dt, + scale=self.learning_config.off_policy.noise_scale, + dt=self.learning_config.off_policy.noise_dt, ) self.learning_role.register_strategy(self) From a464155a3836a9c6e55eaa97c24002b9a6f1f7dd Mon Sep 17 00:00:00 2001 From: Harshul-18 Date: Fri, 13 Feb 2026 15:20:48 +0100 Subject: [PATCH 14/44] fixed independent working of on-policy algorithms --- assume/common/base.py | 68 +++---- .../algorithms/maddpg.py | 4 +- .../algorithms/matd3.py | 4 +- .../reinforcement_learning/learning_role.py | 186 ++++++++++++------ assume/scenario/loader_csv.py | 4 +- assume/strategies/learning_strategies.py | 26 ++- 6 files changed, 178 insertions(+), 114 deletions(-) diff --git a/assume/common/base.py b/assume/common/base.py index fdc1a042c..3293dec86 100644 --- a/assume/common/base.py +++ b/assume/common/base.py @@ -772,6 +772,22 @@ class AlgorithmConfig: train_freq: str = "24h" +# Algorithm category mapping +ALGORITHM_CATEGORIES = { + "mappo": "on-policy", + "matd3": "off-policy", + "maddpg": "off-policy" +} + +def is_on_policy(algorithm_name: str) -> bool: + """Check if algorithm is on-policy.""" + return ALGORITHM_CATEGORIES.get(algorithm_name) == "on-policy" + +def is_off_policy(algorithm_name: str) -> bool: + """Check if algorithm is off-policy.""" + return ALGORITHM_CATEGORIES.get(algorithm_name) == "off-policy" + + @dataclass class OffPolicyConfig(AlgorithmConfig): """ @@ -795,18 +811,18 @@ class OffPolicyConfig(AlgorithmConfig): replay_buffer_size (int): The 
maximum number of transitions stored in the replay buffer. Default is 50000. """ - episodes_collecting_initial_experience: int = 5 - gradient_steps: int = 100 - noise_dt: int = 1 - noise_scale: int = 1 - noise_sigma: float = 0.1 - actor_architecture: str = "mlp" - action_noise_schedule: str | None = None - policy_delay: int = 2 - tau: float = 0.005 - target_policy_noise: float = 0.2 - target_noise_clip: float = 0.5 - replay_buffer_size: int = 50000 + # episodes_collecting_initial_experience: int = 5 + # gradient_steps: int = 100 + # noise_dt: int = 1 + # noise_scale: int = 1 + # noise_sigma: float = 0.1 + # actor_architecture: str = "mlp" + # action_noise_schedule: str | None = None + # policy_delay: int = 2 + # tau: float = 0.005 + # target_policy_noise: float = 0.2 + # target_noise_clip: float = 0.5 + # replay_buffer_size: int = 50000 @dataclass @@ -859,9 +875,6 @@ class LearningConfig: device (str): The device to use for PyTorch computations. Options include "cpu", "cuda", or specific CUDA devices like "cuda:0". Default is "cpu". - episodes_collecting_initial_experience (int): The number of episodes at the start during which random - actions are chosen instead of using the actor network. This helps populate the replay buffer with - diverse experiences. Default is 5. exploration_noise_std (float): The standard deviation of Gaussian noise added to actions during exploration in the environment. Higher values encourage more exploration. Default is 0.2. training_episodes (int): The number of training episodes, where one episode is the entire simulation @@ -873,8 +886,6 @@ class LearningConfig: batch_size (int): The batch size of experiences sampled from the replay buffer for each training update. Larger batches provide more stable gradients but require more memory. In environments with many leanring agents we advise small batch sizes. Default is 128. - gradient_steps (int): The number of gradient descent steps performed during each training update. 
- More steps can lead to better learning but increase computation time. Default is 100. learning_rate (float): The learning rate (step size) for the optimizer, which controls how much the policy and value networks are updated during training. Default is 0.001. learning_rate_schedule (str | None): Which learning rate decay schedule to use. Currently only "linear" @@ -888,8 +899,6 @@ class LearningConfig: algorithm (str): Specifies which reinforcement learning algorithm to use. Options include "matd3" (Multi-Agent Twin Delayed Deep Deterministic Policy Gradient), "maddpg", and "mappo". Default is "matd3". - replay_buffer_size (int): The maximum number of transitions stored in the replay buffer for experience replay. - Larger buffers allow for more diverse training samples. Default is 500000. gamma (float): The discount factor for future rewards, ranging from 0 to 1. Higher values give more weight to long-term rewards in decision-making. Default is 0.99. actor_architecture (str): The architecture of the neural networks used for the actors. 
Options include @@ -910,20 +919,17 @@ class LearningConfig: max_bid_price: float | None = 100.0 device: str = "cpu" - episodes_collecting_initial_experience: int = 5 exploration_noise_std: float = 0.2 training_episodes: int = 100 validation_episodes_interval: int = 5 train_freq: str = "24h" batch_size: int = 128 - gradient_steps: int = 100 learning_rate: float = 0.001 learning_rate_schedule: str | None = None early_stopping_steps: int | None = None early_stopping_threshold: float = 0.05 algorithm: str = "matd3" - replay_buffer_size: int = 50000 gamma: float = 0.99 actor_architecture: str = "mlp" @@ -948,29 +954,13 @@ def __post_init__(self): self.off_policy.actor_architecture = self.actor_architecture self.on_policy.actor_architecture = self.actor_architecture - self.off_policy.episodes_collecting_initial_experience = self.episodes_collecting_initial_experience - self.off_policy.gradient_steps = self.gradient_steps - self.off_policy.replay_buffer_size = self.replay_buffer_size if self.early_stopping_steps is None: self.early_stopping_steps = int( self.training_episodes / self.validation_episodes_interval + 1 ) - # if we do not have initial experience collected we will get an error as no samples are available on the - # buffer from which we can draw experience to adapt the strategy, hence we set it to minimum one episode - if self.episodes_collecting_initial_experience < 1: - logger.warning( - f"episodes_collecting_initial_experience need to be at least 1 to sample from buffer, got {self.episodes_collecting_initial_experience}. 
setting to 1" - ) - - self.episodes_collecting_initial_experience = 1 - - # check that gradient_steps is positive - if self.gradient_steps <= 0: - raise ValueError( - f"gradient_steps need to be positive, got {self.gradient_steps}" - ) + # check that gradient_steps is positive (now checked in off_policy config) class LearningStrategy(BaseStrategy): diff --git a/assume/reinforcement_learning/algorithms/maddpg.py b/assume/reinforcement_learning/algorithms/maddpg.py index 24517527d..2a4b61633 100644 --- a/assume/reinforcement_learning/algorithms/maddpg.py +++ b/assume/reinforcement_learning/algorithms/maddpg.py @@ -75,7 +75,7 @@ def update_policy(self) -> None: } for u_id in self.learning_role.rl_strats.keys() } - for _ in range(self.learning_config.gradient_steps) + for _ in range(self.learning_config.off_policy.gradient_steps) ] # Update noise and learning rate schedules @@ -91,7 +91,7 @@ def update_policy(self) -> None: strategy.action_noise.update_noise_decay(updated_noise_decay) # Main gradient step loop - for step in range(self.learning_config.gradient_steps): + for step in range(self.learning_config.off_policy.gradient_steps): self.n_updates += 1 # Sample from replay buffer diff --git a/assume/reinforcement_learning/algorithms/matd3.py b/assume/reinforcement_learning/algorithms/matd3.py index 4280df98f..2cc2ee455 100644 --- a/assume/reinforcement_learning/algorithms/matd3.py +++ b/assume/reinforcement_learning/algorithms/matd3.py @@ -74,7 +74,7 @@ def update_policy(self): } for u_id in self.learning_role.rl_strats.keys() } - for _ in range(self.learning_config.gradient_steps) + for _ in range(self.learning_config.off_policy.gradient_steps) ] # update noise decay and learning rate @@ -97,7 +97,7 @@ def update_policy(self): ) strategy.action_noise.update_noise_decay(updated_noise_decay) - for step in range(self.learning_config.gradient_steps): + for step in range(self.learning_config.off_policy.gradient_steps): self.n_updates += 1 transitions = 
self.learning_role.buffer.sample( diff --git a/assume/reinforcement_learning/learning_role.py b/assume/reinforcement_learning/learning_role.py index 8a06f38ef..23a0ce0ff 100644 --- a/assume/reinforcement_learning/learning_role.py +++ b/assume/reinforcement_learning/learning_role.py @@ -12,7 +12,12 @@ import torch as th from mango import Role -from assume.common.base import LearningConfig, LearningStrategy +from assume.common.base import ( + LearningConfig, + LearningStrategy, + is_off_policy, + is_on_policy, +) from assume.common.utils import ( create_rrule, datetime2timestamp, @@ -97,13 +102,15 @@ def __init__( self.calc_lr_from_progress = ( lambda x: self.learning_config.learning_rate ) - - if self.learning_config.off_policy.action_noise_schedule == "linear": - self.calc_noise_from_progress = linear_schedule_func( - self.learning_config.off_policy.noise_dt - ) - else: - self.calc_noise_from_progress = lambda x: self.learning_config.off_policy.noise_dt + # Only set up noise schedule for off-policy algorithms + if is_off_policy(self.learning_config.algorithm): + if self.learning_config.off_policy.action_noise_schedule == "linear": + self.calc_noise_from_progress = linear_schedule_func( + self.learning_config.off_policy.noise_dt + ) + else: + self.calc_noise_from_progress = lambda x: self.learning_config.off_policy.noise_dt + # For on-policy algorithms, no noise schedule needed self.eval_episodes_done = 0 @@ -215,15 +222,25 @@ def determine_validation_interval(self) -> int: training_episodes = self.learning_config.training_episodes validation_interval = min(training_episodes, default_interval) - min_required_episodes = ( - self.learning_config.episodes_collecting_initial_experience - + validation_interval - ) - - if self.learning_config.training_episodes < min_required_episodes: - raise ValueError( - f"Training episodes ({training_episodes}) must be greater than the sum of initial experience episodes ({self.learning_config.episodes_collecting_initial_experience}) 
and evaluation interval ({validation_interval})." + # Only check initial experience episodes for off-policy algorithms + if is_off_policy(self.learning_config.algorithm): + min_required_episodes = ( + self.learning_config.off_policy.episodes_collecting_initial_experience + + validation_interval ) + + if self.learning_config.training_episodes < min_required_episodes: + raise ValueError( + f"Training episodes ({training_episodes}) must be greater than the sum of initial experience episodes ({self.learning_config.off_policy.episodes_collecting_initial_experience}) and evaluation interval ({validation_interval})." + ) + else: + # For on-policy algorithms, no initial experience collection needed + min_required_episodes = validation_interval + + if self.learning_config.training_episodes < min_required_episodes: + raise ValueError( + f"Training episodes ({training_episodes}) must be greater than evaluation interval ({validation_interval})." + ) return validation_interval @@ -376,10 +393,15 @@ async def _store_to_buffer_and_update_sync(self, cache, device) -> None: reward = transform_buffer_data(cache["rewards"], device), ) - if ( - self.episodes_done - >= self.learning_config.episodes_collecting_initial_experience - ): + # Only update policy after initial experience for off-policy algorithms + if is_off_policy(self.learning_config.algorithm): + if ( + self.episodes_done + >= self.learning_config.off_policy.episodes_collecting_initial_experience + ): + self.rl_algorithm.update_policy() + else: + # For on-policy algorithms, update policy immediately self.rl_algorithm.update_policy() def add_observation_to_cache(self, unit_id, start, observation) -> None: @@ -467,11 +489,14 @@ def load_inter_episodic_data(self, inter_episodic_data): self.initialize_policy(inter_episodic_data["actors_and_critics"]) # Disable initial exploration if initial experience collection is complete - if ( - self.episodes_done - >= self.learning_config.episodes_collecting_initial_experience - ): - 
self.turn_off_initial_exploration() + # Only for off-policy algorithms + if is_off_policy(self.learning_config.algorithm): + if ( + self.episodes_done + >= self.learning_config.off_policy.episodes_collecting_initial_experience + ): + self.turn_off_initial_exploration() + # For on-policy algorithms, no initial exploration to disable # In continue_learning mode, disable it only for loaded strategies elif self.learning_config.continue_learning: @@ -521,28 +546,35 @@ def get_progress_remaining(self) -> float: total_duration = self.end - self.start elapsed_duration = self.context.current_timestamp - self.start - learning_episodes = ( - self.learning_config.training_episodes - - self.learning_config.episodes_collecting_initial_experience - ) + # Only calculate progress for off-policy algorithms + if is_off_policy(self.learning_config.algorithm): + initial_experience_episodes = self.learning_config.off_policy.episodes_collecting_initial_experience + learning_episodes = ( + self.learning_config.training_episodes + - initial_experience_episodes + ) - if ( - self.episodes_done - < self.learning_config.episodes_collecting_initial_experience - ): - progress_remaining = 1 - else: - progress_remaining = ( - 1 - - ( - ( - self.episodes_done - - self.learning_config.episodes_collecting_initial_experience + if ( + self.episodes_done + < initial_experience_episodes + ): + progress_remaining = 1 + else: + progress_remaining = ( + 1 + - ( + ( + self.episodes_done + - initial_experience_episodes + ) + / learning_episodes ) - / learning_episodes + - ((1 / learning_episodes) * (elapsed_duration / total_duration)) ) - - ((1 / learning_episodes) * (elapsed_duration / total_duration)) - ) + else: + # For on-policy algorithms, simpler progress calculation + total_episodes = self.learning_config.training_episodes + progress_remaining = 1 - (self.episodes_done / total_episodes) - (elapsed_duration / total_duration) return progress_remaining @@ -712,7 +744,11 @@ def init_logging( 
evaluation_mode=self.learning_config.evaluation_mode, episode=episode, eval_episode=eval_episode, - episodes_collecting_initial_experience=self.learning_config.episodes_collecting_initial_experience, + episodes_collecting_initial_experience=( + self.learning_config.off_policy.episodes_collecting_initial_experience + if is_off_policy(self.learning_config.algorithm) + else 0 + ), ) # Parameters required for sending data to the output role @@ -786,25 +822,55 @@ def write_rl_grad_params_to_output( Each dictionary maps critic names to their corresponding loss values. """ # gradient steps performed in previous training episodes - gradient_steps_done = ( - max( - self.episodes_done - - self.learning_config.episodes_collecting_initial_experience, - 0, + if is_off_policy(self.learning_config.algorithm): + gradient_steps_done = ( + max( + self.episodes_done + - self.learning_config.off_policy.episodes_collecting_initial_experience, + 0, + ) + * int( + (timestamp2datetime(self.end) - timestamp2datetime(self.start)) + / pd.Timedelta(self.learning_config.train_freq) + ) + * self.learning_config.off_policy.gradient_steps ) - * int( - (timestamp2datetime(self.end) - timestamp2datetime(self.start)) - / pd.Timedelta(self.learning_config.train_freq) + current_gradient_steps = self.learning_config.off_policy.gradient_steps + else: + # For on-policy, no gradient steps concept - use 1 for calculation purposes + gradient_steps_done = 0 + current_gradient_steps = 1 + + # Handle different parameter structures for on-policy vs off-policy + if self.learning_config.algorithm == "mappo": + # For PPO/MAPPO: unit_params_list length equals actual update steps + actual_gradient_steps = len(unit_params_list) + gradient_step_range = range(actual_gradient_steps) + # For on-policy, use simple step counting + base_step = self.update_steps * actual_gradient_steps + else: + # For off-policy: use configured gradient_steps + actual_gradient_steps = self.learning_config.off_policy.gradient_steps + 
gradient_step_range = range(actual_gradient_steps) + + # gradient steps performed in previous training episodes + gradient_steps_done = ( + max( + self.episodes_done + - self.learning_config.off_policy.episodes_collecting_initial_experience, + 0, + ) + * int( + (timestamp2datetime(self.end) - timestamp2datetime(self.start)) + / pd.Timedelta(self.learning_config.train_freq) + ) + * self.learning_config.off_policy.gradient_steps ) - * self.learning_config.gradient_steps - ) + base_step = gradient_steps_done + self.update_steps * actual_gradient_steps output_list = [ { - "step": gradient_steps_done - + self.update_steps - * self.learning_config.gradient_steps # gradient steps performed in current training episode - + gradient_step, + "step": base_step + gradient_step, "unit": u_id, "actor_loss": params["actor_loss"], "actor_total_grad_norm": params["actor_total_grad_norm"], @@ -814,7 +880,7 @@ def write_rl_grad_params_to_output( "critic_max_grad_norm": params["critic_max_grad_norm"], "learning_rate": learning_rate, } - for gradient_step in range(self.learning_config.gradient_steps) + for gradient_step in gradient_step_range for u_id, params in unit_params_list[gradient_step].items() ] diff --git a/assume/scenario/loader_csv.py b/assume/scenario/loader_csv.py index 115ad74d6..ae59cc078 100644 --- a/assume/scenario/loader_csv.py +++ b/assume/scenario/loader_csv.py @@ -1142,7 +1142,7 @@ def run_learning( if ( episode % validation_interval == 0 and episode - >= world.learning_role.learning_config.episodes_collecting_initial_experience + >= world.learning_role.learning_config.off_policy.episodes_collecting_initial_experience + validation_interval ): world.reset() @@ -1190,7 +1190,7 @@ def run_learning( # save the policies after each episode in case the simulation is stopped or crashes if ( episode - >= world.learning_role.learning_config.episodes_collecting_initial_experience + >= world.learning_role.learning_config.off_policy.episodes_collecting_initial_experience + 
validation_interval ): world.learning_role.rl_algorithm.save_params( diff --git a/assume/strategies/learning_strategies.py b/assume/strategies/learning_strategies.py index a07d03698..7337fafd8 100644 --- a/assume/strategies/learning_strategies.py +++ b/assume/strategies/learning_strategies.py @@ -22,6 +22,10 @@ from assume.common.utils import min_max_scale from assume.reinforcement_learning.algorithms import actor_architecture_aliases from assume.reinforcement_learning.learning_utils import NormalActionNoise +from assume.common.base import ( + is_off_policy, + is_on_policy, +) logger = logging.getLogger(__name__) @@ -72,13 +76,16 @@ def __init__(self, *args, **kwargs): # learning role overwrites this if loaded from file or after initial experience episodes self.collect_initial_experience_mode = True - self.action_noise = NormalActionNoise( - mu=0.0, - sigma=self.learning_config.off_policy.noise_sigma, - action_dimension=self.act_dim, - scale=self.learning_config.off_policy.noise_scale, - dt=self.learning_config.off_policy.noise_dt, - ) + + if is_off_policy(self.algorithm): + self.action_noise = NormalActionNoise( + mu=0.0, + sigma=self.learning_config.off_policy.noise_sigma, + action_dimension=self.act_dim, + scale=self.learning_config.off_policy.noise_scale, + dt=self.learning_config.off_policy.noise_dt, + ) + # For on-policy algorithms, no action noise needed - variable remains undefined self.learning_role.register_strategy(self) @@ -269,7 +276,8 @@ def get_actions(self, next_observation): if self.learning_mode and not self.evaluation_mode: # if we are in learning mode the first x episodes we want to explore the entire action space # to get a good initial experience, in the area around the costs of the agent - if self.collect_initial_experience_mode: + # Only use initial experience collection for off-policy algorithms (not PPO) + if self.collect_initial_experience_mode and self.algorithm != "mappo": # define current action as solely noise noise = th.normal( 
mean=0.0, @@ -291,7 +299,7 @@ def get_actions(self, next_observation): self._last_value = th.tensor(0.0, device=self.device) else: - # Check if we're using PPO algorithm + # For PPO/MAPPO, always use the policy (no initial random exploration) if self.algorithm == "mappo": # PPO: use get_action_and_log_prob for proper stochastic sampling curr_action, log_prob = self.actor.get_action_and_log_prob(next_observation.unsqueeze(0)) From d5521ab1ad3de1eb07606bbb6cfc9cff15fb1d74 Mon Sep 17 00:00:00 2001 From: Harshul-18 Date: Sat, 14 Feb 2026 09:10:34 +0100 Subject: [PATCH 15/44] Improved the parameters flow independently and fixed different buffer definitions to defining a single buffer argument --- assume/common/base.py | 24 +++++++------- .../algorithms/mappo.py | 17 ++++++---- .../reinforcement_learning/learning_role.py | 33 ++++++++----------- .../neural_network_architecture.py | 19 +++++++---- 4 files changed, 49 insertions(+), 44 deletions(-) diff --git a/assume/common/base.py b/assume/common/base.py index 3293dec86..622f9dcca 100644 --- a/assume/common/base.py +++ b/assume/common/base.py @@ -811,18 +811,18 @@ class OffPolicyConfig(AlgorithmConfig): replay_buffer_size (int): The maximum number of transitions stored in the replay buffer. Default is 50000. 
""" - # episodes_collecting_initial_experience: int = 5 - # gradient_steps: int = 100 - # noise_dt: int = 1 - # noise_scale: int = 1 - # noise_sigma: float = 0.1 - # actor_architecture: str = "mlp" - # action_noise_schedule: str | None = None - # policy_delay: int = 2 - # tau: float = 0.005 - # target_policy_noise: float = 0.2 - # target_noise_clip: float = 0.5 - # replay_buffer_size: int = 50000 + episodes_collecting_initial_experience: int = 5 + gradient_steps: int = 100 + noise_dt: int = 1 + noise_scale: int = 1 + noise_sigma: float = 0.1 + actor_architecture: str = "mlp" + action_noise_schedule: str | None = None + policy_delay: int = 2 + tau: float = 0.005 + target_policy_noise: float = 0.2 + target_noise_clip: float = 0.5 + replay_buffer_size: int = 50000 @dataclass diff --git a/assume/reinforcement_learning/algorithms/mappo.py b/assume/reinforcement_learning/algorithms/mappo.py index da7952ed1..49cd8d3b5 100644 --- a/assume/reinforcement_learning/algorithms/mappo.py +++ b/assume/reinforcement_learning/algorithms/mappo.py @@ -44,10 +44,10 @@ class PPO(A2CAlgorithm): def __init__( self, learning_role, - clip_range=0.1, + clip_range=0.2, clip_range_vf=0.1, - n_epochs=30, - entropy_coef=0.02, + n_epochs=50, + entropy_coef=0.05, vf_coef=1.0, max_grad_norm=0.5, ): @@ -192,8 +192,8 @@ def update_policy(self) -> None: strategies = list(self.learning_role.rl_strats.values()) n_rl_agents = len(strategies) - # Get rollout buffer - rollout_buffer = self.learning_role.rollout_buffer + # Get buffer (will be RolloutBuffer for on-policy algorithms) + rollout_buffer = self.learning_role.buffer # Check if rollout buffer has data if rollout_buffer is None or rollout_buffer.pos == 0: @@ -325,8 +325,11 @@ def create_step_entry(): returns_i = batch.returns[:, i] old_values_i = batch.old_values[:, i] - advantages_i = (advantages_i - advantages_i.mean()) / ( - advantages_i.std() + 1e-8 + # Normalize advantages across the entire batch, not per-mini-batch + # This provides more 
stable training + advantages_flat = advantages_i.flatten() + advantages_i = (advantages_i - advantages_flat.mean()) / ( + advantages_flat.std() + 1e-8 ) log_probs, entropy = actor.evaluate_actions(obs_i, actions_i) diff --git a/assume/reinforcement_learning/learning_role.py b/assume/reinforcement_learning/learning_role.py index 23a0ce0ff..dbfb7273d 100644 --- a/assume/reinforcement_learning/learning_role.py +++ b/assume/reinforcement_learning/learning_role.py @@ -61,9 +61,8 @@ def __init__( ): super().__init__() - # how many learning roles do exist and how are they named - self.buffer: ReplayBuffer = None - self.rollout_buffer: RolloutBuffer = None # For on-policy algorithms (PPO) + # Single buffer that can be either ReplayBuffer (off-policy) or RolloutBuffer (on-policy) + self.buffer = None self.episodes_done = 0 self.rl_strats: dict[int, LearningStrategy] = {} self.learning_config = learning_config @@ -329,10 +328,9 @@ async def _store_to_buffer_and_update_sync(self, cache, device) -> None: ) return - # check which buffer type to use based on algorithm - if self.learning_config.algorithm == "mappo": - # for PPO use on-policy RolloutBuffer - # Add each transition to the rollout buffer + # Add data to buffer - type depends on algorithm category + if is_on_policy(self.learning_config.algorithm): + # For on-policy algorithms (PPO/MAPPO), use RolloutBuffer for timestamp in sorted(cache["obs"].keys()): obs_data = transform_buffer_data( { @@ -375,17 +373,16 @@ async def _store_to_buffer_and_update_sync(self, cache, device) -> None: ) # Add to rollout buffer - if self.rollout_buffer is not None: - self.rollout_buffer.add( - obs = obs_data, - action = actions_data, - reward = rewards_data, - done = dones_data, - value = values_data, - log_prob = log_probs_data - ) + self.buffer.add( + obs = obs_data, + action = actions_data, + reward = rewards_data, + done = dones_data, + value = values_data, + log_prob = log_probs_data + ) else: - # for TD3/DDPG use off-policy 
ReplayBuffer + # For off-policy algorithms (TD3/DDPG), use ReplayBuffer # rewrite dict so that obs.shape == (n_rl_units, obs_dim) and sorted by keys and store in buffer self.buffer.add( obs = transform_buffer_data(cache["obs"], device), @@ -484,7 +481,6 @@ def load_inter_episodic_data(self, inter_episodic_data): self.rl_eval = inter_episodic_data["all_eval"] self.avg_rewards = inter_episodic_data["avg_all_eval"] self.buffer = inter_episodic_data["buffer"] - self.rollout_buffer = inter_episodic_data["rollout_buffer"] self.initialize_policy(inter_episodic_data["actors_and_critics"]) @@ -517,7 +513,6 @@ def get_inter_episodic_data(self): "all_eval": self.rl_eval, "avg_all_eval": self.avg_rewards, "buffer": self.buffer, - "rollout_buffer": self.rollout_buffer, "actors_and_critics": self.rl_algorithm.extract_policy(), } diff --git a/assume/reinforcement_learning/neural_network_architecture.py b/assume/reinforcement_learning/neural_network_architecture.py index 59e8e80e0..74c8d69eb 100644 --- a/assume/reinforcement_learning/neural_network_architecture.py +++ b/assume/reinforcement_learning/neural_network_architecture.py @@ -2,6 +2,7 @@ # # SPDX-License-Identifier: AGPL-3.0-or-later +import numpy as np import torch as th from torch import nn from torch.nn import functional as F @@ -223,11 +224,14 @@ def __init__( def _init_weights(self) -> None: """ - Apply Orthogonal initialization. + Apply Orthogonal initialization with appropriate gains. 
""" def init_layer(m): if isinstance(m, nn.Linear): - nn.init.orthogonal_(m.weight, gain=1.0) + if m.out_features == 1: # Output layer + nn.init.orthogonal_(m.weight, gain=0.01) + else: # Hidden layers + nn.init.orthogonal_(m.weight, gain=np.sqrt(2)) nn.init.zeros_(m.bias) self.apply(init_layer) @@ -421,15 +425,18 @@ def __init__( self._init_weights() def _init_weights(self) -> None: - """Apply orthogonal initialization.""" + """Apply orthogonal initialization with appropriate gains.""" def init_layer(m): if isinstance(m, nn.Linear): - nn.init.orthogonal_(m.weight, gain=0.01) + if m.out_features == self.act_dim: # Output layer (mean) + nn.init.orthogonal_(m.weight, gain=0.01) + else: # Hidden layers + nn.init.orthogonal_(m.weight, gain=np.sqrt(2)) nn.init.zeros_(m.bias) # Initialize hidden layers with larger gain - nn.init.orthogonal_(self.FC1.weight, gain=1.0) - nn.init.orthogonal_(self.FC2.weight, gain=1.0) + nn.init.orthogonal_(self.FC1.weight, gain=np.sqrt(2)) + nn.init.orthogonal_(self.FC2.weight, gain=np.sqrt(2)) nn.init.zeros_(self.FC1.bias) nn.init.zeros_(self.FC2.bias) From fddaf2c423d4b26e91141ba660f67241d4f5b982 Mon Sep 17 00:00:00 2001 From: Harshul-18 Date: Sat, 14 Feb 2026 09:41:45 +0100 Subject: [PATCH 16/44] Updated base_algorithm.py documentation. --- .../algorithms/base_algorithm.py | 345 +++++++++++++----- 1 file changed, 246 insertions(+), 99 deletions(-) diff --git a/assume/reinforcement_learning/algorithms/base_algorithm.py b/assume/reinforcement_learning/algorithms/base_algorithm.py index cf8a76d27..d1ac21ddb 100644 --- a/assume/reinforcement_learning/algorithms/base_algorithm.py +++ b/assume/reinforcement_learning/algorithms/base_algorithm.py @@ -17,18 +17,37 @@ class RLAlgorithm: - """ - The base RL model class. To implement your own RL algorithm, you need to subclass this class and implement the `update_policy` method. - - Args: - learning_role (Learning Role object): Learning object + """Base reinforcement learning algorithm class. 
+ + This is the foundation class for all Reinforcement Learning algorithms in the framework. + To implement a custom RL algorithm, subclass this class and override the `update_policy` method. + + The class provides common functionality for: + - Learning rate scheduling + - Parameter saving/loading + - Device management + + Attributes: + learning_role: The learning role object containing configuration and strategies. + learning_config: Configuration parameters from the learning role. + device: The computation device (CPU/GPU) for tensors. + float_type: The floating point precision type for computations. + actor_architecture_class: The actor network architecture class. + + Example: + >>> class CustomAlgorithm(RLAlgorithm): + ... def update_policy(self): + ... # Custom policy update logic + ... pass """ - def __init__( - self, - # init learning_role as object of Learning class - learning_role, - ): + def __init__(self, learning_role): + """Initialize the RL algorithm. + + Args: + learning_role: Learning role object containing configuration and strategies. + Must be an instance of the Learning class. + """ super().__init__() self.learning_role = learning_role @@ -51,17 +70,23 @@ def update_learning_rate( optimizers: list[th.optim.Optimizer] | th.optim.Optimizer, learning_rate: float, ) -> None: - """ - Update the optimizers learning rate using the current learning rate schedule and the current progress remaining (from 1 to 0). - + """Update optimizer learning rates. + + Sets the learning rate for one or more optimizers. Handles both single + optimizers and lists of optimizers uniformly. + Args: - optimizers (List[th.optim.Optimizer] | th.optim.Optimizer): An optimizer or a list of optimizers. - + optimizers: A single optimizer or list of optimizers to update. + learning_rate: The new learning rate value to set. 
+ Note: - Adapted from SB3: + Adapted from Stable Baselines 3: - https://github.com/DLR-RM/stable-baselines3/blob/512eea923afad6f6da4bb53d72b6ea4c6d856e59/stable_baselines3/common/base_class.py#L286 - https://github.com/DLR-RM/stable-baselines3/blob/512eea923afad6f6da4bb53d72b6ea4c6d856e59/stable_baselines3/common/utils.py#L68 - + + Example: + >>> optimizer = AdamW(model.parameters(), lr=0.001) + >>> algorithm.update_learning_rate(optimizer, 0.0001) """ if not isinstance(optimizers, list): @@ -70,70 +95,128 @@ def update_learning_rate( for param_group in optimizer.param_groups: param_group["lr"] = learning_rate - def update_policy(self): + def update_policy(self) -> None: + """Update the policy parameters. + + This method must be overridden by subclasses to implement the specific + policy update logic for each RL algorithm. The base implementation raises + an error to enforce this requirement. + + Raises: + NotImplementedError: If called on the base class without override. + + Example: + >>> class CustomAlgorithm(RLAlgorithm): + ... def update_policy(self): + ... # Implement algorithm-specific policy update + ... pass + """ logger.error( - "No policy update function of the used Rl algorithm was defined. Please define how the policies should be updated in the specific algorithm you use" + "No policy update function of the used RL algorithm was defined. " + "Please define how the policies should be updated in the specific " + "algorithm you use." ) def load_obj(self, directory: str): - """ - Load an object from a specified directory. - - This method loads an object, typically saved as a checkpoint file, from the specified - directory and returns it. It uses the `torch.load` function and specifies the device for loading. - + """Load a serialized object from directory. + + Loads a PyTorch serialized object from the specified directory path. + The object is loaded onto the device specified by the algorithm's configuration. 
+ Args: - directory (str): The directory from which the object should be loaded. - + directory: Path to the directory containing the serialized object. + Should point to a valid .pt file. + Returns: - object: The loaded object. + object: The deserialized Python object. + + Example: + >>> model_state = algorithm.load_obj('/path/to/checkpoint.pt') """ return th.load(directory, map_location=self.device, weights_only=True) def load_params(self, directory: str) -> None: - """ - Load learning params - abstract method to be implemented by the Learning Algorithm + """Load learning parameters from disk. + + Abstract method that should be implemented by subclasses to load + algorithm-specific parameters from the specified directory. + + Args: + directory: Path to the directory containing saved parameters. + + Note: + This is an abstract method that must be overridden by subclasses. """ class A2CAlgorithm(RLAlgorithm): - """ - The base A2C model class for actor-critic algorithms. - Provides shared save/load/initialize functionality for MATD3, MADDPG, and MAPPO. - - Args: - learning_role (Learning Role object): Learning object + """Base actor-critic algorithm class. + + Provides shared functionality for actor-critic reinforcement learning algorithms + including parameter management, network initialization, and saving/loading utilities. + This serves as the foundation for algorithms like MATD3, MADDPG, and MAPPO. + + The class handles: + - Actor and critic network creation and management + - Target network synchronization (when applicable) + - Parameter saving and loading + - Weight transfer between different agent configurations Attributes: - uses_target_networks (bool): Whether this algorithm uses target networks. + uses_target_networks: Whether this algorithm uses target networks. TD3 and DDPG use target networks (True), PPO does not (False). + + Example: + >>> class ActorCriticAlgorithm(A2CAlgorithm): + ... def update_policy(self): + ... 
# Custom actor-critic update logic + ... pass """ - # Class attribute - subclasses can override + #: Whether this algorithm uses target networks for stability. + #: TD3 and DDPG use target networks (True), PPO does not (False). uses_target_networks: bool = True - def __init__( - self, - learning_role, - ): - super().__init__(learning_role) - - def save_params(self, directory): + def __init__(self, learning_role): + """Initialize the actor-critic algorithm. + + Args: + learning_role: Learning role object containing configuration and strategies. """ - Save the parameters of both actor and critic networks. + super().__init__(learning_role) + def save_params(self, directory: str) -> None: + """Save actor and critic network parameters. + + Saves both actor and critic network parameters to separate subdirectories. + Creates the directory structure if it doesn't exist. + Args: - directory (str): The base directory for saving the parameters. + directory: Base directory path where parameters will be saved. + Will create 'actors/' and 'critics/' subdirectories. + + Example: + >>> algorithm.save_params('/path/to/save/directory') + # Creates: + # /path/to/save/directory/actors/ + # /path/to/save/directory/critics/ """ self.save_critic_params(directory=f"{directory}/critics") self.save_actor_params(directory=f"{directory}/actors") - def save_critic_params(self, directory): - """ - Save the parameters of critic networks. - + def save_critic_params(self, directory: str) -> None: + """Save critic network parameters. + + Saves critic networks, their optimizers, and target critics (if applicable) + for all registered learning strategies. Also saves agent ID ordering information + to ensure proper loading. + Args: - directory (str): The base directory for saving the parameters. + directory: Directory path where critic parameters will be saved. + Will be created if it doesn't exist. 
+ + Example: + >>> algorithm.save_critic_params('/path/to/critics/') """ os.makedirs(directory, exist_ok=True) for u_id, strategy in self.learning_role.rl_strats.items(): @@ -155,12 +238,18 @@ def save_critic_params(self, directory): with open(map_path, "w") as f: json.dump(mapping, f, indent=2) - def save_actor_params(self, directory): - """ - Save the parameters of actor networks. - + def save_actor_params(self, directory: str) -> None: + """Save actor network parameters. + + Saves actor networks, their optimizers, and target actors (if applicable) + for all registered learning strategies. + Args: - directory (str): The base directory for saving the parameters. + directory: Directory path where actor parameters will be saved. + Will be created if it doesn't exist. + + Example: + >>> algorithm.save_actor_params('/path/to/actors/') """ os.makedirs(directory, exist_ok=True) for u_id, strategy in self.learning_role.rl_strats.items(): @@ -176,22 +265,36 @@ def save_actor_params(self, directory): th.save(obj, path) def load_params(self, directory: str) -> None: - """ - Load the parameters of both actor and critic networks. - + """Load actor and critic network parameters. + + Loads both actor and critic parameters from the specified directory. + Calls load_critic_params() and load_actor_params() sequentially. + Args: - directory (str): The directory from which the parameters should be loaded. + directory: Base directory containing 'actors/' and 'critics/' subdirectories. + + Example: + >>> algorithm.load_params('/path/to/saved/parameters/') """ self.load_critic_params(directory) self.load_actor_params(directory) def load_critic_params(self, directory: str) -> None: - """ - Load critic, target_critic, and optimizer states for each agent strategy. - If agent count differs between saved and current model, performs weight transfer for both networks. + """Load critic network parameters. 
+ + Loads critic networks, target critics (if applicable), and optimizer states + for each registered agent strategy. Handles cases where the number of agents + differs between saved and current models by performing intelligent weight transfer. Args: - directory (str): The directory from which the parameters should be loaded. + directory: Base directory containing the 'critics/' subdirectory. + + Note: + Automatically handles agent count mismatches through weight transfer. + Preserves the order of agents using saved mapping information. + + Example: + >>> algorithm.load_critic_params('/path/to/saved/parameters/') """ logger.info("Loading critic parameters...") @@ -285,11 +388,16 @@ def load_critic_params(self, directory: str) -> None: logger.warning(f"Failed to load critic for {u_id}: {e}") def load_actor_params(self, directory: str) -> None: - """ - Load the parameters of actor networks from a specified directory. - + """Load actor network parameters. + + Loads actor networks, target actors (if applicable), and optimizer states + for each registered agent strategy from the specified directory. + Args: - directory (str): The directory from which the parameters should be loaded. + directory: The directory containing the 'actors/' subdirectory where the parameters should be loaded. + + Example: + >>> algorithm.load_actor_params('/path/to/saved/parameters/') """ logger.info("Loading actor parameters...") if not os.path.exists(directory): @@ -325,8 +433,17 @@ def initialize_policy(self, actors_and_critics: dict = None) -> None: If `actors_and_critics` is provided, it assigns existing networks to the respective attributes. Args: - actors_and_critics (dict): The actor and critic networks to be assigned. - + actors_and_critics: Optional dictionary containing pre-trained networks. + If None, creates new networks. If provided, assigns existing networks. + Expected format includes 'actors', 'critics', and optionally + 'actor_targets' and 'target_critics' keys. 
+ + Example: + >>> # Create new networks + >>> algorithm.initialize_policy() + >>> + >>> # Assign existing networks + >>> algorithm.initialize_policy(existing_networks_dict) """ if actors_and_critics is None: self.check_strategy_dimensions() @@ -347,10 +464,25 @@ def initialize_policy(self, actors_and_critics: dict = None) -> None: self.unique_obs_dim = actors_and_critics["unique_obs_dim"] def check_strategy_dimensions(self) -> None: - """ - Iterate over all learning strategies and check if the dimensions of observations and actions are the same. - Also check if the unique observation dimensions are the same. If not, raise a ValueError. - This is important for centralized critic algorithms, as it uses a centralized critic that requires consistent dimensions across all agents. + """Validate learning strategy dimensions. + + Ensures all registered learning strategies have consistent dimensional + properties required for centralized critic algorithms. Checks: + - Observation dimensions + - Action dimensions + - Unique observation dimensions + - Timeseries observation dimensions + - Foresight parameters + If not consistent, raises a ValueError. This is important for centralized + critic algorithms, as it uses a centralized critic that requires consistent + dimensions across all agents. + + Raises: + ValueError: If any dimension mismatch is detected across strategies. + + Note: + This validation is crucial for centralized critic algorithms where + all agents must have compatible observation and action spaces. """ foresight_list = [] obs_dim_list = [] @@ -402,16 +534,19 @@ def check_strategy_dimensions(self) -> None: self.obs_dim = obs_dim_list[0] def create_actors(self) -> None: - """ - Create actor networks for reinforcement learning for each unit strategy. - - This method initializes actor networks and their corresponding target networks for each unit strategy. - The actors are designed to map observations to action probabilities in a reinforcement learning setting. 
- + """Create actor networks for all learning strategies. + + This method initializes actor networks and their corresponding target networks for + each registered unit strategy. Actors map observations to actions. + Note: - The observation dimension need to be the same, due to the centralized critic that all actors share. - If you have units with different observation dimensions. They need to have different critics and hence learning roles. - + All strategies must have the same observation dimension due to the + centralized critic architecture. Units with different observation + dimensions require separate learning roles with different critics. + + Example: + >>> algorithm.create_actors() + >>> # Creates actor and actor_target for each strategy """ for strategy in self.learning_role.rl_strats.values(): @@ -444,14 +579,19 @@ def create_actors(self) -> None: strategy.actor.loaded = False def create_critics(self) -> None: - """ - Create critic networks for reinforcement learning. - - This method initializes critic networks for each agent in the reinforcement learning setup. - + """Create critic networks for all learning strategies. + + Initializes critic networks and their corresponding target networks for + each registered agent strategy. Critics evaluate state-action pairs. + Note: - The observation dimension need to be the same, due to the centralized critic that all actors share. - If you have units with different observation dimensions. They need to have different critics and hence learning roles. + All strategies must have the same observation dimension due to the + centralized critic architecture. Units with different observation + dimensions require separate learning roles with different critics. 
+ + Example: + >>> algorithm.create_critics() + >>> # Creates critics and target_critics for each strategy """ n_agents = len(self.learning_role.rl_strats) @@ -483,15 +623,22 @@ def create_critics(self) -> None: ) def extract_policy(self) -> dict: - """ - Extract actor and critic networks. - - This method extracts the actor and critic networks associated with each learning strategy and organizes them into a - dictionary structure. The extracted networks include actors, actor_targets, critics, and target_critics. The resulting - dictionary is typically used for saving and sharing these networks. - + """Extract all policy networks. + + Collects actor and critic networks from all learning strategies into + a structured dictionary. Includes both primary and target networks. + Returns: - dict: The extracted actor and critic networks. + Dictionary containing all network components organized by type: + - 'actors': Primary actor networks + - 'actor_targets': Target actor networks + - 'critics': Primary critic networks + - 'target_critics': Target critic networks + - Dimension information for reconstruction + + Example: + >>> policy_dict = algorithm.extract_policy() + >>> # Contains all networks ready for saving or transfer """ actors = {} actor_targets = {} From 239fb4314b2247579961aeafc5c648e8a7bb687d Mon Sep 17 00:00:00 2001 From: Harshul-18 Date: Sat, 14 Feb 2026 15:58:09 +0100 Subject: [PATCH 17/44] Updated maddpg.py documentation. --- .../algorithms/maddpg.py | 91 +++++++++++-------- 1 file changed, 53 insertions(+), 38 deletions(-) diff --git a/assume/reinforcement_learning/algorithms/maddpg.py b/assume/reinforcement_learning/algorithms/maddpg.py index 2a4b61633..87ac2220d 100644 --- a/assume/reinforcement_learning/algorithms/maddpg.py +++ b/assume/reinforcement_learning/algorithms/maddpg.py @@ -17,27 +17,38 @@ class DDPG(A2CAlgorithm): - """ - Deep Deterministic Policy Gradient (DDPG) Algorithm. 
- - An off-policy actor-critic algorithm using deterministic policy gradients. - It uses a single critic network and updates the actor at every training step. - Target networks are updated using Polyak averaging to stabilize the learning - process. It is designed for environments with continuous action - spaces by combining the benefits of Q-learning and policy gradients. It - utilizes a replay buffer to break correlations between consecutive samples - and improve sample efficiency. - - Args: - learning_role (LearningRole): The central learning role managing the agents and buffer. + """Deep Deterministic Policy Gradient (DDPG) Algorithm. + + An off-policy actor-critic algorithm that uses deterministic policy gradients + for continuous action spaces. DDPG combines Q-learning with policy gradients, + using: + + - A single critic network to estimate Q-values + - Deterministic actor networks that map states to actions + - Target networks updated via Polyak averaging for stability + - Replay buffer for sample efficiency and decorrelation + + Unlike TD3, DDPG updates the actor at every training step without delay. + + Attributes: + n_updates: Counter for gradient updates performed. + grad_clip_norm: Maximum gradient norm for clipping. + critic_architecture_class: Critic network architecture (CriticDDPG). + + Example: + >>> ddpg = DDPG(learning_role) + >>> ddpg.update_policy() # Performs one training iteration """ - def __init__(self, learning_role): - """ - Initialize DDPG algorithm. + def __init__(self, learning_role) -> None: + """Initialize the DDPG algorithm. + + Sets up the algorithm with gradient counters, clipping parameters, + and critic architecture. Args: - learning_role (LearningRole): The learning role object. + learning_role: Learning role object managing agents and replay buffer. + Must have off-policy configuration. 
""" super().__init__(learning_role) @@ -51,18 +62,21 @@ def __init__(self, learning_role): self.critic_architecture_class = CriticDDPG def update_policy(self) -> None: - """ - Update actor and critic networks using the DDPG algorithm. - Performs sampling from replay buffer. Updates the critic (MSE Loss). - Updates the Actor (Policy Gradient). Updates the target networks using - polyak update. + """Update actor and critic networks using DDPG algorithm. + + Performs one complete training iteration consisting of: + 1. Sampling batches from replay buffer + 2. Updating critic networks using MSE loss + 3. Updating actor networks using policy gradient + 4. Updating target networks via Polyak averaging + """ logger.debug("Updating Policy (MADDPG/DDPG)") strategies = list(self.learning_role.rl_strats.values()) n_rl_agents = len(strategies) - # Initialize metrics storage + # Initialize metrics storage for gradient logging unit_params = [ { u_id: { @@ -78,11 +92,12 @@ def update_policy(self) -> None: for _ in range(self.learning_config.off_policy.gradient_steps) ] - # Update noise and learning rate schedules + # Update noise decay and learning rate based on training progress progress_remaining = self.learning_role.get_progress_remaining() updated_noise_decay = self.learning_role.calc_noise_from_progress(progress_remaining) learning_rate = self.learning_role.calc_lr_from_progress(progress_remaining) + # Update learning rates and noise schedules for all strategies for strategy in strategies: self.update_learning_rate( [strategy.critics.optimizer, strategy.actor.optimizer], @@ -90,11 +105,11 @@ def update_policy(self) -> None: ) strategy.action_noise.update_noise_decay(updated_noise_decay) - # Main gradient step loop + # Perform gradient updates for specified number of steps for step in range(self.learning_config.off_policy.gradient_steps): self.n_updates += 1 - # Sample from replay buffer + # Sample transition batch from replay buffer transitions = 
self.learning_role.buffer.sample( self.learning_config.batch_size ) @@ -106,7 +121,7 @@ def update_policy(self) -> None: transitions.rewards, ) - # Compute target actions (no smoothing noise in DDPG) + # Compute target actions using target actors with th.no_grad(): next_actions = th.stack([ strategy.actor_target(next_states[:, i, :]).clamp(-1, 1) @@ -117,7 +132,7 @@ def update_policy(self) -> None: all_actions = actions.view(self.learning_config.batch_size, -1) - # Precompute observation slices + # Extract unique observations for centralized critic construction unique_obs_from_others = states[ :, :, self.obs_dim - self.unique_obs_dim : ].reshape(self.learning_config.batch_size, n_rl_agents, -1) @@ -126,9 +141,9 @@ def update_policy(self) -> None: :, :, self.obs_dim - self.unique_obs_dim : ].reshape(self.learning_config.batch_size, n_rl_agents, -1) - # ================================================================= - # CRITIC UPDATE - # ================================================================= + # ------------------------------------------------------------ + # CRITIC UPDATE PHASE + # ------------------------------------------------------------ for strategy in strategies: strategy.critics.optimizer.zero_grad(set_to_none=True) @@ -194,9 +209,9 @@ def update_policy(self) -> None: unit_params[step][strategy.unit_id]["critic_total_grad_norm"] = total_norm unit_params[step][strategy.unit_id]["critic_max_grad_norm"] = max_grad_norm - # ================================================================= - # ACTOR UPDATE (every step, no delay in DDPG) - # ================================================================= + # ------------------------------------------------------------ + # ACTOR UPDATE PHASE (updated every step) + # ------------------------------------------------------------ for strategy in strategies: strategy.actor.optimizer.zero_grad(set_to_none=True) @@ -247,9 +262,9 @@ def update_policy(self) -> None: 
unit_params[step][strategy.unit_id]["actor_total_grad_norm"] = total_norm unit_params[step][strategy.unit_id]["actor_max_grad_norm"] = max_grad_norm - # ================================================================= - # TARGET NETWORK UPDATES (Polyak averaging) - # ================================================================= + # ------------------------------------------------------------ + # TARGET NETWORK UPDATE PHASE (Polyak averaging) + # ------------------------------------------------------------ all_critic_params = [] all_target_critic_params = [] all_actor_params = [] @@ -272,5 +287,5 @@ def update_policy(self) -> None: self.learning_config.off_policy.tau, ) - # Log metrics + # Log gradient parameters and metrics to output self.learning_role.write_rl_grad_params_to_output(learning_rate, unit_params) From 19c975a5be2eb1a47a8806f83bfae60cd4c445df Mon Sep 17 00:00:00 2001 From: Harshul-18 Date: Mon, 16 Feb 2026 08:26:27 +0100 Subject: [PATCH 18/44] temporary fix --- assume/reinforcement_learning/algorithms/matd3.py | 1 - assume/reinforcement_learning/neural_network_architecture.py | 3 --- 2 files changed, 4 deletions(-) diff --git a/assume/reinforcement_learning/algorithms/matd3.py b/assume/reinforcement_learning/algorithms/matd3.py index 39dfdd0b1..578077f13 100644 --- a/assume/reinforcement_learning/algorithms/matd3.py +++ b/assume/reinforcement_learning/algorithms/matd3.py @@ -17,7 +17,6 @@ logger = logging.getLogger(__name__) -class TD3(A2CAlgorithm): class TD3(A2CAlgorithm): """ Twin Delayed Deep Deterministic Policy Gradients (TD3). 
diff --git a/assume/reinforcement_learning/neural_network_architecture.py b/assume/reinforcement_learning/neural_network_architecture.py index 75f114fa9..c6c6ebfb0 100644 --- a/assume/reinforcement_learning/neural_network_architecture.py +++ b/assume/reinforcement_learning/neural_network_architecture.py @@ -12,9 +12,6 @@ from assume.reinforcement_learning.learning_utils import activation_function_limit -class Critic(nn.Module): - """ - Base Critic class handling architecture generation and initialization. class Critic(nn.Module): """ Base Critic class handling architecture generation and initialization. From 7727cb99b8ef28185ca7eab2b644ec507028b144 Mon Sep 17 00:00:00 2001 From: Harshul-18 Date: Mon, 16 Feb 2026 09:21:41 +0100 Subject: [PATCH 19/44] Updated the example_02a config file to test the scenario --- assume/common/base.py | 5 +- .../algorithms/base_algorithm.py | 6 -- .../algorithms/matd3.py | 2 - assume/reinforcement_learning/buffer.py | 1 - .../reinforcement_learning/learning_role.py | 33 +-------- .../neural_network_architecture.py | 17 ----- assume/strategies/learning_strategies.py | 16 +--- examples/inputs/example_02a/config.yaml | 74 ++++++++++++++----- 8 files changed, 67 insertions(+), 87 deletions(-) diff --git a/assume/common/base.py b/assume/common/base.py index a2081831b..622f9dcca 100644 --- a/assume/common/base.py +++ b/assume/common/base.py @@ -5,7 +5,6 @@ import logging from collections import defaultdict from dataclasses import dataclass, field -from dataclasses import dataclass, field from datetime import datetime, timedelta import numpy as np @@ -878,6 +877,10 @@ class LearningConfig: CUDA devices like "cuda:0". Default is "cpu". exploration_noise_std (float): The standard deviation of Gaussian noise added to actions during exploration in the environment. Higher values encourage more exploration. Default is 0.2. 
+ training_episodes (int): The number of training episodes, where one episode is the entire simulation + horizon specified in the general config. Default is 100. + validation_episodes_interval (int): The interval (in episodes) at which validation episodes are run + to evaluate the current policy's performance without training updates. Default is 5. train_freq (str): Defines the frequency in time steps at which the actor and critic networks are updated. Accepts time strings like "24h" for 24 hours or "1d" for 1 day. Default is "24h". batch_size (int): The batch size of experiences sampled from the replay buffer for each training update. diff --git a/assume/reinforcement_learning/algorithms/base_algorithm.py b/assume/reinforcement_learning/algorithms/base_algorithm.py index 8ca1e24a1..d1ac21ddb 100644 --- a/assume/reinforcement_learning/algorithms/base_algorithm.py +++ b/assume/reinforcement_learning/algorithms/base_algorithm.py @@ -2,22 +2,16 @@ # # SPDX-License-Identifier: AGPL-3.0-or-later import json -import json import logging import os -import os import torch as th from torch.optim import AdamW -from torch.optim import AdamW from assume.reinforcement_learning.algorithms import actor_architecture_aliases from assume.reinforcement_learning.learning_utils import ( transfer_weights, ) -from assume.reinforcement_learning.learning_utils import ( - transfer_weights, -) logger = logging.getLogger(__name__) diff --git a/assume/reinforcement_learning/algorithms/matd3.py b/assume/reinforcement_learning/algorithms/matd3.py index 578077f13..2cc2ee455 100644 --- a/assume/reinforcement_learning/algorithms/matd3.py +++ b/assume/reinforcement_learning/algorithms/matd3.py @@ -7,7 +7,6 @@ import torch as th from torch.nn import functional as F -from assume.reinforcement_learning.algorithms.base_algorithm import A2CAlgorithm from assume.reinforcement_learning.algorithms.base_algorithm import A2CAlgorithm from assume.reinforcement_learning.learning_utils import ( polyak_update, @@ 
-57,7 +56,6 @@ def update_policy(self): """ - logger.debug("Updating Policy (TD3)") logger.debug("Updating Policy (TD3)") # Stack strategies for easier access diff --git a/assume/reinforcement_learning/buffer.py b/assume/reinforcement_learning/buffer.py index d5b1251f0..b084020da 100644 --- a/assume/reinforcement_learning/buffer.py +++ b/assume/reinforcement_learning/buffer.py @@ -4,7 +4,6 @@ import warnings from typing import NamedTuple, Generator -from typing import NamedTuple, Generator import numpy as np import torch as th diff --git a/assume/reinforcement_learning/learning_role.py b/assume/reinforcement_learning/learning_role.py index cc81737f0..dbfb7273d 100644 --- a/assume/reinforcement_learning/learning_role.py +++ b/assume/reinforcement_learning/learning_role.py @@ -7,7 +7,6 @@ from datetime import datetime from pathlib import Path -import numpy as np import numpy as np import pandas as pd import torch as th @@ -25,8 +24,6 @@ timestamp2datetime, ) from assume.reinforcement_learning.algorithms.base_algorithm import RLAlgorithm -from assume.reinforcement_learning.algorithms.maddpg import DDPG -from assume.reinforcement_learning.algorithms.mappo import PPO from assume.reinforcement_learning.algorithms.matd3 import TD3 from assume.reinforcement_learning.algorithms.maddpg import DDPG from assume.reinforcement_learning.algorithms.mappo import PPO @@ -141,10 +138,6 @@ def __init__( self.all_values = defaultdict(lambda: defaultdict(list)) self.all_log_probs = defaultdict(lambda: defaultdict(list)) self.all_dones = defaultdict(lambda: defaultdict(list)) - # PPO algorithm specific caches for on-policy learning - self.all_values = defaultdict(lambda: defaultdict(list)) - self.all_log_probs = defaultdict(lambda: defaultdict(list)) - self.all_dones = defaultdict(lambda: defaultdict(list)) def on_ready(self): """ @@ -273,10 +266,6 @@ async def store_to_buffer_and_update(self) -> None: current_values = self.all_values current_log_probs = self.all_log_probs current_dones 
= self.all_dones - # PPO specific caches - current_values = self.all_values - current_log_probs = self.all_log_probs - current_dones = self.all_dones # Reset cache dicts immediately with new defaultdicts self.all_obs = defaultdict(lambda: defaultdict(list)) @@ -289,10 +278,6 @@ async def store_to_buffer_and_update(self) -> None: self.all_values = defaultdict(lambda: defaultdict(list)) self.all_log_probs = defaultdict(lambda: defaultdict(list)) self.all_dones = defaultdict(lambda: defaultdict(list)) - # PPO specific resets - self.all_values = defaultdict(lambda: defaultdict(list)) - self.all_log_probs = defaultdict(lambda: defaultdict(list)) - self.all_dones = defaultdict(lambda: defaultdict(list)) # Get timestamps from cache we took all_timestamps = sorted(current_obs.keys()) @@ -427,7 +412,7 @@ def add_observation_to_cache(self, unit_id, start, observation) -> None: """ self.all_obs[start][unit_id].append(observation) - def add_actions_to_cache(self, unit_id, start, action, extra_info) -> None: + def add_actions_to_cache(self, unit_id, start, action, noise) -> None: """ Add the action and noise to the cache dict, per unit_id. 
@@ -446,15 +431,7 @@ def add_actions_to_cache(self, unit_id, start, action, extra_info) -> None: return self.all_actions[start][unit_id].append(action) - - if isinstance(extra_info, th.Tensor) and extra_info.shape == action.shape: - self.all_noises[start][unit_id].append(extra_info) # It's noise - else: - self.all_log_probs[start][unit_id].append( - extra_info["log_probs"] - ) # It's log_probs and other stuff - self.all_values[start][unit_id].append(extra_info["value"]) - self.all_dones[start][unit_id].append(float(extra_info["done"])) + self.all_noises[start][unit_id].append(noise) def add_reward_to_cache(self, unit_id, start, reward, regret, profit) -> None: """ @@ -504,7 +481,6 @@ def load_inter_episodic_data(self, inter_episodic_data): self.rl_eval = inter_episodic_data["all_eval"] self.avg_rewards = inter_episodic_data["avg_all_eval"] self.buffer = inter_episodic_data["buffer"] - self.rollout_buffer = inter_episodic_data["rollout_buffer"] self.initialize_policy(inter_episodic_data["actors_and_critics"]) @@ -537,7 +513,6 @@ def get_inter_episodic_data(self): "all_eval": self.rl_eval, "avg_all_eval": self.avg_rewards, "buffer": self.buffer, - "rollout_buffer": self.rollout_buffer, "actors_and_critics": self.rl_algorithm.extract_policy(), } @@ -614,10 +589,6 @@ def create_learning_algorithm(self, algorithm: RLAlgorithm): self.rl_algorithm = DDPG(learning_role=self) elif algorithm == "mappo": self.rl_algorithm = PPO(learning_role=self) - elif algorithm == "maddpg": - self.rl_algorithm = DDPG(learning_role=self) - elif algorithm == "mappo": - self.rl_algorithm = PPO(learning_role=self) else: logger.error(f"Learning algorithm {algorithm} not implemented!") diff --git a/assume/reinforcement_learning/neural_network_architecture.py b/assume/reinforcement_learning/neural_network_architecture.py index c6c6ebfb0..74c8d69eb 100644 --- a/assume/reinforcement_learning/neural_network_architecture.py +++ b/assume/reinforcement_learning/neural_network_architecture.py @@ -22,10 
+22,6 @@ class Critic(nn.Module): act_dim: Dimension of action per agent float_type: Data type for parameters unique_obs_dim: Dimension of agent-specific observations - obs_dim (int): Dimension of observation per agent - act_dim: Dimension of action per agent - float_type: Data type for parameters - unique_obs_dim: Dimension of agent-specific observations """ def __init__( self, @@ -37,7 +33,6 @@ def __init__( ): super().__init__() - # Calculate total (centralized) dimensions # Calculate total (centralized) dimensions self.obs_dim = obs_dim + unique_obs_dim * (n_agents - 1) self.act_dim = act_dim * n_agents @@ -59,22 +54,15 @@ def _get_architecture( hidden_sizes = [1024, 512, 256, 128] # Deeper network for large `n_agents` return hidden_sizes - def _build_q_network(self) -> nn.ModuleList: - return hidden_sizes - def _build_q_network(self) -> nn.ModuleList: """ Dynamically create a Q-network given the chosen hidden layer sizes. - Dynamically create a Q-network given the chosen hidden layer sizes. """ layers = nn.ModuleList() input_dim = ( self.obs_dim + self.act_dim ) # Input includes all observations and actions - for h in self.hidden_sizes: - layers.append(nn.Linear(input_dim, h, dtype=self.float_type)) - layers.append(nn.ReLU()) for h in self.hidden_sizes: layers.append(nn.Linear(input_dim, h, dtype=self.float_type)) layers.append(nn.ReLU()) @@ -136,11 +124,9 @@ def forward( # Compute Q1 x1 = nn.Sequential(*self.q1_layers)(xu) - x1 = nn.Sequential(*self.q1_layers)(xu) # Compute Q2 x2 = nn.Sequential(*self.q2_layers)(xu) - x2 = nn.Sequential(*self.q2_layers)(xu) return x1, x2 @@ -281,7 +267,6 @@ def __init__(self): class MLPActor(Actor): """ The neural network for the MLP actor. - The neural network for the MLP actor. """ def __init__(self, obs_dim: int, act_dim: int, float_type, *args, **kwargs): @@ -316,7 +301,6 @@ def forward(self, obs): class LSTMActor(Actor): """ The LSTM recurrent neural network for the actor. 
- The LSTM recurrent neural network for the actor. Based on "Multi-Period and Multi-Spatial Equilibrium Analysis in Imperfect Electricity Markets" by Ye at al. (2019) @@ -382,7 +366,6 @@ def forward(self, obs): outputs = [] for time_step in x1.split(1, dim=2): - time_step = time_step.reshape(-1, self.num_timeseris_obs_dim) time_step = time_step.reshape(-1, self.num_timeseris_obs_dim) h_t, c_t = self.LSTM1(time_step, (h_t, c_t)) h_t2, c_t2 = self.LSTM2(h_t, (h_t2, c_t2)) diff --git a/assume/strategies/learning_strategies.py b/assume/strategies/learning_strategies.py index 3b7a21ecc..7337fafd8 100644 --- a/assume/strategies/learning_strategies.py +++ b/assume/strategies/learning_strategies.py @@ -487,9 +487,7 @@ def calculate_bids( # ============================================================================= # 2. Get the Actions, based on the observations # ============================================================================= - # Depending on the algorithm, we call specific function that passes obs through actor and generates actions - # extra_info is either noise (MATD3) or log_probs (PPO) - actions, extra_info = self.get_actions(self, next_observation) + actions, noise = self.get_actions(next_observation) # ============================================================================= # 3. Transform Actions into bids @@ -818,9 +816,7 @@ def calculate_bids( # ============================================================================= # 2. Get the Actions, based on the observations # ============================================================================= - # Depending on the algorithm, we call specific function that passes obs through actor and generates actions - # extra_info is either noise (MATD3) or log_probs (PPO) - actions, extra_info = self.get_actions(self, next_observation) + actions, noise = self.get_actions(next_observation) # ============================================================================= # 3. 
Transform Actions into bids @@ -1015,9 +1011,7 @@ def calculate_bids( # ============================================================================= # Get the Actions, based on the observations # ============================================================================= - # Depending on the algorithm, we call specific function that passes obs through actor and generates actions - # extra_info is either noise (MATD3) or log_probs (PPO) - actions, extra_info = self.get_actions(self, next_observation) + actions, noise = self.get_actions(next_observation) # ============================================================================= # 3. Transform Actions into bids @@ -1063,9 +1057,7 @@ def calculate_bids( ) if self.learning_mode: - self.learning_role.add_actions_to_cache( - self.unit_id, start, actions, extra_info - ) + self.learning_role.add_actions_to_cache(self.unit_id, start, actions, noise) return bids diff --git a/examples/inputs/example_02a/config.yaml b/examples/inputs/example_02a/config.yaml index 5c0850122..400fe39aa 100644 --- a/examples/inputs/example_02a/config.yaml +++ b/examples/inputs/example_02a/config.yaml @@ -18,29 +18,31 @@ base: algorithm: matd3 learning_rate: 0.001 training_episodes: 100 + actor_architecture: mlp + train_freq: 100h + batch_size: 128 + gamma: 0.99 device: cpu validation_episodes_interval: 5 - matd3: - actor_architecture: mlp - batch_size: 64 + + # Off-policy parameters (required for TD3/DDPG algorithms) + off_policy: episodes_collecting_initial_experience: 3 - gamma: 0.99 gradient_steps: 10 - noise_dt: 1 - noise_scale: 1 + gamma: 0.99 noise_sigma: 0.1 + noise_scale: 1 action_noise_schedule: linear - train_freq: 24h - ppo: - actor_architecture: dist - batch_size: 11 - clip_ratio: 0.05 - entropy_coef: 0.005 + noise_dt: 1 + replay_buffer_size: 10000 + + # On-policy parameters (required for PPO/MAPPO algorithms) + on_policy: + clip_ratio: 0.2 + entropy_coef: 0.01 + vf_coef: 0.5 gae_lambda: 0.95 - gamma: 0.99 - max_grad_norm: 0.3 
- train_freq: 33h + vf_coef: 0.75 + n_epochs: 25 markets_config: EOM: @@ -88,6 +90,25 @@ base_lstm: early_stopping_steps: 10 early_stopping_threshold: 0.05 actor_architecture: lstm + + # Off-policy parameters + off_policy: + episodes_collecting_initial_experience: 5 + gradient_steps: 24 + gamma: 0.99 + noise_sigma: 0.1 + noise_scale: 1 + action_noise_schedule: linear + noise_dt: 1 + replay_buffer_size: 10000 + + # On-policy parameters + on_policy: + clip_ratio: 0.2 + entropy_coef: 0.01 + vf_coef: 0.5 + gae_lambda: 0.95 + n_epochs: 25 markets_config: EOM: @@ -133,6 +154,25 @@ tiny: noise_dt: 1 validation_episodes_interval: 5 actor_architecture: mlp + + # Off-policy parameters + off_policy: + episodes_collecting_initial_experience: 3 + gradient_steps: 24 + gamma: 0.99 + noise_sigma: 0.1 + noise_scale: 1 + action_noise_schedule: linear + noise_dt: 1 + replay_buffer_size: 10000 + + # On-policy parameters + on_policy: + clip_ratio: 0.2 + entropy_coef: 0.01 + vf_coef: 0.5 + gae_lambda: 0.95 + n_epochs: 25 markets_config: EOM: @@ -150,4 +190,4 @@ tiny: maximum_bid_price: 3000 minimum_bid_price: -500 price_unit: EUR/MWh - market_mechanism: pay_as_clear + market_mechanism: pay_as_clear \ No newline at end of file From 9435df562dd00abb22827a2a2447091559d7f36f Mon Sep 17 00:00:00 2001 From: Harshul-18 Date: Sat, 28 Feb 2026 09:23:00 +0100 Subject: [PATCH 20/44] Updated all the example_02 scenario config files according to the updated on-policy and off-policy pipelines. 
--- examples/inputs/example_02a/config.yaml | 33 ++---- examples/inputs/example_02b/config.yaml | 46 ++++++-- examples/inputs/example_02c/config.yaml | 24 +++-- examples/inputs/example_02d/config.yaml | 138 ++++++++++++++++++++++-- examples/inputs/example_02e/config.yaml | 46 ++++++-- 5 files changed, 230 insertions(+), 57 deletions(-) diff --git a/examples/inputs/example_02a/config.yaml b/examples/inputs/example_02a/config.yaml index 400fe39aa..b794ed374 100644 --- a/examples/inputs/example_02a/config.yaml +++ b/examples/inputs/example_02a/config.yaml @@ -24,18 +24,17 @@ base: gamma: 0.99 device: cpu validation_episodes_interval: 5 - + # Off-policy parameters (required for TD3/DDPG algorithms) off_policy: episodes_collecting_initial_experience: 3 gradient_steps: 10 - gamma: 0.99 noise_sigma: 0.1 noise_scale: 1 action_noise_schedule: linear noise_dt: 1 replay_buffer_size: 10000 - + # On-policy parameters (required for PPO/MAPPO algorithms) on_policy: clip_ratio: 0.2 @@ -77,32 +76,26 @@ base_lstm: algorithm: matd3 learning_rate: 0.001 training_episodes: 50 - episodes_collecting_initial_experience: 5 train_freq: 24h - gradient_steps: 24 batch_size: 256 gamma: 0.99 device: cpu - noise_sigma: 0.1 - noise_scale: 1 - noise_dt: 1 validation_episodes_interval: 5 early_stopping_steps: 10 early_stopping_threshold: 0.05 actor_architecture: lstm - - # Off-policy parameters + + # Off-policy parameters (required for TD3/DDPG algorithms) off_policy: episodes_collecting_initial_experience: 5 gradient_steps: 24 - gamma: 0.99 noise_sigma: 0.1 noise_scale: 1 action_noise_schedule: linear noise_dt: 1 replay_buffer_size: 10000 - - # On-policy parameters + + # On-policy parameters (required for PPO/MAPPO algorithms) on_policy: clip_ratio: 0.2 entropy_coef: 0.01 @@ -143,30 +136,24 @@ tiny: algorithm: matd3 learning_rate: 0.001 training_episodes: 10 - episodes_collecting_initial_experience: 3 train_freq: 24h - gradient_steps: 24 batch_size: 64 gamma: 0.99 device: cpu - noise_sigma: 0.1 - 
noise_scale: 1 - noise_dt: 1 validation_episodes_interval: 5 actor_architecture: mlp - - # Off-policy parameters + + # Off-policy parameters (required for TD3/DDPG algorithms) off_policy: episodes_collecting_initial_experience: 3 gradient_steps: 24 - gamma: 0.99 noise_sigma: 0.1 noise_scale: 1 action_noise_schedule: linear noise_dt: 1 replay_buffer_size: 10000 - - # On-policy parameters + + # On-policy parameters (required for PPO/MAPPO algorithms) on_policy: clip_ratio: 0.2 entropy_coef: 0.01 diff --git a/examples/inputs/example_02b/config.yaml b/examples/inputs/example_02b/config.yaml index 1f7897ac1..a10036ae3 100644 --- a/examples/inputs/example_02b/config.yaml +++ b/examples/inputs/example_02b/config.yaml @@ -18,18 +18,30 @@ base: algorithm: matd3 learning_rate: 0.001 training_episodes: 100 - episodes_collecting_initial_experience: 5 train_freq: 100h - gradient_steps: 10 batch_size: 128 gamma: 0.99 device: cpu - action_noise_schedule: linear - noise_sigma: 0.1 - noise_scale: 1 - noise_dt: 1 validation_episodes_interval: 5 + # Off-policy parameters (required for TD3/DDPG algorithms) + off_policy: + episodes_collecting_initial_experience: 5 + gradient_steps: 10 + noise_sigma: 0.1 + noise_scale: 1 + action_noise_schedule: linear + noise_dt: 1 + replay_buffer_size: 10000 + + # On-policy parameters (required for PPO/MAPPO algorithms) + on_policy: + clip_ratio: 0.2 + entropy_coef: 0.01 + vf_coef: 0.5 + gae_lambda: 0.95 + n_epochs: 25 + markets_config: EOM: operator: EOM_operator @@ -63,19 +75,31 @@ base_lstm: actor_architecture: lstm learning_rate: 0.001 training_episodes: 100 - episodes_collecting_initial_experience: 3 train_freq: 24h - gradient_steps: 1 batch_size: 256 gamma: 0.99 device: cpu - noise_sigma: 0.1 - noise_scale: 1 - noise_dt: 1 validation_episodes_interval: 5 early_stopping_steps: 10 early_stopping_threshold: 0.05 + # Off-policy parameters (required for TD3/DDPG algorithms) + off_policy: + episodes_collecting_initial_experience: 3 + gradient_steps: 1 
+ noise_sigma: 0.1 + noise_scale: 1 + noise_dt: 1 + replay_buffer_size: 10000 + + # On-policy parameters (required for PPO/MAPPO algorithms) + on_policy: + clip_ratio: 0.2 + entropy_coef: 0.01 + vf_coef: 0.5 + gae_lambda: 0.95 + n_epochs: 25 + markets_config: EOM: operator: EOM_operator diff --git a/examples/inputs/example_02c/config.yaml b/examples/inputs/example_02c/config.yaml index 78ac4ed7c..de46a341c 100644 --- a/examples/inputs/example_02c/config.yaml +++ b/examples/inputs/example_02c/config.yaml @@ -18,18 +18,30 @@ base: algorithm: matd3 learning_rate: 0.001 training_episodes: 100 - episodes_collecting_initial_experience: 5 train_freq: 100h - gradient_steps: 10 batch_size: 128 gamma: 0.99 device: cpu - action_noise_schedule: linear - noise_sigma: 0.1 - noise_scale: 1 - noise_dt: 1 validation_episodes_interval: 5 + # Off-policy parameters (required for TD3/DDPG algorithms) + off_policy: + episodes_collecting_initial_experience: 5 + gradient_steps: 10 + noise_sigma: 0.1 + noise_scale: 1 + action_noise_schedule: linear + noise_dt: 1 + replay_buffer_size: 10000 + + # On-policy parameters (required for PPO/MAPPO algorithms) + on_policy: + clip_ratio: 0.2 + entropy_coef: 0.01 + vf_coef: 0.5 + gae_lambda: 0.95 + n_epochs: 25 + markets_config: EOM: operator: EOM_operator diff --git a/examples/inputs/example_02d/config.yaml b/examples/inputs/example_02d/config.yaml index 78ac4ed7c..8c48781c4 100644 --- a/examples/inputs/example_02d/config.yaml +++ b/examples/inputs/example_02d/config.yaml @@ -18,18 +18,30 @@ base: algorithm: matd3 learning_rate: 0.001 training_episodes: 100 - episodes_collecting_initial_experience: 5 train_freq: 100h - gradient_steps: 10 batch_size: 128 gamma: 0.99 device: cpu - action_noise_schedule: linear - noise_sigma: 0.1 - noise_scale: 1 - noise_dt: 1 validation_episodes_interval: 5 + # Off-policy parameters (required for TD3/DDPG algorithms) + off_policy: + episodes_collecting_initial_experience: 5 + gradient_steps: 10 + noise_sigma: 0.1 + 
noise_scale: 1 + action_noise_schedule: linear + noise_dt: 1 + replay_buffer_size: 10000 + + # On-policy parameters (required for PPO/MAPPO algorithms) + on_policy: + clip_ratio: 0.2 + entropy_coef: 0.01 + vf_coef: 0.5 + gae_lambda: 0.95 + n_epochs: 25 + markets_config: EOM: operator: EOM_operator @@ -46,3 +58,117 @@ base: minimum_bid_price: -500 price_unit: EUR/MWh market_mechanism: pay_as_clear + +# --------------------------------------------------------------------------- +# MADDPG configuration (off-policy, no policy delay / twin critics) +# Switch algorithm: maddpg — all off_policy params remain active. +# On-policy block is kept for reference but not used by this algorithm. +# --------------------------------------------------------------------------- +# base: +# start_date: 2019-03-01 00:00 +# end_date: 2019-04-01 00:00 +# time_step: 1h +# save_frequency_hours: null +# seed: null +# +# learning_config: +# learning_mode: true +# continue_learning: false +# trained_policies_save_path: null +# trained_policies_load_path: null +# max_bid_price: 100 +# algorithm: maddpg +# learning_rate: 0.001 +# training_episodes: 100 +# train_freq: 100h +# batch_size: 128 +# gamma: 0.99 +# device: cpu +# validation_episodes_interval: 5 +# +# # Off-policy parameters (active for MADDPG) +# off_policy: +# episodes_collecting_initial_experience: 5 +# gradient_steps: 10 +# noise_sigma: 0.1 +# noise_scale: 1 +# action_noise_schedule: linear +# noise_dt: 1 +# replay_buffer_size: 10000 +# +# # On-policy parameters (not used by MADDPG, kept for reference) +# on_policy: +# clip_ratio: 0.2 +# entropy_coef: 0.01 +# vf_coef: 0.5 +# gae_lambda: 0.95 +# n_epochs: 25 +# +# markets_config: +# EOM: +# operator: EOM_operator +# product_type: energy +# products: +# - duration: 1h +# count: 1 +# first_delivery: 1h +# opening_frequency: 1h +# opening_duration: 1h +# volume_unit: MWh +# maximum_bid_volume: 100000 +# maximum_bid_price: 3000 +# minimum_bid_price: -500 +# price_unit: EUR/MWh +# 
market_mechanism: pay_as_clear + +# --------------------------------------------------------------------------- +# MAPPO configuration (on-policy) +# Switch algorithm: mappo — only on_policy params are used. +# Off-policy block is not applicable for MAPPO; it is omitted. +# --------------------------------------------------------------------------- +# base: +# start_date: 2019-03-01 00:00 +# end_date: 2019-04-01 00:00 +# time_step: 1h +# save_frequency_hours: null +# seed: null +# +# learning_config: +# learning_mode: true +# continue_learning: false +# trained_policies_save_path: null +# trained_policies_load_path: null +# max_bid_price: 100 +# algorithm: mappo +# learning_rate: 0.001 +# training_episodes: 100 +# train_freq: 100h +# batch_size: 128 +# gamma: 0.99 +# device: cpu +# validation_episodes_interval: 5 +# +# # On-policy parameters (active for MAPPO) +# on_policy: +# clip_ratio: 0.2 +# entropy_coef: 0.01 +# vf_coef: 0.5 +# gae_lambda: 0.95 +# n_epochs: 25 +# +# markets_config: +# EOM: +# operator: EOM_operator +# product_type: energy +# products: +# - duration: 1h +# count: 1 +# first_delivery: 1h +# opening_frequency: 1h +# opening_duration: 1h +# volume_unit: MWh +# maximum_bid_volume: 100000 +# maximum_bid_price: 3000 +# minimum_bid_price: -500 +# price_unit: EUR/MWh +# market_mechanism: pay_as_clear diff --git a/examples/inputs/example_02e/config.yaml b/examples/inputs/example_02e/config.yaml index 4201ec8ef..249d24d94 100644 --- a/examples/inputs/example_02e/config.yaml +++ b/examples/inputs/example_02e/config.yaml @@ -19,18 +19,30 @@ base: actor_architecture: mlp learning_rate: 0.0003 training_episodes: 30 - episodes_collecting_initial_experience: 5 train_freq: 720h - gradient_steps: 720 batch_size: 256 gamma: 0.999 device: cpu - # action_noise_schedule: linear - noise_sigma: 0.1 - noise_scale: 1 - noise_dt: 1 validation_episodes_interval: 5 + # Off-policy parameters (required for TD3/DDPG algorithms) + off_policy: + 
episodes_collecting_initial_experience: 5 + gradient_steps: 720 + noise_sigma: 0.1 + noise_scale: 1 + # action_noise_schedule: linear + noise_dt: 1 + replay_buffer_size: 10000 + + # On-policy parameters (required for PPO/MAPPO algorithms) + on_policy: + clip_ratio: 0.2 + entropy_coef: 0.01 + vf_coef: 0.5 + gae_lambda: 0.95 + n_epochs: 25 + markets_config: EOM: operator: EOM_operator @@ -65,15 +77,27 @@ tiny: learning_rate: 0.001 training_episodes: 5 validation_episodes_interval: 2 - episodes_collecting_initial_experience: 1 train_freq: 24h - gradient_steps: 24 batch_size: 64 gamma: 0.99 device: cpu - noise_sigma: 0.1 - noise_scale: 1 - noise_dt: 1 + + # Off-policy parameters (required for TD3/DDPG algorithms) + off_policy: + episodes_collecting_initial_experience: 1 + gradient_steps: 24 + noise_sigma: 0.1 + noise_scale: 1 + noise_dt: 1 + replay_buffer_size: 10000 + + # On-policy parameters (required for PPO/MAPPO algorithms) + on_policy: + clip_ratio: 0.2 + entropy_coef: 0.01 + vf_coef: 0.5 + gae_lambda: 0.95 + n_epochs: 25 markets_config: EOM: From e1bbcd6ce965fce320074c75155c8566ccc1e592 Mon Sep 17 00:00:00 2001 From: Harshul-18 Date: Sat, 28 Feb 2026 10:07:56 +0100 Subject: [PATCH 21/44] updated the 04a notebook to work with the updated config pipeline --- examples/inputs/example_02a/config.yaml | 208 ++++---- ...forcement_learning_algorithm_example.ipynb | 492 +++++++++++++++++- 2 files changed, 556 insertions(+), 144 deletions(-) diff --git a/examples/inputs/example_02a/config.yaml b/examples/inputs/example_02a/config.yaml index b794ed374..7a78a8321 100644 --- a/examples/inputs/example_02a/config.yaml +++ b/examples/inputs/example_02a/config.yaml @@ -1,180 +1,154 @@ -# SPDX-FileCopyrightText: ASSUME Developers -# -# SPDX-License-Identifier: AGPL-3.0-or-later - base: - start_date: 2019-03-01 00:00 end_date: 2019-03-31 00:00 - time_step: 1h - save_frequency_hours: null - seed: null - learning_config: - learning_mode: true - continue_learning: false - 
trained_policies_save_path: null - trained_policies_load_path: null - max_bid_price: 100 algorithm: matd3 - learning_rate: 0.001 - training_episodes: 100 - actor_architecture: mlp - train_freq: 100h - batch_size: 128 - gamma: 0.99 + batch_size: 256 + continue_learning: false device: cpu - validation_episodes_interval: 5 - - # Off-policy parameters (required for TD3/DDPG algorithms) + gamma: 0.99 + learning_mode: true + learning_rate: 0.001 + max_bid_price: 100 off_policy: - episodes_collecting_initial_experience: 3 - gradient_steps: 10 - noise_sigma: 0.1 - noise_scale: 1 - action_noise_schedule: linear + action_noise_schedule: null + episodes_collecting_initial_experience: 5 + gradient_steps: 24 noise_dt: 1 - replay_buffer_size: 10000 - - # On-policy parameters (required for PPO/MAPPO algorithms) + noise_scale: 1 + noise_sigma: 0.1 + replay_buffer_size: 50000 on_policy: - clip_ratio: 0.2 + clip_ratio: 0.1 entropy_coef: 0.01 - vf_coef: 0.5 gae_lambda: 0.95 n_epochs: 25 - + vf_coef: 0.5 + train_freq: 24h + trained_policies_save_path: null + training_episodes: 100 + validation_episodes_interval: 5 markets_config: EOM: + market_mechanism: pay_as_clear + maximum_bid_price: 3000 + maximum_bid_volume: 100000 + minimum_bid_price: -500 + opening_duration: 1h + opening_frequency: 1h operator: EOM_operator + price_unit: EUR/MWh product_type: energy - start_date: 2019-03-01 00:00 products: - - duration: 1h - count: 1 - first_delivery: 1h - opening_frequency: 1h - opening_duration: 1h + - count: 1 + duration: 1h + first_delivery: 1h + start_date: 2019-03-01 00:00 volume_unit: MWh - maximum_bid_volume: 100000 - maximum_bid_price: 3000 - minimum_bid_price: -500 - price_unit: EUR/MWh - market_mechanism: pay_as_clear - -base_lstm: - start_date: 2019-03-01 00:00 - end_date: 2019-03-31 00:00 - time_step: 1h save_frequency_hours: null seed: null - + start_date: 2019-03-01 00:00 + time_step: 1h +base_lstm: + end_date: 2019-03-31 00:00 learning_config: - learning_mode: true - 
continue_learning: false - trained_policies_save_path: null - max_bid_price: 100 + actor_architecture: lstm algorithm: matd3 - learning_rate: 0.001 - training_episodes: 50 - train_freq: 24h batch_size: 256 - gamma: 0.99 + continue_learning: false device: cpu - validation_episodes_interval: 5 early_stopping_steps: 10 early_stopping_threshold: 0.05 - actor_architecture: lstm - - # Off-policy parameters (required for TD3/DDPG algorithms) + gamma: 0.99 + learning_mode: true + learning_rate: 0.001 + max_bid_price: 100 off_policy: + action_noise_schedule: linear episodes_collecting_initial_experience: 5 gradient_steps: 24 - noise_sigma: 0.1 - noise_scale: 1 - action_noise_schedule: linear noise_dt: 1 + noise_scale: 1 + noise_sigma: 0.1 replay_buffer_size: 10000 - - # On-policy parameters (required for PPO/MAPPO algorithms) on_policy: clip_ratio: 0.2 entropy_coef: 0.01 - vf_coef: 0.5 gae_lambda: 0.95 n_epochs: 25 - + vf_coef: 0.5 + train_freq: 24h + trained_policies_save_path: null + training_episodes: 50 + validation_episodes_interval: 5 markets_config: EOM: + market_mechanism: pay_as_clear + maximum_bid_price: 3000 + maximum_bid_volume: 100000 + minimum_bid_price: -500 + opening_duration: 1h + opening_frequency: 1h operator: EOM_operator + price_unit: EUR/MWh product_type: energy - start_date: 2019-03-01 00:00 products: - - duration: 1h - count: 1 - first_delivery: 1h - opening_frequency: 1h - opening_duration: 1h + - count: 1 + duration: 1h + first_delivery: 1h + start_date: 2019-03-01 00:00 volume_unit: MWh - maximum_bid_volume: 100000 - maximum_bid_price: 3000 - minimum_bid_price: -500 - price_unit: EUR/MWh - market_mechanism: pay_as_clear - -tiny: - start_date: 2019-01-01 00:00 - end_date: 2019-01-05 00:00 - time_step: 1h save_frequency_hours: null seed: null - + start_date: 2019-03-01 00:00 + time_step: 1h +tiny: + end_date: 2019-01-05 00:00 learning_config: - learning_mode: true - continue_learning: false - trained_policies_save_path: null - max_bid_price: 100 + 
actor_architecture: mlp algorithm: matd3 - learning_rate: 0.001 - training_episodes: 10 - train_freq: 24h batch_size: 64 - gamma: 0.99 + continue_learning: false device: cpu - validation_episodes_interval: 5 - actor_architecture: mlp - - # Off-policy parameters (required for TD3/DDPG algorithms) + gamma: 0.99 + learning_mode: true + learning_rate: 0.001 + max_bid_price: 100 off_policy: + action_noise_schedule: linear episodes_collecting_initial_experience: 3 gradient_steps: 24 - noise_sigma: 0.1 - noise_scale: 1 - action_noise_schedule: linear noise_dt: 1 + noise_scale: 1 + noise_sigma: 0.1 replay_buffer_size: 10000 - - # On-policy parameters (required for PPO/MAPPO algorithms) on_policy: clip_ratio: 0.2 entropy_coef: 0.01 - vf_coef: 0.5 gae_lambda: 0.95 n_epochs: 25 - + vf_coef: 0.5 + train_freq: 24h + trained_policies_save_path: null + training_episodes: 10 + validation_episodes_interval: 5 markets_config: EOM: + market_mechanism: pay_as_clear + maximum_bid_price: 3000 + maximum_bid_volume: 100000 + minimum_bid_price: -500 + opening_duration: 1h + opening_frequency: 1h operator: EOM_operator + price_unit: EUR/MWh product_type: energy - start_date: 2019-01-01 00:00 products: - - duration: 1h - count: 1 - first_delivery: 1h - opening_frequency: 1h - opening_duration: 1h + - count: 1 + duration: 1h + first_delivery: 1h + start_date: 2019-01-01 00:00 volume_unit: MWh - maximum_bid_volume: 100000 - maximum_bid_price: 3000 - minimum_bid_price: -500 - price_unit: EUR/MWh - market_mechanism: pay_as_clear \ No newline at end of file + save_frequency_hours: null + seed: null + start_date: 2019-01-01 00:00 + time_step: 1h diff --git a/examples/notebooks/04a_reinforcement_learning_algorithm_example.ipynb b/examples/notebooks/04a_reinforcement_learning_algorithm_example.ipynb index 1df7c3561..40233e39b 100644 --- a/examples/notebooks/04a_reinforcement_learning_algorithm_example.ipynb +++ b/examples/notebooks/04a_reinforcement_learning_algorithm_example.ipynb @@ -65,7 +65,7 @@ 
}, { "cell_type": "code", - "execution_count": null, + "execution_count": 1, "id": "d2e2b8fe", "metadata": { "colab": { @@ -173,7 +173,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 2, "id": "7d9899ff", "metadata": {}, "outputs": [], @@ -220,12 +220,20 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 3, "id": "ade14744", "metadata": { "id": "xUsbeZdPJ_2Q" }, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "WARNING:assume.common.utils:PyTorch set to use deterministic algorithms. This may impact performance but ensures reproducibility. For better performance, consider setting 'deterministic' to False when set_random_seed() is called.\n" + ] + } + ], "source": [ "import logging\n", "import os\n", @@ -280,10 +288,25 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 4, "id": "cf00bba8", "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAACRYAAAjmCAMAAACuWvXyAAAAAXNSR0IArs4c6QAAAARnQU1BAACxjwv8YQUAAAA8UExURQAAAP///wAAAEdHR39/f4mJiZ+fn6+bmr+/v8PDw8zp19+4tt/f3+Ly6O3X1e/48/Xp6fj8+fr19P///70dN0QAAAACdFJOUwD+LJYSIwAAAAlwSFlzAAAywAAAMsABKGRa2wAA/09JREFUeF7s/duSG7vOdQF2b7cv/EescDi+93/XTgIzmQAIHjJLqpKUc1zsLYIgCIAgheVD+f9DCCGEEEIM/9+G/0cIIYSQW4OW4EbsbdH/RZARQgghhNwUtAQ3gm0RIYQQQlLQEtwItkWEEEIISUFLcCPYFhFCCCEkBS3BjWBbRAghhJAUtAQ3gm0RIYQQQlLQEtwItkWEEEIISUFLcCPYFhFCCCEkBS3BjWBbRAghhJAUtAQ3gm0RIYQQQlLQEtwItkWEEEIISUFLcCPYFhFCCCEkBS3BjWBbRAghhJAUtAQ3gm0RIYQQQlLQEtwItkWEEEIISUFLcCPYFhFCCCEkBS3BjWBbRAghhJAUtAQ3gm0RIYQQQlLQEtwItkWEEEIISUFLcCPYFhFCCCEkBS3BjWBbRAghhJAUtAQ3gm0RIYQQQlLQEtwItkWEEEIISUFLcCPYFhFCCCEkBS3BjWBbRAghhJAUtAQ3gm0RIYQQQlLQEtwItkWEEEIISUFLcCPYFhFCCCEkBS3BjWBbRAghhJAUtAQ3gm0RIYQQQlLQEtwItkWEEEIISUFLcCPYFhFCCCEkBS3BjWBbRAghhJAUtAQ3gm0RIYQQQlLQEtwItkWEEEIISUFLcCPYFhFCCCEkBS3BjWBbRAghhJAUtAQ3gm0RIYQQQlLQEtwItkWEEEIISUFLcCPYFhFCCCEkBS3BjWBbRAghhJAUtAQ3gm0RIYQQQlLQEtwItkWEEEIISUFLcCPYFhFCCCEkBS3BjWBbRAghhJAUtAQ3gm0RIYQQQlLQEtwItkWEEEIISUFLcCPYFhFCCCEkBS3BjbhfW/Tfvy26f/9hdIbrKyuSWiXY+buJ/v3D4DV4QZcewd9/5Ry30P7+96WzdKf5FyLyUD60BAl5I/SJuxNPaIv+g4XtQYPka8CY8MXvsQ39Stw4ber6ygNYKDgrNWev8xXwgi49gL/1FAtfbGZgpfDtbVFpGHY+tW94vRK0Wff8+/f379dfJ0JeD5T4jXh8W3R0Ra/YFplvxZO2rq80wEDBGnl4zr7OC7r0deK3GtuiV+YFS7DfFin/2BqRjwPFfSMe3hbZ/xp/vbbI/VoBZGtcX2nB+oIJxbz/r/LbMS/o0peR3wR1sC16YV6xBGdt0cbXfpedkJcDlX0jHt0WuW+e12uLYEY59dRijXL5kcb6ggnFP7YQ/iwv6NJXcV+zCtuiF+YVS3ChLdpgY0Q+CZT1jXhwW+T/e/zl2iL/qp1x7/pKB9YXTCiQgJd4VOEL+IR3PumK2Ba9MggOvEYJrrVFH3og5Kagqm/EY9ui8LsUL94WtTH+91dIdpmtXATLC2YTSMA3fsNquNmG8AV8+5f+E2h+B21jJa5+jmySvj1Dd2uLXqMEF9uiz/gPCUIE1PSNeGRb1Pz3+Lu1Rft08gR/ZFuEDbNoMANe4zvpS2Rd0coxQjNVxVSBbdHDQXDgzdoi9kXkY0BJ34gHtkXt71K8eFvUuAf5vC26HBjWF7pt0bc9qDUmjC2YAR/wxiMSy7+Fr9pRjqxNtkUPB8GB1yhB/wwM+YA7Q0gBFX0jHtcWJX924+XaIu9j/Cqrs8l33HjlMlhfMKH4X8mA8PnUbTG2/JBLT8PHs/7XqEc5Ylv0VF6xBE+0RR9waQgpoKBvxMPaIv+IKS/XFo2/TEZt0YO+hmCgYEOBSPhqiOusfuV/o0tPA5EIZ46PbdHPgeiEFynBXtb/++9v0zF96KmQ24GCvhGPaouyruhBDwOMCV9+HY2
fja36sKXfcaOVy8BAwRoxvxb1jY8pdszP+mdcehaXmwisYVv0A7xgCQ6zHhujF2nlCPkaqOcb8aC2KO2KHvSYwZjw9Zdm9zT5qWs1iPw7brByGZgoOCvDX6h6Ese3DgSen3DpadhvLIiWGOcIUwW2RU/g9UpwkvXwDEJKyFuDcr4RD2mL2h8frLxiW/T//itPW/qnbXWLjc4r3F+5jNoXQijyj3V96z8dcDzxEES+36WnYQr01PmNc4Spwrd/cU++oD+EVyvBWdb9Q/gRV4fcHpTzjXhEW3T8J3XgJduiLkcYz/uOwwaFH38zjyccgg8GgRZO5X2cI0wV2BbdgmnWXV/07UVByBNAOd+IB7RF3a6IbVEDNiiwLfpGEGgBkjXYFhHLNOv+MYSQkHcG1Xwjvt4Wha7on3k53qstOhy/R1sEPzYg+GAQaAGSNbBmAwIHpgpsi27BPOtW4+fvOCFfB9V8I77cFrlfNS5vhRGwLYpggwLbom8EgRYgWQNrNiBwYKrAtugWzLPu/ivx26uCkMeDar4RX22Lmq7ofdsibLFxi7bIvPCQfDAItADJEpMcYarAtugWLGQdswLbIvIBoJpvxFfbIv9baOWrnm3RAGxQYFv0jSDQAiRLsC0ijoWs2/9Q5MGQDwDVfCO+2ha5V0C+6X+2LfpP/krv//37l/+b5wNMg3fxO+6/v7q5bA9ZQKeFp7VFC24UzDlBcoFtq32vv/+diuj6QR24SIe7q5YAyRKTHGGqsAexu7QW15aEI32QLfKFtug/5F68hGzAYj0F9Kc+jxcg+i1VX74My6auRbOzkPXFg8EZdNxdr+wDW0zzLJwovE1XlSd2V/XImyGHeiu+3BYdOcMbgOtWeHJbBNmxjfxgoYPs1cNU4ZjOfnR/BTr5yoPmZzeljy7mCjYUuztEm0UIRjR7LLlxPLotu3rqkqc10nkNMXvqoKYEGxtZqJjqMHi8V3KEYUFF4cjGcTVHdeq7xEZ/6p7FtI2bg3NlXX/SqdlERVVQY/TpzX5GqvUUosIVU6CpmXHwLXZ9J+uZSpXt25m0tmbWKtuzfBc3ThVeY7jjyqoeeT9wojfi623R/lWwX29zPU49111gTHD3F7K6T/OaJFcTE4V9EsMe0MpWVtqHrJC8z5gp2Em7HKIjsSO8J0tupDoHu8XUJUvzDgvpC4u5Uwc1IU9O+/WBiQ6Zs4XFHGFYKKLm62YUV6K80XOoxbp44p6lkfXWny9rteRi0xQclnR1G35r1u4OUeGKKSFP+b+/Fqj2sC51spapRJetTrxdq5VtaZuiQt7tnCo85+hO4sqqHnlHcJ434uttEa5EfSTMres9t+eAMcHdXsg2yih9ThoPIC/stxbDHtDKVoL826PQPAyQF2wo1gBEvffRYzdYdKOrpey6qUsH6dsqNDFfO6gh/dTEb0SIO3S+DFZzhGFhE6WLenH18tf5Rm+xuy0nr5u35NCulXUZ+V00oMOWLF76arb7Q1S4YqrQDccySb+10cl6pjJy2ZtZr+yDvNEpJEvOFF7XcNBd1SPvCU7zRjygLSqX3lwqc0WWn+shMCa4ewbZxibu3MzoAsSF/XHHsAe0spXK6LkdbG9DsSYgGr2QB8aTVTdGehu7xdSlnf5DvNE+sJjYWD+oEcMQ/OFA2KHxFCzmCMPC315K0rhGR9vzKWBdXM3d4NRaE6McRG2IN4r3+Ag0nsPatrYXfgjdegBR4YqpjVHJHkyyv5B1u8+u4lx2RoIZPxXwlV0ZFVP08VThDZSd3VU98qbgMG/EI9oij3kWHnMpYExwVxeyjb/9Ry/4AGkh+XLLgFa2sjDsEEbb21DsawjR+AnbqZ6suzF8eQ+LqUtg5ll8YCHeWD+oAeNQvSHIOvS+BBdzhGGh71MS1ziAnlMe6+Ja6sanFmxcLestNWGlhmPcHTjiQ++U4BVT06LZmSR/Iet2o13Fuhx9tmYmTqY7Tg7KBTTWDbEP68W4sqpH3hWc5Y3
4jLZo/2sVGf6yQ1hIvtwyoJWt3Bi+CQWjuwFhwfqVfgFMTW/s1k+4YfdK2BVTl5S5Y+GBhXRj/aD6jN/2DVt2EHXobbmYIwwnNJvMAlhKhHVx6Z7NTs0ZuVzWf5vcqYaRDpLrA7GKEBWumJoXDZjk3m7YyTpmheQyNVVjMnmqssF0jYnoXOFB2OFwG4IOvlDIO4KjvBGf0RaN8E5AWNgv7OSxgFa2cgOSAVBUICvYUNIvgJW2aDeC4QAoTs3usaUuCWf8UiCcsFgt0+8BZ2nsrHfzYDFHGE6IcQ2+ywEUhyx8QXug28d+gUE0AIoKZBv/mtSpxjxowR1IpwSvmFqpWaFXEWCededdcpmaX4Y70n6uspVpMsyKeeKgKCw+i8t65G3BUd6Iz2+L3HNv1+zyye8XQCtbuTF/ylwSICvYUOyDBdHSU15tn3Fj/DomL7mvhbVvGCgrkM1wB9VjHumGsVR/Rk9G90twLUcYzvBx+fzhx8b4P968cm/sghV9nwX8kCmfG5ONM/W0AdlG+8uBqjFO6IFqK50SvGIKImVPeRLkrATnWXdGIbPLGu/rlvOcbwQH55fxONRzhTexXP1Y1SPvC87yRnxWW6RXPbx3nQc83lhIC+ldxlzBzkMkpF82vefZhmLfJohWnjxjAwJh7sZGuqWhOx+Sq3+n+b/415Z6SQfzg+oQkoKfG9d8xdnkbthVYWrELEeYMizEBaFgZ+xmCz5a9YXEueMx5u1/E1gzEAmnyjoCo6E8NvSvnMe6sZHbKYgKF0zZGRs8RMrWKNjdU6yhNOvOZNVoXT7Y97xU2WESf43fZMI8VZAI1vc8Oc4yEnzYPdav6pH3BYd5Iz6pLTom/S8AQShAVDAvRgHSQphRMFew88ejYnxz+1ttiAo2FPsyQdTFmDaWz7mxMduyN2/lvUwUbHgQVVYOqgM0FfvnSb2lWHkQF6xjE2Y5wlRlJS474b2034sQDbC+ze+Z+871GTAOXa4niDKg5ovDueymegUFUeGCKUgKLniTlnkOC3aDdIU79+pBdNkClUuV7e3aKSwyImvFu54WHsaFxJlDIkNlqEfeFznKW/E5bZHfDEKh84Bb8QakhTCjYK7g5hFveABUKFjHICrYBfZ5g6hHL72n3NiYbdmbh0gIJv1/70JYgASsHVSO+x4IsZrMjA43rBoxyxGmwFpcEBVC+mz+5k5a36KhFqsdbR95s3aulrXyD7/ApL/kUT5hAjizbg6yQif95031ux+jD8kYaz/LuqvBw2RweWP/taBaGk7FxTSobIiUsEiCxueC6CjR86Tw7F2GCJRfCjq2WtUjbwzO90Z8TFsUbmDv/YKo0P/mDDMK5gp+vgTcrLD7Q1SApGAd7mgnGM2Q3TNubMy27MxbcbPOvpE2PkiUxYPKgaLQBGu/PYIpSAth/xGzHGFKWYtreDuONWkFOk7lzR5MGz8mNiAQLpa10O5hFzb+QiyYpZ3NzpsyK6JnEG/MU75h906ybivQKjiXN5KlmBFWK9uZbXP+n7VzsvCM6WFiVvXIG4MDvhGf0hY1j0LHDYgK4RpDWkgvOOYKcb59kXrqkBTsIvu+QZQz/H5bd2NjtmVnHhKh3c/1RZBtQCA0i87Ui3UqUbbfHj5aCAtZljrMcoQpYTEuSArD9EHSZ5IKj1Eeps0nLcsUFAt5WW9k7gzd7VR1J/3nTZkVkFR6sXcY7m0nCxBvhJkksUPDvcqGQJi5D7XCSuEZf7IyqKzqkTcGB3wjPqQtah8F+85AVICkENZAWkifGMwVZk9QwaTBqENSsKF0nG2BTmHtFcrd2Jhtmc9baXa6mBIOByEotKlbjn0DakISf987CAtreRNmrmGqsBiXEWbpO6anXvZjTYBeITOMqZULOy3rjWyLcSoxUTBWO2vOmzpWNAEeU+0JJti9na3k77V1IsmzjClhtbK7ziQY3Uz1mMbmRn9Yiqt65I3BAd8ItkU7kBbSFxJzhZUXNH+HICnYUDr
ONpzPbe7GxmzLfN6+/dkzaP9j/cgSBIXlg8qw1tP4rXsQKZAVTrzeM9cwVViMyziYFdER4LTErPlZLdi8QeQ4vIKgj9k2L+tOgsepxETBBN5Zc97UsaLJ1DE1zXjB7j3BbuWWZad1qbKtbOb92cIzHg9Nr+qRNwYHfCM+ti2yD41ZBUkhrIG0kF5wzBVWHgCzv0kDJAUbin02Icowaqupzd3YmG2Zz0NQyD3ApADRJHWdg8qwPk2/fN1OkBUme1hmOcJUYTEujAuQOI410/O1vs2UjW5auseVnSZnWtYdZ8apzN+Mzprzpo4VjXPH1MqldnuPcTu5ZVmOrcJyZWNYyJNugF4BEkdTeFOHwKoeeWNwvjeCbdEOpIX0hcRcYeUFnX5/2FDs4wJRQiekIbkbG7Mt83kICnkSzOkfyzAuLB9UxjRN1pbbCbLCauY2ZvthqrAWlzGY3w1MbkDQxfo2u2dQK6TRH7amlZ3XEyRCnt9xKr+tLWoWHOpLdWH3HuKPxC5LD2sc00ZW2RgWZudm7Kf7N4Vn9xulZlWPvDE43hvBtmgH0kL6yGCuMHuECnZ/iDYgKNhQpq+iAIXC8gOUu7Ex2zKdt8I8CVajOolxYfmgMqAlQBTAZMEVH2SF5dTNc4SpwlpcxuDat1Mf69vsnkGtkEZ/2JpW9rSsO46PU/nktsg4HQOEeGOpLuzeI8KJ2GVpijEnQBTAZAHGp0YNRjcvFkxu6Nie87amm5xVPfLG4HRvBNuiHUgLs5dr9ggVpt8fNpTxSw8uJTZ3Y2O2ZTo/99PuV9OEcWH5oDKgVegcgcmRcxCiwomXexYupgprcVn3JszctL5NysF6MmFaWHk9QVDoHMw4lXlpd9ZcMAXBRgjQ2IJkjN17QEzC2OUNzBWWK3tq1HC+8DCq1J9AFcB0padH3hcc7Y1gW7QDaSF9mTBX6Lxcjun3hw1l5YW7ltfcjY3Zlul8+zZH7H41TRgXlg8qIbXt6TkIUeHEsz3LEaYKa3Gd/3bqYn2b1IP1ZMK0sqwtiDYgKHQOZpzKvLY7ay6YMkLnn4mm43fA7t2lTeLY5YuV3Ur6WN0JKLykaNKOZ1WPvC841xvBtmgH0kL6MmGu0H9C//vv79/ev5ZZgKBgQ5k9mxs2Hoj6zNzYmG2Zzi+8xJgt1DRhXFg+qASr2TmCXlQQFU682bMcYaqwFheGK8zctL5N7plVndCxdKKsTx6Mkr8ZnTVXTEFSMNL10ttZSGWWwrHLFys7jzQHeivsiWiOu5B0PKt65G3Bod4ItkU7kBbSlwlzhfzlMv9AYwAKGxAUbCizZ/PMC77ixsZsy3R+4WwxXagqGBeWDyphQdN6bVUgKoz3cMxyhKnCWlwYrjBz0/o2uWdWdUJi6WRZ53djksq8rjprvmoK39k+ro7bEbcmAf9Wa2Ts8sXKPvPQQm+FfX/rkyX6t6pH3hac6I1gW7QDaSF92zBXSOaH7yV0NiAo2FBmz6bLav7yKotubMy2TOcxLvTOFtOFqoJxYfmgEqxLHc2eCkSF8R6OWY4wVViLC8MVsKKL9W1yz6zqhBjGcCl0NiAodKpznMqnt0VWnDHJYKWbj3///h3/vlnD2GU/v1zZGBSm7kNvBazo9zvRw1U98q7gPG8E26IdSAvp+4a5Qpzv/we1ArUNCAo2lNmzuZbUdTc2Zlum8xgXem5gulBVMC4sH1SCdamj2VOBqDDewzHLEaYKa3FhuAJWdLG+Te6ZVZ3gwrhU1undmaXy+W3RuC+aJPDA7r28aOayn1+ubAwKU1egtwJWbPh/ut8QtlvVI28KjvNGsC3agbSQPu2YK4R5+2KlQG8DgoIN5cSz2c/pCTc2Tmx5zGNc6PmB6UJVwbiwfFAJ1qWOZk8FosJ4D8csR5gqrMWF4Zz533S2vk3umVUd46KYLoPeBgSF9O7MUpm/GZ01V0xt9L67Nyb5M9i911dNXPbzy5WNQWHqCvTm+MKzm1rifqt65C3Bad4
ItkU7kBbSpx1zBTdvN+oAzQ0ICjaUybO50jmccmNjsmU+j3Ghd7aYLlQVjAvLB5VgXepo9lQgKoz3cMxyhKnCWlwYFtIaO4H1bXLPZmGkXC3rTlxjH76jLer3RSeO4kTWLbMTsPPLlY1BYeoK9ArnCs9ua2g2XNUjbwgO80awLdqBtJC+HJgr2PmFrw+TSwgKNpTJs4mZQu9RO+fGxmTLfN5+s0AUwWyhHj/GheWDSrCanTxYr60xiArjPRyzHGGqsBaXyV/vIFexvk3umfUEoinn6gmCwsLBQGTI34zOmiumBLvw4NSPIDyRdcvY5YuVvXAZK18ovPy3UpN6X9QjbwfO8kawLdqBtJDeZcwVzHzzFvz9+99/Zb/8uwiCgg1l/GwuZPSkGxvjLTvz85c4feAxLiwfVEJq29NzEKLCeA/HLEeYKqzFZQx2AljG+ja7Z1ArQDLDWheWy7oT1ziVeYV31lwxVWhiKpz8m+TWxonXbezyxcqeX8YD48CFwss6nixvq3rkvcBR3gi2RTuQFtKXA3OFY97ussnNRtPvDxvK8NlcSOhZNzZmL3U6b4XuKCpWo6YJ48LyQWVAq9DJBGYFiASICpM9LLMcYaqwFtfM4Amsqdk9g1ohLe2Gs/UEQaGzwTjyvMQ7a66Y2tgn/m3oh63Vw9wyZ7JuGLu8gbnCcmVbo7NApg7MaDqezjmv6pE3Akd5I9gW7UBaSK8y5grHPASCf2Sn3x9We/RqdQJxYFpYcWNj9lCm83Nf7LKqgXFh+aAy5qWF2YLTgKww2cMyyxGmCmtxdU/jPNa32T0zeUtLuwHKwsmy7mwwTmV+sJ01V0wd8lmuxpzJumHs8saVyrZGZ0X9iMKzNjYgbVnVI+8CTvJGsC3agbSQPu2YK9T5wcs0/f6w+oNnsxOH47QbG7OXOp231vLvP3P6xzKMC8sHlZEat3T9g6ww2cMyyxGmCmtxnYh1hvVtds/OXsnT9QRBIS+LSSpzBztrrpg6FmF8Ebv3iddt7PLGlcruFnvCYwrPhjFK5KoeeQ9wkjeCbdEOpIX0kcFcoc5jXIiPzfT7wy4YPJsr6cR8Yc2NjdlLnc9DIEDkwVzhcBeCwvJBZVif0jOyCs4WZIXJHpZZjjBVWIwL48IXL4f1bWaqm5YOUC1E9WlZp+cyS+XT26Lq9jdm3TB2ecMqLFc2hgJEXaBW+EIKVu/qiTtNXh+c5I1gW7QDaSF9mDBX2OfNHk2odn+INiAo2FD6z+ZKNs+7sTF7qfP57HW22PkjixAUlg8qBWqFNBuYEyBSICvM9jDMcoSpwmJcs/ytYy1N7xn0ClPdK/UEQSG9O7NUPr0tqmsWwh9xKusHY5cLmCykdjEnQOSszorpjO4Ak92hlVU98g7gJG8E26IdSAvp0465wj5v9miWTL8/bCjdZzO3Ejjvxsbspc7nrbnsdM3hm1UQFNrUWpOzJ9SaT3StKb8RhIXZHoZZjjBVWIxrlr91rG9TS5O8BYyT58u6TYQwTmX+ZnTWXDEFycY3Zv1g7HLhSmVbYcds5UGFZwIZ1tGqHnkHcJI34ofaIvl58Ys/MURtKW4FZIXFb6XRGkgL6dOOucI+P7r80+8Pu6L3bHaCCJx3Y6O35U5nHhKh9cguSr+U1g8qpWN/BzOCNwVhYbaHYZYjTBVW45p8/RXWHJykwmN96SibXY3pxpe8niAopHdnlsr8zeisuWIKEuHfX/lRA5c4lfWDscuFiWHMCIfzEAhZ3m2cjyk84yYkOat65B3ASd6IH2mL6j+is/SyQFdwNxeywvK3LSSFsMb4nT7tmCvs8+byN0vsMwTRBgQFG0rv2Zz5pJx3YyP/fjvouGTFzTpr07oCSaENo3NQOdAThrEG3yAsLDz+O7McYaqwGpdLUeZKCWLFRXsQ82tkU5NpF7eq/Hw9QVBoEyF0ygkYq8a7hRKEyJCbgiRS/rL+qb+mfy7rlbHLAmaF1cq2ZtuiKbOPLjyzIyQ5q3r
kHcBJ3ogfaYswvbHytEBVcPcWssLqt9JgzcxvzBX2lebyxyWdhwyCgg3Fvm8QFRZSWTjvxkYnP5WOS+40wi/3uZfXegJRYfmgcqxXgwNsLEFamO5xMHMNU4UrBZgY1RhGh71jMzHXd3lr1OHq7s35eoKg0CZC6JWTkhd6Z80VU5D0+NfxusHuPc96ZeyyYFVWK9tduFhMuugQynBnpfC2jrGjVjhUV/XIu4KjvBE/0RbZF2DhPYKm4O4fZIUr30qDtwdT/1kVTBV2cfe18++VmYOgYEPJDS08psJ5NzbcHGT2eesZ7T/FdoWfgaiwfFAd7Cm5+vL/jHesPIgL8z0qsxxhprAcl89f51/mXPgu8dnusvvlsuO9PTIHZ87XEwSFNhFC16ZgvDOhd9ZcMeXDz1j7HX2798Ih7YxdVpyL1vagsq1dn/p95lA/WXiqHk7T7LfPrOqR9wVHeSMe0RaF1zLnuB5eHcIBUBTcZYas0F4+u4tZBUmhf5V1rjxH5hHCTKGuxLhgNJuf9Ar5BgQFG4pdANFSWrEpRoU1NwoQCvJIFvXDqdSlQviGwR/U+Bs2M37kqat0DqpDSAr+Q/W/v8GraAjiwnyPAywRkhzphLAeV/B0/3GJPoS5k/FwO+x+xWLCr4/854pkPzQMC6fLuk2EYNdCZDChmw07a75qqocr2Q7Wzoo+GLusXKpsCHdwGe1JHV6eK7xdaM8zC2NVj7wvOMob8eNtkb/mGVAUnDZkhfYx7nwrQVIIa+xdPjiUIChUoXtp9nesNSS6AgQFG0r6jsRnLAFv3mk3ChAGMDl42pZO2y+BrLB8UD0WspLsAnlhYY8KlgQweTUuyMZMvczLtaH6taR/tZ4gKLSJELrlJJgNTbPRWXPF1FLZWP0Odu8F9Z2xy+BKZS9cxqOSIBgDdeNx7bWsg3vwq3rkjcFZ3ogfaIvsG9F9Rg1QFI47vgFZobXS+VaCpBDXQBzAZL7Sh9IF2hsQFGwo2bO5YhuPzmk3CvmiNDSIwMpxu3O6dFBdFr492rcYE4WVPXYmOcK4cCKupesy9XLxzA+/TjQGp+sJgkKbCKFfTgXjmzm7zporpk6FP8DuPdeujF3euVLZ1nLOseRM4WHURbXW9cgbg7O8ET/eFs2/paAoOG3ICsvfSpAU4pr8VaqvCsaFYyUEGcYclDcgKNhQsmdz/uAdzmGYkbpR6Jza7tbgJZ+fdzhUSAvLB9Vn+u2RfF9hprC0B5jkCMPCmbjm+VtwcqU8Noxf82/drNYjs7JuEyEMymnDGDWH11lzxdTGPPyFtNu9kzLrMXa5MnUx2XK2xi5ZL7yZ1T1Rq3rkncFh3gi2RQeQB/bFGBZ6sVj+M08GlDcgKNhQsmezb/pgf/POuiHkizKTEFVmBx7PFOLC8kENmLzF2dcVpgpre4BxjjAsnIpremEWfFwpjw3r1+w77Ejc2XqCoNAmQhiVk/PMnF5nzRVT/6/5g1EpWek4rJWp8sHY5YMLlT1Z449jufBWHbniMHk3cJo34gfaIp9lyAZAUXDfF5AVlr+VICk0azpxYBajglnZexe2LY+3ELobEBRsKNmzufKOT5+njhtKvgjBDV/y4Ym3f6cHE4XlgxoxfIzbHTYwV1jcAwxzhFHhXFz+LxdFlv5S1Ep5bDi/xmus6sl6gqCQpn9STnY7803aWfNFU8PUQ72L3fvEd/7YZcPQuU5qR2suF974hkNpY1WPvDE4zhvxE22RvZqdq26BpuCuOWSF1kznWwmSQrsmfTP2xRgW7Mp0jb4vGNhcQlCwoWTPppX1ON6dc24o6bntJicved+75EQxU1g+qCH9173TUmC2sLqHMswRhoWzcQ1ON0lgwkp5bHhjg2/FkLjLZd3xflxOZjfzVdpZc8HUIfRR/vc3/EWvWXHYvY2nM8YuW05X9kZao4XMQ+tKwB3doFScI6t65H3Bed6In2iLTJpXnhaoCu6mQVZoH+POtxIkheQBb9+M4zGCoOB
WJs8C1tQZHRYgKNhQsmdz8H5VTPpOuQGSJ62GNnvJO78rkR4o5grLBzUh/r1l0DOA6cL6HsIoRxgXTsfV+12d1lDOSnlsRHOdvLXfuYkedOqMDgsQFDr+j8vJbGYKqLPmvKlDlh2EjXSWfbv3ytsFxi57Tla2YO1XOn3UcuGt3vAzLwF5R3CiN+JH2qL6Ei1dHegK7qZDVmifs0ttUfwSxI/2ECAq+JXxi/NYs09guAFBwYaSPZud18th83fGjYrfxP6w38wlT/sc9n5YMKYLywc1pf36sKcVgEbhzB5CP0cQFS7ElXydLP+05bXy2GgNJl+7aeIulnUngnE5ma1MSXfWnDZ1LMjP3iyZ5d/ufeJ7f+xy5FRlK00t/Rt4t1x4qzd8/SUg7wgO9UY8oi26gNz86WX/Ef7+01cp+ZH2XY41PxrTFTf2HzPy78pL9l/9ISXl35aC8PvYdt+3f2riv5SjETZ/Z+rti5i8bbv2t32Rsv4qEkOh1ypgeuNVvs0vVHb9DcGVq7heeEZxaHZVj7wferB34ofaIkII+Q6OX7brfV1jeuOduz9CngMux41gW0QI+WCO3+HptEXmtzvZFhESweW4EWyLCCEfzJm2CBJCSAWX40awLSKEfDDmzwNDEsDkBv9QDCENuB03gm0RIeSDMW1R9meu7d+4g4gQcoDbcSPYFhFCPhm8aIXmrxK6v1vOP1lESAuux41gW0QI+WTMLwdtHH8f/b/w83ZO/CAiQu4D7seNYFtECPlo8KTNgDYhxIL7cSPYFhFCPhr7A8f78LfQCMnABbkRbIsIIZ+N/82ylP6/xErIvcEVuRFsiwghH87014v4V/MJ6YA7ciPYFhFCPh7/564D/KUiQrrgltwItkWEkM/H/nwiD5siQgbgntwItkWEkFuw/yPvjsV/oJ6Qu4KbciPYFhFC7sJ/f//925ujf//+/seeiJAJuC83gm0RIYQQQlLQEtwItkWEEEIISUFLcCPYFhFCCCEkBS3BjWBbRAghhJAUtAQ3gm0RIYQQQlLQEtwItkWEEEIISUFLcCPYFhFCCCEkBS3BjWBbRAghhJAUtAQ3gm0RIYQQQlLQEtwItkWEEEIISUFLcCPYFhFCCCEkBS3BjWBbRAghhJAUtAQ3gm0RIYQQQlLQEtwItkWEEEIISUFLcCPYFhFCCCEkBS3BjWBbRAghhJAUtAQ3gm0RIYQQQlLQEtwItkWEEEIISUFLcCPYFhFCCCEkBS3BjWBbRAghhJAUtAQ3gm0RIYQQQlLQEtwItkWEEEIISUFLcCPYFhFCCCEkBS3BjWBbRAghhJAUtAQ3gm0RIYQQQlLQEtwItkWEEEIISUFLcCPYFhFCCCEkBS3BjWBbRAghhJAUtAQ3gm0RIYQQQlLQEtwItkWEEEIISUFLcCPYFhFCCCEkBS3BjWBbRAghhJAUtAQ3otsWEUIIIYTcC7ZFhBBCCCEC2yJCCCGEEIFtESGEEEKIwLaIEEIIIURgW0QIIYQQIrAtIoQQQggR2BYRQgghhAhsiwghhBBCBLZFhBBCCCEC2yJCCCGEEIFtESGEEEKIwLaIEEIIIURgW0QIIYQQIrAtIoQQQggR2BYRQgghhAhsiwghhBBCBLZFhBBCCCEC2yJCCCGEEIFtESGEEEKIwLaIEEIIIURgW0QIIYQQIrAtIoQQQggR2BYRQgghhAhsiwghhBBCBLZFhBBCCCEC2yJCCCGEEIFtESGEEEKIwLaIEEIIIURgW0QIIYQQIrAtIoQQQggR2BYRQgghhAhsiwghhBBCBLZFhBBCCCEC2yJCCCGEEIFtESGEEEKIwLaIEEIIIURgW0QIIYQQIrAtIoQQQggR2BYRQgghhAhsiwghhBBChG9oi/78+t//fuNznzWtyv8KvzDY+L2N/uDz63LNyxhrykL+/oihsVKj8x6JVTZfNw5vz9XUyQp8HjGMZyBbzGrqo3j/iLf63MDgmcg+Vx/Xb7xG33FRCisP5zv
yrKN6/3yda4sk3JN1qLU7e4/WtA6Cugy/4YJ8jYteyqpJalbyd6UtkuHLJxaEZ/JcTZ2twOcRwngKssWlYH//kq/nt+mVd65H/Cr8VFskw9V6/M5r9B0XpfChbdHTjopt0RxdM1u0pnUg2vVE9YBf/dG76uXKKtGZ5O9CW/Qeid0Jz6SMlmvqnPYzCWE8BdniwrGiIDbepCYqb+m044faonNvgCp/zzX6jotS+NC2SKJ6Rv7YFk3Z31F7q35v4CPItIZ4bRltfMtlvAycdF62qWiRRePMLOXvQlsko43XTuyOfybP1dTpCnwePoznIFucj1W/mYUXSNQp3tJpxw+1RTLaWCrIb71G33FRCp/ZFj3vqK7ka+WL8Pt4elukKfLJL2N8BJnWEK+9v9bPvyFfIfOyjPGxiywaZ2YpfyvVGnTeI7E7/pk8V1OnK/B5vG5bZLqit/uWEKdf4HSv80Nt0ak34FuvEduir/C8o7qSr7ICH1+Ab2uLTJKkmvEZJFpjRLu5uRi+KImXSSpaZNG4eJfyt1KtQec9EruTt0VrNXW6Ap/Hy7ZFqIbfz/8iegLi+rd8Wz8LTT8Gz0T2ufa4fus1Ylv0FZ53VBfytfRF+H08vS1C7dr3KEtBqzUmqMvwGy7I12i9XKoGWTVJzUr+Vqo16sjw5RMLNAnV2XM1dbYCn0cI4ynIFieDxTv6JsUQEd9f4Xgv80Nt0bk34Duv0XdclMKFr/l34GlHxbZogZIklyK53vhcabTGFBP2RDebb/B3Yxov01REYqwpC/lbqdZG5z0Sq8Rn8lxNnazA5/Edr71scfJFlGL9hm+h5yDOf8/X9ZPQ/GPwTGSfq4/rN14jtkVf41lHdSFfS1+E38c3tEUNj0iBOPLWj5ywlIpHxbpSrW/9AnzXM/lkXrUturLmdXhv7ws/1ha9KmyLXhO2RVdgW1RhW/RI2BYtI1ucrClZ87ZfDuI926IVZB+2RRW2RedgW3QFMYLPVxEb798WSRj43EWU2BbNYVu0jGxxrqa0NN42u+I926IVZB+2RRW2Ree4kC9Zgc8vANuiH0XCwOcuosS2aA7bomVkC7ZFbwXbogDboteEbdEFNGkYXEVsvH1btJYKUWJbNIdt0TKyxbmaevPsivdsi1aQfdgWVd76UfwBzudr7Yvw+/iBtkiLGYOriI23b4vWUiFKbIvmvPkX9853hCFbsC16K9gWBb6rItkWneN8vta+CL8PtkU/yVoqRIlt0Zw3/+Le+Y4wZAu2RW8F26LAd1Uk26Jz3L0tkmh0+Fsv7f9+xZ+Ba3P0G//8tkXV80z+gdVfTYpVjEF4MTTHLeGi//mtir9a64H0+gWPV1JhveynokHm1HkkpHW5U4lO/4qOdXljJUql62mCKOrH6TIobNu2GuGcuvGme/S1ZaLjEWz1sjCm44kNY//36jvm5zW811mwoKswABpJtpFu4vEb7tX8O3FUVrtw8mCMXjfuia1BPlSOQWV6fjWwON/zpHzaL2v5nDqjzIvHpUGVdSJDNPXjl65RTFVn37yyrl2jeR1n6Jomd8PYlEMFgohLYCcmz/gwO1d9p7/YlZnodHYYxCRS+bSHpYPyMQlrIX8NK/nKr9JeRhavkC57Lg9oi0r4WqDA58PmSD4G1FqSyf1VUcLPE1MZBuHmunWGQ33jjzsLN9WgBsOpBI9XUmG9lI+BzrnLXPHQ+hyKP61E68qmf0XHJXYtyoKb3q4xwGxEtMoH3R20VkNJNKems3WXNN5w7sfsijakFe9OL7wOXU+OMKz9LB1j7za8g+ZhkbFfocbSILwZxfizv9cgmpDVRb36m7lq9OCK4i/+0NYwH5nMx9WG7gPzJ9DzRD5acSH7aYizzb2F7ex0iMmEOv3FaxRTle7rTRyVdeUaTaa7qA8+c7PYNoZfLIJV6D2cHr9rNBkCjJZGi2WuLHBKjS/zL8vyoSrJfH5U3pm1w7BrOvnqXiVIHIf7oxv4RB7TFjnfw6n
YHMnHgOo2mfSVVLBGhzfXH+yBPeKx9YAaDBrB45VUWC/lY6Djg8xtzoe4XMUmlRgz+OuKjnV5YyXK3ZkEt/eBzCXOYLrSHqw3GM4pibc99+r6kraLc1KiE/qe7GGENIYkz7zbaM+hhicjl2C1lp9Qm3ij2aYheLoXzeFOc7LCrhd3s9ZGtsb5EElyZwzBrSZqN9/zRD62xmu6wGzzJA0aH2YTMP3laxRTlezbeF9N6Iw3OD6YSZ4H6EJnbBrbwq1tEpjF5JjYbGbdBRlXggS0bd34hHlhsr9Itv8/tGQ6C2uev5aFfA2OGALH7v1g2XN5QFv0Kzl2zBdsjtqk79ZiJvuaigiObdQDDLK1hcN6U4kbg4yrQbv9RvB4JRXWy1mABpk7kWUli7FwTscldi3KrlW/t0Hm/rTL/JG0d3/DqYRzWswJ9Ne0zX7ZdOcEW0aeIIymQkI6ILV4jVG+3KCgyt5AJUv9nqmskH0eUDTG4bwOoNfuZaz1bc3y0Qiy9Jidspjt8p4n8jEpZl8ak803Mo0CphNk+gHXKKZKF2AgDEzo9ieuUWbMu9NFS29yZsGYrvH4xGf+FvKiLWQrDpupvWM6c9k41LsUNqpZTBAYGyJOjmqev5Z5voZXqe/87AY+kQe0Rf8T7+VXBv/skZhcN8lXFQx2gtaeK/3F2f1X0sxRy/jIUWqzosYP7f0g1fr+/dPPuCqY3Qupx+NUNF6O3d4RJf0NVvG4fmEaj5ssHzHqEnhzVid4uBIlrP6KRjdCCndkTvUQIBbZI/HO1pqwKuGcujlR13YT0O9q96pEV+uviO+/tS4TCww9WUny1Lvdv6CxK7jBhioP/P8DA/Cxgl3w5x1qaVo1kf3C4yHYQA7gofyPxt1a69qa5iOOq+NlUFN87KTjer4YyZTQ80Q+qjXxpQYhs2C2ecjrkYnRayHTbufdiM4LR55kt0wlpkpVMChgTVpZah+HUpgdDOZ1Gn/ESD7PicoLse15xHbQsPt5I+mjGFGVvVJk1MSXX/V5JSC58j9FJ3kMpjHpeFcriFg9M2Et5K9lIV8QdK9SUmIFkQ2XPY1HtEUbx6/pIUuHTpP8NAVBS4fRiElJGKc2KzJprOm4celQCGiYYTp4vJKKxsux2zuiVIiWjCB4Uxc1S07qBA9XooxjNfEr+8O4QBQKjSdmCcwcEmxsiiKcU5OTxmh5Yvb5Tgab5ftq3ezYvdxb4+6YoScrSZ55V5eE9OCjLo/3x+gmhOwq2KXxw+rJQvfnKo264YgbgiSIri03KMR8yDAYassppMRkJNrreYKB9UWVjK3p5nsuDgkyMXotoGCdhHuHYPclbm33zosDg412RfEWH3XSnIOMBwcjI5vXX86XEZqkGO0wNpVEjSb41qoLwYPTxGizuQma5WbLTVJtYW3fIVXYOHQQVRj3TWhI1VBBxKrXnMy4NhpUZZgvkRgjqnGs2CUY7MyXPY3HtEU2byo7ctJJPgY7UWsbmyW7VQw2ZHhsm9rc0clwSj69Yr57+rp5OI/g8UoqGi+HbldEaaOto2OzmD/d2kWkKid1gocLUaqJxle3TUBWbIwCTJyFzrFKdep4ISfbf57iQ6MdjW+IgX19G5XTHTL2RGf9vMrWvWvv6safozOVyaock52iLoQgRRZ2EZExtsezCSUA/W/RlixuFGRd0bO1mI86bJzcENFhYvt6dvZk+jiBnicQOl9UgsHS5jL08WiIg9dC5/2qeLLqtd+6yZ2MDx2dx2BDht63o7LOXiPV98bw/1M0mKq+FNu23+Hchi7CYCMxoi76ZRbZw6/A/29kTsVHx8+KKATldVR2+DOLSYaCHhNKVTc/VmauJsfnSRa1+ZpcpX0bDCrTZU/jIW2Rz5rKMGiTn6eg0YqEeRkeh5GnVdG5Q1e3CictSr3tNcywIni8korGy5HbB6IU1XRp3T54o8PgTbpkrBM9XIhSVPzdcvMJMh81VLbvBGcxAlEYzmklJwepdlCWbEB
F5o9EnWHiyUKSp97h4Pw5GNysWuvqgpBdIZaLEIWIZ7oD9Lw1FdaVHVvzfPiIdS74o0YwaNGtMehHlQlVt7oy31xT6FVUo+8fdg4KKtszk9pohDI8NldnMMh9O1BjNVQdDg4mZPUUuvZcbA0yH/xdqfKD0Wxq70AsDysBZeYNqAyDBJk+PJJhwVvRjWLoGIFUaEjjm+QrOXRdgUGXr9TKOR7RFoVj9cUak99JQaMVCQUk6t2ba1HLxsf+/iGQSohICR6vpKLZeeC2QZTi9mG/1JuQzgs60cOFKN1AESPBf4ssGQfo99hR70JEVSmNt+tG0E6PRnTUJVUfBDVg4omPHPg1M++aCxKx0xpKXxdkXosorlR7sWjmO6Rxh+Lo2Jrnw7uqLoban9SprqnTvahUGqyI7MTmohHjSYM0yHTc2SdVRzFENXw4JMMjrrCvjGLUlRBc6rM9GFHoWhvjo1mLrcE7oEaCen5gO6EuPLlTOwuVoAZCgsZWm6SK9kZYEHa/lL8L+dpoUqa7YNBlmOmH8oi2KM12FTY5SlMwzaRuhcHs5lpkxvjYya1YwOfIIMzq8UoqGi/XqkGUmpdDpHVt8EZG0fAFnejhQpRuoIiRwdnKkjzAXSqDNlMirSuDdyvxHnhtH1PlqJKOwhKytO/JPMlT72Cjn3OZ1sSpsSb9DYlbnV2Cpg7nOyQbbHhpbmuej1AqMtM4JGYmOavTvahSqdtvvrnajp7Iun7Z5Dt7qQxaEyI9Voah3zf3raJHsU/PDyZPxhq+NGQwja1BjWCQG/ExRToxKjLXOgXmlRBiBMMtN3xMcKLRD2HJ6Gz+ZPZcvgpRYVbaQLRGdh/F09qi6nyTozQF00xmJ927uRadMYaDoR3R6xTaSpgrOo2XfbctotQUpl/rd8qzeV6n8XAhShl4DTESdzLIkmGAubPRveDdSrwHfn5aJarefS5GzDyZJ3nqXUhMi0yL92p4oLqTuNXZJQSoC2NALWncwVhua56PUGQyaE5AtuofqV/Ui0qk0YhLlHwebp7ndf1MLXZVOJhKsCyjw5CfnXiRHRcGB2JDM6fmmiwu4QpmMbYG52JupGda0dm8bMYrFyrBx7gzMRvTLqPWQ2+lZ3OYv3zRzL028NkhAdEa2X0Un94W6TJbEcHQjog7+6+EuaLTeLlWDaLUFLVuuNv2O6XOXNBpPFyIUlYcBgoyH3cyyHwnQF2WO7vLOyGtxHuQaWNwIGLVEYVhWD1mnqTzzr1F75qUHtR5tbsSRuKWSJJdRF7904UDZ0Aad9gkt6VSDA5EvJ+oqOwLZbDPHIgYn1tkti7qRZWK3TWSz8PN5WNjevZayHSzymbVfraovLoko8OQ31cGbdg7Z6+RagwMDnDhuIFB5W26d3TeDaIRH1ODzGZb950CMtvaFTE+pxYmDvmYdgcbL7Kjal1VeWevfNHMvTbwWWkD0RrZfRRv1hbVrWTUu7kHatbdOZF06Oy/EuaKTuPlWjWIUvNu6Ia7bb+T2g3OXNBpPFyIUlY4b3U+7mSQ+WGAvUT5rYN3WbwYJHhtGXRQHdXfGASWM/NknmT53EFUfDAJMr+lHFGsxNC61d3FR2gPckQa9x4sBrktEXbYdWWAIlMrHVQlQWZrmfaiEnEsZpuQ+eaa1+ZCzOpGpptV1s+ehXCQMjoMuVWzyvLzMugAHbWeOD5HQ0PBLMbWkBiJJTgxodMbTel2nVLmlRDc2zkV034KGBx4K5fydylfG6JgTnycp4poXSiU07x6W/Rn/zf2FH/SnZtrELF3UEU5nZNcCXNFp/FyrRpEqSkGb9uPZNDYPa/TeLgQZasyjVLmmwDVkJqVj5kJke9Lw9Yr8R5k2jnQ0bCE/o9kytA1GCTMkyyfO4iKzV2KzFeW3G/d6u7iVdN4Ejp6It3TleuIsMPungxQKWqlg6oclBfoOOtapr2oRByL2V6B+eaq0eR1do9k+gHXKBpy+3Z8q5y+RrpA0J90sI47Avk8j20
n/2LRgX4+8DG1GDvhJVAhBg3zSggx7nQcWvyy3MmOCgOLyNvVgsyt56tzlWal3V32NF66LXI/mFPonXQnrSr2VkXUITnJwkqYKzqNlx23A6LUFIO37UcyaOye12k8XI/y0InjFlEYBigfs/vgJoJ3SbyZCZBlJ2fX0cBADX+O6A88mSdZPncQldSEReZ31nxvbXZ38RNTZ0BHz5VgriPCDntwMkDW1UoHVQH2a0ao59aLSsTxdG0M881VozkVl4gEmW7q6vw1imoLB3Bw/hrpCuB/UM0E54t8nscmdL9YdKCfD3xMCa5IrJ4IMqeEeSV08p05tP5luZMc1WL+KjK3mK/+VRqW9mjZ03jhtigrmt5J52lVCyGNIusQItlZCXNFp/FyUA0GUWqKwdv2Ixk0ds/rNB6uH/j+Q+5wisNazjWsWfmY2XATwbsk3oEbiXaHugFiA8u/ZCTaA0/mSZbPHWRdasIi85Ulz1ub3V38xNQZ0NFzJZjriLDDrisDZF2t5JiD2f+dBUud7kUl4ni6Nob55qqxl2Jl9lrIdNz5wjWKagsHcHDhGvk0n/glI+eLfJ7HtpEdgDXSpNjHlOFNHi+BDDOnhMyRnX1Rmu/WoUFMXS+So1rKn0HmVvI1vEr90p4sexov2xapINI76YHNmEURuiObshLmik7jZa8aPKLUFIO37UcyaOye12k8XLqkOnaMSzlXsWblY2bETQTvkngHfiTaR0g9dMedkJceojvwZJ5k+zklNWGR+YMVz1ub3V38xNQZ0NFzJZjriHByWqKDrKsV/dxHNw7Uc+tFJeJ4ujaG+eaq0cQzey1kuqmr89coqi0cwMGlaxQaI0inOF/k8zw2OBixRpoU+5hy1JedPT8y6MYzr4ROvqNDw5i6XiRHtZA/h8wt5Gt8lbqlPVv2NF61LaoJKf+WVtkgbCWjI0GpTZFF74an3GElzBWdxsvU7QZRajz2tv1IBo3d8zqNh0uXtO3xJ+nOdaxZ+ZhZcRPBuyTegSPntCvu165jreWI6sD2PMnyeeRdasIi88WCHtVKpK3N7i5+YuoM6Oi5Esx1RDiJweqseKQJ3/i1/3N+OpLJjZ4NEUdfbAzzzVXDXCgl3MUGmW6ycP4aRbWFAzi4eI3c75UsLdhwvsjneWx1m/SLRT43KfYx9cheAvncDWeWy0KqExwax9T1YvGouhMbMjfNl443OlepU9rzZU/jRdsiHLT5nebxSWc2VWY2VkR6LrUrYa6XLwaFNBUNotR4rBvutv1OMmjsntdpPFyJckPVKkG/QZSGAcrHJFG69b40eLcS70GivVolx4M4i1RQVQwS5kmWzyPvbO5SZF4s6AkvhNq61d1Fbe6qaTwJHT2R7unKdUQ4CcHqLHik+XbRybju0rMh4uiLvUbzzVWjyevstZDpJgvWmHxMLPhrFA25fTu+Vb5wjY7WaHGFy6R8nsaGHXpfLPK5MeJjGhFfAv0sHxPc1h1SHe/QJKbuKSRHNa8Nj8zN8qVDl0AZG5tZaS8sexqv2RYhI9bw+KQTm7qgzWG6/ZiVMFd0mq3XfBGlJhC/od8pt3tep9FaiVIX/cIb92vhj9yoIgYVu1fubNw6eLcS78E57cD+HJok9JnZnid56l1zJBGZ15TLxwXPW7e6u3j/0ngSOnoi3asj11k5LVGBmWl6dpOuKL2kF5WI3boN6+B8847GLEqZjjs7P3sWwoYyOgy5VTPv/fzM5cjeGGE4wR3BUmw6cKe2YGQWs8W/BOP4V+ymZXYqpl5hhO2X8hdZypcquf2jJLOzsOxpvGZbJAOvMj7p1qYaTFKohjBYYyXMFZ3GyzQVDaLURCLSutbvpHaDMxd0Gg9XopQ1p2pX1jcrrIO5s9Gf4N1KvAdeW01hsIIuX1ox82Se5Kl3qj04BDMPz/v+gNat7i5ensaTkOv5TXKdaT42RGU3I4OjYlvUotcQ0TQqEcec6JFjIJ9Hm3fy6owkyHRnlfrZq7wQi4w
OQ27fWWXp/OVrhM1G2Tlwbi/FJp+9PwtGfEwzVFs36TkFZHZsNxyN4hySz4OYeoURwlrKX2QlX2rARyki45HawUBZWfY0XrIt0oE3Oz7p1mbvlNN0T1gJc0Wn8TJNRYMoNcUg0rrW75THeF6n8XAlShkmie8j64cB5s7GUw7ercR7cE67RVdgMGRmO4ShOPfm3onCwBuZRsrV9PTIErdE0u6iBuuBpvEk5Ho+1hWdHFHZfZLB6LCksEJJypppVCIOK/01ks/DSsk1Zq+FTMedna1envw1iob8vjLoe/HVa6T211a4I1iJTZ3zZ7ZgxMc0RY2Yj92VMjs40Q3n3o51aBpTc547a0cVasOTL/KGxUDYXRSMzJeYsLLsabxwW6Sfd8Yn3dhUQTwvQWZO5TY9/BDXSio6XmLQRZSiw8G0H+povGRFp/FwJUoZ4fMasmLsiQyaQ1OdulnwLvOr71im3Ww4QDfHYIxo9lUXkiyDkXdqo0bTINO7AT3kWbCJW6mnzeXraDXker4CO7ZEOg7AqUxdkl1D+mRN3aRnQsTRFRfEPB+qPs5Ei0zHnS9co6iVnUC3sr56jXRF17zDJ1IG49h0oJ93nBFVGOdwilrUz/Ix7Hgwr4RcxTo0jal7CCEsGY3z16Cz43zNr1Ja2ivLnsZLtkW2rnbGJx1tqnaeQJ0LCR/ShFAIwpVUNJGnqWgQpRiLCI/9wk4yGuZ4TSd6uBKljPB5DVkxDlD9OPxSwlEG79LsRxOVoH26SmTB2pWdeLKQ5Kl3qt4/Bpmt3qpDE+cTtzq7BGkaT0Kqp8LqWseWioenJRrVjowGVSrTwZ7IZp4ELeCvkQwGm3fC8UZaZDruLMLqZl55cTsZHob8vpPK+uo10s3nxVLwR7AQmw708443IoMYXIhphlhE+nKnKjI7ONLoHrAOzWOK57kTwlqrjYDMjvPlBkBkxiNfYoJIJsuexru0RTrfPelgU7V7+ZPJ6MAQWeDNwaEa10oqmsjTVDSIUghGtzuE6U5jj1d0oocrUcoouDsmWxEDDEMB3mLUeBf8itqBoK0bTs/mQDK1O1iM9VMw8WQ5ySPv9OCOcAIye3iY1UIkcyt1NW6dKiWkeiI7hD1bIh6elijUCNVONz2q7adDjoaexEzqWgymm+8F4o2rjUGMMh121q0OYRgKTTnK8FDyzjfH6wl1qraGB+PR9XvkZa82xTvhCGQ0jE0XYKCEVGtw3ghUBuflERsw4fdvmFZCjFFRq7psHlOamA1rpSDDYf5aFvLlBkqzSgUYKCIZLyvjfnV8hR9oixYWuUEBme6edEirDnsZUwfa8w8Sg9rLHKqylVQELzuLGkTJe4ftj5Vhp+akEo+XdILL61Gu/A20HVkwCRBWMVJEZHSCdzEnmYlfdRy1Z1Xyx/84XrdcB2GtYezJSpJn3kHfW9lk+1gmj+WZdiRzK1vXuJbGk6B6XjFmqmer2bRg8xEjlmE0tO0GidrTz4p6cljoeSLi4AgWYzDdPA0H+zufPDrvFjXno1a8eyKy3sj40AnOZ2e+yTDW2fVr9H+//b/34TbTgZu3hCNYiE1G9t4imKDhttxV3HW3jF6C1KnjqifbbWxrqiQtM7eF/SzkMYUj2PCOLuWvpVWI+dIIMBB0I7tTEuV8mY4Hvl3nx9oiCHa515LBYff48YBVJKMjr6qAAUZmywDMGb91hxDIAU46cahuspIK7+VGmooGUdqiPYzDH7MiZrkJUbfauKKDwVqU1Uql/DguTKbsaqMA4Ym5StDpX66YE+wTt4FCo92kx1eJn9PVuy8y8O57dL7jyUqSZ97VY4gB4bPMtckMmzoGbllp61i6MAEeJ2eA0UbX1jQfMn1EDMdNCtQ2BLrPMbv7doh6nojYmi2odxhMN08UjhcHggQofPkaxRCC83su8spSc+0R9g6mGDN7q7aZK+ioJR6BLh7FpuO64shqFTXu7k64LDrK5KGve8YtO1c9y75sWAUxRsElWT4
PY5KR3UJxVgrz/LXM86WCw0ZVMGZVhhX6f9NluwDDh/IDbRFyXX41YVuPIL0WIpZa+qPnHLaS0ZEzPRwM1JZJegPOEr+iUX8GV39JXbB9/qP6KqpxBf+UELvzspCmokGVNvw/M+b0w051kV2i/wrxOZ3g8kqUe7I8/r+oPFDxnpShzALkatfZb7/VCd41OQkm9m1krtUeVwkmdW7/cSv71jpyxjxDT0IYSuPe0LsCFIKLsCCf29zZ4oykboVdqhtWLV/Ysq+NLtulfVuzfPjRntCyV7rZPipDNaZXo1roeSJim9mCGsdgY7L5bt1nQmUynSLThS9eoxhCdH5YWU2djg8G7niP9613VQwbdL7mbCG2fYOyJv9i2bNoPWoeTscevktG16kY0z7dq4TonuCSPI9JRu6EBWelMM9fguqM8lUGG/2rVHcuOdwUdEJF/WUa5OhCXOcH2qJ6qxRdHbS8ysYfFdWtRHjkVfUxkM8JmC409guj44fKwS+1UT1eSYXzUvB+hNU7Ote47NxtsqwCS/R4SSe4vBLlUbCefnqhEAkL2gxsOJ3gXeNXEvAG9FvtcDoAO2ZzdWeM+xEPPVlL8tA7YZQvNxBUe+By6lbnsJ1WZ2FDamnDrhzYmuQjDDtHUE03s79UVC30PBFxTKP6hkFhvPlGG80fXYP5BJluCb7Mr1EMoXE+TTX01cnla5Tloe68F0R63BvNEcxjaxTiF0viUftwOrI9rcfpUY/2K9T5tMx8kqcxidClQfBWCgu10TDPV6MRr9KG31g8ny2bVcdX+Im2yN9fjTJdVCmdsOxVt1IxBntW9fOergadVpLzH6Y3FszmqT/7lVRYL4FIdjoFKHMo9QOv3GY5pLC4Jj6e0wkur0TZPYBOePthRmeso0rIwIbXCd61OUnufXUq0U42jNYtR1qwro3gYODJUpI3Rt4J7UHUeRn5A1Fz3UPK3dpoE2F+x6DQWxiBXgjLGxvZGudDxi661nE7H6yVzJf/H5/ShohjFtUYBsJ4842Yhk0kH3Q2Q6YfcI1iCK3zCN6yJyKp02RDk7dm8th4DwXDhuQIZrGF7LRfLO3J7CptIkFcEFwaXfVCu9zOp2WmS3aHpjGpGIMDb0WZ5S8h+t/mKxgtE+X/nUcys6Mzk2X7vhg+lB9pi2ydYKLRsjmRDYYnrdr6WZ1K0OmdJOlDrL78gqk/+5VUWC9BkooGmdws28DCN0+WZe9xEXiPhZlOcHkhSo3IqPypvzDstz6Qye0wrTMxQCE8MPrr1gfBuywnx69SC8ZCqu1c2nDT9jxsMe62uvEqXU8WkgxG3hXqL9kL+uvcigowAGqt63TqlhASES30F3p2PWRPCcbGtkb5EEmI2O20zTq7dlLXHZ82ep6IOOwDxzAAw8033P4yKR9lLkWmt51tFq5coxhC6rw7c3Pk6nUsgdHBBG+sM5jBqCE7gllszhWZFCtOzaqIq6ISYzK4ZDSHv807pxqfRpWQxdgkeRKTyFuv0qOa5i9hmi9XyiI5PgG7775ysgwrMHooP9IWbUJZ+eu4TomW1hJ+S3qjDHsnrRnSz+pUgk4f/Nbfq9y2WDr8vbh3bXG5erySCutlpUlFg8zLJ3h85KSSZnnXdyk8pyNzh8vzKGXQPMZqxt0DwzHZD3DnOLRWJXiX5+T/fuMcg4WOttmw3XHv+JrT00veDQJ0PFkppZ2Rd4U/v9Va9FCFGAC8TD2vU7dANxGF0ULLoQdzibWZrX4+VIxBxTjeeh6vZjmDaqHniYjjProJBpXh5ht7Guz+jZGDMvuIaxRD6DmvGQiHdOUa9b3RA8CgoXMEw9g29qzus2UY91UTTiU9op3hBdjoXPWdfiWkMbZJHsZUppIszo8q9TVjmq/hVVKyL8LxMp3E4KGca4sImSDXo71OIu5VcJl7Tnn/IFtI+ETWSL8ByDKSvk+7RltRtF/chAjPqg62ReSRyH+BJJU6bOzL3Ke951si+Jyfg23R15D0fVp
btP33FD4REnlWdbAtIo9EvtrYFsmF5Rf8OdgWfQ1J3+f918WnNXrkYTytOtgWkUcybIt6v3xS5j7s9eNzfh62RV9D0vdhVcf/uiB9nlYdbIvII+n8qpD83lq3hGXys97zQbSkA9uiryHp+6xrtJUE/+uCdHhedbAtIg9F3ub4y0L6d5y6f9ZG1nzU87fF242WdGBb9DUkfR91jcp/TeEjIYEnVgfbIvJQpAEKj7P+WlG/hGX2k95z/kfuFdgWfQ1J30fVHcuB9HlidbAtIo9FHuftecbPsMC/ILfRL2GZ/qT33PwT2WQZtkVfQ9L3SYW3/ecUq4F0eGZ1sC0ijwW/NNQwKGGZ/6hGgr+DdgG2RV9D0vdR10j/+VRCMp5YHWyLyKPR30fzDPsE0eCvr9wdtkVfQ9LHa0TIV2FbRB5O+Hd1tqZo/F0nOnzP7w7boq8h6eM1IuSrsC0iz+D3/g/rnPmXdQghhJCfhW0RIYQQQojAtogQQgghRGBbRAghhBAisC0ihBBCCBHYFhFCCCGECGyLCCGEEEIEtkWEEEIIIQLbIkIIIYQQgW0RIYQQQojAtogQQgghRGBbRAghhBAisC0ihBBCCBHYFhFCCCGECGyLCCGEEEIEtkWEEEIIIQLbIkIIIYQQgW0RIYQQQojAtogQQgghRHiJtujX/woY3IIPiPi3hPAHo1vxR0L/jRHpsxXJryeXyP0ej6dx4ytNiIFt0Y/AtuidYVu0iOTpyTXy2Y+H3jLDr9+/n5bP51/pJpwHBPTnt5TAr18Yg9+/VHzLF4p8DbZFPwLboneGbdEa+BIM31cP5mZtkfCkzuhH2iLh+l36o+dfsI4fGz23+shHco+26PcGPr4GFyJ+UAx/NjMPefie/4ZmvMRJsi1aQ9K08dQiuWNb9KRfBPm5tujybdKrqBjHj16JbRE5zz3aoiebP8+FiB8UQ3mYHvKF/jNtUdkTH38OtkVr7F9ObIsu0+8jnpDUn2yLrrUvtisyNWC6Il5Tcp5btEVyGfH5NTgf8aNiKGbety16jZNkW7TG/u2E4XO4a1v0hKv3/Cs9COdKX4Su6Ncf7zPq7nl/CIt8OGyLfgS2Rdd4jZNkW7SI5OnJNXKHtujoGP78UUnh4Xl9/pWO4WwB/a4Bnb9Qevaxn0KzxKaIXOUWbZHYx+fX4HzED4pBnoz3bYte4yTZFq2ynRf/gv5XaPuI+rX/+KB/oi3aqH9q+uzOmojYFaEi2BWRy7At+hHOR/ygGNgWPQC2RS/Esx+Pn2XYRzz68v1QW7Sf4ekb1fFXpM0ehCzDtuhHOB/xg2JgW/QA2Ba9EM9+PH6WvI/QAnx4Bf5YW4RDPHuKag2DA5HydpLr3KIterL5C5yP+PSCHHlJ3rctkj3x+edgW/RC3LEtgvjRUT//SnfC2fu8k1vL0TfG1NY3P0zko2Bb9COwLbqG7InPPwfbohfilm3RtTZixs+1RXqx2RaR1+AObZHeEwxeg9MRPyoG2fht26IXOUl1g23RS3DLtuhaGzHj3doiWdMY+4mHiXwWd2iL9J5g8BqcjvhRMYiZt22LXuQk2Ra9EPdsizTqz2mLLsUja9gWkYfDtuhHYFt0iRc5SbZFLwTboofx/CvdC4dtEXklLrRF+BeL//er+V4QqXzaf+jYr1ZJ2f99Y/lJpEsv2++6bVbx+08FCxtui3SVIb8w6WUK33+iAxUXwAoLEe/OepvTGPJlkS1DjZnwpBwnu/CVbxM2S8b47Bqyw1w4yZqHZJci30tTfZXBztTB+s90F48W26L+RQH7kcSiLTL1AyF5p6bHNIimc096DNRFrB9dcizqBgZgkuppcBuu3LI9NiaBTg98xMkkTq/HAGyE0YF639jrhyWGIOz5o5s1K/drZSf6V0CNYBDphaMdzrFKRlFN3dDP8jGwm/c4H5fyk146cifOtkXmh6xuhB/VJrLt/82/atzoFNz8VqC23jv4go8mnUF7nSBx5OWuG4S5rC2
S0STAhoWIcWGBucyQOI4tu8siPoOKfXj8yWZPl+dImF3YPDcb3vA0W/lhYuiwpnweml1EWD5UZ4zG1EGrsH0VrrRFw4sihDgPc7KyDKuGOYz5MQ2i6d6TnKG6yMoHzQbwSdH1GAgD5zZWatA51Xs8JoGOvZjwpST+/rOD6QnqabtJGvUoLJmT07H++LrU9V7kvYd0QwVJFCLvZaUXTlwVhoqNWT4GYgIU4/NSfup59WIgn8+5tshf8YKrLZFsWvL/B/6pbIsXVjGbMdm3P4uxI7nKG+pUmOu0RU0AmO8yjzhqxCcisO84WBZpVDeMdjsdjy2gC/40ZxOTEath9tx0QsLAcWzVFkinYz/06vzUwWj8VyiLhEnBFvoeSwY264djh0fTYxrt3Eltj3EMIkmSg2lB5zDYmKRlGtxG1IFJzILxNguHM+Dk4kZ9B/MTNNz2nETsbYzrWAxt6Wzyh/mCzhlJVG4KsXc+GDTobBtOtCbDqGZPWj4GEn83qtG1/GSXjtyNU21RVnX2WqmgKb/wcPQeCkwntBatzWx2L+mZxweqGebUdL1YovMrCSA3WZlG3D7U5lb2Yxgua4CCpSpnlgamCkjYldMepasX0vAks0m/CyTGOibmDmb1VWi/EipDZ4XUKFRk9S+rsm81P6bM7r5zL7U5mSWrLoLk+K2O7ojB2LmV4DYynQKmhYnnYy8mzLIS6fk7eu4sWknNDok428mEhaJqtaLOIRg/LzpuEifSbkY64TSr4lhQd/SzGvLkHu93Zy0/5nwH95t8OGfaor0U9TdK9l9eNaUlYxWLTv01cZ1WsEx/d9f+y4c6naFL9LcZ9j/HIBOFvZDVq92evVK6HIMeus4EU1Db9X6oju5fhIf3YZ1nHjHGe4QYyRRIYxDZcFkkBLRz5FAC2Y92aEq3k/9FMrDKJaPGXgZVpZ8ueIIS0z0O7TQLIcFH1dldVLDPFDAxddDnZv+jLxtNFiv7NoiitVmN4t/+hgo0NKf76Raw1cIxqWivCBnt0+PURo69ZISQzFY6lv8VnZoZo6MSDIbO2Q27wYWT7jweM8+HXkyYZiUAfxt3l7fUJY2y2sVAqJkpg6SOYUj+p+hkb5dK6lhG1STUqys6ljlDMBHR6U44ZpWMo1oIGi150Nrjii4s5Se7dOSGnGmLtAyPSmrKUsaFo6K0/IyOFq2R7C+Nu+SOeJdK4R9eyKSt4eaOheuUk14m9a3ahu+J9ybAhoWIxUNjowkhj2G+LBIC2tGF5ved1sNKAjOrVKkx3Lcr0yaE7WurCRCDCrZtSsBmQsbV5YLK5w7K2NqC8TaLFbUxCrpxsLzTu0HxCX/OVcGMikbHhHAw2hQ2wa4uU2ZLn9qIajc5PZbLsHCImqhUgMHQuZXgdgvWhApcTaig6/nYiwmydpQVT7OXjP/3e+WvNCjRgoJcYVSAXszeoaIKG403UafakF3MJj5WrMZoJ64JpOHANZtEEUQzuj8GBRk3m4UoFGw8yU926cgNOdMWlVJytaK1hsGGDDeaAm90XNGiFjFqaa+aWa6LnT2tcAw2muuUkV4mvTs1ZtXxvuB1CAstiUIT8fYN5RRk2uY6j2G6LBICAhqXf1+SvHpWkqFjb1hEPbu6wM3aQZ4FEYZFIjIby1jAf+ir+tzBJDeIcpDnTcPNqhEMNrKEH1+Ve2Y3DZHB13RVOCYZemfx/7PUBpLjl+0P26KwYXV01aHjz2vg3FJw6UmrjrtKhb7nQy8mzGxHmr2SQxiTpaWNebmOM2+izuHd+HlJkhFVGtpw8MuW2U4hamyIQUHGUauJorCen03LXTpyR061RQ1SRsclkGEoPi23cJe8Cm4nRi0y27lq6TsjmxwLmuuUkV0mWK+WcHW8KypsLmflSsQwikFhKYZ2WSQEpKTeLLoYbIVV4nVIjapgEJkEkGZBhSGmKJRhwZ/w1EEElKyKOw7xC1KjB8jsUtEEoQxyx6a1YVGzw2slGtGgyuo6TRQGI+e
WgoO5cVamnst8x4sJC1nxtPpyCid211NzIWe/Ey2i4UVDUfmtg3UdhvgMOo9Bk/mCuoZBArxoCSkRWYinNS7jqJVGsZyfxB65H19ri0K1SVnFayWyqiOjeG/Glyl9jHbSpbLicGtsHqRPgm5dr6zqxHsjwv5jotMY7MxcamJeimGcqkIISElDx479F3whGeluarjjo8xFmwdpFkQW1+jWh6oO475zBzXKoJEvG+HjyhNe0ekmpoVjUsdyw5PUetJEi/VqQTTiXqEonJmRcyvBrV2lmedDLyYsZMUhQfmpgXZKSOj/HX+2zYagQY3quDGkiDDo9HMTcicjry6SwcXQHRqahKTSJv0ybtYmUSznJzFHbsjX2iKtJQxGxbwLdUFan/G5qYTL6OjMiUF8npkHyWVqrlOq05HuXIl4Q+bNqqUY5s9S+j6IrLUt0v4rMUoGNhCnGwviQ8fHfEUly0Ke4OiejBp35w6Wj01u0iwOUW8w6CV8R5Ubv1SKwYFIoauO5aU4Sa2jY0dM4LPb98BL3XmNnFsJrnPSviamng+9GLOSFYc47FOkNjBYALXQ4H/PSVxozkL22tOlhqLzXprrGGS+HoDGYs9DDWCQ0QsHv7VdEVkMyJ/0hoybsJMolvPTKpE78k1t0a7TVLaQS3f09uX12rmHYrBejLF5kFym5uKnOsnrYLkS8YbMG5tLMbTLIpmvPf8nWy4kw34+EJXO86N7RqOVzKWOmyEqGTW7inDkYJ6bXDrC1elsuSpParFi41edPLeT1DqcuwdiYrcgKs1W/jTcaOTcSnDB9o6XTj0feTFhJSsOWeC30u0xWAC1EAmZymTYa99eDUU3fd5zHYPMH/vI0OpLKka57YQj2H1FEA015y/jZrskCpGs5GcUPLkP39wWyeemkJt698hsXrHBgR0R12swMa8klym+GrlOLyiQT05dknlzkZdiaJdFQkBCJyrIu8ZGyYCj8rE1IGJ8DqjRbjKzLIgoWSDyqquj6K0IRw7mQWZZHKN23KBN+I7Ox5B6q1QOZ+Rzblv1uql1qC4GByLewxaVxpr30p+XDEbOrQTX7Oj3mHsuCrkXE+a2A1onGCiivHQGiu7p0T8TbBFx64KI8VkNNVGLdPeno3Mg88c+qn+MNd5RZnWFi//4SQFGHMdCc/tl3CQziUIkK/lpjJFb8oi2qFagjEbFrPemqb3s286gqzbaC4eJlHoNJuaV5DLtO1dDqc7uBAaBaxG3qVyKoXMChhCQ0DOd6RrmyVCNDqLRos70Ykhc7XrpdWUQN507qEZikJPMJLhk9RK+o8rRfG+Vc0YHG8257AaG5bEDzZTdLxk0trzv3ueBcyvB6edmR79UBh2smY0kRWOwLmWP2KFbuX3E24UT2ElKtN1qXsehAg9EOtE5kHnjvYyPY5sHpzt0qsbIw1BpikTGHWM2CpjPcTrpMZLbcakt2hr8+if/NmoFymhUzJ3aa+o9YPbKfg+6Q91mZl5oL9OGfZU3Up3mcXCcjLgk9ojWpHIcQ3dZJAQkiCgzLfKusU4yjKOq0UE0WuqX1rZzk7UsC50ER/9kEGOZO2g+GrIspuQXRQf6OSPPrAizVSLfQzO7hasySa0Dain7Uhk01eGPI5xX3zkVYmARue7SOWm/hww6YOkgRWOwKCVPqEzZHC1Xzo4GDRM4wU7SOzidJl6RTnT6z0tYIKNhcC4cAyLzlqJaqKaOVhKFSjo4nTZ2ckfOt0X11zwr68WsSycvW4J5yfxyiFKq3tR8Ib0W4RXrXJ2R/TMR229QwaRysMdoWSR7lkWUrelOCPNkqEYH0UjAA6n4P1iaZqHjRpyQQYxl7qD5aMiy2NK9KPJ5cEh5SCKcH5MrBu/iMLUO6KTsNmXQ+ONTE8+r65wIJsFpWpqs+z1k0MF7BRp7fbAiJTej2TjiiuM5GvS+AicYN5vXca+ofPZSndHzAn8w0uUY5PhwDN5SXg/+pDdk3Bhro1BJB6czuhTkPpx
ti7IKq7Uko1Ex6+rJy5bhtz3+Iw+ClKo0N7+RXgu9rNXfztUZ2V+OeP9pJBaTyt4ek2WREJAgomxNd0KYJ8Mfmqfvo4/I/7pGkoWOG3FCBnHTuYP6WT4asixGMttwRj73E9AJSYTZqjDhN3a/HjJKrQMaKcOMhtQ059VzToaT4HRt47TfQwYd6m6DFI2AfkrHBhKuW+wNxup+BXW1Zia34OPx7EtVp9naZa/VmT0vOo0VMli5Fd2DrnvLIKo5Xwsyboy1UXwhP+SWnGuL0NMHai3JaFTMWnuTly3Hl7a7PuObuGY+vxbf1xapIGBS2dljtizyA22RfDxF+PaGtJBkoeNGnJBBjGXuoCg0GvO2aHhR5PPgkPKQRLh0TLp+x9npp9Yhs5NrJTqNBZ+a7nntDI+nYCZ05eQqyWDi+cYgRX1Ed27b4fKtLO6mqKNHZmAPI6BKGHRQnWZvl71GJ/HeH5M97nUvsoPWmT27Mohq/qQ3ZNwYayNd9+zU2ZCP5VRbVG/Jr9/6DzqGWpLRqJh97Veaek9xvymBPeVzdsssS+bTaxG+/zpXZ2R/LeL6LfqrZLZIdCSTQrrHfFkkBCSIKFvTnRDmyehorGB/5X6che4mfkIGMZa5g6LQHG2WRUf1Pr0o8nlwSLlbIlw8puyq7HRS6xhOglzHpyY5r9w5+TwJTpdNrpIMJp4LoxR1ENUV2xa7jbC2144uPzbFlfdeqNLYcEfHZS/orDwvItH1YmqSnhjOgW62z7jBTlNNMm6MtZF+IT/klpxpi7Qs7Z9KCLUko1Exq/6sSRhwPGa6qXzMbpllyXx6LcL3X+fqiLRjfyni/f0xajI2gWUxLCyLhIAEESXu+4eqYZ6MjsYq9fv7cCHJQifBu+6+uwxiLHMHRaHJTZZFCxzvXRT5nCR8J3dLhCeOKV4VS5Jax2huJ9fxx5Gcl5Le40lw3nTF7yGDieeVUYoSRHPV9k74baizy9VDs0oz4h1WpXEMHR2R7tnzOtjJZlzGLgRzFeSTCPs04VSwG0byOao11STjxlgb6RfyQ27JibYouZChlmQ0KmZziSzd1zNhf8rUyNLKJaX0WgR/O1dHpNlV31iKWEfOQpRkMSwsi2Tu9NLTcX1nnoyJgTn7tzeGqavdTbyuDGJi5g7muZms0+n+ReklfCfPbG9V3xl/VTxNah0zDwui0pSa931gZ+0e2+A6gfqlK54bRilqOGlbEKfxz6H+mv0FwASsxKigXrjy6JfAgT+Yikh3815HN3In3EpUVJIii5t6CLTh7GgMe3blc1Rr8i/jxlgb6RfyQ27JibZI6sa/CqGWZDQqZq3PppDPvTf2AqkD8rHPkvn0WoT7lF+dTlDKSsRq1t9aEZllSQwryyIhIEFNtw9CHm0ln3ZO2c/XUN+qiSQL3ex7uR/tiHTkYJ6bLIsGmc3OCnZ6Cd/JM3vlmNTRvPRDah1qE4MOotJk1HupIwwi1rmV4Don7fdY8dwxSlHgtG1Yz89mDd3TBS0S74cIxhfNZvLA59Tp6MAbFZE/gbpG/n/sQxoO8DNusNNUk4wbYy4KRSRX8kPuyXpbpDfI102oJRkNi1k+Ny/L8PVs0V3Nx8llXDKfXguNuZrPr87YB5kcRyyDkDaZN7IkhpVlkRCQ0PNft+w+FAvJkM+zxI8RC9W3JAu9TfyD30mMSEcO5rnJsngwvSi9hO8sZNYwPCZdhEFApnIvZh4WRGWS0ey8DoxzS8HJ58aa32PFc88oRZ7ztsW5wXWco3s6E1pfTqYSDHLU0LionI7kNfgu81623zNZOw01CQf4GTfYaapJxo0xF4Uikiv5IffkbFuEAQi1JKNhMesgFl9T72N0V/0sH7NrZlgyr0bDmxe+//KrMza/ELEMws4yb+JKNllZFgkBKSJrFuH1xahlIRm5yinEQvU3yUJvE1WtK2XUxDh1cH/0PWkWKzqLAfAbyeBsZi8dk5rCICBTnSBkblBHG6lKyEx2XgfWOfk
4CU6txbyEPWQ09twzSlFANE/b7tXJEuqd31NlNhEqGdRxT8Unz+nIVPBd5kMCsChTb1HlLIUyUQ3oCIOdcNK980gizYN3LKiQ27DeFmndYABCLckolqkrZl0Qb09T72PECLbJDXqWzKdfdUEYwlVUmN10YSFi+Rw0RGaMJjGIZLIskkaptqOLs8yuJENG6webIAbqJtlJ+q/OSpDKqE2MiEcOpgppFiuaAwyAT1ae8Eqa2WvHJLOdepCFzS7K2KogGtG0CMfndWCdWwkudyrsseC5x3ox4ZrtE/otumVwT0O2Z6cCDFLUUDjuYN3pyOfgu8iCM7gM8r8Q9UnDKehENZAWTiOUcWPMRQFEdD4/5KZ8qS3CN1KtJRnFMnXFHFcIuOUYzRH9fRtZOl6b3rEGUfLOw9v6NqRXR2T9+7QQsXz2D5AqGG+SGEQyWRbBG4bRjgjDKviNUcJKMlQnbncC9aLaS08ydUQ1j51l2CZm6mCW0VgWAbWJgRKKYJLbNKANEZ87JnE/r4eQ2oBMDk4/90Y9P4TpeVWcc6I5Di5kUdEtzB5xPMN5UbYITliK6th2+TPcx99A7J3kgddviRkVkAjjiKqNLlrqisgOodORz96i5jrmByewMcgcSMPZQER1IvO2Oem0ZvK1Kjudn43ZAZFPZL0tau/Jfjtr0cgovzUYZPdiv1YYtvzxf4NDt91FiUFRMZJexXvUjSy+KlND3pIua+7mwTxi1cBAgMI4hpVlDalCFoOIRklbSoaMo5VNq2v3t3+C1CAGaRbqMTmpKkZHksSIfORgq9CURaSZ3VdUK1nCf9VxGuVGtkpEh+7oqoxTG2gSWNjMHRJR8BrNSfg9hvd4IbjMKV1m45h5PvJCB2GtYZoVNVBdxlB/dlVG0G9Jd9yXGbEKgp0tOVWihrxGzLnqQEUH+llBrjvObEAwIA+nnmL1DjYxEtqT1qAbYy6KHZFFYZufoLBwQOQTOdsWHQVy/ECOKpJRLFNVw6C90IcZCFrK5FGXauEwgPWmcNXkIdCK378c9P9a4gU4HKtL1JAR1N0xyphHHN+KfRubyiSGlWUNqqGfj8daPTLrGqdb9u2GyUjsyLqe4TJp5tTg7CTjyWU1IeNk16mDjak9bhu4R6friuSiQMX0KeLHHphMGud31JDxtfG+jILRfXaS2kibwnCtZLqNISkIDEbObcyDayXNVSpMPPdz3gsZDE52mhUUR/VQh5b9JyMqUb9BFZp5rItxOEXRqQKsaE/HpE51YDTuvBtonYG8nWiJRjf+HD9f1MhVluxuT1rGza4uip3F/IRVdd9mE/LRnGiL9goppfNHX4NQSzKKFaQFjkG14v6ZIJXJdAJujV3g6heSX6qw/yQS4weuRJnfJrsVXu1sn/+oGRXVl2Q3HZ1pb5NlX9WPWAablaKAjUXLuJrFoKLhsgb1uMRYTnD3G9Y19v1wx4YWk7Fbxn8wTzK2P17qBnaYZGFjN6qO1Aqwe4ggi2fqIMbWJ01zLYvI7ncxkV+UmPDdY5nDeus9CKvaY4LAH4gamqW2YTceklqXYBxNOptqww1S5wrT4Db2PawNlck02Femng+90NGgLZplBcPqzh6URxcXIHDuO3QDlwQhOH7s1Kvj3dMYujERCk8G27CMNdDO87KbNpZ6HF4kWMNHOGWgzqrHOi/IuONOdGYtP00IIt3AkNyDE23RfhUP/qio1pIIY5nGYm6sbBdP/g/zDe2CUL6ZgnPDKzSlv4P5g1+6sr6SvUvdtahMI96vbOWXiiYxrCyL+DVVc/8ScozsIBm/Gh9iMlqFQidjmbLzonOS6cG4PUSSBjRzsJ2PZdHQJDNelM6umO+80BuzY8qmYWea2obMmFkCQSQ5Ljfw2CBXarBVyR6PkedDLzAe5WVk+0gKhp2gjh0x7L59qIXWIRynmUgr6ght5e3yhdcY7D4vMpvIW3peFPz6VrM9aRk32/ooKkv5aVaJdANDcg/OtEWhsMp/DUs
x1VpSMQY7+i5gUAgPRdGXDzqb0BZ0qN7k6fEaECrd6xufsO17TyShLYpq5rcROkwjbjcWBeepzOzozMqyiH9uDteDrY3+174ghvBSGppkJM/RwMHGjaALqXLMtZuEU1EZBp6Zg0mEEns/P2FFe1E22u/LuinKDCPP5JjaWA4zs9Q2tHtZp2TctMU+KWoBg6FzwiS4QlApIcgHnd0Zeb6QomHlj2zvkzWzna6oLon6DVoLyTzCMDNtYHYWRRX88bckFF7QLVkp/986o+uGWQOqmdG8okF1m9cAMV2QceNOiKKylB+MKtMDIp/ImbbIXxSpICmmWksijxWkazAQbH3qbZCPMpfjr0hSo8kNttjXYHR7rRn55VvZuK7Yr467YCuvwTRipyCS4xPIYlhY1mBy6Z6i8GIev9DfQexsW7l1aTJcusKuDcGNqNs9yVAi0RGVYhCZOdhE6Msiwa4Qa7LC262/ji+YfGso0QkwO6aQB0iFSWoT/AIfsUg2+1YnZk7nMNgYOCfMgtuYXiVl5HnfCxgfnezGwDYM7H7LTmYD/N58AQId9I9C9dtMVS/s0lEdq6Hx27Xr7FhddeH4ZNF1GAyp4Qea+7rhdpd5+Shziowbd2IUByv5CWBJZo18Lufaoq165Dbit9Y3yrDWTJlqylSvLwZg/83i/TJI8eFzzv4H87r/rtBv/Y3vzWb6hb7/5nhndQXb7EbEsbrmuDp7AFN7O9OIo4NFIaQyi2FhWcPuSpOnI4dZCgOysejNk2FOr6tzMHaje5LDEtEZDFpmDsKl6lAZjSOJSS7DGM7vfVcfqAQ4eIknxzTKw6kTLhwL4goVl0/QSWzqWgyE4SEV5h7CRDXQeTz6ng+80L5smpu+7WKgykQpxinu1j2cfoLWQlq4OuWd7dfxUVR7/toTaAtv7XlJ40yBz5bW1YruXrKsY9lHPwo6i0GljeJgJT+e2QGRT+RsW3RrRheOkFshd6HfaL4tW1T49FWkAWozpH3Rd78iT3y7NCAMCHl/2BadgG0RIUDuwue1Rdt3/NKveywQf3FjRzL3QW2RxPmBDTK5LWyLTsC2iBAgd+Hzvgy37/hHXXDJED5bRP5BbdHzLBPyI7AtOgHbIkKA3IWPa4v+PDAmyRA+W3ryp/K8t4u/WEQ+DbZFJ2BbRAiQu/Bx34aPvN+Sodbaz7QRz3u7xPCjfuORkBeAbdEJ2BYRAuQufFpbtLUsj/uC1+cCg0oufTpPe7t+KB5CngfbohOwLSIEyF34sLZou+APjEj/hlZ4L+TXin7gF1ee9naJXf5iEfkk2BadgG0RIUDuwoe1Rb8eG5C+F1vToP8OV/2pOT+Rtme9XWoXA0I+ArZFJ2BbRAiQu/BhbdH/PfhXPdAFRX4ia896u8Tsp5UBuTlsi07AtogQIHeB34dj8PtojubflvkWnvR28ReLyAfCtugEbIsIAXIX2BbN0DfjoPePnjybJ71dP/PX6gh5KmyLCCHkafzZ/+m77J8hI4S8HGyLCCGEEEIEtkWEEEIIIQLbIkIIIYQQgW0RIYQQQojAtogQQgghRGBbRAghhBAisC0ihBBCCBHYFhFCCCGECGyLCCGEEEIEtkWEEEIIIQLbIkIIIYQQgW0RIYQQQojAtogQQgghRGBbRAghhBAisC0ihBBCCBHYFhFCCCGECGyLCCGEEEIEtkWEEEIIIQLbIkIIIYQQgW0RIYQQQojAtogQQgghRGBbRAghhBAisC0ihBBCCBHYFhFCCCGECGyLCCGEEEIEtkWEEEIIIQLbIkIIIYQQgW0RIYQQQojAtogQQgghRGBbRAghhBAisC0ihBBCCBHYFhFCCCGECGyLCCGEEEIEtkWEEEIIIQLbIkIIIYQQgW0RIYQQQojAtogQQgghRGBbRAghhBAisC0ihBBCCBHYFhFCCCGECGyLCCGEEEIEtkWEEEIIIQLbIkIIIYQQgW0RIYQQQojAtogQQgghRGBbRAghhBAisC0ihBB
CCBHYFhFCCCGECGyLCCGEEEIEtkWEEEIIIQLbIkIIIYQQgW0RIYQQQojAtogQQgghRGBbRAghhBAisC0ihBBCCBHYFhFCCCGECGyLCCGEEEIEtkWEEEIIIQLbIkIIIYQQgW0RIYQQQojAtogQQgghRGBbRAghhBAisC0ihBBCCBHYFhFCCCGECGyLCCGEEEIEtkWEEEIIIQLbIkIIIYQQgW0RIYQQQojAtogQQgghRGBbRAghhBAisC0ihBBCCBHYFhFCCCGECGyLCCGEEEIEtkWEEEIIIQLbIkIIIYQQgW0RIYQQQojAtogQQgghRGBbRAghhBAisC0ihBBCCBHYFhFCCCGECGyLCCGEEEIEtkWEEEIIIQLbIkIIIYQQgW0RIYQQQojAtogQQgghRGBbRAghhBAisC0ihBBCCBHYFhFCCCGECGyLCCGEEEIEtkWEEEIIIQLbIkIIIYQQgW0RIYQQQojAtogQQgghRGBbRAghhBAisC0ihBBCCBHYFhFCCCGECGyLCCGEEEIEtkWEEEIIIQLbIkIIIYQQgW0RIYQQQojAtogQQgghRGBbRAghhBAisC0ihBBCCBHYFhFCCCGECGyLCCGEEEIEtkWEEEIIIQLbIkIIIYQQgW0RIYQQQojAtogQQgghRGBbRAghhBAisC0ihBBCCBHYFhFCCCGECGyLCCGEEEIEtkWEEEIIIQLbIkIIIYQQgW0RIYQQQojAtogQQgghRGBbRAghhBAisC0ihBBCCBHYFhFCCCGECGyLCCGEEEIEtkWEEEIIIQLbIkIIIYQQgW0RIYQQQojAtogQQgghRGBbRAghhBAisC0ihBBCCBHYFhFCCCGECGyLCCGEEEIEtkWEEEIIIQLbIkIIIYQQgW0RIYQQQojAtogQQgghRGBbRAghhBAisC0ihBBCCBHYFhFCCCGECGyLCCGEEEIEtkWEEEIIIQLbIkIIIYQQgW0RIYQQQojAtogQQgghRGBbRAghhBAisC0ihBBCCBHYFhFCCCGECGyLCCGEEEIEtkWEEEIIIQLbIkIIIYQQgW0RIYQQQojAtogQQgghRGBbRAghhBAisC0ihBBCCBHYFhFCCCGECGyLCCGEEEKEblv0/wghhBBya9AS3Ai2RYQQQghJQUtwI9gWEUIIISQFLcGNYFtECCGEkBS0BDeCbREhhBBCUtAS3Ai2RYQQQghJQUtwI9gWEUIIISQFLcGNYFtECCGEkBS0BDeCbREhhBBCUtAS3Ai2RYQQQghJQUtwI9gWEUIIISQFLcGNYFtECCGEkBS0BDeCbREhhBBCUtAS3Ai2RYQQQghJQUtwI9gWEUIIISQFLcGNYFtECCGEkBS0BDeCbREhhBBCUtAS3Ai2RYQQQghJQUtwI9gWEUIIISQFLcGNYFtECCGEkBS0BDeCbREhhBBCUtAS3Ai2RYQQQghJQUtwI9gWEUIIISQFLcGNYFtECCGEkBS0BDeCbREhhBBCUtAS3Ai2RYQQQghJQUtwI9gWEUIIISQFLcGNYFtECCGEkBS0BDeCbREhhBBCUtAS3Ai2RYQQQghJQUtwI9gWEUIIISQFLcGNYFtECCGEkBS0BDeCbREhhBBCUtAS3Ai2RYQQQghJQUtwI9gWEUIIISQFLcGNYFtECCGEkBS0BDeCbREhhBBCUtAS3Ai2RYQQQghJQUtwI9gWEUIIISQFLcGNYFtECCGEkBS0BDeCbREhhBBCUtAS3Ai2RYQQQghJQUtwI9gWEUIIISQFLcGNYFtECCGEkBS0BDeCbREhhBBCUtAS3Ai2RYQQQghJQUtwI9gWEUIIISQFLcGNYFtECCGEkBS0BDeCbREhhBBCUtAS3IgHt0V///37J8v//fv7H2Sfwn8lsn+PieqvpAgDQt6MUr/gdBU/7B59xYnX4sWeA82p8GmPOLkAauFGPLAtkufO8cXOyDx762Dt46nRff2l+A+WXuc1t6lmt0ZmfKEjedw9enZbBNtzvrj7yz0
HcKfAtoiwLTpARpb52zRFwleu1Uu1RSa8rz4V9Rl8nYeQbRE5w/WO5IH36EPaotd7DuBNgW0RYVt0gIws0v5K0c4XfrX8ldoiFx9kFzHP4P/9318Ifxi2ReQMlzuSB96jD2mLXvA5gDMFtkWEbdEBMrKGu9qRyzfrldoiWFe+9nj5sCD8YdgWkTNc7kiwSPliE/AZbdELPgfwpcC2iLAtOkBGluj+UpFy9dXwD8YiWPtgvCtfe4RhBLzGu2Pje8pXDPkornYkj7xHl51YBbbnfGl32AAv8RzAlwLbIsK26AAZWWHSFV3+r0L/hi6CtQ8muALpNWADvMYvm9v42BaRGZOO5K+CkeGR92jmxJeB7TkPbIte4jmALwW2RYRt0QEyskB46jKuXa4Fwy1Y+2Ae+ZzDBmBbRN6PcUeCmeSaPPIeTZz4OrA9h20R+WRQCzfi623R8M8V7UD3HOENXQNrH4x35WuPMIyA13h3bHxsi8iMYUdSJzE2PPIejZ14ALA950u7wwZ4iecAvhTYFhG2RQfIyBzoA/0pjv/959+/a/8RFGysgbUPxvd+X/tPOv97jhD+MDbVbIvIjGFHUgscY8Mj79GHtEUv+BzAlwLbIsK26AAZmeJutf3b+L6puXK9XqgtemjfADPCizw7jwyPfD7DjmTQFj200IZOPADYnvO13WFEeI3nAM4U2BYRtkUHyMgUqAv+eXjKD+RwRiH7Fkz799WnwsTwlOf8AmyLyBmGHQkm8vv5wHv0nW3RE9uD13sO4E2BbRFhW3SAjMwYfZ/auUe1MD/WFtX3/AH/mlMN4iX+gGWBbRE5w6gjOa4oBJ7H3aMPaYte7zmAOwW2RYRt0QEyMgPahfZpwoTwoPv1c23R/5M/L/XvMU+X/FMpL/SP6bItImcYdSTHHASBh92jkROPALaF517VF3sONGSBbRFhW3SAjEywX6cQGWwP86D/EvrBtuiTsefItojMGHUkx2+TQfAsPqctejEQc4FtEWFbdICMTDiewPRlwlThQQ8X26KnwLaInIFt0SeDmAtsiwjbogNkZIJpi7JfDsKUANEXYVv0FNgWkTOMOhLINyB4FmyLngRiLrAtImyLDpCRCVAuQOIwXRPboleGbRE5A9uiTwYxF9gWEbZFB8jIGNOkpH92iG3Rm8C2iJxh0JGYKUieBduiJ4GYC2yLCNuiA2Rkwn///dXHKf8T1WyL3gS2ReQMbIs+GcRcYFtE2BYdICNLdO/OS7VF2sClfzP4799/cFX/8ZKv8Z/8fdv/+/cv+zfETwLHNlMTv7b+dA/hwl9+/kpb5HZezZ7L+LmU79t95agu+XySrQyOECGbg+I57dRRdBBU9lgfUo9g0JEg5gIkV9n/k6tX0Ctt0fbfbfshnL4Wuk75Qo3Mwpiy1ZHGMH8FLl2sfUUtUxkpXQtnqrvqPrIEybchZ3crHtIWdcHFESD6IqttETSOHxpn3tBwi/frXUmvOeYK9mZDdLzL4V+Dy14BqwFRoYrr9vuLrgx+/l38J+i2pwozi9j1p9qiZueVrX1chc7L2qbkP7/00iO74vPst4cF4wskleBnJ8LqyL6HWTY8BuhUJbedc9hdmNSqyUYyawwbu/mao9ls2RdjWLB+QmSs+UPKin/seMHb2NRcamZgkdC/ew5oXw0jedGajI5CWL5YB2GJWMfnQr58rbqV9m1azCV5FXBwN+K5bRFsFToP11nOtkXYt/u1kb/jydODmYKdhajGF9+AzFbnHTzE+m40b0/vQWkVC9t/mBmg2sO6dOKkwncumHz5pN5O2ob9eW7Xnn5jF32GvNBLiHEmrM5PpHU1xmePYXgDoQLf4naHw0mwTTRm0yTSPMhkjfO9ZV+MYcEmDaJqrnW89W3seMejVDMHK4TFMoP21TCaM09DiKW6kxbduDFK7G/q+FTIFq9WdyHVZWP0XuDYbsTHt0Wi5F+n41J2/+O2vbiYKKTPebIPaCLvvIOHWDZYfnvSpzM
yeYisjeWTSqMVBu9eL+Ppax9Skm7Y+5LIWfbZZqQTDWYLkIDlqhod+fAYoLOxDdqY9rWpI9GwiTTZ05gwmU7W2IQl7IsxLNiTg2hDhpnjg+xljndPerlgoC/0C9oB7Q0Zng0j1FHef2xk3vR0RwGna/7awJOdehtll75XFMl5kZcFh3YjntoW2Uux/BiNcY8dZBnQ2CiXFR/Bfn27r04h3nGIC/lzvi3o2Bt8D9kQDvG2oPeot0/PKIiDdp3DurT6ZPWePKFz3MOUt++qS8lqekes+2wPIA/G2HIudL+PN0KEPj7v3DAs6Gz8l+6ni9cS1otDMTZMGpI13vuGfTGGBZtWiDa2FC0W/9DxQaElUaZAXZjcnx1ob1wKw79og0JqQjh5sYSefWuqWTlwqlUeOJW7RF4RHNmNeGpbZG8FRF/F3UrIMqCxsT2+4XbiRk7e8XBvIS3kz/nf/iMQHrHOO2jEg8cnPieDl8cyeYWsS4vfGpONUyujN7UQvbQp6W9oz2PMGZ8hEyDyGGPWg/EePkIXX0jO8Bigs/E3z2nZp+uJN22cSPbMo0zW2BJK2BdjWOjdo36VQBkMHB8X2jC1B9AWJvdnB9obl8JwM8MYQgjjeHP3Z2uEuPBMdY+VFzNKfh6c2I14Zltk793iSzTF3WXIMqCx8bd5sFVjfME3VG0HwkL+nDd/ctvgn4DOO2jE0WNDSOQ0CjB5hOyGa0c13TgxM3+Ig5vGrdF/Dq++sKd8ts6mG2CuAElhtoczZeJrHh9bZQ3Q2ejst0UyKCJn2+glZ2bsm1XJmsF2hX0xhoXOPRpUifev7/is0JI4E6AsLJYYtDcuheHeHIg6uCM8fbE25msKYd3V6s6AFnl5cGA34pltkb1D7hZ/AXeZIcuAxkb7PonC5M5ueJchLOTP+Yild3DukeBenrW3bWPyrNu9l74zZu/jRmNnxVnvp3FruCHUJ5z0GaJCVr3GNzM9P0QoCka7afuGNwY6Azq/igRgRrBJhshg3MrD3NdMTndfjGHBRgjRxvCUXFL6jkPaZ5jcHegKk/uzA+2NS2HYc5lUK7SE8xdrY2IfdO9jBygWFguCvDw4sRvxxLbIXovkvb2Gu2uQZUBjo/1VHFFYeErckwBZIX/Oh6y8g/M3R4G6AJGif+ek/qAWy+wNsnuvnNXSmxo2XXm8w5mupmTpiT3rc+eYdjBTgGTDx4if6OL/irJNr5loQh3GBJ0Bo1+87DqRHL0xk+emrmkicOyLMSzYCCGaAnWh67gPHT++x18L/3WfA1VhRX8D2lOgLti8QbQxuS02dxcu1mpX5OM+V92THaBFXh+c2I14Yltkr8XwjT+Du5mQZUAjAzfX3ub0hnunISuMn3M1FXoT92zn72DytaJ/azz+4A/zUtkZI26eLzOXYw35r5iU8BDjp8w1DZnfNmREf2jAfzG6bqYUpCT+2so0wAs+2wWtfeOacRkSwUbSOag2voNhSNCp/JMjzhpiFGRMs1hR8kh2jElT9/01dh+IHJgqrNyjUXn0nHBLTBpt0SSRNkBVGJ7GAbQNp8IwGXNHuVd9VXXu+1NfuljxNuQP4IaLGzKhF0Fd4HdobpszTF4aHNmNeF5bZO/KyiO0hrtskGVAI2N/jTHs33BnH6LC6Dk/brv/5oZQsHtAVIhPknHLTZntISm4Z8ZkaS31Jw8Lmor9ey4+aGfJh2dz6GdsIIOUhFcX0gHQVJZ8hqTQ2jeLjklryafRugvRRozPApUc6OzUaGK7aNPppkxAxonk6NM4B2tsSBA5MFWwSYWoYs7HhwRhoeOEqwxbTb1gekBzRLQCceVkGEYMQSEpVhuWXe898jMuFd4bs8jfK7fIrplXt928dSqpNPKqyIndiqe1RYPH6Ss4s5BlQONA/3v6/5kfnw9Tg2fdug1RwV5yiIC3BaHQe60gKvhHzOfMzUHmktGPApIx1v78xXLehLN1720vU2ELd6irKekuyrngs90hrHDhQLI
BQSFm0Rg7bIX4Nvb/qraZS4C24raCbMf63flaM04kR28WGZ/6a2xIEDkwVehVR7TZK6mOE9aDeGyHqSTUCDRHxGOCGJwO48iYrT2IQPkVHRcWtIQQVfeOuAm/yHfWdieICjF7SXXnhVP4+28l++RlwDHeiKe1RfZyPfASuPsMWQY0KvF9LBRb7Y/0UH2h8253xM0m9r2zKUjfQS9uUgaxULcxK2IYEG/Elzul52oOFIXGfOfgXXSQVdypmlDcohhjb1EO9IRVnyEpxCXGs2OBMdMm8Vhx2PLxraQeQF/wi0Ypg7BgFqWhVPJvt/4auz9EDkwVbE4hUqIb85IyUlsWbVVgYgOCAVAcEcsCYuV8GIdTRhj3CNjlyxfLutIsspP5mhiZ9WJ3F8MCJORNwTHeiGe1RcN79wXcPYcsAxqgvcZK9mWavlLOYO85b6x1HpLODlbcOJw+9mYFJJVj68mrqgz3jkyU7dEfm0MgtGnvHKvdqV1lZ2dRXvLZLoJoxyyAZAOCwjBESEJ82aIeWFCI0UCsQAZslBBtGCfGmTE57q8Z5EzAVMGeGURC6wUmBIg2cidWw5lVTMhkTjQCsXAhjENqhJOqgJawfLEgEJpF6Vvj1gz3gQSjjeQYyDuBc7wRT2qL7FWf3etTdK55AzTAGQ/sDhAVICl0nvP2nU0fvLNigIlC3ehY0bw8x9T8+d+we09fMegJSWZTW7MNMCUcNu2q4VM8rIUCtIRVn3vfDwVINw51YyQL8Ziutuy2+aIOWFGI55t3eEKezrHb398WjZ0wq3InICkkB31Mz7MNxREx+RAXroRxZMwIsygO7OosJEwJad0lizBTWC0TM40lGG3MM01eGpzjjXhOW+S+sB56Ky61ReOHJYJFBbMQkkLnOY9PpH99ICqcEwNMFOpGx4omx8dU61aC3Xt2XvYIUl37+kPkZNlxWKOTb4wDzAjjM77k8+B0jWPHhLGRJf3woc7a+E7dEywpxK2szZgUiAvHlFmQuJAH1V9jt4fIgamC9RyiQpI7a/TYL3XCnjREjiMeCPpAb0T0FeLClTAOn4wwMWS4dLHyaq9gpnCYPFvdGBVUQN4VHOONeEpbZK9i9tR+AWcasgxoCCc9MPffvDOQFOyrAFGhfSyss8ZU+g52xcB4VeM5VjQhHlPZG9Zg956ly+pmD7FT2HfHsJDbx6QAkbcEkWXqSeWSz/4AIVLMaUCyAUEBEsdhLDnAjXEEHiwpxPMd5QziwrGbWZAcTf5t2F8z2r6AqYL1HKJCDKiAqcKxX+qEEWaGTDzTfENvRNwC4sKVMI6MWeHQUegUktPbwKQA0WwRpgrH3hAUIHE01Y1RIXeMvAs4xhvxjLbItS4PvhLONmQZ0BDOfN9svGNb1Cw48foX7N6zExv7uWHD3pOCYSH7vnDxHVYnW2Ub5VzyeQOSgs2j0T6yZfbIU4jJDQhOpd2BNYUY+ChUiAvHsrHf5lzMTv01o+0LmCpYzyEqxIAKs+o4nICgkFb+sSjbxwG9EdEGxIUrYRxCW4/DKwyVQh5QsqfdMVuEqULdOk22BZMbOrb7nqpt8mrgFG/EE9oid6MffSGcccgyoCFAtEr+RQBJwb4kEBXaB8Y6a5629B3sioHxqubU2I+bQ7wxelMrdu/ZkUFNgCiAyQKMzR5ir5G9xelWmCqMvYaSAFEAkwVjyjhg3TaHAcmG0c2dweQGBAtp6YA1hbhulDOIC8eysd/5beivGW1fwFTBeg5RIUuEtTr+poagkFb+sSjbxwG9EdEGxIUrYRwZc+9c8vdld+ziPKBkz9QNA6YKmZdr1W3booVkk9cFZ3gjHt8W+QsN4cO40BadvZHv0xaZ3ftfTpCMsXvnr94B1Aqd1NoXUSXj2Ao2V9XsbBmmCmOvoVRY9rlgvYKoAMmG2dZ/DwzZi2Gelg5YU4jxjGxCXDiWmQVJEvPb0F9z4sis5xAVsgOyVoff1P71GTKumA3oCVn3kADtwpUwTMY
gqBw/b80xS3d6sfJaP8BUoe56urqtZwX9Sd3kDcEJ3oiHt0X+XVp8TdZx5iHLgEbh7HXMvwggKViDEBXafayzJhOdp2z8whmvjvc8d9VtvBa93XvydZE8s5H22W0lkdTs7NGfm1Uu+SxAUjhO0LhlrFkLE3ZTs/i6YE0hxjOyCXHhWGYWJEdvojI79dfMQsJUwXoOUSEGVJhVR3XCKk6Y1LnzafUhg3bhShgmY0kgWWd06WLNFmGqULc8X93tCnZG7wmO70Y8ui3yt3nxMTmBsw9ZBjQKa3fxv79//zX/vKZZCknBGoSo0O5jnTWp6HxzjL9QjGvmPYekYKSdfQfYvSdfF8kzG2kjmT3EG5gtVLPjlCyZFS75LJiVx0KzLSQFSFbYD2UWXxesKcR4RjYhLhzLzILk6E2sZqf+mllImCpYzyEqxIAK6QFmTtjtJ0zq3B/o4ksG7cKVMGzGbG1Xms7o0sXCqJAmAXOFuh/GK2CNDbbCzugNwdndiAe3Rf4qLL4lZ3AbQJYBjcL8Iub/xOaGWQpJwRqEqNDuY501uei8g+MvFOOiecrct8D+D0dgKCy+QnbN5OuiE5TFWlOV3HsHpgtVZZySpS8F4ZLPCkQFSKzMBgPRCvsGs/i6YE0hnvDIJsSFY5lZkJyNSbHZqb9mFhKmCtZziApZydoDrPOZE3b7CUmwHugJnaqJQLtwJQybMatr8Z5culgYFdJFmCvU3TBeYV9jb6dhmnfyYuDgbsRj2yJ/lRefklO4HSDLgEYhe58O2n9c02CWQlKwBiEqtPtYZ00yOu/g+Aul8/6NvN9YfYLs3pM1VrVzwq0KBoWeeUwXqso4JT56iFIu+awY+X7ARgSJANEKWDGNrwvWFGLZjWxCXDiWmQXJ2ZgUm536a2YhYapgPYeoEAMqzPqJtGImZPs4oCd0qiYC7cKVMFzG/GNqsL5AVFi/WBgV0kWYK9S9MF4BK/qPU89T8prg2G7EY9sirFUWX5JzPLgt6v46kWKWQlKwBiEqtPt8Q1s07ouW3x+792SRVe0ccauCQaFnHtOFqjJOybe0RfYMITp2dbFAtgJWTOPrgjWFWHYjmxAXjmVmQXI2JsVmp/6aWUiYKizfo3k/kVbMhGwfB/SETtVEoF24EobPWPe/2EzKISkkhydgugAVjArpIswVatwYr4AVG93TWEwneQn0zP5/H4xGePDQtshd4+dU/kPbouGvFBXMUkgK1iBEhXaf72iLfNI9vYeyxe49WWVVO2fcqmBQ6JnHdKGqjFPiY4co5ZLPALIC5BhtuCOHbM7x161n8XXBmkIsu5FNiAvHMrMgORuTYrNTf80sJEwVrOcQFWJAhSe0Rdk2HigKnaqJQLtwJYyYsV40R84hKCSHJ2C6ABWMCukizBVq3BjPcT9MoPvCLuaTvAJ6ZOggPhKN8OCRbZG7Ak+q+0e2RYOGApilkBSsQYgK7T7f0hb1w+gG3mL37j2vwKp2DrlVwaCw/nrPUuJDhyjlks/ATGhCjUDGO5AVVjM/i68L1hTiXiObEBeOZWZBcjYmxWan/ppZSJgqWM8hKmTJu9QWQXQZmBE6VROBduFKGK3LdtJQ48W4kByegOkCVDAqpIswV6hxY1zIIuvSa4wWE0peAD0xdBAfiUZ48MC2yNX/s6r+gW1Rt504MEshKViDEBXafb6nLeo8nf0fAZdgTfSeV5A+7R5rTb2wuRZBC2YL1YNxSpbMCpd83oGwEMY+U8aZzh4Ns/i6YE0h7jWyCXHhWGYWJEefR9VfMwsJUwXrOUSFLHmzfqI6YRUhugzMCIuXCdqFK2FkLvu/QrGzr750sTAqJCfu5mvcF6p7J/+TCpgkr48eGDqIj0QjPHhcW+Rq/8xX8inswzLyERqFzi3GbOXf37//idv5AwBJwRqEqNDuY501Gem8g+Pn0Xg1+CKqdH76Ww9rI30pD9Kn3WNLoSeJXPnGsNkfen3J5x3jREmqGer8jpno7NEwi68
L1hTiXiObEBeOZWZBksT8NvTXzELCVMF6DlEhS561Wis7dQKCAiSXgRlh8TpBu3AljI7LWWeE5ZcuFkaF9NpgrpB5mUU2IemMhheWvBJ6YOggPhKN8OBhbZGr+8VH5AL2ko98hEYhv8XOX/fTNPIvAkgKVhmiQruPddakpPMOjp9H45V/UPaJf/uPXSr9HeaWsXvP3iuoFTqqmBVUYu3nzlmNmslxSlZcAVAqLPu8Y06xOHYcRLA0czbhwhIFawqx7EY2IS4cy8yCJDf5beivmYWEqYL1HKJCDKhgrWbf1IcTEBQyQ2eAGWHxRkG7cCWMfhE0nRHMpxYdVgOLMBJU4sFU4ZyXI5rOCHLy8uh5oYP4SDTCg0e1Ra7mF9+QKzyqLbK3PPyIsfyLAJKCVYeo0O7zHW3RLk++0U7g0gFZj54nB5gtQKOTCYP1YPUtttPpKVeu+FyBuOBGYUsbI0QzJvH1wZpCDHxkE+LCscwsSHKT34b+mllImCpYzyEqxIAKmBIg6jiR+3sJmBEWnzRoF66EMSwC9/TtqpculsnR7IyqzQvV3WD3/frxkO9CzwsdxEeiER48qC1yBb/4hFzCvQ2QZUCjkF4/zBXid0H+sEJSsAYhKrT7dN6szjs4fh6NV9bhugjji9i9k+9Gx+xRdWEjKYkokFodp8RNj4vuis8Vs81/VhHTO53THjGJrw/WFAbeNjYhLhzLzILk6PPb0F8zCwlTBes5RIUYUAFThWO/1InOPbkCzAiLBwrtwpUwJkVgNaF66WJZO1lgmCrU6QvV3eL+/HXuLnk99LzQQXwkGuHBY9oiW+3XL80K9naOfIRGIbt+6YsK8i8CSArWIESFdp/OU9J5B8fPY/7c1x2++BVg956Zsrrp02YV9rAxFCDyYK6w/I2RvPgdLvlcgXzjr1Fs8gR5YfE4JvH1wZpCDGdkE+LCscwsSNzOb0N/zSwkTBWs5xAVkvPJv/xTJ+z+X3yJYEVYNAXtwpUwZkWQPCgYCSoJYK6wZ8numLhplxxxQ1BYrO6M/BkjL42eFzqIj0QjPHhIW2S/ntJ79jjswzDyERqFzCHjcfPg5V8EkBSsQYgK7T7JK1bovIPj5zF/T+qaL74xdu+pKegVUl3MCRC5DbKvGDt/JHKcEpvemdNQKyz7XDnc+GvOAZMHsxhbxvENwJpCLLuRTYgLq0nOb4NZE9I5CwlTBes5RIUY0IZxwmQ2dwKSwqwoJsCKsHaeXw1jWgTGAgzY1ZmTdn53afaIYqaQu7mYjQSz9RdPh3wbel7oID4SjfDgEW2Rve3PLvbHt0WQHJg5sxSSgjUIUaHd5/ltESQbX8u73XtqyZ538kDaqGtOrDDbwNqEaGM5JekhW674XDGzR8bbKGYxtozjG4A1hejuyCbEhWPZcpLNTmZNiHQWEqYK1nOICsP8W6O5E5OTPgGMCIuWoF24EkaaMYvRhUezoksvFsZCG5k1ecyer+4UWNj4ghHyreh5oYP4SDTCgwe0Rfbirda6/CbzqZ+sAzpPSwM0Cu37NL6c+RcBJIVLz7mJtfMOjp9H45VxGRLhH37AwBXs3tMjnChjRjgcgkBo3eyYHKZktRSESz5XMLNx2EnKyt6EzlF48TC+EVhTiH6MbEJcOJbZPDZBdSbNJiGbs5AwVbCbQVRofHB5nTphPU5OutA5mwiMCOeXXAnjTBlDMvHSGj+yYaVjP43F09WdAgMb7cbkNdHzQgfxkWiEB19vi+xt6b1Dgfon79bULavfhdAoZNcPUwVIKp0vAkgK1iBEhXYfa8q8GJ13cPw8mjSbrEESKX9Z/9Rf089fzx5QFJqoXT1AtmF3GObcWhy6BbnwHJ8rhx+HJqYsLows+2W1lY+PfADWFGIwI5sQF8wySASIdhYOJqTeroDIgamC9RwiIWbORtS5R9YJe5hZXRQP5/WyoSaUxbsEbeFCGNMiMLqQeLOQVTrnNyxUdxnM5HBRIVZ3ii4vxMIlr4qeFzq
Ij0QjPPhyW+Qu0b//9/cvfnpOxN4XiDaWnieLu5uQZUCjkF0/TBXCRe48JD2DEBXafawts03nHRw/jyarJmmQ9Ag/eqCP3Xt+Ju4lDlv0HlXnavh1Qpdzu73bKPjV3Sjnks8VzBnSLGFOaQzpNr34IFoDawrxiEc2IS6YZTZ8H5ZLjF1iNgmJ6NR8BVMF6zlEyvnq6CU1eletzQvG+7SivwFt5XwY7sC2/7Jpdj0O5LAAgbB6sSBSIFPcEh8DRErHNbvJf1sE+Fgx5hsL5EXR80IH8ZFohAdfbYv8w+lHnuMW2HeguTcT3K2FLAMahWwLTBX8dPeLAJKCXQFRod3HOmsegc472HseFeOYeXpGCVfWfqfSfZX02UN02/qXEELBzoT31nrlN+/P9HIYNupwxedKmx9MeHyMPvXVhNnBmoVoDawpxLIb2YS4YJa54Ix3PprekpCw7ILar3fMFKznEAHrhC/xTnU4J/wKl5/DmjubHGjOOXaHAJwOwxyY5jEcrlE9Zr5+scyMd9MvOlndYio+P6KjQEJeHhzYR4F+CEBY+WJbFG/RCCwJtwvCVbJXNwMahfC0CPZZMBc3/iRWsxSSgjUIUaHdxzprd4GoAFGhIwbGM/Pa+uctx31ldFixs7GH6I+w/Kkmkcb0+Scx5lb/JNTfsLXztnFraaOcSz7vhMXdpAZj+7/B4nc5thgf+QCsKcSyG9mEuNAp4g3x+b/kH5wwS8xkzATEgnwpFt0jZJ0Q+i6U4ynSxotedTh5PCz8moU31jlACzTnHLYgqJwMwxzYXjA2SbnmlYsVUrQvieUbrsOp6t63sI2RbQ5tYOSlwYl9FOiHAISVr7VF4eaNwZpwJd29m+MWQ5YBjUJ2/5yZf/s70rwKZikkBWsQokK7j93FxNl53TpiYHyzD1zjcsLCN8DiQdYQV7aNCfHn3gPKyqJbCwFuXPG5gunKql5KrYXxkQ/AmkL0ZGQT4oJdtpQZt8RsEpMPcQCTXRcgmuFei64TS2UzLxoozjlMQTCjF8aRJiNEz+KbD+v9hYu1lqIN5+laeFiC0cb+XyC+OfSWyQuDE/so0A8BCCtfa4ugu8Z+D/yN7H2/dHhQW7TouVkKSaHznLf7fENb9KC+aPGVPEJc2LbZdeX59o/lmlvz8JQrPu9ETyBuWPqKqkGOj3wA1hRi2Y1sQlywy9a+V+0Ss0lMWX5mdS3GBesCRBO61RGceMylgOKcwxQEE/pFDtHcENSU8xdr9cjDshPVnZfBQSxb8rrgyD4K9EMAwsqX2qLVu6XsN8xfmHBdZ7gtIcuARiG9gKNbezyqZikkhc5z3u5jnTVxpu9gVwzMS+8f9JWvgGmSZ48YMCFOt02+dub1Ehxdc2u5hK74vAMN0FdcuBOHv+MjH4A1hVh2I5sQF9yyQWLMlFliNomp6CRgDxrDgnUBojFhq4ET80sxOOodaM45bEEwph9GPbCZ/6HkT1+s4ZJ/Zu70RlgxU1xIP3kVcGYfBfohAGHlS22RvdBz9hvmVzX3dYy7bpBlQKMQvzcUTLb8M3uYpZAUOs95u4911sSZvYMbHTEwD6V9UpI/ApIwfYQWT9KGOHm50y1nj2UshiW3TlTQFZ+Bd2Ww5yzGhUqYgzWFWHYjmxAX/LJuYv6ZKbPEbNIkLT+0XQ3DgnUBoiFxp5ETk5NuFyRAdc5hDIIhgzDqgZ0t1LMXa2Nw5CaMuHC1ui84RF4VnNlHgX4IQFj5gbbIZxmyVdx1gywDGoX4vaH07m15cfDRLYWk0HnO233sHuYdyN7BjY4YmFfMPIr2mwH/nwL1Losn6UIcbpgnffxaxr+14t3qbXfqgb3is+I8b76WLOEvHQVckOMjH4A1hej1yCbEBb+sdy6b1mHPLDGbtLnI48dqjArWBYgKvfQ1Gw2dsGlo8dF3gO6cY3cIClfCOA5sfFGgZDh5sQodB4txfNxoVi5X91Dv1KUlPwwO7aNAPwQgrPxEW2QvzNL7ZHD
XH7IMaBQ6W+QPiSjXuMxSSArWIESFdh+7hXkI0newKwYmacereAj9O/Nf/OPjs2do8SR9iP0XMn+Hhf5Obf58StLdkm+IIZd8FqAmJK5aBtn0K8dHPgBrCtGXkU2IC2FZnpeSksOeWWI2aU8gvVm7GoYF6wJEhb/5zWyPZ+jE6Pt7dtQA2nOO3SEoXAnDHNjA/dx7a8YTK2QnXSHKx0yyV38jv1Nf7+ylJT8Lju2jQD8EIKz8RFtk0nz6gri3BrIMaBR6r0LybMGdOmOWQlKwBiEqtPvYHcz7YtMGUaEjBuaVrEk7ZMnr5d7VXg52Fk8ymkn+Rm8hf7dB57f90krwKWkPbPnHVRqu+FwYH4+n91ubTf4gL0C0BtYUztiEuNBkrk0LVDByS8wmycElX+l1LcYF6wJEhU2crE/OZ+zENp+f9GJT5Hwac+wOQeFKGO7AzlyUwln99ELFiSxXq9V9+aqR1wLn9lGgHwIQVn6kLarP5uDKdnA3GbIMaBSa21oJ/h/e7LuYpZAUrEGICu0+1lnzFthtISp0xMC8MLufx4L8oTFL+jlQFk+yNdO+fNnL72mf1V57E1PiF15pigpXfN6A7sZC2SZfHYm7Mb5lsKYQrY5sQlxonQnNTM1JlZslZpM0Gf2DgqhgXYCoUMQ9XxwzJzaN5qTXjlrBijnH7hAUroQRDmz9oihn9Ztzqv7VlyvP1lp1b1y8auSlwNF9FOiHAISVL7VF15Hr8gI3ZP9hIOWfEIPofRDPC72vaUxvPDO4LYd4/Nb/Hbb/6k9hGWa+/cYwB6aCa1zx+Sw2xuTfcXhF9qw8wN2TByW6CvQf5Is56c3Ss09BdxIeFIYpopUnavFiHZw8p8p6dX/DVSNPRY/vs0A/BCCs/FBbRL7O8WtRvQcN0xtv+hy1bRH5THDIhXNfz68FQii8cxiEGFDRHwX6IQBhhW3R23L0DJ0X2PweHtsi8tLgkAtsiwh5JVDRHwX6IQBhhW3R23KmLYLk3WBbdBdwyAW2RYS8EqjojwL9EICwwrbobTE9AyQBTG686xPNtugu4JALbIsIeSVQ0R8F+iEAYYVt0dtieobsz1zbvwUD0dvBtugu4JALbIsIeSVQ0R8F+iEAYYVt0fuCkyo0f43E/RXat/37H2yL7gIOucC2iJBXAhX9UaAfAhBW2Ba9L+aXgzaOvyX7X/ixIr2/v//6sC26CzjkAtsiQl4JVPRHgX4IQFhhW/TG4KhmQPsNYVt0F3DIBbZFhLwSqOiPAv0QgLDCtuiNsT9Fu88b/wg1tkV3AYdcYFtEyCuBiv4o0A8BCCtsi94Z/5tlKcv/AtQrwrboLuCQC2yLCHklUNEfBfohAGGFbdFbM/31ovd+nNkW3QUccoFtESGvBCr6o0A/BCCssC16c/yfuw689S8VbbAtugs45ALbIkJeCVT0R4F+CEBYYVv07oR/pdvw7k0R26L7gEMusC0i5JVARX8U6IcAhBW2RR/A/m9ZOz7hX6tmW3QXcMgFtkWEvBKo6I8C/RCAsMK26DP47++/f3tz9O/f3/8+oCcihBDy0+B75aNAPwQgrLAtIoQQQkgKWoKPAv0QgLDCtogQQgghKWgJPgr0QwDCCtsiQgghhKSgJfgo0A8BCCtsiwghhBCSgpbgo0A/BCCssC0ihBBCSApago8C/RCAsMK2iBBCCCEpaAk+CvRDAMIK2yJCCCGEpKAl+CjQDwEIK2yLCCGEEJKCluCjQD8EIKywLSKEEEJIClqCjwL9EICwwraIEEIIISloCT4K9EMAwgrbIkIIIYSkoCX4KNAPAQgrbIsIIYQQkoKW4KNAPwQgrLAtIoQQQkgKWoKPAv0QgLDCtogQQgghKWgJPgr0QwDCCtsiQgghhKSgJfgo0A8BCCtsiwghhBCSgpbgo0A/BCCssC0ihBBCSApago8C/RCAsMK2iBBCCCEpaAk+CvRDAMIK2yJCCCGEpKAl+CjQDwEIK2yLCCGEEJKCluC
jQD8EIKywLSKEEEJIClqCjwL9EICwwraIEEIIISloCT4K9EMAwgrbIkIIIYSkoCX4KNAPAQgrbIsIIYQQkoKW4KNAPwQgrLAtIoQQQkgKWoKPAv0QgLDCtogQQgghKWgJPgr0QwDCCtsiQgghhKSgJfgo0A8BCCtsiwghhBCSgpbgo0A/BCCssC0ihBBCSApago8C/RCAsMK2iBBCCCEpaAk+CvRDAMIK2yJCCCGEpKAl+CjQDwEIK2yLCCGEEJKCluCjQD8EIKx02yJCCCGEkE8D/RCAsMK2iBBCCCG3Af0QgLDCtogQQgghtwH9EICwwraIEEIIIbcB/RCAsMK2iBBCCCG3Af0QgLDCtogQQgghtwH9EICwwraIEEIIIbcB/RCAsMK2iBBCCCG3Af0QgLDCtogQQgghtwH9EICwwraIEEIIIbcB/RCAsMK2iBBCCCG3Af0QgLDCtogQQgghtwH9EICwwraIEEIIIbcB/RCAsMK2iBBCCCG3Af0QgLDCtogQQgghtwH9EICwwraIEEIIIbcB/RCAsMK2iBBCCCG3Af0QgLDCtogQQgghtwH9EICwwraIEEIIIbcB/RCAsMK2iBBCCCG3Af0QgLDCtogQQgghtwH9EICwwraIEEIIIbcB/RCAsMK2iBBCCCG3Af0QgLDCtogQQgghtwH9EICwwraIEEIIIbcB/RCAsMK2iBBCCCG3Af0QgLDCtogQQgghtwH9EICwwraIEEIIIbcB/RCAsMK2iBBCCCG3Af0QgLDCtogQQgghtwH9EICwwraIEEIIIbcB/RCAsMK2iBBCCCG3Af0QgLDCtogQQgghtwH9EICwwraIEEIIIbcB/RCAsMK2iBBCCCG3Af0QgLDCtogQQgghtwH9EICwwraIEEIIIbcB/RCAsMK2iBBCCCG3Af0QgLDCtogQQgghtwH9EICwwraIEEIIIbcB/RCAsMK2iBBCCCG3Af0QgLDCtogQQgghtwH9EICwwraIEEIIIbcB/RCAsMK2iBBCCCG3Af0QgLDCtogQQgghtwH9EICwwraIEEIIIbcB/RCAsMK2iBBCCCG3Af0QgLDCtogQQgghtwH9EICwwraIEEIIIbcB/RCAsMK2iBBCCCG3Af0QgLDCtogQQgghtwH9EICwwraIEEIIIbcB/RCAsMK2iBBCCCG3Af0QgLDCtogQQgghtwH9EICwwraIEEIIIbcB/RCAsMK2iBBCCCG3Af0QgLDCtogQQgghtwH9EICwwraIEEIIIbcB/RCAsMK2iBBCCCG3Af0QgLDCtogQQgghtwH9EICwwraIEEIIIbcB/RCAsMK2iBBCCCG3Af0QgLDCtogQQgghtwH9EICwwraIEEIIIbcB/RCAsMK2iBBCCCG3Af0QgLDCtogQQgghtwH9EICwwraIEEIIIbcB/RCAsMK2iBBCCCG3Af0QgLDCtogQQgghtwH9EICwwraIEEIIIbcB/RCAsMK2iBBCCCG3Af0QgLDCtogQQgghtwH9EICwwraoy59f//vfb3x+WX7/r/AHo5wtjg0MzrJt8Gts/hGshLHAWxzZayAJ/4XB9/INJfWlkk94UH1O+SP7/EAJPzphb8v3vHfkp0E/BCCsfLEt0ufC8Ov37w8pKg3tZ7441ll5r7/y5MnS538fPOZr59uP7Pcvye07vqPfnaoD2fnJJcW26CRsixRJwzccNPlh0A8BCCuPbouEj+iMEMuLh/Lktgjn+/Svz8d87YiR7zuyo/hfvXlO+DG/v6Wk2BadhG2R8F3vHflp0A8BCCtPaYu2unrxbiLyewMfwR7Zi9+Qlff6C0+erNx49nFe+Nr58SPTtApv+Iz+mN+y8cZTS4pt0UnYFgmShY2TJ/1ne4ueXxzkgaAfAhBWntQWfccT8kiKx/gI9IG6e1u0f/U/+zQvfO2UBfgIvvfITFf0jn+eSfz+ieL+lpJ69Lc826J7cLE4S3m84RtwZ9APAQgrT2uLvuENeRwSBj6
D/Tv2xat95b3+wpO3PxMYPo3zXzs/fWTIzNv+R6J4/5NtEYbP4Qsln3K+Pq/BtuhnuVicZQnborcC/RCAsPKQtuh4Xf/8UUnhjb4wku/YJrTXZOW9/sqTJ0uff5QrYXh++MjQgr1RjQfE/R+pbtn5yZljW3QStkWKpOH0QZc1bIveCvRDAMLKg9uiwv4f7W90yeRRwOdKiePla33lvf7Sk7ctfsm/oP/DR6Y5fX5inob4/zNN/zeU1JdKPoFt0V24UpxyamyL3gr0QwDCyhPaIvnpMYX3+dIQh/H5vXh6W/Q9PKgt+j7E35/pKh7D2wcw5NElz7aI9GFb9H6gHwIQVp7RFuFyv1GhsC36Yd6zLXrnl1ACYFu0CNsi0odt0fuBfghAWHlKWwTx+9yy9/LWctu2SBbg8/ejX1/P/5p8HhIA26JF2BaRPlIdbIveCvRDAMLKc9oi/HLR23xtiLf4/F6wLfoR2Ba9NmyLTsK26AuwLXo/0A8BCCvPaYv00X2brw19jDB4L+7aFv3wkX3X1+TzkADYFi3yXefNtugdkeSxLXor0A8BCCtPaov0lr3L14ZGgcF7sfJef2Jb9MNHdtrfl0MCYFu0yHedN9uid0Ryx7borUA/BCCssC3a+OHv2K+w8l6zLXo4p/19OSQAtkWLfNd5sy16RyR3bIveCvRDAMLK97RFMopa4SYeKvuPhPz1a7XUfv9WY7+aHzosUvmUW91W6lKD2ogvVBntlmRJG3Xn9RTtVBji233J/oGdIk+277zX+z/tLobUqE506aQwrJXtVEH1rbbb9CB1MWQ3D6Pmw02sH9kOMrZ51s41EbUBeNRVjze7vJ2ojfYqtnS/7l3Ic2TY0xWikkW+oFav28AnEevHrlX1FwPQv8DCKKM7Lsxsj41pNnvosta3mv1u+g/XIYi4PC22RX2jIpVP01d0JWEV0dSP3YOVDTUPsO2TsnSKebWCyfkNqmj15NUABiEkmfLGN7MQHyBNYOmy7CmVAfkO0A8BCCtPbYswcG/wQaaz/f/+U4+EpZ+spU7s+BUi2v6/ZxUSh85mbZF4W7dLfBN5CFQNpUJrIdyvaFyE5YPfXkde2cW6XeKQ6IxuCsNa0Ss5ORbsybT+uxcgczFmN9Hx+TAWIXHoyvRLxfzc9UKswiSijWDD4s0pRn11u3pIUcPiTjLR7Oao4r0xL7qMi0W7x8p1G/oksvJBjwJ4x3Q9BoJ3MvowyaiwVPKzbA5QF4Jnkxu74V3PsmsVti/OtIIDQ6Mi2/7fxZpsvJQwQ50eHKz41avtlVPsV6swOT+/2K1dP/mQhxqSM36E7fdUrPnhxiIrH6qVpEDIc0A/BCCsPKktEvFRXTKKWqECMXLXbmP8RGzEBX4fkQysYuzQ4owvlAy3/z8KPSniEJOAoscIqBCDDX9/Cp33LmyvdpxqvWQApjGb0W5eDYa1YnvLiVsgyt6EdT5xscluoxODMKcKgUNXxiMrNIay+RhRk31Da9CaXN3uKMj28lT6xyI0ezWmYtkbb2S0LQh7eG8Txj6JpC0o55jOYbAxqL7CLKOFqJOW/HibMbqBW9C6HWtmEthGk6esgj0ToyLZtOT/D6LFpYRZMN04jGlBjG47ZbW9coqDai2MAx/lpdm8uSiVkIc9pBj2brsNyxkfuyyS7f8PLTdNngn6IQBh5TltURSHoRIqUEZ/2qsxKZWm8jbMEh33rWZ1rXO65riYMtz/s0rAhEXN+RsvolR4iDI3fOSQxO11odXMElLAdEKbncNiOCTZ7lfYoxxs3NUcduviRshu0GnfOGMxy5WujEeWG/KFmEa0Eau1kpnct1zezqTc14UhOxZja5gjZeSNDloj4ZwCE59Qpa2W1dEtMchNHk7MM7qR6RQwLcw8H9PWcFaFPnn9Ot3JfCp0S2JuVAWtYb/xSsI8Mj052H5tXz7FQ2dyftn0HnNiuHvyqotBDamxUNdjbKlzE5eRU2scE+T5oB8CEFae0xa
F4tICiFpBSUYqk189rb/qqtMdUFP6G7d/9ut3PAIynFkNniha1McLJUP3S8GYcMiEi3S/HImwulmjEEnd5Aiju70KjKIzZf/lXp3O0CVIof7OQHU2pEbMye/+izry/VvVdEsVmXAbFwshu0FHRodLGMkUCH4p8chq7n//EdP773pYS2pcJjSkPQBjJrKnNcS0vN2vPcBCb5/DmIyaJOh4nqNgYdfQgfyvnhssDOpk7lNitc2BSjAYV99KRmucw5Kfej5G9WWt4vY8kmc0dpFq7J4bhRBcTdUm0PmEqVEZq1jTsa/QacU5v/ZGyLSzuxvR+YLYyWr7zCkiNDhVVWbnp6vTKsJSXYk/6SOfM9QOBnu+1Vyxnb4OukFzaDOXNVEm+6P8kweDfghAWHlKW4QaxWgjjoVQgTIqHPXV2GnA/keVay0eS2RYGFkNniix2HUo/wsw4WhN1SUYCyrEoF5cDDfUTPPetdur5NCDxhHdfjlTbwVdYlZsgmowxFMd2BXUfPnfagA7VhPRRUGVasRBR3Y1J6ReWBNtnjeC0V2rKQ9rukZ0aMUAEtKYlrfDn9NVjLuOZjYkQYajHOWb4+O16yYKA58Oq4dIVRoBBsjksamrvt2lQ9AGtZ+gMaECVx4qGHg+Rvdo3GjsNSpxbB0XQRutMxuYGpVxYXCsSwkLQCHx9hCI3ay24UF02yWjFRVz+Dg9Pw3JRLQJ9jmZsm7+cht71CwGNVXWeTh62IOkOTSR9l2GQt2hgAnyfNAPAQgrz2iLcDEwKsg41mKoQBltmHqDDIMELUhvWETViIw2hlaDJ0osdhkKaP+tyYomxF4RERQaYRXIyLnYRiZjwW2v+x1LZehNtcfhkXmXQ7M8pAaX2GiLZNOKoiO66KIQsht1tpfLLZBpm8ClI1Orvjx04WFcdbxWCCAhjenMdpue2M+raCMu3JC1h/lZjmTsLfzR2inorJ9XCQYJc59EYcPq6CrrdwEDHbmcmbUrGc3iVB0TSbOo8XyMOnKsl6G319zYTeBKSG1gsJEEpzZGlTczKsMN65pKMNjQ8ThhEZ33q3TREYD6UvC1nQS6dIqmWmfnJ/M+lfh/5NQbxv8n6EYYHCFZ0yozp6A7xEObuXzkdFsrat23gDwe9EMAwsrj26L2N7JQAE5rI1SgjIJSU4GB9jbsRYrBmtXgiRKLXYaFcfGKitlPzMj/RGE1pNuHKKNQhgW/vcZSZbrKh6ub9Z88me3lOKRGt2vPNhNhEF1UQnZTHYPOY1BYObI08ChMIooBJGT+ntmuqdqIrgsJkaB7JxVzlJWCQWbDtFrobrDgk2jEHKisrvNHJ4POlksZzeKMOhey6QnnrXuGtanQ4ufho3dqaqMhLJDh+FhXEtag80FBZTWCTm2ntqMwc+pgen4yn6ctXIsJ6gcGNSS/s8ow2FDvwu5Tl2tOGy3yDaAfAhBWHtwWHb9B7o5aJLHmQwXKKFaIyHqXJa9HtbvbEY2Z1eCJEo3LcGNSw9GWZEhbRUg2/FWVQQxSt4/pabZXS1UooxhJGl4lvb+VsFa3cxlXDee+dyq4qITspjqGxsk0piWjujIohezP3EkVTmw3qGnQj6+7MuRIRv1tZDp6K7LumgWfRCNaDSl2ZpqDtSxlVEbRq+DqhWx6gicyiks1lLiLQbyoq9RkDUNRG0E4xBtdOVYZTRLWINOTg9Vhk5j1UwwLD2bnN6qikKAJYacQoRIDSg9t5vKGaGzkfpOngn4IQFh5SFvU4n95Py/6UDgySnW6NZ3PSu3tRVo+T632S9gUuwznNawZCctUGoT7MK4A4fLJqN3ea+Wm0vAqoyelWRucKiR7Zk6FDUJ2Ux2LzJtNVo5MRm3cIq2nP/Bu4E62SkRr27VFG+g4IFHjc4us2aPXjdyxOGR6djEcKz6JSmPAS93RDTMtc5OM5nH68riUTYc/705uvVKLzmOQB6ee9k+txRvND8Ada+68T1iLTE8OVi03WirF4ECkY6d
2puc3qiIX/JSQB/UrGI67ZYc2dXlDVHpuk+eCfghAWHlOWxTrW4SxNEMFymiiE5DJ5i5JRe6GRGVmNd0lFrsMF+6XVxMrm5FWWK9DJ8awv4za7f29zU2Nk6j79OIKa5NnInnRvOuDp6WuSnUsMm82WTiysEXFL13xriVZdWa7UaSCqmFwILa6a2XN7kCaIINMx1MfLlrxSVSaWvJm3UizllffUkZzl730UjYd/rzzPadV49zIlWcmWkJsMhoe60rCWmR6crDqSszoF05xZ3p+ukdeRWp67ZgbR/y5gxhRFuHU5Q1Rab1WcSWPinwR9EMAwsoT2iL983YOlWOwEypQRhOdgEx2drOfZ1bTXWKxy3DheomxuqMkaFskUshgGoPcxQ2Re612e39vZdCYGicRljuRhbXJM5GIfOoShSa7qY5F5s1RLxxZz6bKz3jXkqw6s11y2h5Vw+BAxF23ZM0+K4PBPun8sFJWfBKVZldduafGbyKDLG9x2YHKsaV8npT8iudjvCsySHIr8l76ghve4s608BpCbDIaHqt8niSsRaYnB6ujqJQHusuHp7gTYtwRsRoQhWyXfXHfuCfkIXU+HlJ2aHOXd6cbn1VcWXScnAP9EICw8vi2yJeHIhPxfEMFymii40k2P4COfJ5ZTXeJxS7DnisGfyH2gbsPbtB9Cb1XMmi3191wtdRUc4+GSdxXbWSPSljrtlMSkQ8pUWiiTnUsMm8iWziyXtjnvWtJVp3ZbmBZEa0O3cUyixxNI5D5U9dN5jrsG8mgKUAftN9EHd1oD38lo/p5UvIy6DBIkcGddze3w/QFI6obg54eW0OoRBmNjnUpYS0yPTlYP9rpWW5PsRu2zHawBjbaKoIDifcZwduQXSV6m3kvog67po4wOFBxZcltchb0QwDCykPaIpwcajM5x1QeKlBGEx2Pbt4BOvJ5ZjXdJRa7DJPYGkQR68SIfC4f9tXlc71r+WOy4a+kDNrtnVLH1DCJGzovHH8lFoS13ichEfnUJQpNdlOdjfJPMR7umfAXjkwGWdgi322teNeSrBLJte1aRKtDdCvNke4ziEDmYz2lWd2RuQ4u59FqcCZscrgeq0+FGFhErrt04vR7yKDDIEUGd2zd3Dqtyh//74liWgf6+WBaeDu50c4B2GwsJaxFpicHm4cvwmunuCOzHbDI5CK+YZpTIfmNjEDIQxpSPKTs0ETUYdeUQZPTuLRVIA8A/RCAsPLItmgvwLb2RBzPN1SgjCY6Ht28A3Tk88xqukssdhmu1KhYg6K4KLdKpCKDZf3cuXkFPyGDdnunpIMm/cMkFlQB+OVhbeJsIvKpSxSa7KY69t0XTPhpTN6oDLIDcxMr3rUkq0RybbsW0erg3OrlaLqPzEd306zuyFyHcc59OuMmLgIbnAgmGdU4m5Pye8igQ7M0xaWzm9tkwv0QYwHTOtDPB9PCE7pGOzmz2dClk4S1yPTkYPO8iHDpFJOMKjLbYd+8W0W7lyD8ZaBIyEPqWDyk7NBE1GHXlEGbGREfZKkjXwb9EICw8tC2aK/NpvJEGs83VKCMJjoe3TxnN+QGO9FqukssdhlGSxnqln4+PopUrZmPG6qeXFY/IYN2e6ekA38/N4ZJFHThjv3PrbA2cTYRLbyWIbutzv7jrywm/IUjk0F2YG5ixbuWZJVIrm3XIlodjsWDHE33kfno7rBSZK7DvpEMmiT4dDabqK87R/XJcJJRXduclN9DBh2mRyG4dHZz20z4yBRM60A/H0wLb2NgtJMzmw1dPUlYi0xPDjbPiwiXTjHJqCKzHeoinxb3S0b+ngx/ySjkIXUsHlJ2aCLqsJuTQZYZ8nzQDwEIK49ti1BVzfUSYSyAUIEymuh4dHMMOojKzGq6Syx2GS4VsWjKQrEBE+WjLi+f4nUON6/gJ2TQbu+UdODv58YwiUCX7hzehLWJs4lo4bUM2W10dN+ACX/hyGSQHZibWPGuJVklkmvbtYjWYH9hlKPpPjI
f3R1WisxNfBKdJgk+nckm6u3O7rUMJhnVlY1Xfg8ZzLI5xKWzm9swoWFHMK0D/XwwLbyx0U7ObDaWEtYi05ODzfMiwqVTTDKqyOz8/NTKjrMWGiNIE0IeUsfiIWWHlogaRGfgC3ki6IcAhJUHt0W4tPGsU2GoQBlNdDyTyySIysxqukssdhlGSyliTjTFQzgo0vJBDYus0I3CT8ig3d4p6aC5jMMkVtyvzFd3wtrE2UTkU5coNNkNOiijLeBfv/W/+3Qkk8LCkckgOzA3seJdS7JKJNe2axGtzNjBOEfTfWQ+7jCsFJkb+9TR8enMj04dVuC2fM42NBO6rDkpv4cMJp6Pcens5tZPqA8b5XyK0E3L5ybV08IbG+2EarOh+pOEtcj05GDzvIhw6RTjyorMrpxf/oYp9reb+7ZCHlLH4iFlhyaiicsrOuRJoB8CEFYe3BahRGIhiSwWQKhAGU10PJPLJIjKzGq6Syx2GS4VsfpVPtUPGyItzsqHw65q+0slqFd7eDJot3c56JgaJtFxPCv7tmGt205JRAuvZciu10ER2UhkbMJfODIZJGGr1m5rxbuWZJVIrm3XIlrDYpvkSPcZRCDzcYdhpcjc0Keejnemu0msPv0sHx02o504/R4ymHg+xh1bN7e6K7R0YP8sizMin5vgZoU3MdoJ1WZjKWEtMj052OAKEOG1U9yR2dXza9+wg9oadY2FPKQhxUPKDk1EE5dXdMiTQD8EIKw8ui3aL66vJBHFAggVKKOJjmf2hhREZWY13SVal+FaEYvqtlJMVAv7oPy/yU83Cu+VDNrt3b3tmBomMbA/KruRsDZ5JhKR9yNRaFz1OrqpCzZKFo6sF/Z571qSVV/YrqVn7GCSo2kEMh/rabjt3KdelWani4HHV19P0cbWidMvXfF8jIugm1u7jyq5k3ZGcp8mxzYzunCsSwlrkenJwQZXQM+ydWQS9sy5QHzDLHtjhGFD2CoNKXqbeb/isqg0OSXfAvohAGHl4W2RHnaoCJGMLuuGjCY6AZnsXiZBVGZW011isctwrYjF3qYq2al3qkg3odqFbEPHiWEv96OKu7cdU+MkRtTIrh7WJs9EIvKpSxSiitfRgT9XEZnIFo5MVcK+G96fFe9aklVf2K5F1TDIUIV+jjSCtl4q6fywUqY+bYhKpwD3oIebqN86u5LRTpx+jxXPx9gt+7m1cvns93RG8uAmhSezA6MLx7qUsBaZ7qzaNw+ugDxQr93NqKKqGKyg9vIV6k8vxSEPaUjxkLJDW3FZVLpBk6eCfghAWHl8W4SSdLJWshEqUEYTnYBMjmtPNGZW011iscswWsrZ7wT+D4gU/2fvUBk3u+/71w39qKJb7fdWBo2pcRIbdu+FsNZvJyQin7pEocmu05E9Q6gyb2QLR6Y2baYVXbr7s+JdS7LqC9u19IxVpjmSweDQZTrWU5rVnalPG6ISrQZfhptgF/NxklH53Fjze6x4PkYt7McmgzYCe2P1sz9nZyT3aVx4U6MrxyqfJwlrkenJwQZXQB7o0inu9Ez00RUYBGSqZyzkIQ0pHlJ2aCsui0qTU/ItoB8CEFYe3xZB5mpJJRjshAqU0fBCN6RF6xGNmdV0l1jsMlwsYtFVC+Zm6FA2sz53olCn6nIZtdv7xbommkrD66MmMQhrE18TkU+dKoQXImTXGZE9g77Mm/DTmIJRGTUpU6W6NHG/PfuGbJWILm2XIGqDapvmKM26QabjBuNKkcmBTxupSsjmeBP1Wz/Lx2ZDn1G1FhMa9pDR2PMx/tg6h6ibapzqpHysuGWqMElVYGp05ViXEtYg0xNvO2kR6eQUdWm3WmX2zPmpPQwCMtXbKuQhDSkeUnpoIhu7vKBCngX6IQBh5QltEUrLFlN66YJQRrFIJrdVZvvTG6Iws5ruEotdhotFLAZ/i4WQhV9qBxLBPxCVIJVRu72/t/nrMkliRIzsO4W1yTORiHzq0lcjCJ0R+Rz0RWbCXzky1Qm
GYo4S9zsuW7JV17dLCMsasmmR7TnSEPqnLrOxnsaVMvNpQzSiVREeMS9sAgsrGc2dCnsseD5GDewhdHJrpbpAP+94IzKINsaFNze6cKy6YJKwBpmeHGxwZWflFCfVmvs8QBZEd4HYarwEaeEE5XhI6aEtuCwaHSfJk0E/BCCsPKMtQo2bIs+qSwvwUJLR8EK3zGtPFGZW011isctwsYjFr9/yv5AUZCxmvcdZduDTdHu/FpkfJ3qG6O87hdQkriaikDoZedfhaFVxRuSzT5G6YWwEv5SVI8POGHWSHw01pKtEdmW7DNHrH5rMDnOkw24MMhuczbN6ILP96Q1RCFY15EM43kRmd2XRDOZCRjH0GdUtzB5x3FKW9I8lHFt6iropEq4a+hkET1XdBweV3qHNjS4c61rCIjId7MaDTbOyIeLxKfrktcjswL2ImItpUHTn1kvFpaoTklo4fI1jRYRDl0Uhd5I8G/RDAMLKM9qiveYPcbwEG7iIh0xG0VKo0gaZjmW7LaoSmZ9ZXSp+Ga4WseiW/3GXpQgEvxWy44RNYuMYBNeT82gSHfnjf+yrjzukKslUIgqpUxt2E0R8yJwRHehnBTGYuJJdm32xzCdNRGbhkqGGdNXl7TJUMRz45hckOq2flZgj5Nhttcn2sUwG87CBQcvEpw1R8BqNH36TherzG4qoyajTQSpMIFPPdUn3XHR9nW5i2ghbyMiGhjXHojDc2FW6hdfM5kZDnCHjSwmL6Lxb1CQhJKmycopZRjfZPp6d36iKfvt/78MnIxJm05Cc9YKMg3NTlzvLyPeAfghAWHlKWwSxKad4M6CwAcHShW7RCnXLxHQVNNOFaFW92W+R/l9T/DKMlnroDhvJjdzAeAdRGF1oGomM2+3V9arX5OP48a4QtJTJYyO1UA2EVIXtCokopC6Gd/hUVZwRHRwx6HjDhK+yyZHBe7OsyU/mfmIokq66vF0KkmRUNW8qUDuHYR1vNKKYDnyWOeOYoHtikDD2aUOmN7uHBlLQHAoG4+qDrnGzzWgjSUt+4vmePh21xGPDpsZc3CAMD6eqqHGpnmG38HR6ZFRG42NdS1gACsODjUmqqP3hKU6qtU2WPz8/p+ZhvRg2G6mhxElFpzHohKTmjacyxiL5GZvCxOXOWZHvAf0QgLDynLaoKQncA/2nav7oD48IFSijaCnotByGZa/687r2nWUwswojxbktIGjH4pfhahHjkgfXd2nz7O1e608rrj+QbA+iIIJ2e1U9FOsWItJ0qEymE7C3XWAMhlTF7TYSUUwdjMp/0f3R6FRUVbwRGWzDMoa66Jvwl45s18I/g/Rnf/9tGhP3E0ORdNXl7XJq1kS7FgWsYTTKEQyEo0VU8tl6VlAVDDLGPlWvikYZtgobbpPdKeeiDISFjCKl3obKZBrsK3PP96GOWnT+8Gs3h02ruUMDEukj4Hc0IsMam8zqGeKIWuZGZTQ5VliZJMwj04XBwQZXDlZOMWQ0VOvk/HZlt1Qdwd7e7ZigA12LQSek5nXQRWWLElrVhh/jy9J3hTwT9EMAwsqT2qK9GDHcFS1/VAfzaxc6Yb9ynlqaMppaRQEDe5+O4pfhchGLtrs7BUjDNdto07Ph1ETSbt/cWx9KISY60C6w9kKqmu1SUUzdHvfBLzVcVbyR5kx/qciGv3JkmySLzmUxcT8zFEhXXd6uQ2asWjufI2Wfd4MdXYBBytCn5KAVt4/bJLNnszPPaGYjK/mh53os/YNpj21f4bAKzX5/VHToNEe4uSMq/cKbGhXh7FgbK7M3Yu1g+7W9coqJV04nm97nszn40Sa53dmgljDohKQmzSH5PQ7rI5c3wpB8J+iHAISVJ7VFe6UcM+EZ2f6DJ9xEGUVLoUozkro3VuJYaK2KYEfVY/HLMFrqghsRLhSkGFnaKMyvVxdUhsFBe2/DXSxL5IPOJrRbG3MhVckzkYhi6ppncZsSSVUJRlp1icGFLzM7+ZEVgqkNP5+4nxt
ypKsKl7br0RozK1dypLtZ6noZxXpSmxjkDH2C1VhR2YlgMK6+Qrthcy5BpVPyI893LzBsSI6tddzf2DBfJsWKUYomdpV+4U2NqhiDHZ/xjbWEWWR6crBJkioLp4j1Fmdr5fwOjqXNupgeS0hVGpJuZv33ng+2drZkPHKGPA/0QwDCyrPaor0ijiqwlau/pCkfZa4go+mFzkjeiopKMNhprdrvbdR7LH4ZLhcxLgpGOyptH4SCv1qaIYNKMTjQVe7eukTLjHyUuZywNaRCSFWyXdcDF4HJr/5atyyrKtGIi0EkxyewcGRCaMn0V7QPEvc7hizpKuHKdn28Me/RQo6OX7kXbFGpAIMd3Q6DHiOfRLJZtTq+W2g3cS42Lk0zWnCpkHn5KHOGgeeYwqghPbbgeFMudjtZKgucEasi60WlsWSYGBX5/FgXE3Yg05ODTZO0s3CKg2pVvAmfJrfUZyBs3XFQCalKQ0peB7O7z8rIZZG09U6+A/RDAMLK09qivVJMkaDqf/1GcUjJ6McNncNgJ1RpD/xpJfnHxCECKsZgJ7Oqzm0GMG6KX4brRSzqzQ3MpcBEAYlBZzA40JTGe6uGqhkJBZ9zuluHVCXbJaLk3dj01BT+tXfVqSqtkXgeRT+EPz2ynd/6Rza2hIREbSTu9w1V0lU7p7cbcRhrrS3kaNPRLePJqhCDnXDaPfo+qbh8gk6Sg3aTYeEXRhlVFku+77mmEoOGzrHNHN+d2heWYdxXPXIqnSyAodEytXSsJ9+IMjs72Fltz0+xW607/fMbH8bC1oqqYdAJKX0d4qlU+i6rGAPyvaAfAhBWvtgWEULIzju/9NtX4LgfuTPvfLCENKAfAhBW2BYRQh7EO397bv9Rj0+k4Z0PlpAG9EMAwgrbIkLIg3jjb88//Nof8MYHS0gL+iEAYYVtESHkQbzxt+ev0R+LuT1vfLCEtKAfAhBW2BYRQh7E+357/ua3/oj3PVhCEtAPAQgrbIsIIQ/ibb89y98uwkeS8LYHS0gG+iEAYYVtESHkQbztt+fmNn8LbcDbHiwhGeiHAIQVtkWEkAfxrt+ef9gVjXnXgyUkBf0QgLDCtogQ8iDe9tvz+IfPScbbHiwhGeiHAIQVtkWEkAfBb88PhQdLPgr0QwDCCtsiQsiD4Lfnh8KDJR8F+iEAYYVtESGEEEJuA/ohAGGFbREhhBBCbgP6IQBhhW0RIYQQQm4D+iEAYYVtESGEEEJuA/ohAGGFbREhhBBCbgP6IQBhhW0RIYQQQm4D+iEAYYVtESGEEEJuA/ohAGGFbREhhBBCbgP6IQBhhW0RIYQQQm4D+iEAYYVtESGEEEJuA/ohAGGFbREhhBBCbgP6IQBhhW0RIYQQQm4D+iEAYYVtESGEEEJuA/ohAGGFbREhhBBCbgP6IQBhhW0RIYQQQm4D+iEAYYVtESGEEEJuA/ohAGGFbREhhBBCbgP6IQBhhW0RIYQQQm4D+iEAYYVtESGEEEJuA/ohAGHlJdqiP7/+97/f+Pwk/vyvYDb5/b///fqDz49hC2IDg4/jJ6P7zMxuFbjx2Bp0PH0DQgh5R9APAQgrr9AW6fP9C6PnENsiGT72K4Nt0bNgW3SJR2/w57ccxK9wU3//UjH7L0LIe4B+CEBYeYW2qDyqG099VkNbpN8Yj23F2BY9C7ZFl3jsBuWXdIE1iZu08dz/riGEkEeBfghAWHmBtmh/WJ/6rIa2SEYbj/xSuvDl/ef3799P7QYfx5u1RW+Q2fdqi/QCKcbk0SuxLSKEvAnohwCElRdoi/YH9zvbov01f+SX0oUv7/LFZf680yvzZm3RG2T2rdoi2xWZkzBd0bsUMiHk9qAfAhBWXqgteuqz2mmLMHwIF768ywK2RVM+M7Pv1Bbhjv76463hGr3Lr3gSQkgB/RCAsPICbRGe7+f+Gnxoi17kj1yXBWyLprAtusQDN9ATiFcUzRKbIkLIW4F+CEBYeYW2SJ7XJ3+HxbaoPPQ//hf
0xSm2RVM+M7Nv1Bbp7Wn+w0XPhV0RIeS9QD8EIKy8RFv0DTRt0eNhW/Qs2BZd4nEbdCyJlH/SmhDyZqAfAhBW2BY9DLZFz4Jt0SUe3RZhcCDSN6lfQgjZQT8EIKywLXoY57+85duGbdGUz8zsG7VFcgDNLwvpnXpiAIQQ8gzQDwEIK2yLHgbbomfBtugSbIsIISQB/RCAsMK26GGc//KWFWyLpnxmZt+oLRJDTVv09AAIIeQZoB8CEFbYFj2M81/esoBt0ZTPzCzbIkII+QnQDwEIK2yLHgbbomfBtugSbIsIISQB/RCAsPKYtui3/tva//vV/LxbeToh3P+p7aiUtSx/fuuj++tX/uUGW/8b/MtX9R/8LhbiHp1v2i2Q3Mccp52brI46g1t4Kja4r5182TKIffMMgopI5dMfzXA3xSvRVdKvyJD0lVrYWdg7j3Ga2aKh0l7cB8fivZhksPO1sk9Tlh98v6lXIxgE7AY9J0TFR+XzLR8Du2mPc28pNXAqahBCyFNBPwQgrDyiLfJvpH/kZE4eTH0mFf+DFNtH3/xr3Rvx1d6eU8wo6bNqndq+/5baIh9I86AHnJObcmLSO2oi9DspR5jdZWvs/Y4SfmilyLb/d94nP9hyITqLbhnMZG2RjL6+dzfGcWaD7ba0PKJTPlSrxlm/k49iJVRd70XOOXPwKvB7CCLvRHFsYF31ZS2iuN7mWz4GvMEd466f9m7LXNGtJzE5BEIIeSzohwCEla+3RfrdZ3Cv3P4IxnfUPpVNW9Q8us6k/2IrtN+sUefXQlvUBGJdamkiakwOwmimNvbZcfRT2vS034NtsDHUeXQe1Q/nEJIuOtuosY35nfnegxjj4oJJ4Dg5EdHY/v9YVdWbanGntBKqzhlJ43qsl7YeVY5BZN8gZss6IYJYYDbf8jGQuLpRvVtLzaEVtyeEkKeCfghAWPlyW9R8z2zEp/5XomV0wrdnZtK8nNmL7F76jeZhBnEPDIQskMGDnakXML35MAwDAotOjpfNydLTfA/+aRPkMziNLqLbpsdQk75SCxvzvYcxYmyp+cvqYpRcUdh/tUrAROqkCWMl1JCylXrxidoQaS8CbDA8axlHA+qJfs5ynTu7n/NaaoxXx60khJBvAP0QgLDy1bYIb6D+MY0/+2t5vIL6qopYdP7U3/04dMK3Jx5M/cV+/DmQQ7luKKLE2sb+5P6Wf/B7/zMoG8cDrCIMCruOs9v/ynRuHEEZkxgjM1BwBkPcisiGy4bsjiB7cNNkR8YqVt/3FTqtzKOLqIrZpxACVB2x3a2Flb13UTfGPLO7eF+4kFyZNy5UJ6qTZXCx7FVSxzKqJqNvOo4RBRMBrCn/Ayfgp1khw5gBVcOgrJNx0NpDirsvpebXHm4hRkUIIU8F/RCAsPLFtkhfRvObWPEFVYWNQ4Qvpyioj6OM7Dv6q11sXtLm3a3fMIdo/zo4loWXv3VKPMfHFoTV+GWWyA7Gnu5o3QxxK/NlQ9Tmoa5jY0/GhZiLELqTtNFFdElwMwS4Ugsre6tkEGPceEekVjxLrkxXtwsqV9EXy151qo3xwWM1RjtxjWfFiTBUdGsMCjJu9gkBKGupwZ8rV5pzIoSQZ4J+CEBY+VpbFJ+8gojqq7g/zVYHb3PV0fH+OOrIPbZmIJNhVkRmA7zMGAnY8niA48svw2BXf1EhJVHHQ4/RxtbNOQWZtl8BPu6d6bIxm1GnrMnAYEOGG3YPlWCwoeNxdAHdxi1pAlyphaW9ZzF2M1twpmWlr1+LLBDwC0y6WM37ZSKqtldCjSkbH3zifFSJrDgho5gA3QuDgoybNMUACuup2bTEc+SUEEK+C/RDAMLK19oieT/Dc6kPIwb7I+ifbhXWdbpiV9FJDCL6YIcvgiCML78QF4aXX4fNw98jU8e2GCU0gfm4ewzzsYAsPzaRYXBdt6g6V6JTE+EbLgSoOiFgFdbNLmU2xphnVoXBRdkvKB7IikJSTN5HWMdgKdQ0ZQa
dxyBNgiYLg5bUiWBHBiGU1q6Mo1YawHJqEnuEEPItoB8CEFa+1Bbpexe/VORl3B9LfQTjCyjCquPNZA9rRTTjZHjpdcvgVXQ1vPwyWn+nRd1+bxSaL5OAumC+RfL0RZplJwn5FGvRnMiqjoxORpd9RcYAV2pBRxjszDIbY8wzm1oRze7By4oNH1d+bmJ+V1wJNU2ZIRy8jLy6SPr1s5zvqNJkSsZNlpIAllMzSDohhDwX9EMAwsqX2qL4baTI07i/jPnb76X+Lc2NKrqu+SLw5mSQfv2Zlf7l79jtkas3XyYRmTer8u+QhjWtLuosBqPvwV14KTp/BCAEmOp46aW992UYbGSZVVncXW3jc4MsaRa5bFXE/r7lQqgdHYPM1yDamGLUkZETux0ZpOVgDcu4iTixv5yaVokQQr4J9EMAwsqX2iJ53+xDLcgjuL96+dPsn3g/0jc5/7Zo3mvFGfDWdqLUW+rY7ZGrT43IvHEs97RhTauLngAGK9+Dl6JLzzkEuFALFzMbYkwzG3WA2M6rDclqsiXC5kBkzzNln+sYZP7YR4ZWX1wftBcrTshgWA4FGTc7JfZFspKaUdyEEPJU0A8BCCtPaItUjM+dt1+k+0OZfVvkz31vRuTYcuXLIL78Mkh3TMnVp1/eMm/SFX3qsKbVRfOBQcd357p8PhtdmvUQ4EIt2M8H08yGGNPMRh0g4l52ZUnjsQjbJSLG54VQezoHMn/so/rHWIMcrB85sTsqn4flUJBxcyyJfZGspKYxRggh3wX6IQBh5SttkT5wHZzO+GkOX2L6KGcvZ7ePsO+4fo47xqXu5e/azVH1xr3myyQi82bZ4raidf1rJJyAjKI16/q16NJzDgHOa+FqZqPlLLMi6tA7A53FYEc36+B0RqF2dQ5k3iRDxoc3kpVRXaw4IZ9H5SDIuNmqtb+emuXLRgghjwb9EICw8oJtkQ4F/Su8le6LaneRj/ZNF+IXpXv5T77UHfXel3f5x0t1rmC+XLIv74PusjX++H8ztZ6AjEbfgyejA/YEKiHAVMcd2NnMdmJMMyuiDr0zkMmYLHWyg9MZhdrV6R98WCCjnueFzgYum/J5VA6CjJsSbO2rpIPTacMmhJBvAv0QgLDyQ22Re3jjl5iOgf1ZLt0X1U7IR/umC3EP58DJl1rVmy+k5sukYL+2BfPlkn15g9GyBdwPZhZqcDIafQ/+/9n7ty25cSZN1x3jV2uu0upZ6qzs+7/XSZh9ILExECDDI8LD/X0OMh0g9gRBk0sKXZndIVzCZoKDZS5avtL3yRzDlbWsgfAebOxiu1hdv6WqzNlUB2Xm+6WurkRoZRD2+Ww7GEt3W7Bv33MGqjLdqADgqygeEmXuPissymfoytHcvcTqf3Hp+MpoeKKWF+xj97po+6gGcPGk9uLdq7R7mcz+fbPo5Z1Mqk35+Gr75Cx19h5cnl0lXMJmgoNlLlpe7/t0juHKWtbA6NbbxXaxoq6zK9u+LzO78X5ZNSzRb57CyiDs89l2MJZuSwXtf2BpAOCrKB4SZe4+HhYpMbByNAcvsSYwUu7wRC0v2MduVN8RFlVzyIqXyyAsmlWb8FZb++Qs1bZWDn11drVwCZsJDpa5aHm178kcw5UNsqasTrtYPkglBuZTDcr41UbZezmthVGsDMI+n20HY+luC/btLwxqNCoA+CqKh0SZu4+HRecH3MrRHL3EtprFW0In8rDD8oJ97I7mto9qABdPai/evWGbl8n+6v7167f/0xGesosmnPe82rl91VL1VL+ZnKXO3oNrs2uFS9hMcLDMRcuLfc/mGK6sZS0vowvrDKZRmU+1K7Ny4y3H61tT59NZGYR9PtsOxtJdZ337H1gaAPgqiodEmbtvCossNx+8YXhg9tDIj2RvLCjoxbwX+1ie6abtozr5h+3GBsXrl0l+yxXFLF28XKJ5L1Q7pQUr/kRWcwcsdfYeXJpdp+nFNRMMy1Q3bK3v6RzDlbWs1WWUsM5gGpX
5VNsySze+mJd9ssyhlUHY57PtYCzdrV3f/geWBgC+iuIhUebuI2FR9PJpnR3N+Zw9aycHRpYYFizP8e5MN23VqtTKRAqD4nXHnqpeJW1O1M5CtTPe5NmrylJta+XQl2bXCe9z09R8Lyz1PZ9j2M5kAiGr0i7WYJCVlW1fl1m78ZaVJmGV25E1VgZRJbJuqSzd9da3/4GlAYCvonhIlLn7SFjkp+WdQ9CPz3zOnh+mfkbb1bpaocz38nGPRx/VyT9sNzYoXjXp867nZFlFtWDeK9XOWNn63d/cAUu1rZVDX5ldL7zPzQTne2Gpb0uczjFaWZVRYpFV6YZjuXXjrflUmzKemN74vY79/3wMg0HUPVminWF3ry3drUPQvuXcWRoA+DKKh0SZu4+HRedvmpWjOXqJFYqr9rHvsHrd1G1nbR/TN+2ZuHjVpCWaF4ldL/KCea9UO+Et1qvd3AFLta1VQ7fP57Prhfe5meDCXrDP530vzDFa2bqfRValW3rLHS9FsjDVuszijfeJ/fK6sz2xMghLnG6HxNJdd0H7lnNnaQDgyygeEmXuPhQWLZxwcZH63I1eYoXi6qBDb04N5DdHre2jHoG3OxxByyufTsoSTYN2vRhYMO+Vaie8RSWkWTNLnb4HF2bXC1ewmeDCXljoe2GO0cpeWscsrhLPo7Iw1bqMXVoYsCpFxTsrg7BEs5pNkY2lV9Yh7rKyUAQAPpPiIVHm7kNhUXyoVsJD0DP3YzZ8iR3sqrfgJbsO69w6JW0f9ck/aHfEx98OuGrSPk9ec8G8+5y+2gkfmBLS3AFLta1VQ1+YXS+8h01mMxLnmXlAC317ESWkbTkcTdz2KavRL71lj9di0w7IeObeXFXGPjdjs7ymd83M/qusoZVBhLe1y7R0tw5R+5Z1fWkA4OsoHhJl7j4WFvkRd/amCQ9Byzsyw5fYzq+qcNicn+J7fU/Wh7g3UvTRnPxNExNqrR6GN5GbtM91e924gnn3OV21VKt7Q4kvjxKuHaul2vrVaizMLmCXzxd9YS8s9L0wR2W0t9MyT6bQswr9YvsQ2uZLC1Oty9jn8xvvtBib0SbYXR6E9Pfa0l1/UV3Pu7w0m/RTy8sfaQ8An0TxkChz97GwqD1k3Xas7jl+CNYl2vO+eYn9rg9HL61E9Nr0LopD29JVGVUrjuuq1bjdLa9KF7ou93dJbtJLKGFUoKzUZSxU85FWlQp2sXwp5ZnvM7FUW71ejfnsAl4i6vq4sZ6uFtWrHX0t9G2J0zkOZhm0bZXbcoeoQmL57e7YxrnnrE9VRTzhn52m3faeJ3t2L2RlEGpOKdOu98bS3TpUE8gsr83sl6YpsA+kyweAR1M8JMrcfTAs0mFWnph27O0ZOprL15iOXaU23kgukWoU7Xnp/bTsT081d5aTB1GMwssosVGRdpj63OqmffxUbmV4e0eBfQjlUnmOf7YfS7hQTekyLCj4xWO19mHtWZYqB5HUqzGfXaC9MUedfah5Mu0il80u9O2pszlGK5uoZFHOKxcZNSveLtamG6Umt2esTNXLqG9PHA3mBvrelR8Nq7EyCOUEHZeFLN11WE0gW1yabsnV73xWAPBBiodEmbsPhkX5FPyP/7Th4+cv5mPvOGX9J/d2BTbehg7vfKxaMv9zoMVhmet7c/u/F1o0t785yjb83yQ/XhDejBJJ3W4eZvFGqeRuy9KeZ5c3ltjGlQqUQyjPfe8kDTO9nq0pyzmrlhKbwcA0Lvu+7Y+/8z3LGk8s1b58fCBKrMwu4G34ov/R0C1nH2putm54sw9uM+9bJU7mGK5sog5/eeP75hm+i4dXH7Ht60FbYkum9Gi/mNx00dLAyiCKmaSEF/Fyft1YejCSdhxrS9PWyvM/22EA8BCKh0SZu4+GRfspWNtPveNorpXHojeht2fUXnUghy3Wp2zfxi9/Ix7BRHfy51dmZfi+DEr/8V51vR/DL88qm6zL2JVpNcusmyl14/rjWfsCWWZbu12
N6ewiVqDULvrKXljpuyvRzjFcWdM3vhmt5SgcSLr7ZPYRrEy1jg+6BoP94uzqyaB31sGvfqT1evdD7e+1pbse6wnslpamq2W5GyUB4NMoHhJl7j4cFkWnYHGA6hBsXkf1nx7yFva3Z/fqas7jvsPuT2q2RbbrNpCzsCh6P+hKpJ3RlmUf/OqmKZC6Tv+vJlN3ad3Nqun6MZNaM/O0MtbJPhXPViLrVmM6u0B7k7chWs4+1JW9sJn2PZ3jJlhZ0zSetP0X7Hq7WK7fhWXBlamqjFLtyML9YrzeaAMUrKCiq0I733qp0iC9hi4nlu5G0kxgt7Q0Su00/XixAeCBFA+JMncfD4vC19QuH4JVmeZI92tHZvM26c/d+iD33ytrlE3YdatzlPTrSsj+myombLZQzsinbB/tmqkKWM7xKSt6zMs2qabL48GVM7cmrY99ES2/ffn0qzGdXaRa9FSpXnSf7PleSOZ9T+aYBCvryrqb07tsJdrFyqppNL2sTDWXyappW87xqeT1lDhjBbf61fM0W2+7bh/tmrN0N5J2AoeVpWmoStQaADyU4iFR5u4RYdF2pulPEKR//FtZchyC+c8t9OGGn4hV9m//gxX5T4F0ig6V01ETewMpdRT26krs/vz28Z40W8gzymVtHvps8p8RKQu0L5fcRjHP82r+kjt7fbRNpuRePl3qBmEVmtWYzi6i26J/At7r7Eu5shfcvO/TObq2yO7YXP21mhdTolfswmYiK1M9ymQr+0U3a7BwFWvN2p+vd7qcFsTTVto/Gr+qxK6fwGFlaWppX8/uBwA8gOIhUebuMWHR2NnRiZu2FdWnH+WN9sInTtVCxB95/wHgKSgeEmXuCIt+nu3FuPJlwdMhLHoE/95HCQDAVYqHRJk7wqKfZ3sx/sgFJSx6hM9rGQDeguIhUeaOsOjH+fNTvywgLHoAviwCgI9RPCTK3BEW/Tg/dj0Jix7AGv6Rv4cKAM9B8ZAoc0dY9NOkv7Cjjz8MYdHHecNKAACuUzwkytwRFv0w24L+1N9CISz6OGuXL4sA4D7FQ6LMHWHRD/Pr5/7BEsKiD/N2lQAA3KB4SJS5Iyz6aX7uVwWERR9mzfIHrgHgAxQPiTJ3hEX4KoRFH+XNKgEAuEPxkChzR1iEr0JY9FH87XwA+DDFQ6LM3WeHRQAAAE9D8ZAoc0dYBAAA3obiIVHmjrAIAAC8DcVDoszdQ8Oi//p/9QEAAOAJKR4SZe4eGxb9P/oAAADwhBQPiTJ3jwyL/ut//S++LgIAAM9L8ZAoc/fgsIiviwAAwPNSPCTK3D0wLNqiIr4uAgAAT8yioZ0yd48Oi/i6CAAAPC2LhnbK3D0uLLKoiK+LAADA8/JoJVPm7uFhEV8XAQCAZ+XRSqbM3cPCIkVFfF0EAACelqIVUebu8WERXxcBAIAnpWhFlLl7VFi0R0V8XQQAAJ6VghVR5u4TwiK+LgIAAM9JwYooc/egsKiIivi6CAAAPCnFKqLM3WeERXxdBAAAnpJiFVHm7jFhURUV8XURAAB4TgpVRJm7TwmL+LoIAAA8I4UqoszdQ8KiJiri6yIAAPCUFKmIMnefExbxdREAAHhCilREmbtHhEVdVMTXRQAA4BkpUBFl7j4pLOLrIgAA8HwUqIgydw8Ii4KoiK+LAADAE1KcIsrcfVZYxNdFAADg6ShOEWXuPh4WhVERXxcBAIDnozBFlLn7tLCIr4sAAMCzUZgiytx9OCwaREV8XQQAAJ6OohRR5u7zwiK+LgIAAE9GUYooc/fRsGgYFfF1EQAAeDYKUkSZu08Mi/i6CAAAPBcFKaLM3QfDopOoiK+LAADAk1GMIsrcfWZYxNdFAADgqShGEWXuPhYWnUZFfF0EAACei0IUUebuU8Mivi4CAADPRCGKKHP3obBoEhXxdREAAHgqilBEmbvPDYv4uggAADwRRSiizN1HwqJpVMTXRQAA4JkoQBFl7j45LOLrIgAA8DwUoIgydx8Iixa
iIr4uAgAAT0TxiShz99lhEV8XAQCAp6H4RJS5ux8WLUVFfF0EAACeh8ITUebu08Mivi4CAADPQuGJKHN3OyxajIr4uggAADwNRSeizN3nh0V8XQQAAJ6EohNR5u52WFRT66JMAACAp6JQRZS5IywCAABvQ6GKKHNHWAQAAN6GQhVR5o6wCAAAvA2FKqLMHWERAAB4GwpVRJk7wiIAAPA2FKqIMneERQAA4G0oVBFl7giLAADA21CoIsrcERYBAIC3oVBFlLkjLAIAAG9DoYooc0dYBAAA3oZCFVHmjrAIAAC8DYUqoswdYREAAHgbClVEmTvCIgAA8DYUqogyd4RFAADgbShUEWXuCIsAAMDbUKgiytwRFgEAgLehUEWUuSMsAgAAb0OhiihzR1gEAADehkIVUeaOsAgAALwNhSqizB1hEQAAeBsKVUSZO8IiAADwNhSqiDJ3hEUAAOBtKFQRZe4IiwAAwNtQqCLK3BEWAQCAt6FQRZS5IywCAABvQ6GKKHNHWAQAAN6GQhVR5o6wCAAAvA2FKqLMHWERAAB4GwpVRJk7wiIAAPA2FKqIMneERQAA4G0oVBFl7giLAADA21CoIsrcERYBAIC3oVBFlLkjLAIAAG9DoYooc0dYBAAA3oZCFVHmjrAIAAC8DYUqoswdYREAAHgbClVEmTvCIgAA8DYUqogyd4RFAADgbShUEWXuCIsAAMDbUKgiytwRFgEAgLehUEWUuSMsAgAAb0OhiihzR1gEAADehkIVUeaOsAgAALwNhSqizB1hEQAAeBsKVUSZO8IiAADwNhSqiDJ3hEUAAOBtKFQRZe4IiwAAwNtQqCLK3BEWAQCAt6FQRZS5IywCAABvQ6GKKHNHWAQAAN6GQhVR5o6wCAAAvA2FKqLMHWERAAB4GwpVRJk7wiIAAPA2FKqIMneERQAA4G0oVBFl7giLAADA21CoIsrcERYBAIC3oVBFlLkjLAIAAG9DoYooc0dYBAAA3oZCFVHmjrAIAAC8DYUqoswdYREAAHgbClVEmTvCIgAA8DYUqogyd4RFAADgbShUEWXuCIsAAMDbUKgiytwRFgEAgLehUEWUuSMsAgAAb0OhiihzR1gEAADehkIVUeaOsAgAALwNhSqizB1hEQAAeBsKVUSZO8IiAADwNhSqiDJ3hEUAAOBtKFQRZe4IiwAAwNtQqCLK3BEWAQCAt6FQRZS5IywCAABvQ6GKKHNHWAQAAN6GQhVR5o6wCAAAvA2FKqLMHWERAAB4GwpVRJk7wiIAAPA2FKqIMneERdn/BgAAJb0hX4pCFVHmjrAo0xYAAABOb8iXolBFlLkjLMq0BQAAgNMb8qUoVBFl7giLMm0BAADg9IZ8KQpVRJk7wqJMWwAAADi9IV+KQhVR5o6wKNMWAAAATm/Il6JQRZS5IyzKtAUAAIDTG/KlKFQRZe4IizJtAQAA4PSGfCkKVUSZO8KiTFsAAAA4vSFfikIVUeaOsCjTFgAAAE5vyJeiUEWUuSMsyrQFAACA0xvypShUEWXuCIsybQEAAOD0hnwpClVEmTvCokxbAAAAOL0hX4pCFVHmjrAo0xYAAABOb8iXolBFlLkjLMq0BQAAgNMb8qUoVBFl7giLMm0BAADg9IZ8KQpVRJk7wqJMWwAAADi9IV+KQhVR5o6wKNMWAAAATm/Il6JQRZS5IyzKtAUAAIDTG/KlKFQRZe4IizJtAQAA4PSGfCkKVUSZO8KiTFsAAAA4vSFfikIVUeaOsCjTFgAAAE5vyJeiUEWUuSMsyrQFAACA0xvypShUEWXuCIsybQEAAOD0hnwpClVEmTvCokxbAAAAOL0hX4pCFVHmjrAo0xYAAABOb8iXolBFlLkjLMq0BQAAgNMb8qUoVBFl7giLMm0BAADg9IZ8KQpVRJk7wqJMWwAAADi9IV+KQhVR5o6wKNMWAAAATm/Il6JQRZS5IyzKtAUAAIDTG/KlKFQRZe4IizJtAQAA4PSGfCkKVUS
ZO8KiTFsAAAA4vSFfikIVUeaOsCjTFgAAAE5vyJeiUEWUuSMsyrQFAACA0xvypShUEWXuCIsybQEAAOD0hnwpClVEmTvCokxbAAAAOL0hX4pCFVHmjrAo0xYAAABOb8iXolBFlLkjLMq0BQAAgNMb8qUoVBFl7giLMm0BAADg9IZc91/PHwIoVBFl7giLMm0BAADg9IZc9V8/IQTwSCVT5o6wKNMWAAAATm/INSkoIixyal2U+bNoCwAAAKc35AoPigiLnFoXZf4s2gIAAMDpDTmXgyLCIqfWRZk/i7YAAABwekPOHEERYZFT66LMn0VbAAAAOL0hz5VBEWGRU+uizJ9FWwAAADi9Ic/UQRFhkVProsyfRVsAAAA4vSHH2qCIsMipdVHmz6ItAAAAnN6QI31QRFjk1Loo82fRFrjon3/+/dfr//vP37/KfDX/+AyTf5X1RP6mG/Dvq649AHwnP/oHoqCIsMipdVHmz6ItcMU/iogO/+jKh6itua+KUZ46LNrvwVMFRsWSPWMoCQBrdI5F4qCIsMipdVHmz6ItsMy+pOg9IDBSS3OERUVU9FxxEWERgJegc6w3CooIi5xaF2X+LNoCq+KgKPnw21ntzBEW1bdBec+AsAjAS9A51hoHRf/rf/2/z07jFM1oR1iUaQus+atKoY++BtXMHGFRvVYP+S3MxyAsAvASdI7VzoKiH0dz2hEWZdoCS8ZfFZkPvgfVyhxhURl9PNXgCIsAvASdY6WXCooIi8a0BVZMoqKPvgjVyNy7hEV//zHB707WYdET/S4aYRGAl6Bz7PBiQRFh0Zi2wILT30FzH/rzRWpj7k3Cotx98HtkhEUA8Jl0jmUvFxQRFo1pC8wtREUfi4vUxNybhEXqeh4WPVH8QVgE4CXoHHMvGBQRFo1pC8zVv4X2r//ezt/me4uPvArVxNx7hEV7GBqERXWIyh+5BoDH0jmWvGRQRFg0pi0wVcc/xZu4fkV/4OsitTBHWPS04QdhEYCXoHPsZYMiwqIxbYEpFTf1C6+Kiz7wzYVaMB/6Q0qP8r1h0d57uKTFd3dPsVZCWATgJegc27xqXKTp7QiLMm2BmbP3XRUXKe8GNWAIi47AJ4408+Xn+lfRCIsAvASdY+Y1AyNNbkdYlGkLzJR/skhZh/JleP8trQYMYdGxHIMv4OyPdf37gW/nPgNhEYCXoHNMXjEw0tR2hEWZtsCMSif9m7j8uuj+e1oNGMKiY02fLPI5RVgE4CXoHNu9XmCkie0IizJtgYnydaeskq4khEWPQVgEAN9G51jh1QIjTWtHWJRpC0wUv4cWve0eEkCoAUNYdHROWAQAX0znWOW1AiNNakdYlGkLTBRhkXIqhEUPR1gEAN9G51jjNDD6f34WTWlHWJRpC0wcYVH4siMsejh1vCEsAoAvpnOscxIYqcRPRViUaQtM/f37z/bOG/zNJ8Kih1PHG8IiAPhiOscCw8BI138qwqJMW+Bjirfh/Ze4GjCfHxZtUZ6+Avt39LfcPycsWuh4U/zlvvsrWnV1bUn/es2t3qXuPxQWbaPNw/3n7+JwPzDFXPNqPQBvwE6HgUFgpKs/FWFRpi3wMd8QFulluBn/QMNhbPH3qO3CAGUQFikr6Xsuf1aBskorHfv3cgMqUw5isOLtP1c3mORGl49JNlUH1SJlxWthUY5uDvNoZXmKe7m9yeZGXJgigHegs2EgDIx07aciLMq0BT6meMfMA5oRNWDmrZRvxEHpQYjSv0yTILgqyj0kLFrqWLkjKlUWC1/p5TgOcdSgi/ss+2EuRw1l1SthURcTmfPA6MIUj2Gpxb6/064AvBudDENBYKQrPxVhUaYt8DFqK1HODWrALLykVDIZvICLd/Txroxjk6R7nxYlHxAWLXasvBGVKosFcUAcMSTRV2u6tEmpsO5qiFPOcT0sioOiJIpx3KUpHsOyBsO6464AvB8dDCe6wEj5PxVhUaYt8CGD+OEitWAWwqLyXaqshi4
me3vD2GTTjn0wLWUlF8Ki1Y6VNaJSZbH+fX7WV1BcFzbbfAYhyuJ9LXte3QrdbyyWRr9Dem2KR+k0qI9NEcA70LlwqgmMlPtTERZl2gIfUrxnFuKZEbVgFpopX4zhr/SDCOX0Ddy9GIseyivKSpbDovWOlTOiUmWxbvLnffWvf+Vv/hnXXQsaboRF4699XLgVLk6xGNZJ3XAXAXhLOhYmqsBIeT8VYVGmLfARxXtm8VUYUhNmJbqadVu8C/XCm72Bmxfj48KiCx0rY0SlymLt23wSMvSrpexN/ptgkZVbciMsmi5N1PHVKRbDOgtQl6YI4B3oVJgqAiPl/FSERZm2wAeU7xll3aImzMoLqnwFK6uiS0mfM6CC7nFhkTJOqOD0ja9SZYtNWDQNGbqgQbkTS1HO5bBoHhUFq3x5iuWd1P9DKg7g7elQWLAHRkr/VIRFmbbAbdVr5kO/3FYbZqkhlU3ab0w2xaswX52/Tqu36ePCogsdT367TaXGc593tamrKHMmWOPO1bBoJSrq9uj1KZbDOrMyRQDvQIfCEgVGSv1UhEWZtsAdx4/Skw9FRdUuXGqp6Dx4BetKsremtNFPDGymYMXkcWHRxY43yk3Cd7WuJdX1Js74x/+Ou34446EauPIKXquptBLmXA2Lmh78Z0f+bf/SXt3SjSn2YZH/Pf6/bRBarQuA96UzYZEFRvr8UxEWZdoCVzWvk2T0l4ZWqZkzdYRQvuyUdQjjk6NGMdbq1Vj2ULT/0bDoWscb5SbNFadrSXVdea68I3UAUAUaytsdtepKyjxT3pKFsKgsPlz7TbXOynNrU6xbq67VQVa41gDej86EZVtgpE8/FWFRpi1wVfUGMh9+o6idM00fxSC6d3DxJixqqUb1mq16HrxMy2xlJU07m0FYdK3jjXKTcGF1LSmvV+//prPqlpWVlCX1QJRpwoHUyv67W9JTSdMUr8MVZSZ3pljVaWoNOwLwxnQkXPBf+v9PRViUaQtcVb2AtnfawitzRk2daXop33bK2ik7Kd+CadzdWAftFNkfDosudbxRZhIura4l5XVlma5eec8GE+qmVI5wIc65Vrws3d3AKlwpRqUcszrFqqN2iqOOALwxHQlvhLAo0xa4qgmLFv4Bqym1dKZ9DSo7aS4V77r2iv5fUsmkKF28TD8eFl3peKPMpJ200bUkHnEUlZQ3railHNMNsqjz8LBIBc35Uh5reWuKVVjU9VReDRcbwNvRkfBGCIsybYGr2rBoe0dFr/0r1M6Z9qU1fmcXr7qFcRXtxEFG2bqykvN3ubLOxB1vlJmEb2pdS4rryjHBtAchhXKSvq+yjrJODHqIzQrrktlno7RZnmKZe37P7j4PAF6LToQ3QliUaQtc1YdFg/f3OrVypu1h/M5WZqKcM0U7xfs5zi2b/nBYNOii6mQ9LCo7j+KM6q4pa6OM5EvDonI0QYRTTScP7N4UJ5PQFRMNBMDb0YnwRgiLMm2Bq6Kw6INfGKmRM91buxhGda14e/Zv+l5RvHjXfkFYFHe8UWYSTkDXkuN6+fYPb0VZ4KimjKTvq5zQ/PZeCotULonL6qJR1r0plpnKKk0bBfBudCK8EcKiTFvgqjAs+thLRU2c6d7axQuterMWw1sZ0guGRcqqlaM7qikj6fv6mrAonGP0zc+9KU5qxcsC4I3pRHgjhEWZtsAdf//+U75vkumr8ISaONO/s3QhUU5SvOeWXnPle1FZm2JyXxAW1cWVl4Qz0LXkuK4Mo6yGLibHlJSR9H2VI3xsWFSWje9SWUJ9K2U8p6WLyT6CsiFlVXQp+cgOBvAydCK8EcKiTFvgtuYnEi9FITG1YOYvYIl/F60Y01JLrxcWDe5D8P3LpNrXhEXKapR9a2RKJRemOOtKlxLCIgAbnQhvhLAo0xb4iPJFtB7QdNSAWW4lDl2Ukyjn3GuERUEU0XqmsCgcS6Wf0M0pzsKi+VAAvBcdCG+EsCjTFviQ8q0zeFktUANmPbg
q3mjKqV6eZ8NJvwn4b/lGNLq4+bSwaNbxRnlJOAVdS/brCzFDGB8onfTVyka/OizqR3ZziuG0C4RFAGo6EN4IYVGmLfAx5WtlPaJpqL5Zb6R44+3vySJv1FD7r5EeVGBTFHlcWLTS8UZ5Sfj617Vkv74QwZSd70WUTvq+viQsGhXV5cSL3JximaesCmERgJoOhDdCWJRpC3zMwi/i51TfzF/AO9VIlFNkxYMZRiaJymyKYg8KixY73igvWQ6LooCgERZROvnKsEjFkuWw6OYUyzxlVQiLANR0ILwRwqJMW+CD1Fjy1WFR8UpT18WbPGhn/HWNU7FNUfARYdF6xxvlJYRFCWERgC+kA+GNEBZl2gIfNHvvrFB1M38B74qu9XYtcjyjVI40pHKbvmWjrKQf5jgsutDxRnkJYVFCWATgC+lAeCOERZm2wAfN3jsrVN3MX8AHVUnajO4tX77kB1RyU0zqw2HRpY43yksIixLCIgBfSAfCGyEsyrQFPmgUDVyh6mb+Aj4U7zx7qxfptpmF4KQYftHQR8Oiax1vlJcsh0VlJ2GlOGY476tsdH5XyvZnYdFCLKKribd2c4plnrIqhEUAajoQ3ghhUaYt8EGDaOASVTfzF3BBdTb27tTnTfviLN+O5p9//v5NXcXDL4p/MCy62PFGeUn4+te1ZL++EDOE73+lk75a2ej8rpQT/XhY1E/o5hRnYZEuJbNRA3gLOhHeCGFRpi0w98/2vhm8iU5e7xeourkUFhUvwi1VDKVppRzkNpfiajz8h4VFVzveKC8JF13XkuO6MpLB211XjbImfZUjnN+VK2FRWTZuuSyhkSmVXJgiYRGAa3QivBHCokxbYCLFRMnopTF776xQdTN/AReKzreXZ5HS9Uy5poxNPj0sUtqsdLxRXrIeFhXx4TxmOEooI/nKsGjectmaStybYtmQskrl5XC1AbwbnQhvhLAo0xY4lYOizeCtMX9bzam+mb+AS6q02TofjjV4yWafGxZd7nijvCRccl1LjuvFXYjva9ndUU0ZSd/XPHgplVOdbYR4MKVgPvemWA5LWaWTGwTgPelEeCOERZm2wKnJW2XzvWFRMb6ymaYR5SZt83F0UjT7obBIyWSt443ykjBk0LXkuF7ep7BW/P5XRtLXKkfYT7dVdjDdCCpnlFXTtSQ3dm+KZaayStNYC8C70YnwRgiLMm2BU+GvwEvTAitU38xfwBXV2hRDaQZSjLF7YZfjV9ameJkOwqLzKKLPWux4o7wkXFBdS4rryknCoETXjLI2ykjOJ/TgsCiMYArl9X1gSifrUzwPi8opTgcN4C3oSHgjhEWZtsA5lTXRC6z85fbFiOag+uZiI0f//xxvwKaN4uXXvfzj6KR4mZZvS2Ul51FEn7XY8UZ5Sd/JRteS4vrkPpS9FbWUk5xPaH5XyvhjGmGUTUeFwy9xbk3xPCwqmwwXG8Db0ZHwRgiLMm2Bc5O3XXm5bvBveuX8uxbkeHW3VmN3DKD4h+l1LSsG2bUeRydFjXLS4ds6Cy5e73ijvCR8U+taUlwvb0Rwn3TFFENRTvKlYdHkjsdt3ZpiWUlZh3KG/VUAb0lHwhshLMq0BSZU2HQvo+q9Ur5YLShK5m/Ijcqa+Qu4pmpbT/p//4YvXo3dyz+OdIoa5QTKd2zbUtlQbul6x5viQlcp0bWkvK4sc9pZ2Ztykr6vzwyLytLdRhztKuWY1SmeDkv5ZmmjAnh9OhPeCGFRpi0wUb3AmndH9f6q2lPWZuV1o6LmalhUjc+1TRRF2tHMX6ZljWq+VS/1SuSWrne8Ka6Ea6drSRkbVOvQBA1VZ+W4lZV8bVhU3fLmO8VqLcum7kyxqtOMa7gsAN6YzoQ3QliUaQvMqLSUb4/qtTJ8gfXv245KzkXvW106dB2Wo1GWi4OZTVGj6lJ5rliKeiWisGix403Zlqbyt5ySLiXVTKsxlKPev7kzo/n0t+l+WHQid1OvQNl83VDV8Y0pNsMqmqu
vVMsC4I3pUHgjhEWZtsBM8wrXTyX8W/xEI2eFXV1FmSdUcC56ebXjCF7hupAULfxt3+TK3xRXqi6bV6n/Kx7dShwtKZmsdbypLqYoIr3zi9q6klShTHOf/h3cp2p1lJd8cVjU3rZ/bC3/d/Hn5k19w29MsRvWyrIAeGM6FN4IYVGmLTDVvEAGyvdK/fqav3FUcK5+S7ruxaf8QjWD/F7s3+NW1hTXqi6b9/KQil/veNNfTo6oRRlJHcos3ae6ijKTrw6L1pZShbPrU1wcVrSxALwlnQpvhLAo0xaYU/lTJ++i/oXbUsG58O2la1nQ3eK7UaU3RYW6y7UQcW/pcseJMhu6WF1uprowuGYBlZv0y/a5YdFSXNR1e3mKa8MiKgKQ6Vh4I4RFmbbA3MIL7OxdNH+lquBc+Ppq3nxRd7oUKV60Krwpmqy7PFuKqCWlI2HHSfzq38ehdNKGMtOgoV0/ZSdfHhYtbKug16tTXBvWfHoA3oWOhTdCWJRpCyyYvsCad+rXhkV19bDI+OX4t3jRqvBmGBadLMXf4pIKX+/YKLeRl1HJpAtlJkFDtzbKT74+LJpuq7DTi1NcGhZREYCdzoU3QliUaQusqP+mT6d7rSjfKe+ECs7FYVE1uPgVNxr/Vvp4carsZhwWjVpKf8lcHzcqu7nYsRmEC7qqVNKHMqf3qS+uC8k3hEXncdHoR4Fem2I5rJN7AQCig+GNEBZl2gJrTt56QaxSvoD6921HJefisKgam/Ja4SvR37xKlFWLFrsuw5WwUvq8sYLuWscurJNf3komwdKOA9gozNCl5DvCorNa421zaYplB4N7oZIAkOhoeCOERZm2wKLub5VL/It6XdysvHVUdG7QmK4mw7dp8ErUyPcrnkyKufZd9t9xaA2OWpaUSx1Lv9bHMisjCScb/LyAZHKbviksGm2r811zYYpl8+GtG4dfAN6SDoc3QliUaQssC95gw5fK/iv6pV+Lq+zcoLViYOP3d/slw1EyX1ByUzQYdVkvxLEIe77S7krHWVPH/26/U1YyWP0+aijrl3Q56dv6krBo66aruRCqLE+xbLxNL/UE4M3oeHgjhEWZtsAVf//+k/9F1n///ef0nWIvrtHr+JvkwW9D/+DA8o8D3FZBOafudHzUuTHW4j59eK6fL20rjXayqwprU2zDosu3DsCbsQMi0fv9Vf2XpklYdNAWAF5XHxYBwBkdGIRFV6ldUebPoi0AvC7CIgDX6MAgLLpK7YoyfxZtAeB1ERYBuEYHBmHRVWpXlPmzaAsAr4uwCMA1OjAIi65Su6LMn0VbAHhdhEUArtGBQVh0ldoVZf4s2gLA6yIsAnCNDgzCoqvUrijzZ9EWAF4XYRGAa3RgEBZdpXZFmT+LtgDwugiLAFyjA4Ow6Cq1K8r8WbQFgNdFWATgGh0YhEVXqV1R5s+iLQC8LsIiANfowCAsukrtijJ/Fm0B4HURFgG4RgcGYdFValeU+bNoCwCvi7AIwDU6MAiLrlK7osyfRVsAeF2ERQCu0YFBWHSV2hVl/izaAsDrIiwCcI0OjBf0X4pYHGFRT1sAAAA4vSFfEGHRjLYAAABwekO+IMKiGW0BAADg9IZ8QYRFM9oCAADA6Q35ggiLZrQFAACA0xvyBREWzWgLAAAApzfkCyIsmtEWAAAATm/IF0RYNKMtAAAAnN6QL4iwaEZbAAAAOL0hXxBh0Yy2AAAAcHpDviDCohltAQAA4PSGfEGERTPaAgAAwOkN+YIIi2a0BQAAgNMb8gURFs1oCwAAAKc35AsiLJrRFgAAAE5vyBdEWDSjLQAAAJzekC+IsGhGWwAAADi9IV8QYdGMtgAAAHB6Q74gwqIZbQEAAOD0hnxBhEUz2gIAAMDpDfmCCItmtAUAAIDTG/IFERbNaAsAAACnN+QLIiya0RYAAABOb8gXRFg0oy0AAACc3pAviLBoRlsAAAA4vSFfEGHRjLYAAABwekO+IMKiGW0BAADg9IZ8QYR
FM9oCAADA6Q35ggiLZrQFAACA0xvyBREWzWgLAAAApzfkCyIsmtEWAAAATm/IF0RYNKMtAAAAnN6QL4iwaEZbAAAAOL0hXxBhEQAAgCEsAgAAMIRFAAAAhrAIAADAEBYBAAAYwiIAAABDWAQAAGAIiwAAAAxhEQAAgCEsAgAAMIRFAAAAhrAIAADAEBYBAAAYwiIAAABDWAQAAGAIiwAAAAxhEQAAgCEsAgAAMIRFAAAAhrAIAADAEBYBAAAYwiIAAABDWAQAAGAIiwAAAAxhEQAAgCEsAgAAMIRFAAAAhrAIAADAEBYBAAAYwiIAAABDWAQAAGAIiwAAAAxhEQAAgCEsAgAAMIRFAAAAhrAIAADAEBYBAAAYwiIAAABDWAQAAGAIiwAAAAxhEQAAgCEsAgAAMIRFAAAAhrAIAADAEBYBAAAYwiIAAABDWAQAAGAIiwAAAAxhEQAAgCEsAgAAMIRFAAAAhrAIAADAEBYBAAAYwiIAAABDWAQAAGAIiwAAAAxhEQAAgCEsAgAAMIRFAAAAhrAIAADAEBYBAAAYwiIAAABDWAQAAGAIiwAAAAxhEQAAgCEsAgAAMIRFAAAAhrDoJ/n9n+SPUj+CjfiXEk+nXdA/v/7zn9/6/Ame8/79sVF94rTxk73b9vich/QHHt0TNqHiZN9m+OtV5kdY9Bi+62OPCwp+4LNlI/4pYZEnP2+0z3n/CItwgrDoEX7g0T1hEzrOSku+ygQJix7Dd32MsOinhEWW+sQFfs77R1iEE4RFj/ADj+4Jm9B+svv8nvekv4aw6DG0K0LPHhb9+f3790PajBqyEf+QsCjfxE8b7ufcv48iLMKJl94e24nVzuxzHtIvefQfdpKvsAntR6WlNs92uN1DWPQY+Y0aefawKLX6kFMvashG/EPCIn8BEBYBh5feHmlq+ph93iH76Y9+dAB/GpvQflT+suTzHW73EBY9hu/62LOHRanNhzxMUUMp76eFRZ92snzJ2XgZYRFOvPL2sAdSn7PPeUi/5NFPXXzZjbIJdWGRkj8cYdFj+K7/7Nf/5zxbqc2HPExRQynvp4RFn34Xv+RsvIywCCcIix7hSx791MU3hUWefLqz7SbCosf47Beq+5Rny469RzxMYUMp78eERTaFTzxXvuRsvIywCCdeeXvYdxz6nH3OQ/oVj/7DTvIlNqHiZN/Wkr+gv0btijJfke96wqKuoZT3c8KiT/bF3S0iLMIJwqJH+IpH/3vDoldCWPQYvusJiwiLznxxd4sIi3CCsOgRvuLRJyx6FMKix/Bd/yPDImv0EQ9T2FDKIyySL+5uEWERTrzy9rCp6XP2OQ/pVzz64QH8aWxChEV3qF1R5ivyXU9YRFh05ou7W0RYhBOERf8zkyIAANomSURBVI/wFY9+eAB/GpsQYdEdaleU+Yp81//IsMi+SH7EwxQ2lPIIi+SLu1tEWIQTL7w9fGpKZJ/zkH7Fo/+wk3yJTYiw6A61K8p8Rb7rf2RYZG0+4mEKG7JMwiL3xd0tIizCiRfeHv48KpF9zkP6FY++dUFY9ACERY/hu56wiLDozBd3t4iwCCcIix7hKx5964Kw6AEIix7Dd/3JJrEvOKPrfuqUz8uf31b4P7+6HV4/W+GTFh9iv395k/W/mPPnt3oq1EPMBX6d/1M7Jw0VnzWvX/283GJniRXzj7NWi+XsSzRLOHgD5OkVfYzfFd6kEo2yO92S0WTjO5adL9U2Xr86WxMvsPze28cU9Jry8222Qie9N8Kp9o9F5iNXwo2Xw8bimeqm6mSvp4zGdG8tLPXufCijljyznVZffWOZdeXRJpqOxfLG2+PCtFunWze+IT4YJUrBJlkd2TYM76qgBSk/e6HRQ3r17q+1OjsIg8vbQLrZRAu2sYGo4mwgxwYKStiFoxMvqMQhr3PRh6eDLv/Ylcs76hMQFj2GHg+lAl4guOXNdsovFddsnvLZalMSHGL1E1NcqrqScgr19WAfZycN7Z99z7v
omVjuLLEy6YNPV4LVrZezu0F+de8segNUA99a0FVPBcO0/MFGOLorh9UfOcM75s6Xqhmvcktl/W0+4aw7zbnb9mqZ2/+r3hd+vNtwqp6hRKEf7dly2LVUeB/XviCnT1pSX//9R3Q1mS914WQoZy11GRtfgzDzGN7ZJjodSznvcHtcmnatWtPmXg1viOfXMzA+DiWS9ZGpSMV79N7S53I8Qe+XlmHQar0CprzeLtEmvFxnusGArKjNphx+eCJX8+tGYplHJ929SOpxabaeGQzPLwQj+XKERY8xvNW7QYH60KmftKTaJPW+CXdRd4h5qcIxhu7S5rjqLRXGsztpKH9uJtaeMhc6S6xIv1xdpX5gdcfNEvZvgG7gWydW3Gu208j5SrRyd+2463vYDbqe1mSPnF/ddIsWzbrVd9ocpJaXV/Bw3urZVPNatXwgSmzOd461sg3iKKXLk1UM2s2OKU2XujIcyuakpXa+iZZNKfFMJXKRw9qy9AvTb4/zps+cLfrZNUv2vXQjO2mipQIVL+3T2x5S+/+uCxwudLbZW22qtZWaXttpD1apuyWbwV3Jt76t0g2k6yl64o9OvLgSrpuL7pV/to8Vyy5u5/chLHoM32Rn54OX6G66Z2vDtVs1KfdiVbhJSXNU9Ju7HKUySvvFoGL36Ox0vaSG/PPZmZdc6mzjBfrHrr4B0eTrIs0SNou36bvYWPniY8ly62Ec1F3faNHM+R2LR3RUj642i6Lc1ulxFG3Meu6eczqzzsrmVOJguUepyc6xgf8qJ+3zjCZUDXW0TMdCzZe6NhrKpCUfaX13LCvMVNZsE10bS3L0NWn6RNR2XvTTGxKtwcaz91Kny9ga96dWuwJ1U5c626jVvlq162bbebiC+lwajMYG8ivoqB5ItD51Ecs5OvEGlTDRXKy8t92NT0uk1LciLHqMwZ0uxSUsVw983or+ZWP+DrPYJ/XGCbeRPzj7CWKprVvLyI96NYimQqbOVTEfs2d7Nm7IMr22TWs/bvyyu9yZXa9azU34dZPPkN/+Gx9RkWYJuynkJn5VTfhn+9hON7wnO9VJ/7G57lMtaihjeMe8Rr5sqePqMWVL9bWbRdnXrZ9JSYX05wP2W1hO0zK83Pg+N1RiMFVPl30knqvEfOdY8V+5p8RK5sH5hPIiFH1pmfy+F6u0Uan5UjcGQ5m2VKeSXCHI1ODs81YgXtmlsTQT1/W9hFbOm1avM95YuHU1xNENsVQ13cRy24Gt35A8ICUyr+ijS43vu2rvaXO5My9h/61brRZPecPt7BnhCuYxlaOM9NPT4OOBVPOriljG0bvXUCJRE/USeXn72J0Mlnl6v74MYdFjlDd9oNs3iW9l7bYqsfF00aj3kgvUKWmeDd/+Smx8EGWd+GHSfE7G0okbsszkuOBj6KZ1pTO7nhx19BQWU1M/bbPjBe2n0DWaTiq/rsbs88EqDAfu3W2OEt2Y2hZ8CPsItFZKbfW3jGN4drGfwFEgr9yR4yX6G1fQGM/WZek+t6zAcKrdzTB1nenOsQL6c6POGvRi42qaTrtKv4o/emoZ/ZocVVqDoUxb8pQSxme9Udp4phJWZ7iy07F0Ey+G58miqV9lR2d8hEfhcutObki/BpumjqWu3JBBs/vqHtvKeyqb8owLnR2tKmNvtVg9rdDRSrMMZyuYCxdjCi0NRDnd/LolOOq0axnPzj952WakXmG8gl+JsOgx9s3WaHZFtxfqzbTtjOq6V1Fi4+m8c+qU+OY6WtkOrapEc7mvYDyzGPzGsk52bdiQzo+6oucocaszu7wpS/hSHs348tTNepmjVrOE7RSiJva/z9a2ZSyvXYOdt1e36H0W7ZzfMeu1GlFROBiSdXm6KBrAcNB5tatmu3tm6U1ZynOUiJxP1VJN9foOdaPYWNbeal7xrZRV8m/9rGY1YS+nRG64HFu77vOlbo2GMmvJ65WjtYyky9wzJo/9aCyeX83Bl2Kv269MuQFPnW7d8xvSr8H
GF06JGzekbUHy2pQV2/6vdxa1qqXdm/F03YZl5RKnK6jqzSJ1VgaiHVbNrxtck27X0pJNE/kXFdFENTAlvhlh0WPkzdYqb32/qac7ubnsveStVqdk1mK3+cIKwVBVUIlA3LNlNk35GPaSNzqzq+11z8vr4S00RdrMZgmbKah0s8JZ1EF7NjS8u2aNwoEW6jtmiXaRxVtqhmtDyhXCGfmgB21uwuttpiXP7/NcPdVoOvUCW+p052jFu1IdK7QP1arVVayAPq8sdWcwlHlLVqCoZDXsP21m28yhXtnzsZxuj6ahK6zm8m6oSwd32gebS4TzP78heWpKZFqbuinPU+JOZ2q1vq71ViqapIooYZ+HXdQLMhIOxDP3rn1dmpbaTEseo23W0pPNZHZBB2uj/yKERY+hzdYp94WXqW68ZykRsO1ztOHF8+NYp2S2u/x6USmqEDdig2m6K8SVLLOtZXl5Wnc6syrtZV+P3Gy4Ou3z2BRqhhI3sbOr9WXLaadyqEeYWea4Gx+ULleJlk9Oicxq1IvSDDC+AQe73A7aKx2debIZmOW1NU80s7NUPR/LyUONB17tHJ/wwiCs1l7K6tSTqTfkfKk7g6HMW2pLWEP+506Us/HWleg1KzsYi2efbg/rdjjHM80QZuqOgpHVM54vYyCs5O2GK5MHf6OzsNX6qakXOrO+vMRkBeP6rYWBeKot4s0fs7bUUahZE0tdWQ3PUuK7ERY9ht/UXnXnBzknG7nZK57Mm7dOyfTZaK9HFWyLd3vaSo5bjnu2zLapqv07naWLfZ0q1xL9Y2a5e81mCZspWGr8pPYTbm5XJ7xjyh0vbHnZ+2xbcINrtr76bCXaAfbzqAyG18zFUu0diW/tCWtl76pfLs9RYmXneIWFMdQtW6JeSessN7uw1J14KAstecXjBljSc5vMI9mprw+WxXOVyOrtcfmW7gYzHalviA+t6tdy8rDu3BBdbq97x01TVft3OgtbVa4mES+tdeYlJivol/d7PHA2EOVWozo0NS11DLdey0ETu37lLWM2+K9CWPQYvg161S7v9sJkn+dmlWg3ZrNN3fTZaK9HFYKsjZUcn4hxz5bZVqoWwj5f7MzqdFfLZkfrED29+xLWlUZN7Ox6eQOs8fEKxXds2k952cvGXTSbJbNBeZ9xT5P+6xXbNbUsdXqfV1j5vc1+YPUCd5eN1cplfE26Je/Vi2eJupJ1nTubL3UvHspKS1Zkn7XNbxtIn3k2Tbu+r1U8ln69kzrXb+lJR0PeTrtFhpqF8X6VSOoZ37khQaOJN3W2NHc6C1utmy0/H6yIr9pkBavGhhYGEi5L176ljsHUlQZN7HwUxVS88fGt+lqERY/R3eZIt29nm6d9AOstHW7w6bPRXo8qBFmJZetzL+7ZMtt1qeZtny92Zhe71S4XJFycjefn7ppS9RRGTezqtnL1aYWugOWOF9au5l4sEXfirStxsGyvHvc/2TJ2NdjZlr93Z4nT+7zCyh8jaes3I61TO8vWZ5/x5LlM6sWzjuuW7XpeuvlS9+KhrLRko9kr2qVtIJarPK2MEhG7vg/tbCzn28PLrKxox2uGWzfgHSkRbNJ6bzSlM8se3ZB+f5lwFaru73QWtprXpPjc17fs4uNwBbslCp0NRHe1/Fyy/H3eljhK1WtpibNNYgWKNaz3+HcjLHoM32yz29rce9/HZ8dEs4frZLjBp8+GXS8GGlTwhgdUphf3bJntupSP0K3O7GK32t6UjyA88Tb1KJslrC+OmjhYgaPE9NEO71h/RDTsam7Wh7jpWlEzsXJR2prxjcuGV+vlscTZfV5i5Y9WvO9jvPUCr+ycckucqm9NfyfroVhiYNRZPBTLHMhlvaYSXmP7v+VWRc6mma63S9eWX9oeXqi/13Pe0KbtIeRDHN37dmtYamC8LD4XJbK2Y1Mtgn0eGHUWtpqbso9eYsBKTFawuVED84EM26mXyxLHDakuzofiw9hLeIVwWt+BsOgx/C7
PjopmL3hSidKf+p/+23dLvaXrlIw2ZGrxaLIYaFDBGx5QmV7cs2W261I+Qrc6s4vdantTPgL7GNW3/Fy1WcJ6CpYYz3fTVLdUv/SHpnxWHzfZ4I4V++L4ITpGuaGzRRltGVcuaqWejCXO7vOZ4eb0DCXaBfYBDFRluhXfDZ60vlo9FUsMnC9kOxTLHNhbKlN2t+xz+pDXKn2OpjlY2bOxKLFrtocnjf/l/nXFQjdb9zA6+jTkvUdPKrG4jJ36pkq4NtUi2OeBUWfxipcj8BIDVmKygufPcTYYiOWWAwnaqata4nhWq7UcNnGwEs2GVOL7ERY9ht/W4kCP1aUs1e2d/UeK7vY9XO/LOiXhs1EeNKYYaFChG0BJZXrxU2mZ7bqUj9Ctzuxit9rlCOxjdD+qC80S1lOwRNTEzsvXZ4ESofCOhafzyR2rrpTLraxQsSjdCOMblw3G3F6wRLtYwcR6Z5uz7qNZYE8OVGWC0ScnT5qP6ajXpC01cG0hLXNgb8l617JYM9aK5VqebqF/PoxX9mwsSuza7aFN7+qfjjRTjSdYp5Mb0u4wSx1NWHJgdEPyeJTIwrWpFsE+D4w6i1e8HEE395KV2Jyt4PlznK0OJLix9QVLHM9qtZbDJg5ePo+2Snw/wqLH8H1QHOixau/4Nm72jjdU24vUuy3ce/2zsf+U+UIx0OBhisaQjacYP5WW2VYql+FWZ/HVcgT2MapfXWiWsJ6CJYZDMD4R1bdEO/9KeMeaXbGZ3LF6xdqfuxzzQv7ZPhbiG5cNxtxesES7WO3Eepc2p5U9rtbrUMulhqOPq+8FveP8ww5V9Oja07Gws008FMsc2Mt6Vf98fLRcX5viY3a6smdjUWLXbY+65UtfGXm/WfOFR33RFSWqzeSDOq5acqCd5qFqMgvXploE+zww6ixe8XIE0fSz0aavVrC7UaHVgQQTqS9Y4hhYtZbDJg4+WtX3hH9+BoRFj+H74NgkI1asfLrqKr49Wvv2qndbuPe6Z8O3a6PoNXiYvGEllgUNbSyzXZfyEbrVmdXpVrscgX2M7kd1oVnCegqWiJo4lBUWJhLeseZEmd+x3E6W27NEu/4VK9ENMb5x2WDM7QVLtIvVTKw3nWq5qO1Iymsjw9H7pFtHweB6MS5Lny51Lx6KZU5bOkrZuFQ+ffRBpU910+crezKWle3RBEbKXeI9Z8UIZjekXir7/MEbMtif4dpUi1B+XjXYiMUIvIR9PDVZwdm4VgfSlWgvWOK4BdVaDpsolBXs4+Xb93kIix7D98H8gLC94MV8F1dbx3fKJv3jS+lKs73qZLj3mmdjP2x+5X/PyVN20QQP08qm7sVPpWW261I+Ebc6szrdapcjsI/R/aguNJ3XU7BE1ETByvhMils7MphruRwLdyypfrtBDdrn0wFYidzRLr5x2fD+1Bcs0XZeTay3MFUv4p20CzwcWWFUxke2GTxpW897ESmH1WUsiIdimdOWjqlbK2rEctMHXyXLc7OVPRnL4vYof4Pu2kJEW3flhpT3v9wWxtLXxrHxTpXIwrWpFsE+X+wsXvFyBIMSgXgFBzeq9YGB1BcscaxCtZYrc/EyNtzudn43wqLH8Hs8f1SKveAfLVd8Z5W/X99srzrZXHT1s+Gp6lmxdDHQ4GEKG56Kn0rLbNelfIRudWZ1utX2pnwE9rFaXeejzFWbzuspWCJoolTUsE+WOTSYq+WqqjdYraOlg611nI7eon0Myh2sRDfG+MZl5aJW/CbmyVji7D73lqZqOd6EfSoKD1azMigzfdI2++q6qg3LOV3qXjwUy5y25HXTp/3DxnJTg/ahWJnpyp6M5cL22EOjiyvRb90LN8QG4h8t11n68jDC/RmuTbUI9vkhd79c8kGJgW4Fz25UaXUgQTu+XLmqJY5VqNZy2ETpqGHFZwP/SoRFj+H7YOFROcrZp3Iv6DArd2yzh+tkc9HVz4bvvGpYbU7wMK09X624lmW261I+Qrc6szrdapc
LUj2lhbq7Zgnri6MmKlYmFbKmuiHVwjtWz2bhjh3y2egjno83LnF+B4ZX68Ys0Q7yfERLUz2WzD95rlnZOfGKe80quyuYRvdLL/5f1Z/h2JxPLBYPZbElK7VN1Ua+Tzkn0v/bsZ+u7JWxnC1zDoyUvKDeuis3pJyDfSqHdOeGDGqFa1Mtwp3O4hUvb8vZOkfqFdysNfCBgdTztsSxx6qLS0PxkaRC9qEb0jciLHoMv8XHJhnygtsH3zme6SyjftqaPVwnm4uu2pBeot6dllUMNNrBQdZc/ChYZrsu1SNkny92ZnW61fZmfUHKz6XTJaynMGqisrdg/59MI7xj1V0qP2eWNdhaPmJfSq9rHwfiGcU3LvOrQfd1fp2S6j63FqdqWamUNVYV3y+NxStumadPmvU2WPPNfKl78VAWW7LJb+Ox4nsbGqTfIuVtvM2zlY3H4rerzT3fHl7n/CbEyq1bfMyCIe5r1U64uHSJD16JLFybahHudBa2qlw1W35eUq7g5vxGZfFAvK7vj/Jzpc6vU/VaDpuo5EJ31vNzERY9ht/Z2T5IrOC2d/P/d76V6v3a7OE6GW7w6tmwrdoMyq4XeVUFsayr+zRqqO/OVI+Qfb7YmVXpVtty1ZIvTjua3PVgCespjJqoeJVfXnh2/8M7VvWzcscqXrn4eDbeuER843Z2tb8/eeJSp6S6z63FqeYJFhPNLGfUvPNa4TNSZzYFrUh7ow7xQp4Lh7LakhfTlJWnXP2vaGK+slfGsrI9rq2E5Dkt3RBjWVtf+f+HeOgz4f4M16ZahDudha3WLdnndjSnvLoSsxsl9wfi7e97qE41a2mJ2Vy8yh8v3I7oWxEWPYZvq+YoCunM6k8Cz1FCmj1cJ6utnFXPhvXVFLDrxUCjh6npdk3UUN+dqR6hO51ZlbbVegCW6G6Il9lXuem7mYKlZs+2moiWuhNPtVyNlTtW8Sb9s30cFUx8fufr1hncHx91vVZty9V9bi1O1Uf3x4vXlwYjK4VFvEklpCloKX2O2PWzpe4NRmu585asmI+8WDNP2tIUDc9XNh7Lre1xfvWMDyJ9WrghRnvAi9eXLOvaDRnsz3Bt6kWwxCPufjWAuMiZYwWTyY2S+wPxMnv7ljoWoV5Lb2I2Fiv0K7r734yw6DF8G6w8KdrK+l+hz+k2aJ0MH4Mqs/ycWV4x0LAVy7u4U+On0jLbdakfIUtc68xqtK1a5r46zUMsvoJ7brO+zRTiJhqqY/9V1lDTnfNMTcY+Nz1a3mhrWW1d9JbOxmsF2mHGN27nl7u5NbmWOr/PDbu2MFXL+z0ufrrqJyuuhDQFLaXPkflSd8KhLLdkS/nb7kXRRMr91Q3W0qcrOxiL5bbTnmwPu9q1tMIGYUPy4VjmLhqiyo2LX7ohg/0Zddwswo3OwlY9c78rlmqHc8bq79UnN0oWBnLria/XctBEw+tY5xfv3CcjLHqMZoOf8QPO/lttTm9CCafNtRdrtrSl6j5VQ5us/Cy+E4tK4cPkHV3bqmFD0RjbR+hGZ1ajabW9BU3StE9rs6DNFNrSMZ9M0k6z03TnLC9n2ud6Kbo7VrGr+aKVPBtv1JZmOV7/cNTe0lHJku0gvZASLbu2MFUfn/23bX++c8Kxe6YSTouwF7RU213JCsy2RiUcysaypy1Z7d/2X+Uklraxl4uQ0ucrOxjLje2hu6NUaqCf4YD1Zp35cCwza2+IsyrB2blJedduSJ6vElm4Nj6cfREsdamzsFXLOzK9zNl2bhwrmDRjHFgfSFPGF+to3pLHXmnWsi0d0k1Oms7sUr0RvxJh0WP4Rlq6j1Y0eiAtq9xIedeMtmu/9XIN5Xl5/+y8SjXQLiOxzODBaHdvyaqEDbWZzewtdakzr1E1q6kfdbyTumvLKso0C+ptHAsaNfGrnU1e84Vj0rur51X3Mb1jf+qfKlyP2Gv34ztyrEA1gDz8qtmKSlSj7npq08bHrkRrOtXMM9N/ujHapXpk1sye450
0Bbxa2VhehL2gD6X7G2iH+VK34qEst2SF0n+qRUgZpmx3vrKjsVh2ld9tj9/1v/fhLVeJvll3snWLj667Ic7GXXW5W1zGSrgMYWY52M31zrxG3axPpKhj6bbrrVTOOX34Vf1svsnKQLT6VZluyk26uStRE1tePzdXzWvj1Wdz+TSERY8RPicDVjRp9oJn7hvn+Hlye5b3cpSwZFRDLbej0iNRD9Rz/LP9IDX74LllOat8NkOr0TZkeW2t+BG60JmV3wrsU89tlEvqvRSN9P2EC1q0Yemum/C+nQ03y+tfNKBbppRKHC3lGntW+tzO+iiu1o4C2hN7Rldg76CZVEnrVrTa92PpdgW8mBKt6VSz/ULfUn9HvfSe4XWLcRrLPHKHT1op/WBEXTT9EjRL3YqHsllrqR9jsucqbbyns5UdjaUbyl41zz1lFMvtFVQ+F/ZUJ106Wq62rn0+LgY3RJQdbddu7LMbkgespsoZdrV8tEenlzvLa1OMW20olcy2c/p89FCtYGJptbcfwK0rAyna6Cds6aN3L6DERv203eiz5MF066bsovaXIix6jP3+RlQm0w7r9oLasJfwH3/KPGsv1yRzS/ZriD/+cyw8K28nS2w1rEm/bgXKB89rpDZSn7lxPRj/8R85e/zktqPzTtiQ5/nnnZdUYnO5M1333vLUUtKuSm5VZXxF6zLNgnqNvHibponcjV3b5dyTpZFcMs3U0v1Ec0bKCe6YKtTVi47zHL1AsC45o5iQd1DMuqNW1e3eatHv2n2u2cXxVHf5JnQXNrOd40MtB5poAidP2j7liq+aqMR4qRvxUJKllnJuvZw5t757yhyv7HAslp+GkhJlVXWg9a6fh9xsHoySDc0y3Lq5IRtwfEOcqoVj17XlG7LPJlXYSqtg2LGXLFb5ame5QLsAdUfn21mpcAUTz0n3pjzJG2sDyZnN9MoylnHM12sokdRN5G6KJUw8s1825Telvwxh0WMcmy2gMlne+91eyBtpp79/s29G76XYm5Yu/fIaeTvlrnb6axxl13WZ/UpX1ZTPRStsqPy884kqkVztTAVaTT85EKpUZZoF9WGUz2I4sHZcnttOMmDd6W9elIoGu4v1HYtmVA0nKlAOre+82TKhcHtX/VpOuwTdfa5MpnrIc2rXPennk+wluyfGdavUPmmDKVdj6xpJusHvBkNJllpSdvxeqZudruxwLP1y1tsjWu6m1cEcw0nuJbuL/Q0xuf9wmZeWsVLX8M7CtfF+y7W/2Jm1ev7oJ9H67oWiLsv6deXBWPJNajUDCYtVZSzn6MQHp4SJhtsOSt00ne/b+vTmfSLCoscYbbaku7faL91eaJ6J9Gsna3cv6L0U9doX//bcWs7+/PbXbcdVQ6rHvrcePKGTXRo1ZB/bej4qJczFzrxAW6k8tVwz/U1dpllQb7Aq0odW/bi8lb73jhXMf8H4UNzQ6R3rF6qqHU25LhF0bsM6H37fbfE7i4nnKZH197kymepOnccboh9ZWbC5wVlTqX/SVLFXDmK21LXBUMxKSyrT5CtXqWy2suOxTLdHN9RjRXJdJVv9nToGML8hEq+CdGMbFdyplPOZhGvj46uekWudWauTRz/pFylY4ENd34eexaPR9JrRN0/zpu/r/In3BpVw9XCSfkier8RBwzs/lD4PYdFj9DvgcGwdUWGlSuVmtS1kRffNpC2tlClr2NeVVubYTuXu9oEcn7Ji8NXWb56M/tFpBQ15wj/vgkfoWmdepJ59WKN5/P0L3UOzoD6G5lncv2U2bQuJt6LEGSu4DbsaVfvoz+5Yscibdmk39ZT7o6Xr3FrsijWabtvinqtE5j0pEVjZnIkVGI7wbOf4qIN7Vi6CXbaSuaDfoKLan30XVKMoW9mcruFwKGbekm6AUpnndsUnK3s2lnIk1q4VPnpoHqmyEV1RqqcpSH2fy1atTSvcDVFNKNWZL2OtnIwKh2sTngxXOrNWtxmfPvrmbDufruCmuDw6QPP
0qm7CgTR9tWU8V4m8GErI/ptvpmshsSv9BY0uHNcXICx6jHoL1brNO9oLiR+/+i3rTUruGzxv6YoObP1L2b6hyrZ9b/7aN2Uq0A6p7TXLb4P2z5sOdQ15dSUyb1SJ3YXOvFz69Ft/aiIKV5xKhEWaBfWHsev8dx7YoBO7urI+1p21obked6U0u2PFQimncUw5HrGu7xdTaj7+0279ihKZl1citLI5821SIjDeOc0NLukOVIugz7YPuveK+lBKZkt9OBmKmbZkV7tLce5kZc/HMt0ex1CbJrxPJSJne+jkhhRSmbPNun5DXLtO8dqMToZrd9/KnD76ZrydN6dPYb+InWN6lwainIJfUWL0rP/xnykx7MTXVYlS9wuTL0VYhB/IHqaz4/crDR9t/FCDI9nfIkogtL0Fh29ZPIEw6vs29kgNNsx2RZ++HGERfqD0MD3NC4q35YuxODc4q+2Nwo0+tT0M+oSn9FRhkf+KMh7Mdu3bAmzCIvxA9jQ9ywvKBvMsv/zCx9mbg7Doju1dxgI9tacKi86eqC3A/rZREhbhB7JH+0nOX74sejWnYdG3/Qr2R/jOdxlWPFNYNPmy6PsOVcIi/ED2OD1JKGJj4WX5Qga/hj07xGG2leMXCM/tmcIiH4sSjW8dJGERfiB7nJ7jAD57tPEz2S1tI13/uzHEvydS4KiPeFLPFBbZUOInanvYvvFRIyzCD2TP03OERTYUXpYvxQKgZn/5d0W89c9s68OXaU/uicKik19Rbpe+83gnLMIPZM/TU4RFfFn0iuymbjtMP/5F/97ghrf+iS1yZH2e3ROFRTaS0ZdF33q6ExbhB7IH6inCoucZCR5HXw11eOuf8n/gFM/secKi05F87/fvhEX4geyBeoZghC+LXpT/PlqN3yrFj/c8YZEN5DmfKcIi/ED2RD1DWGRvT74sekHNP/21HeB8FYKf72nComf+i52ERQAQ+J3/yauzf2MKwIshLAIAADCERQAAAIawCAAAwBAWAQAAGMIiAAAAQ1gEAABgCIsAAAAMYREAAIAhLAIAADCERQAAAIawCAAAwBAWAQAAGMIiAAAAQ1gEAABgCIsAAAAMYREAAIAhLAIAADCERQAAAIaw6Ln9+k+ixKI/Vue3Uj/H7//859cfff40f7YV/faluTHTL1mcsa37zdcMwLr6pUTsybf4U2yy1/TNzwHeAmHRc3ujsMhG/dlvXn+99+/c379spb/mxLUxXJvpjSoPRVh0wWiT4cNsZb/xOcBbICx6bu8TFvm75LNfJt5Je66q781XvMtuzPRrFueED4CwaIkNjpf3J/j25wBvgbDouX1ZWPTn9+/f33qO26A3nzqIHP/U56ovshkfuNv6POg1rK6uzFQ1vu9NS1g00u+LeJO9glc9JL59XnguhEXP7cvConSWf+trJscmn3o8+dI0b6wiKjpZgnRVHz/oxky/ZHHOEBaNpIHoo4Sb7CW86iHx7fPCcyEsem5+Diix6NY740aVx8onnpKfI7+xypmq48kvFy0u0OcPujHTL1mcM4RFA8G+iDbZa/j2WX3Sc5CafL27hdsIi56bnwNKLPqZYZG/DT/7zeuv9/Kdq5fYrN8HhkV3ZnqjykMRFg1E+6LfZC8iTesVD4nUJmERdoRFz+2rwiKr880nwzbXL/gL+u08fYWn/Voxff6wGzP9ksUZIywaCPdFt8lew6seEs8wLzwTwqLnZqfum4RF38MWa/4r+/D19z4IiwbeaV+86iFBWIQaYdFzIyz6bIuLRVi0ISzqEBb9fIRFqBEWPbevCovstfeOJ4Mv1vx9b8X0+Q0RFg3YQPT51b3qIfG2hx8GCIueG2HRJyMsWkJYNGAD0edXR1iE90BY9Ny+Kiyyft7xZFh83/uaKvGGCItib7UvXvWQeNvDDwOERc/tq8KiG1Vew+L73osp8YYIi2JvtS9sri94SLzqvHAXYdFzIyz6ZIRFSwiLYoRFL+BV54W7CIseSf8Oe/A
Tk1Oun/R//CT9z69fgwcx/2Pu1shiWPTnt9exNgfvjD+/veem4y3bOynU76RBxY3l+1w1ak+kT+Vsy4rVUEvNVKu27ZKWpDceYKdcG69UCxrY1qdbII2r+FhO32mi1eTdjZneqOKGyz0wWEvPzD16k4O7UUy879MvKJE1k7NUFxatbPFO3tyD6Z+NdPrIjvdFM7qVm+W5SmReWIndhd2eLc/TinVLb312c62LjUdVTd9a8UT6VK5vWbG62SUfhRJrS+uOJVCG+ci88LoIix6mfsCah8jytv//KQsFP5esur494J7UxRF/cN329IbvjKrh8tEvK2flyTCsuLG6qaO9kF+3j2X2Ppz8hkma6TdT3duuBtjMKjkbYOdGWKQrFZ3E6WOq0Ux/U87zETO9uTj1KH7/EV3tDdfSG0oVyyb7DpuJd7cjzGwmZ6mmTNnqaIt3mtk0xScjnT6yyqn41SgsSqmqw3o0ljVZF3Npt5vVeR7D6/dH3YQrGzob1T79vZBft49l9r4mDz8khk9jlS+r88LrIix6kO4Bq58hy9oeM/v/oX2A21b0WOpqrH5001Nv/6ubbsoUR9/kZBhX3FjdraNjWtWJV83WrjTzr8bYTDW33c1O17PTAXbKtYlmXq+a0ZWK9zGafndLmkHdmOmNKt1qH4JZmm5F9o3gV/60TXZ3o2uh6cuyiu1lmslZqiqztMU7TaVmtLORTh9ZpSveQTO6lZvlOUpkzbok3ZzaG9BZnWfRdN9m30g12tNR5ekfC+k17WOzvnalWfFquM2SrCzt2dP4kXnhdREWPUT/5FWPl86AP+0R2z5nQStGl0N9m648TaIy+/CULu3XTivqVPlVlvJe7WM7222qXXPl9H3ySuxtd0tSLetsgB0v76OMblp1Brvo6PRxj6Y/ruFuzPTO4kRr44JZJsF65AZ9Ss0XD5vmbgQt1EX6nE0zOUuVZUYTGUzDhZX2uzAf6fSRHd9lr7OPbuVmdRlJsy7xnNpKteV5lsPThZKulPZWJqPS9ItSvjL28QsOifF92ihdWp0XXhdh0UPkh8Ye+PzyKB8hz7D/2u997y8Yv+z0dPvvjv8p3kF+OZSf3d/2WyP5d9A3xTvjKGOpYHheonvNzCpaxq/ybClOPLtoNVXPm/tlA1VWeeRFJ54vmS3rn3zCl6Ocz6zRTDQvcjGMWDM4Z3X76ee5+aDyHfnYTG9U0dr4ehcbYxNPVxU07HphVjos7oY3oSL9c6BE5uWU6MvUzcZbvJMr+fTzWPJ0FkaqYdh/bUn259Evu2bozlvfR7eydpaerEsxaktpPG2t0vI898klutBqppXNRmUZ33dI5Ea0rXW9aPLuvPC6CIsewh9MJTb+8BXPnqWT4+nzMkWl7sHLD+bonEpU4uhKz335mHcZ3fAGJ4PlnlS0EVd/8NTLKnFMxYrYkbTX9VrdmilxHGfFN+Jaj2LcnnE2s0Y3Ue9mXEGawbnB9L2Po0lPf2ymN6q0aW/i128/5gN2vaiw3a990Et3Qz0cOSpSTL1Nm2Zylmrr9DOpbmNHZYqxbDl7hYWR5m7LblRNqcRzlMi8sb3a8s2arItGVMzaCxytdJbnuY8x0YVWM63Mck9GZU1/3yHh6XYFygW4OS+8LsKix9jeINXzYk9Q8UhZelMW8hwlNp6umvEncXRObfxcqM5TnQtH58Hj7EeVEpv4ZJhW3E8l/7Xa9os1y86zLWqmZGquySqn5r0pcbRdzs3zLs2s0U3Um6yaiDSDc6Ppb51US+nllNjcnqkSS1V8puXErI3Z0lQrcSRWxujpuoP2DlmyHUMzOUsdZYJmfaj1IteisRx/DWtlpHkTVyviOUokzdBds8m8t7o/zzsmYMl6QF3j3QjVTFvtcGme23AsN+/iTjMtmY4qT3/LsrpffUhsw67G7NeVSG7OC6+LsOhztM+eJZtHqnl+/Tmsi+j8V6qn69XDq4b2dr1MXcTLVEdHlTbzij6DTXNW9Jkq2Wc
dffqwldhr1N17nhJLM2t0E/VumjZ6zeDcaPodK/SRmd6oYkXqgVXXO96mEq2FDn1pmwbaTEu1y9VMzlJ7GbVQ9+w1hnc5rrRbGqlmV4/Vl6HoN9wX3tReamHtVtbFW22mZEVG63BlnpvBau2aabn5qDT9bnp9pkr2WUefzXqvLG3HLhfTuDkvvC7Cos/RPlWWbJ8yy9sPAUu1T3NzDLTaU8M0j3nYhJU5zp/wZJhXzCdeeZAlnquE8Q7qBbCco2p44jUte2ZuZWVmjW6idYtDYVej6Xes9kdmeqNKlXDWyHCqzSBrCx02IxYf+L7glmo7aSZnqb2MN3u+xTvxWLKlkWoTN6Usrxh/M3TXjM67O127lXUZd9XWy67Mc7xau3DR56PyUfSj9FwljHdQD8RyjqpNdytL27E2ijo354XXRVj0Seypas6e9oGqnk9/mJuHM346D3Y1fHj3ljzVHRLWsD7HJ8NCRZ143UER5FpWPVLLOUo1Uw0Pt2pMSzNrdBOdnaES3ofR9DteUIkbM71TpUo4a6TdYTu7OprLaof9wlvu3mydkmZylqprtM12t7Fhl/uxyOCq5R5ja5KuXaRm6K4Z3cr9tdTpujQVMiujzy2rsTbPoOVOtOgLo/Lpd5OLpmxZ9YAt5yjVrPfK0na8khKbm/PC6yIs+iT2WB2PmiVPj73mgZc4N4se6Da3PQTEGt4f+6ihhYo68brTw3KnJ14ztyZ5cuJpnEsza3QTDbvpNYNzo+l3mpFenumdKpaoS1gj9U0ueBeDycw77FZW6pFb4vQ5aMrEzY46k/PLayNdGmqXNk0HK/fXUqedNXsoszLxTbsyz67vQNTewqi8SD9Gy627tay6tWawTXJlaTvtmG/OC6+LsOiT2GN1PGqWPD327HN3ODXHQCM8FZrHfPCAW/Y+vgsnQ1nRi/QnapAdZH30xFsYYKebaNhNL7wPo+l3mpFenumdKlajXga7PpyqtzmYzrzDsMTG8/NALNF20UzOUrlM3Gx3G2ujsbi1kS4NtUubZnRhf00ZS5125q0ocbDseCGuzHO4WIVo0RdG5UX6bRVkB1nNAjfJcIqTzdGN+ea88LoIiz6JPVbHE94kXfmM+7PZHR7hsbvzq+2JVj/mlhjYH/DoZLCsAZX0w6M/Jiz70088+zzQj8l1Ew276YX3YTT9TtPJ5ZneqWI1qgX36+Opeh/BJtzMOwxXaFOPyhJtB01VS+Uyfq0ddLM6rdFY3NpIl4Yat9U0tHJ/LXXamX0eiBfiyjyDYp1o0S1rQCV9+v0QLbuecpDVTKJJrixtp610c154XYRFj5X+7UF/dJPjCW+SrnzGB4fH6GhzdrG7Wj/mlhjYu/vQide+s8LZBlkfPfHs80A9lUM30dEUGuF9mNX9U/9LlHvByzO9U6UvEs6h4PWN/1XqwrxD+xy1bvn5zleJrBmYpaoKXbPdbazZ1eFMh1ctfx9cnZJ2DcM1bUa3cn8tddqZfR6IF8KvKVGy/PN5RqJFt6wBlQynv7Hsut8gq1ngJrmytNnoabw7L7wuwqIHKp86czzhTdKVz7g/4N0T1xwDDbvYXa0fc0sM7N39yBPPPg/UUzl0Ex1NoRHeh9O61c8NNnvByzP9QJWjTJvueQNS/xyueYf2ud3hSXWhSmTN5CxVVWgXvr+NNbsajcUMr9YX6pQ0Q+3Sphndys2y1Gln9nkgXgi7dHOekWjRLWtAJcPpbyy77jfIaha4Sa4srTl5Gu/OC6+LsOhR8o+dLx1PeJN05TPuj233xDXHQMMudlfrx9wSA/vRcPVkUMXvPfHs80A/JtdNdDSFRngfTur6pdpe8PJMb1XxZP4ZehpQuwVr9R4uvzKad2ifo+arC1UiayZnqapCu/D9bazZ1eFUh1frC3VK2n3Qpk0zupWbZanTzuzzQNO22KWb84xEi25ZAxpVOP2NZdf9BlnNAjfJlaXdaPNXjko354XXRVj0IP68No4nvEm68hn3J7d
+NjfNMdCwi93V+jGvEiPDk+G84veeeOXnVd1ER1NohPdhWNd7ae0FL8/0VpVoFO0O7DSBkXKX70bUfnWhSmTN5CxVVWgXvptqw65GYzHDq/WFOiXtPmjTphndys2y1Gln9nk444jVuDnPSLToQVZr9JRYdt1vkNUscJO8+xxsjko354XXRVj0EPuT9yv/m1OesotJk3TlM+4PePcghsfuzi52V+vH3BKzU294MpxX/N4Tzz7PZtboJjqaQiO8D6O6XniTNkO63BS8PNNbVbaMfSCytFjlbwXvFeYd2ueog+pClciayVmqqtAufD/Vml2NxmKGV+sLdUqaoXZp04xu5WZZ6rQz+zycU2RYo75Qp05Ei25Z55XD6W8su64aZDUL3CTXH53N4Gm8Oy+8LsKiR/AHq3q0LH08V03Slc+4P6v1s7lpjoGGXeyu1o+5JWYP+I888ezzbGaNbqKjKTTC+zCo62XLP5zTFLw801tVNl5s1410aA+N8vLOO7TP7QptvFBup0pkzeQsVVXomu2nWrGrwVjc6Go90qWhdmnTjG7lZlnqtDP73JY4ZTVuzjMSLbplnVcOp7+x7LpqkNUscJNcWFqvcfI03p0XXhdh0SP4o1c9RU1Ok3TlMx49m5vmGGjEV6NjQYmRqPeFit974i3NrNFNdDSFRthXXNe7qLKbgpdnequKV/qlCOfX8B/Oj+XAKCctcdZhM8JdPSpLnD0HG0vlMnGz3VRro7G4tZEuDTVuq2lo5WZZ6rSz8zlFPjLPSLToC6MKp7+x7LrfIKvpoEnOl9YTVZG20s154XURFj2AP2f1c2VZxxPeJF357Pmz2Z1O54+nX21PnPox98EpMRKdDAsVw1NpY9mffuItzazRTXQ0hUZ4H+K6llkXbQpenumtKlan21DrvMtqrc869OLtYrQ1LXH2HGwslcvEzXa3sTYai1sb6dJQu7RpRrdysyx12pm3osSSj8wzEi36wqjC6W8su+43yGoWuEnOl9Y+1wNsK92cF14XYdED2LPaHCz2WB15TdJVz7h97h7E5hho+MNbP9DtYx6XaYxPhtOK7QGTWfann3hLM2t0Ex1NoRHeh5Mh1plNwcszvVXFkk2RS8rm5h16iWJlxUeea1ri9DloysTNNlNtjcbi1ka6NNQubZrRzddupbPzOUU+Ms9ItOgLowqnv7Hsut8gq1ngJjldWv9cF2gr3ZwXXhdh0QPYs9o8QvZYHU94k3TVM94cVtIcAw1/oNtmm8fcUm2ZRnQyLFT81hNvbWaNbqKjKTTC+3AyRCWkKXh9pneqWEqfb7EG1NxCh5bo7oaX2cdRp6SZnKVyQ16/bbaZascuDydvF2cjjUu1+yDcF83olm9W207duCW6UZ+JayzNMxIuuuWd1g6nv7HsumaQ1Sxwk5wurX+2j7u20s154XURFj2APULnj1WTdNUz7g9r00p87B7sanu5eczjhmvhyTCvGJ5KG8v+7BNvbWaNbqKjKTTC+xDW9UwlpCl4faZ3qlhKn2+xBtTHQoc+xPZueMXRPFyTaal9q1iqrdHdxkY8lmxppNGO7ccfzqcZ3Y376+rMdnxzH5hnJFz0+ajC6W8s+7MPCS9gH3dtpZvzwusiLHqA4Anyx/d4wpukq55xfzibR9xLNI91oe0mUUPHeDytRCw8GeYVv/XE21jqfGaNbqKjKTSawbmwrmcq4dpbe2Omdxen3XIXeHPq426HmrpScTs+t6OMpfZ2/GrdrFotbmOj7bZhFycjjdev3Qdt2jTrsrJ2y+synFPEKtyaZ6QZsljm2ajC6W8su+43yGoWuElOl9YL2MdMC3BUujkv+1sJbS28BMKiB+ifPZ1oxxPeJF39jHsrVRk1c/Jw+vXyXNBTXzywQcNWrMyJiswreoFvOvE2KzOrNQ2Mp9AIi8V1LbM8Lr3PouCNmd6ucuFvoP2u/72PqssLHSrlLKuop7VQyni1Is9SRzOWrLrOC1ouciMay689vTL
SdhiuWpTNyrqslFlZF2+nGdFWsR3j4f48Q2HJ6ajC6W8sOxhbndUseJOcL2352WilpyswnVe0uHgNhEUP0D5Bnt4cWU3S1c+4ntej0PHT+JQRUJHjGd/7Lo6CrpCabk8G9WM/88zMKoan0say69kGWd66El0ybLs68TazAXbaBoZTaHgx1cvV47qWeeQed3HPujHTG1W8TCn9rFFdDKTyxQ3yHnMXKx2qStGGF6hue1vmGKUyuq3iNYq+9yonk9FdKAI9G0uusTLSLiPxikpsfCxqNrce3oiLaxesS78Ss91+f54hK6nx/JRDwj4fJYKn8e687GrXOV4BYdEj5CckPSL+r0/5P6N/POEp1Z094TO+PdD2pPkPjvE8uxyz61vTdgqUfR8n7v6E65uD/Z9NLMbjJVIr6dnfH/VJRU/vpXeWXc82yPLGlRisRtN2+zJZmFmta2A0hYZeJ6mbrYaaj+tqDPZG/uMnaVPwxkzvLI7Wptb92/iZZjj4J9SWOlQTuY38CqpuRi5jN+yPb/JmcpYqKll6b9VG0m/xVjMWzSd3sjLSPmfTDDU3VO8Lz9xHd2ntTtbl+m6/P8+Q105t/ZxDQqM5eRpvzisnj1uIl0FY9Aj59Nn98qzjCVdurXnG83NY+OMt63qk63rrxdqpHte+5U05nLqZ48p5xfBU2lh2Pdsgq5l+kwzb9mGWU5vOrNY1MJpCq+7Hyw/qdkP641l7wRszvVEll2oNFqfeAe4outZhfu1Wmv76QbWb3FJFrX5k0RZvRfM5JrAw0iCruw/xvmjW5e7NCh7+aNTdGEt35xmql/Socj6qcPoby54OpVnwJrmwtN3g2qfx5rzy/TrdhPiZCIseonmA0qOS/n88YZZdP/DdM941k8rbB78aq59pe9ztia0f1+AJrw+T+lQ+rp1W/OYTz0xnVukaGE2hY+Uyn8igbnNH0i9UreRe8MZM7yyOF+q12zDrVrIouNThpr8b1eWkGda2PN6OLgdbpVnQNAxrpGu60kcE1cSnI7W8dq2a+7CxjMyLN+uyuHbTdUn6UbcNt+7NM1YP8ej4dFTh9DeWXfcbZDUL3iQXlrbZPP3TuLkzr9xu0zleAWHRY5QPnz/Xx6ekuHBonvFN1Yw9cPbRrg2VD68dBvaUN4df+4i3l8ujwXvOTiqGp9LGsj//xJPZzEpdA6MpdMq3rOoP65YjsstWci94Y6bXq/h4iyL6nZnNaIWaQKJsfmWMpmnDfwOi5tWc/+6OfbRriaWireKsho1nNI9sn7BphzIbqeUuPLJFMxpQsy6razdbF1ePeroGN+c54FMxP+eQKMdmRa1WVenOvPzS4sLhRyEsepT8hx7yk5MezuOZSde6R6h5xk3+YwVlM+3J2PmtP2uRn+iUKp9gp1Jb29HLKl33q93FYcVvP/FkNrND18BoCoH2Dp/VbdcyJfeCN2Z6uYol6iN+o2VSKnCsZF11ZYzZqI2D/pzG/ifArbx/3Pg1JTI1Wi1o13PntyKjwVBOR+pXlMi8vBLS7YtmXdbX7nxdsvXdnl2f59CPPCROn0Z3fV6p8+Oe45UQFgEvyM7y7oz37OV3IAC8HcIi4PXYr5iDX8raL68JiwBghLAIeD0W/hAWAcBVhEXA6zkNi/gDEQAwQlgEvJ7Bt0L+p1H7P3IEAHCERcALsvin/VrI/642XxYBwBBhEfCC9LeKlXL+XVH/N74BABlhEfCKPATafxLLH/1MHH4LDQDOEBYBr0hfDXWIigDgBGER8Jr899Fq/LkiADhFWAS8qOZfw9qCIr4qAoBzhEXA6/qd/0mn6J97AgC0CIsAAAAMYREAAIAhLAIAADCERQAAAIawCAAAwBAWAQAAGMIiAAAAQ1gEAABgCIsAAAAMYREAAIAhLAIAADCERQAAAIawCAAAwBAWAQAAGMIiAAAAQ1gEAABgCIsAAAAMYREAAIAhLAIAADCERQAAAIawCAAAwBAWAQAAGMIiAAAAQ1gEAAB
gCIsAAAAMYREAAIAhLAIAADCERQAAAIawCAAAwBAWAQAAGMIiAAAAQ1gEAABgCIsAAAAMYREAAIAhLAIAADCERQAAAIawCAAAwBAWAQAAGMIiAAAAQ1gEAABgCIsAAAAMYREAAIAhLAIAADCERQAAAIawCAAAwBAWAQAAGMIiAAAAQ1gEAABgCIsAAAAMYREAAIAhLAIAADCERQAAAIawCAAAwBAWAQAAGMIiAAAAQ1gEAABgCIsAAAAMYREAAIAhLAIAADCERQAAAIawCAAAwBAWAQAAGMIiAAAAQ1gEAABgCIsAAAAMYREAAIAhLAIAADCERQAAAIawCAAAwBAWAQAAGMIiAAAAQ1gEAABgCIsAAAAMYREAAIAhLAIAADCERQAAAIawCAAAwBAWAQAAGMIiAAAAQ1gEAABgCIsAAAAMYREAAIAhLAIAADCERQAAAIawCAAAwBAWAQAAGMIiAAAAQ1gEAABgCIsAAAAMYREAAIAhLAIAADCERQAAAIawCAAAwBAWAQAAGMIiAAAAQ1gEAABgCIsAAAAMYREAAIAhLAIAADCERQAAAIawCAAAwBAWAQAAGMIiAAAAQ1gEAABgCIsAAAAMYREAAIAhLAIAADCERQAAAIawCAAAwBAWAQAAGMIiAAAAQ1gEAABgCIsAAAAMYREAAIAhLAIAADCERQAAAIawCAAAwBAWAQAAGMIiAAAAQ1gEAABgCIsAAAAMYREAAIAhLAIAADCERQAAAIawCAAAwBAWAQAAGMIiAAAAQ1g0878BAEBJb8gXRFg0oy0AAACc3pAviLBoRlsAAAA4vSFfEGHRjLYAAABwekO+IMKiGW0BAADg9IZ8QYRFM9oCAADA6Q35ggiLZrQFAACA0xvyBREWzWgLAAAApzfkCyIsmtEWAAAATm/IF0RYNKMtAAAAnN6QL4iwaEZbAAAAOL0hXxBh0Yy2AAAAcHpDviDCohltAQAA4PSGfEGERTPaAgAAwOkN+YIIi2a0BQAAgNMb8gURFs1oCwAAAKc35AsiLJrRFgAAAE5vyBdEWDSjLQAAAJzekC+IsGhGWwAAADi9IV8QYdGMtgAAAHB6Q74gwqIZbQEAAOD0hnxBhEUz2gIAAMDpDfmCCItmtAUAAIDTG/IFERbNaAsAAACnN+QLIiya0RYAAABOb8gXRFg0oy0AAACc3pAviLBoRlsAAAA4vSFfEGHRjLYAAABwekO+IMKiGW0BAADg9IZ8QYRFM9oCAADA6Q35ggiLZrQFAACA0xvyBREWzWgLAAAApzfkCyIsmtEWAAAATm/IF0RYNKMtAAAAnN6QL4iwaEZbAAAAOL0hXxBh0Yy2AAAAcHpDviDCohltAQAA4PSGfEGERTPaAgAAwOkN+YIIi2a0BQAAgNMb8gURFs1oCwAAAKc35AsiLJrRFgAAAE5vyBdEWDSjLQAAAJzekC+IsGhGWwAAADi9IV8QYdGMtgAAAHB6Q74gwqIZbQEAAOD0hnxBhEUz2gIAAMDpDfmCCItmtAUAAIDTG/IFERbNaAsAAACnN+QLIiya0RYAAABOb8gXRFg0oy2AT/PPtsr//qsEfqq//6b7+Fepd5c2tVze2v/8829ay1T1n39Y0FW+ZKZetEfsTHZ3z5b6JREWzWgL3KYDLvlHWVcVR+w61X16fzXe62+Pn0MzNK96su77nFdHcjss+qc4MOQHh0bjo+vfh4d8ajipGn7EzmR3B7QkL4iwaEZb4K7yXCAs6u1R0SvHRZqgedFztXiZ8+bY3AyLBo/6j/2aYnZ0/fvA0EhNJmWjj9iZ7O6IVuQFERbNaAvcVLz1CYsCD1mfp6f5mdc8Vov3xs/5ovIz3QqLzh70n7lvFo6uh4V8ai8pmnzEzmR3h7QgL4iwaEZb4KbqgSIs6tRTU+bL0fTMa4ZFmpx72fD2gjthUXVYdH7kl6lrR9djngk1lhQNKsfd3Jmq7djdmRbkBREWzWgL3FMfdIRFHQ1WXjNkqGf
5knOsd+jr/m7ouhth0XlUtPmBW2fx6HrIjlFbybFSj9iZ7O6YFuQFERbNaAvc0hwKhEUdDVa+4xdi/zilPoemZ94gLHrZb/0uKFZk8T2q0md+3t5ZProeMDW1lIzCons7k90d03q8IMKiGW2BO6o/OLMhLOposPINYZF6/tzlUh+GsOgtFCuyFhZNvyva/LwvKtaPro8/F2ooISz6ClqPF0RYNKMtcEd70hEWdTRY+fqQYV9apT+H+jBvEBbx2wzViiwtx0pU9ANfyBeOrg8/GGonGYVF93YmuzumBXlBhEUz2gI3dCcdYVGnXiNlfqG9f6U/h/owLxkW1d+LfsOXfk/nYljUfrEc+oEv5CtHl6rcpmaS4yF7xM5kd8e0IC+IsGhGW+C6/qQjLOpptOYbIgbCoscotyi/nN5cDIvqXx78qz/r9vdv9ehb3s8y2hfbzLpT7aP7Rs0kxUP2iJ35iDZekFbkBREWzWgLXBb8+u+Rv86omlfeT1TM4zsOHHVNWPRRxXv9RWd4TfEmXdjXdYhQnRPHpZ/4NcVpQFHP+sMbR60kZUuP2Jns7ohW5AURFs1oC1xW//rPEBYF9ol8x6l/rKIyPof6MK96rOb9zr8bZa6FReVhMYwelPxRTsOi7pBU7k1qJKn24CN2Jrs7oDV5QYRFM9oCVzUPvCEsCtm/AvU9/+7TcWor43OoD/Oy56r9hs+/P/Erjc9wLSxSySQqbYfJj9w4k7CoOSY/NkU1ktQNPWJnsrt7ttQvibBoRlvgovL3hvR/wqLnc9wbZXwO9WFeNixC6VJYVAYP4f74+88P/ZpiFhbVcdHHDkg1kvCQfQUt9iX/9SOiAMKiGW2Bi1Q5OU4GwqJnQ1iEz3EpLCpig3nhH2UaFlXn2MeeQrWR8JB9BS32BSnc0MenRlg0oy1wTXHO/XM8+IRFz0brt1HG51AfhhP7LVwKi1QwebHfppmGRVWJjz0caiPhIfsKWuxlHmwo8dQIi2a0BS6pfvV3PPeERc9G67dRxudQH4YT+y0QFpl5WFQdZB+avdpIeMi+ghZ7UQ41lHxqhEUz2gJXFE/6dhQQFj2t4tBWzudQH4YT+y0QFpl5WFQ9HIRFP4kWe8kRaCjjqREWzWgLXKGayfZ8EhY9LcIifJIrYVH5LL9fWFT+oet5BHlCbSQ8ZF9Bi72gDDOU9dQIi2a0BS4o/2DRlnyisOjvX/vL8Jt/r/xdU/+BtCc11GzQ6N9/9ksfn/0//3pb/0z/Nn/udjJRlUmUM3Zz7YzXcxdP7K3X3O3CzzBYnHdDMztt/7jFH76P601ts7kw+aF7q3Jlv+U5HR18YVh0d3rLru3BykJYNC9STXA8AC9irm6W6uG+uYaXDohtRb3wyuZ6VjaBBXWQocynRlg0oy2wrj0Qj/Qjz6wbYVF5/iTxw6uLxw8uK6p5lhL7GfZXp4Gp2qwGeVQolYNSVrJn72dGPnPc2d9XrsaTRBM9jtpeUHxt7UZUyVw5BLuZnB6hS/M+ZpIvFtV0g5Q67lf9b1CE+1iXkvKysi41Jd39uff+uLgqN/ZbOycrW2RF276mgkZZq6bTK+7ucA7Fc9otzrU92CpXZrAOkyLN4m5Gz54uJ+UQlZXEFRd6UH4y6LxtYzTIpN/ZuvDDaPQTdYhBWLRRu6LMn0VbYFkZCljG8cA8cvdfDou6Zz8JjiFdydeqU9EnoIRKtMfm0WQTFCV9f+WwlJUc2X7CdYfz6JDvz7ikKRyWOXT3aXXtRlTDrL9U+jkng/pL807ala3qWU67BYLp9ztZF5LyorIuNWXC6fzff0oLK3l7VZb3W3ibtskWHc83igqaC9tqaXplkcEUuiPrcGkPBsreBzM7LRKcIZs45tDFpBygspKo2mAN66LKTcKu4zZ0sRGWHTwFz01jP9UGRYRFG7UryvxZtAWWqVriD+fxGDxy718Mi+LjZdONSfmblKrr+YSU2HQFknwahAdqd1SUh4SykmbRVg/n+Ix
LqnkOS7lmTdbXbkTlzfIrJZzzJno/L847OVtZ3R2lNikVTr+7j8pPyi6VtUmptaY28du4EVWsPGhVsvjOxXMqu56Os9ofC8VlcXrKSwZNF+00C3NlD4bKIc57b4sMn7xwALqUlFeVlXS3ve69Ug1FeUnQxpUDYrizV1f0iWjkJ/qgiLBoo3ZFmT+LtsCqYtfrwToevOApua16FJU3dvKWaQ8rZW/Sg6qP4o+uEpu/4YngLQ66bLsrjyVlJUf2VmF07HQnyfCM25Qdn5Xb1PfpwtqNqLhZPP7Gb4QPzDupVrapqbJKbbaeFu+jspNy+ZS1WW/qfPKHvl7t9qos77fNaHcU+bNxdndhbYusTq8cobIauphUXV/ag7FykIN1KMdXFzmbYXCY6kJSjk5ZSV9ndPuSohHlJJfa6KZ8sqTzbfJsNPChKCgiLNqoXVHmz6ItsKh4kvM2P7L6J+q+6vlS3sj5W6Z5HJW72YbbPPF+UCixKX5UZSkVG54VTXfl0aespMg+GX59Nk++Yig6Pj1v6/t0ae1GVNrUYx45n8rdeSflyrazU1GlNv8s30flJuXyKWuz3tRkzXfni39/VVb3W3LeiTsfZ9Lux4XfH7y32cMTqJyvssx5D2v7uOx8sA5lN1WRydp2zSk/KQenrKSd/mSjHcWVkVxroxnkaeHB+jwvjXsgDooIizZqV5T5s2gLrCm3fX40j5MhPJRuqh4w5Q1MHv7mcVTm5p/uuG5LDA6urcG2ZqFehbKgspIi+6SpauSzaRYdnzSZFCO8tnYjKmyWXieTN0LVyIV5J8Xcu4NNJZXa5L+BFKknosyk7E9Zm/WmphOS07W/vyqr+y2Z3Scz3yP9YGeB0ZXpFYMMh1LOXVnJlT04VC7mYB101Vzpv2tP2Uk5NmUl9T2fL+Leg9LJxTbqQSpzoGn66WnYoVFQRFi0Ubs/2f9Pe2BJ8Sjve/w4GR657avHUXkDKjRWjUt5m3+7R74tMTL4Fkm8FSnPTWUlJ++m0uD4G1DB6VlWrIdyxpbuqcqalbfJfPYqmCjnhAq6ou3uGwdNRqmJ4YFfLomyJuqmpu9DaV+LFZU5oYLuxn7bzN6J7nScLpryaWSkMidUcDN4xHa6lCgnubQHh8pW4nWo+im2zsIuaBpUbrIWFil3bG9G6eRqG2X5yZxU6sfQsAPjoOh//a//9wcgLJq5EhYV2/54ZI/nvnmiPmQ9LKqfxX/+2rNe/x3R+BTpf33flhg5+2agOc0GZ/b8UHYqnpz2aaJ7Ejnu08W1G1FRs1C+ftvqR7XUf8C2WMMr894UzXSroIkrNVNtZ+UlZb6yZsoq9Tztyt/oTxef/xGc+6tySsXdWlRUdxQbtDSOjC5NTzlJdc9cuSGUtbm2B4fKCnH5aibK28wnuKlno8ykXDdlJXX5aRfHgJWRnLUxOSAm+yW4N89N4+6cBUU/EWFR70JYVJ4BytocuY/c99UjprxQdZIVx0X5VUF5YCkromJK7f61w6D7YRzGjtP2pWatSLxm9aiNXpDNhWJC1bjCA6pdp0Hfh6qzhbUbUVFTjnhAJU3ZfDmcohnlmPm8+5U9qFGlCv5WbFqtZq68pNzmyipMm1JWUmZXFU5/rJ9TSXNnVeb7rX2vxr006zTQ1tmN5qnLZja9Ij8Yi64kRV/KMWWlwR4cKYuH61CdY0eJJoLQT2PYf2BiVo1AeUmZr6ykOoDbIEUrXXRhaaOMpGqj2h1Fp/EBUQ2+C/jD5XlqGnjj1YIiwqLIelhUPmfFI3I8OtUT9UGrYVFVrjpFyqe0GJlyIiqlVLY32v8p0KO/6lK5DuXBoqykOm82xZlRXYqbGhxQ7Q0Y9L27vHYjKmnqdiLliOuzshyQsjbX5t2ubElFlNodzda3WJlGWUnZm7J286aK8dWTL8rP1/Cjq7Ky36rtUX15VU1t6X1Xt1ULZ3tleuUMlHWI91TZ0MoeHCm7DtehWqpjyMpw5dLW26ZqUnlJuWTKSsq
712zNsoYGXWR5hhnugLKFal57DaWTYEp1/Z/Aht14vaCIsCiyHhYVT0L5uB4nQ/VUflD1RCovUp5L7YN3DLgYr3IO/mXQ/z5+fJ7y3ehgSsr+ytMsXp1qGmX2php5dU15Rn000/RMUw122Pfu8tqNqKRpG+qpYNK2Xdz1op1L825WdpN/JR4d35t6DMo05X5WVjLI3qw0VWyU4YSWHqSPrEpVabTfipG23VRV2psYqp7nVjTfK9MrRtoNphhq0Y1ykrZGvAcHyoWI1qFaw2Nxq/VreqmqDPZaWUVZSVn69BalPgZNj9po1+IYZm68vMXKkvSV0Xwtn47mUnjFoIiwKLIcFo3OnuPZKZ+oj1oMi8pi/YOnCxtlbJSxO6vVnifVUdPUVGZSViqrKCupWmoPLWWbqpP2NDODHjbjK+b62o2ooOlbaoz2kTmGXM7zyryrld30fVTDbcdbVi+rKispR6Ist9SUMjbtyIryyjl3e1XajpVtjjlUT2DbT3kxWOFA1Vwn2DUXpjeYtFF2cnRyYw/Gyp6DdSj62RwFlGG6TspKZZvKSsrlUlZSNqUsE4ys/s1LlUuKNi4eEMVazBfuJ9Bkdq8ZFBEWRVbDouEzcuUUWVedosoLFI/i6al0DE0ZElQqS7SXle2UJ+VhpqykPDeVlZwepyfHUX86VYOq78Cg7+z62o2ooIlGWFG5pC9bTF05bn3e5aQ3k4r99eI+lquirKRcD2WZtaaUsWnXdTT3sXursrjfTquMlulE9UR3okbWp6ecpFnVote4fN/JlftwukjNVjxaO1/a6iiZD1pZyeCGz6ehckncxukoVaUoHt25n0eTkVcNigiLIothUXmoFU/O5nga6vyPqQ5R5QVUIDk9RI/HWhmyfvKawYGVlOdQ0WqZraxkkC26kCysaTGo0Z0Jl1BXksW1G1FBMzsRi0FFDR+XZw0N5l1OejB2XUv65R0smnKSso6yksWmlN50E1T+Rhl3zFdFWQVdSI46yjD9vSganO8PV/+xmdZiK/H0itymnWKcxyQmgz8uT/dgua5VU92fnl5f2lGjyknKSspKihVRjpnOYt5G1IQu7WMMl/on02TM6wZFhEWRxbBo5eApnqgPWwuLylLKqhyjVka91+PHV9eSdkblgdXWVXZSXCprKCsZZIsuJAtrWrRV35rzTm6s3YjKmdmJWGykaG7HqKYzj+ddTrrdqqKLSd/JYNGUk5R1lJUsNqX0plsp5W+Uccd8VZRV0IVkn0W5PYJbEXczUT3UrbVmrk5PmYlyNp+yB2eO4ZbLEM66GF8xamUk5dZRVnIMt1pp5Z1QwSRuQ1mV9oAo1mK6bj+CJuNeOC4iLOqthUXFg9o+yMfT8MiHYe25nj2Jx7D3g0RpE5/Eupi0jY6P36raR8Oik9UOFEtVlz7v5MbajaicmRVWsUQ5lWMy05nH8y4nPRiMLib9zMuNV1RXTlLWUVay2JTSm25wyt8o4475qiirEO23skawjMXl6Y0qlEvSWmonnl45g+o+FMWLfOUkyqkclaZDqnbbqaKpydLWBY5hKyMpaykrOcpOu6ipZBK2Ua1p1h4QF/t8fppL9rKBEWFRbyksKk+zdscfT0P48NxUnZ/K6+l6Ej6IwdiUNvHDq4tJO6PyyVfWTtlJUW1Q46yhLwqLdCFZXbsRlTOTA7EYUzw1XdwoYyiedznpweLpatJPbbDVlZOUdZSVLDal9KatcBRfuOlD81VRViHab0obZZWm93Gk/UlJhek+28TTGw6nmNlxM6dj18WNMobGk2kMBht3UG6cY1GUkZQPmbKSo+y0i5pKJtP+dkcfXqccdVzjh9FUDi8aGBEW9ZbCIk036c6u9ul4jOoZU15P15OlJ3ejtFFWQxeTdkZnp42yk6LaoMZZQx8Ii+q2zjvRhWR17UZUzkzOw2JMH30lxfMuJz0YuK4mfYmy1WIuyknKOspKFpsq7q1ysmPkCzd9aL4qyipMwqJoONP7eGI
YGU32TjLa7MpKlJMUpYvbMx27Lm6UMTSaSavqR3lGWQ1dTI6aykjKhVJWcsxRGUZZZ1QyCdsIb8wxd69T3ppt3As388lpJqWXDIwIi3orYdHpa7p9Oh6jesaU16kfxFP7uJVOBuPV1aQtUZ6CytopOymqDWqcNfQ1YdGdtRtROTM5DYupzczO1Xje5ytrdDXpt0DZajEC5SRlHWUli00V42smqNxN39S6O6sS7LeymWg4RYMLW7QTR0YL846nV02haCVe7AfuwXAavXpiykwGMy5HqKyqWjksZSVHa8pIFhY1LF6u9IR2gFK7+ocA/DyaRu0FAyPCot5CWBQ+pLvjZFh5AFdVD6XyOtef3JUDQ1eTtkR5Ciprp+ykqDaocdZQ+Jo6MXpTnHdyZ+1GVM5MjsIHvpLieZ+vrNHVpN8CZavFCJSTlHWUlSw2Nbxbxcoo55Y7qxLst7KZfmZVgwtbNBJFRrp0YrR88XiUkygneeAeDObQaxZotrSbcoTKqiZTDktZyd7aQhcVFU3CNiY0waDGj46MNIfWywVGDw+LXoC2wIlytwe7/DgZVh7AVdUjprzO0qHk9qNJ6WQwXl1N2hJlh8raKTspqg1qnDUUvqY6f//+88/oH7uV007urN2IypnJOahSK+KWZvM+X1mjq0m/BQa7XTlJWUdZyWpT5QCLDlbu+djHViXoezDKXXH9xnCl+yv78S2fT29TXFVOdQMGN21mNKCsXKaBbnnKbdFvGhPeLqWTcljKSvbWBpt4SEWTvY2FqWV5it0dSn5uZKQJ9E4Do//nxyEs6mkLnFDBJDoBj8dn8IzfUj7YwzHeeHLDE6Cmq0lbIjyuRNlJUW1Q46yh+Sty/MdWVcCddjJsoheOoaRyZnIIqtSKvqWVeZ+vrNHVpN8CgzeKcpKyjrKSO03t//JM9UKxUus+virBfisrRLe0uD7dHieqxzxaw7XpbYpSeytFXjkHZa2Ipl4ajk38H0ytLcQsZbN7EaWTwWz2zha6qKhoEi3eTN4Bzd3crQzhCWn0kZPASCV+JMKiTFtgLPwlwKmPnJNZ9YQpr3Phyd2fdqWT4MhKdDVpS5QdKmun7KSoNqhx1tAkLDqdtMq4005Om6kN1umgcmZyBKrUCtXIFud9vrJGV5N+aoM3inKSso6ykuWmRi+P7NJL5CGr8n1hUfuFkXJ3i9NLlJkop8iqbo7yVqjG0HB8//777/GP8NVmS7sJiyidlNWUlewdLnRRUdEkbGNirzPc2iuDeDoae2wYGOn6j0RYlGkLjF14PISwSFmbQY2zhs7CovEvnZ2KudNOJg2VBut0UDkzOQBVaoVqmPV5n6+s0dWkn9rnh0WTuOjCG+RRq/KNYVGzGMp0lzZ7OQndiqLpagrKW6EaQ+UIV9dhtrSbsIjSSVlNWcm+BRe6qKhoErYxcWz94U8yf8Q74atp6CODwEhXfyTCokxbYOzC4yGPeASq01J5nfWhHQ+uMpIjs6KrSVui7FBZO2UnRbVBjbOGTsKi6YRVzp12Mm1qN1imggqaySmsUnPVX/C9MO/zlTW6mvRzG8QyyknKOspKlptqNndj5S0mD1uV7wyLxk/6hekl/YiKHM8Q5c3N/5J5OcbVdSjrDDoIiyidlNWUlexbcKGLioomYRvnqp0/qvbBXfIdNPKxMDDStR+JsCjTFhg7O8Njj3gChodlqXwElTWl4kn/IjO6mrQlzjpUdlJUG9Q4H/kgLFq4EyrpTju5s3YjasZMTmGVSgbL37s074V56WrSj6HsrJiLcpKyjrKS5aY2wxld+GkvD1yVbw2Lqt2jrM2l6RnlJm1GfW+UmSzvwYFymVbXYba0m7CI0klZTVnJPpuFLioqmoRtKGtNWbHwiLfC19LAzwSBka78SIRFmbbACRVc94gHoDoVldcpCylrSsWTwZmoq0lb4uyoUHZSVBvUOD9z4rBo4UVRt3XayZ21G1EzZnIKF1MbLH/n2rzPV9boatKPoey
tmItykrKOspLlpoyyG6tLsnnkqnxvWBSO7dr0TNGOrWORrmdwYw+OlGNfXYdyaoP+y2b3sSudlPNRVrK3ttBFRUWTsA1lrYp/9/Oja/3lNO5zXWCk/B+JsCjTFjihguu+LCwqh6acKRVPBs+priZtifJxV9ZO2UlRbVDjrKFBWNQdNf/889f+HtPw/DrvRBcS5dymZkz0Di0UY1o8Ji/O+3zSRleTfgxlq8VclJOUdZSVLDe1Cf8IRvS3loYeuirPFxZdnJ5T9saGpM+bZmGLxq8seaQc5+o6lFMY9F9uD2VVe628I8pK9tYWuqioaHIUV0ainAuiyCjaR89Mw55pAiPl/kiERZm2wIlgg5/76DmZlA/2eIy6nqwecCqeDKroatKWKJdCWTtlJ0W1QY2zhuKwqFqQ6oeBDN8U553oQrK6diNqxkzOvvMxBa7Oe6EDXU36mZetFp0pJynrKCtZbqq4kH8Yz/ivLQ08dlUmYVE0tOL6Rx/3YGxXp+eKaWypomS1+EtbZFXZ1PI6qHwyqKOrRllVZjkhZSXHjVJGsjIsFU3CNq5tTukio1utfCMNe64KjJT3IxEWZdoCJ+ozasFHz8mk6lR5veIgXH3mVDwZVNHVpC1xdqAqOymqDWqcn8xRWKS0qX9C2vBNsdzJR88rNWOaV1Dr7LUWUlmzMu/zSRtdTfqZl60W3SknKesoK1lu6sifrNUJNWA+virBfiubibZH0eBHH/egKSXN4mbfFA1tQy5Sup6dtXFRua7L6xA93jVdTY4SykjKJVFWctwoZSQrw1LR5GjjAQdEudIb5f4UGvWKIjBSzo9EWJRpC5xpNvfM8vFwpupTeb35AdNR8WTwsOtq0pYoT0Fl7ZSdFNUGNc4aCqdVVmjepsNT/nInd6kZM3nVl4OdFDWX530+aaOrSb8FBgNUTlLWUVay3NReqa+x6sGrEm0FpZNoexQNfnT39L1fnp4of7M1dDTbrvPovtxQjnR5HYoJx1u0HOAxeGUk0W7aHGWnXdRUMgnbuH+LywVaGckz0ajX7IGR0j8SYVGmLXDfsfHvH/O98mQYj/Hk/BxR6WQwXl1N2hJlf8raKTspqg1qnDUUHkhKJu1Eh2+K805urN2IWjGzplQsWTlsVTRZm/f5pI2uJv0WGLwzlZOUdZSVLDe13937q64GkkesSvQCVNooq1Q0uPTOPJmrmkm0hkola9OTYlBlI13fyk/uv/BNua7LTZWV+l2zKQscg1dGUs5IWcnR2LSLmkomcRsnt29i8BD8BBr1KgVGSv1IhEWZtsB9x9Oz8PwtK5+mkzGqQLJ4Kql0MhivriZtifKkUNZO2UlRbVDjrKHoNVUsRzfP4ZvivJMbazeiVszs6CsHNT8mr897MulEV5N+C5StFuNTTlLWUVZyvanbb4lHr0oUFhV5wdTKBhc2TxpU0IjpN8T16WW6sCkK9h1f24NnypYW1kFUIQkr6ZpR1kYZSTloZSXFTJWTLIxLJZPbbYwUG+lja/3lNOp1Fhjp849EWJRpC9x3nAyjg++O8vg7GWN5eK89dCqcDMarq0lbojwFlbVTdlJUG9Q4a2gSFnWjHr4pzju5sXYjasTMWipHOz9sr897MulEV5N+C5StFnNRTlLWUVay2lQxwLur/uhV6fdbXSO4T1GVIY0pnm65DT3n+vSyo61/jvH33ZaNzEd/ZrJKA5MnrxxesQDKScpKykqKwmUX/c5sqWAyaOPuVn3Ifv8mGvUVW2CkTz8SYVGmLXDfse2Dx8/+JvKFn1G3K4+GkzEuHHBN5yqcDI4LXU3aEuUpqKydspOi2qDGWUPVeaRJnZ0twzfFeSc31m5EjZhplYXD9si+Pu/JpBNdTfotULZadKmcpKyjrGS1qXKA/1d/7fyiR69Kv982yjDKOizsnUJuv1+iemQf2ezmqFn8e/u6Vrq0B88Eo18wqaUrphiGcpJycMpKBudOtAL1tlO5pGhj4SYvrFIxEuX8FBr
1Ncc/R/8DERZl2gL3Hdu+O/X2H8+yfmJk5SN5NsbygIt6Se1U+V7URKf0RleTtkR52Chrp+xkcDwpKzlrKHpNFeW7UZdLoCx3/gq5sXYj3oSbHpTVnY1Kp2Hl/OvzPl9Zo6tJvwXK4RWjU05S1lFWstpUOcCS/eOia0HSo1elqHPc77PdUd3D6RYpOu+mV61Gn7c4vZ2ubIPS/6P7cnEPnimHv/SoONUwp1Ms56icpByaspKyKWWZbmTpYC4zVS4p2yhHEs0uLeOVu6+cn0KjfiOERZm2wH3Htu8ecOVvLhwZrjq4lBepjtX+8ff8hVOkoKtJW6LsTVk7ZSdFtUGNs4aq40hTKsq3kxydovUaRmf85bUb8ZJuXl4FXVfcp5NHc33e5ytrdDXpt8Bg0ZSTlHWUlaw2Va16oP4L6aFHr0q/3zbVOOvJVZ30O6elcq6aXf1jLe/f9F2wuOFy6prrSngv03ltyv5WysvJ2tZTLIemrGSQXbYUrqz4xiwyLe3KNqphdtMLDogtsh8sZnJhgZ6Chv1GCIsybYH7jmenfTGUT1V7baZ8pZyOsXr8626OQ7d4VpWTDMakq8nZjJS1U3ZSVBvUOGuompOOkmH5ap3OrimvOraurt2ISs6odD3k+ndY93nenvf5yhpdTfotUDY83zbKSlabakYe6ZuqPXpV+v2WKMcV+e0EZm+7epdts9NXYn//aa7kNbo+vYMuHeK1rFs634NnyqEuFN9VMy8rxoGiUV5SjldZSTVX5blyhnsXR54ykqqN+gZVl4IDwhe1WfBiheJb8bw07DdCWJRpC9x37Ptm29dHjzJXVZWVF2qOyvxPKPytfsZqcbwoJxk8prqatCXKNpW1U3ZSVBvUOGsofE0pmRST6X6QrPJFmcYOxlS8PCD90m62diMqOaPSwXvSsuvXpIapVLI27/OVNbqa9FugXJNiqZSTlHWUldxpaqgoHlGp5BGrEuy3TX2b9D1A18d0g7SbbOhoRxnJ2vQO7d4areSVPXiiHNJkHSrNouyL2wyrGoDykjJfWUm1BZvVyl2UcdfejNJJ1ca1AyK3XDZRFlTWj6FhvxHCokxb4L5j4zcvhvqZqp7wuaqy8mLN4x8Lj9xmvJmuJm2Js4dc2UlRbVDj/LQoTq488Oq8zIdoP3Uru1NmQxeTi2s3ooIzKr1Y3jfM5Xmfr6zR1aTfAuXGK/ascpKyjrKS5abad02oKB948KoE+y1R1sxkf1RjPVE0c2uzu66U8ju6fO78LmzK7ubPSWFpVeotpcykHJeykrr8vIt9xEondRv9ogfUTFFWXwjWYd6l9XkGGvcbISzKtAXuO56G5sVQP1L9W+PUeli0dMIcj6QyksGQdDVpS5RTUtZO2UlRbVDjrKFqRnng9WIOqbTElcpJXVu7ERWcUelLgcHleZ+vrNHVpN8Cnx8WPSAuevCqBPstWdkcm9n+WGymmPDl6RV0Levvinz8JiTlSOfPSWlhVZoGlZuU41JW0kxWuSdyO0omTRsXDgilhrzUD6JxvxHCokxb4L7jZGieqPpwm54wterUUt7I/NEtDhjlJIMjU1eTtsTZ20XZSVFtUOOsoWpC+8iVjhTFVVgGR395Ky6t3YhKzqj0ZuGdFJzYrXDe5ytrdDXpt0A5tGKhlJOUdZSVLDe1NP3JsqtQ5MaqRPstOdkcxe/FTDdI2flQtUDKi4TTKzSdVc1WLuzBsbK3hQeldLK4rm1P2Uk5MGUlzRacznBvRumk3cbTYeZxzgrOF/PZaOBvhLAo0xa47zgZmieqPqAuPhXVI628odkTWR4wykr6F5nR1aQtUU5JWTtlJ0W1QY2zhqr57EOvV7P0tyivwllcqTpvr6zdiIrOqHSyfmJfnff5yhpdTfotUI6s2LPKSco6ykqWm/rf7Z8giZ0+MI9dlWI81e0e36W/RYvzHTK92+1sr06vpIvubGzre3CsHOjKk1Ka7IKuOeUn5ciUlbRb8HyGxR/DVk7
SbePZZs3jvDqd56eRvxHCokxb4L7jZGifKGU75a2qnmjljY2P0aQal/KS/kVmdDVpS5T9KGun7KSoNqhx1tDgNTU6drbD7WhNZXdxpWpaF9ZuRGVnVNrUf+OmVf7FmYvzPl9Zo6tJP71y4xWjUE5S1lFWstpU+T2L/h86X/mHrkrRWP32Gr1Xq15WXnhl/4GuiYvTK1VVizvYW9+DQ+W8Lr/4T7vvb78uJOXQlJV0dc7ionK4ykr6fs/v3VH+rLPri/MENPQ3QliUaQvcdzw17RNVPvb903auesaUd+LkhGuON+UmgzHpatKWKE8IZe2UnRTVBjXOGhq9psIp+uyU6NsKj6rmiFpfuxGVnlFpOTlt62W/Nu/zlTW6mvRboFyxYvbKSco6ykoWmzoy69vwt/1DxZMXySNXJd5vSbg3ml4mA3XB32A79Et3a7O7qiPljZwMKhhToGxgaR0q4ycvevB0KSmvKitZXcek6kB5SdDG6gFxUi6YzvPT2N8IYVGmLXDfcTJ0T5TyN5ePjIth0TaM+JnszhflJ4OTT1eTtkR5Ciprp+ykqDaocdbQ8DUVzFCz2694shAcVf20V9duROVnVDobvSm78V2a9/nKGl1N+rX45LBoFBW5akWUN/LAVSma6kbVd9P2svhsDwOjfuGSS9Or6GoSN11Y3oMDZfXLZ9xm8OQVG++ga0l5XVlJNOo4VmlKKjcJZ756QAyW887KPAGN/o0QFmXaAvcdT0L3RO2P5PXn4nJYFD67/pd7K7qSDM4+XU3aEuVTr6ydspOi2qDGWUPj11R7xB2zyxeULNUnlX7ySGtt7UZUY0alD8EpGo7vyrzPV9boatL39slhkZKjR6Ksoqyhx61K0VA/rKabY5o5f/3hXr3f5tZmT4pOihs4cmlMnbLy9UMu6Z+80YOny0lZQllJPO6uh/6XO7qQDOa+ekD0y3lhNZ+MJvBGCIsybYFPYk/ThRfsB/39J/8TkenH0H9Zt18oTzD9+1nKmso/PmRbE+VEvmft/u4/2yT9m2DjXu/M+wkdL43BLIq3inLOfNGqHN18uJd0v49tNnljftH0VvfgZymW5JMm+s8jVnL5gCiWc3aHn5pP4p0QFmXaAgA+nd4XG2W0jrDo3pcPAB5DD+IbISzKtAUAfDo9dBtltI64ibAI+E56EN8IYVGmLQDg0+mh2yijdYRFP/m3H4CfTw/iGyEsyrQFAHw6PXSbOOq59keLAHwaPYhvhLAo0xYA8On00CXRn1ktoiK+LAK+lZ7EN0JYlGkLAPh0RdzT/wXN/Bd4Ev5kEfC99Ci+EcKiTFsAwKerfhxXioxyaLT/pWa5/deoATyEHsU3QliUaQsA+Hzl10Un+C004JvpWXwjhEWZtgCAL1B/KTTAb6EB300P4xshLMq0BQB8gfbftIjwXRHw7fQ0vhHCokxbAMCXmMVFy/9CL4DPo+fxjRAWZdoCAL5G8+euG3xVBDwDPZBvhLAo0xYA8FXGf/CaoAh4Dnok3whhUaYtAODr/I0io38JioBnoafyjRAWZdoCAL7U37///Jv/nNG///7b/XRHAN9Iz+YbISzKtAUAAIDTG/KNEBZl2gIAAMDpDflGCIsybQEAAOD0hnwjhEWZtgAAAHB6Q74RwqJMWwAAADi9Id8IYVGmLQAAAJzekG+EsCjTFgAAAE5vyDdCWJRpCwAAAKc35BshLMq0BQAAgNMb8o0QFmXaAgAAwOkN+UYIizJtAQAA4PSGfCOERZm2AAAAcHpDvhHCokxbAAAAOL0h3whhUaYtAAAAnN6Qb4SwKNMWAAAATm/IN0JYlGkLAAAApzfkGyEsyrQFAACA0xvyjRAWZdoCAADA6Q35RgiLMm0BAADg9IZ8I4RFmbYAAABwekO+EcKiTFsAAAA4vSHfCGFRpi0AAACc3pBvhLAo0xYAAABOb8g3QliUaQsAAACnN+QbISzKtAUAAIDTG/KNEBZl2gIAAMDpDflGCIsybQE
AAOD0hnwjhEWZtgAAAHB6Q74RwqJMWwAAADi9Id8IYVGmLQAAAJzekG+EsCjTFgAAAE5vyDdCWJRpCwAAAKc35BshLAIAADCERQAAAIawCAAAwBAWAQAAGMIiAAAAQ1gEAABgCIsAAAAMYREAAIAhLAIAADCERQAAAIawCAAAwBAWAQAAGMIiAAAAQ1gEAABgCIsAAAAMYREAAIAhLAIAADCERQAAAIawCAAAwBAWAQAAGMIiAAAAQ1gEAABgCIsAAAAMYREAAIAhLAIAADCERQAAAIawCAAAwBAWAQAAGMIiAAAAQ1gEAABgCIsAAAAMYREAAIAhLAIAADCERQAAAIawCAAAwBAWAQAAGMIiAAAAQ1gEAABgCIsAAAAMYREAAIAhLAIAADCERQAAAIawCAAAwBAWAQAAGMIiAAAAQ1gEAABgCIsAAAAMYREAAIAhLAIAADCERQAAAIawCAAAwBAWAQAAGMIiAAAAQ1gEAABgCIsAAAAMYREAAIAhLAIAADCERQAAAIawCAAAwBAWAQAAGMIiAAAAQ1gEAABgCIte2O//JH+U+mR/fv3nP7/1+TFmw7fLv5T4kf7YFB67aiHrp1iqbWl/re+La6URurGIP2fdH//wd77sWXljtsQ/+kR9EMKie37/2g6CtId+/3neg2sWV3yQ1sBPbu/roY/UbPh2mbBohfVzLJUllzfGtdII3VjEG1W+ySc8/B3Cos9nS0xYRFh0j4dE2e9nPbk+NSzyxhN7jvT5kZ0RFj2M9bMvlW7d4tJdK43QjUX8QevuI/3cCI6w6PPZEvOgExbdUQdFyTN81f17o4/ZLK74iGIR0nOkM/yhz9Rs+Hb5Rz/E3xQWWWqztjNU+HNfes8peKZu0hpeWUTV+AHr/hkPf4ew6PPZEl+8i497Rp4IYdFlfVS0+f6dkUahj9knhkXlIqS5+6FFWHTNN4VF+eat7YxrpV9KmrY+ftCNRfw56/4ZD3+HsOjz2RJfvIupij6+DsKiq8qAoPDdz6vFEPqcfV5YpEU4fvswn4yPXAbCooexfrqwSMmJa6VfSfRM3XRjEX/Oun/Gw98hLPp8tsTXTtQHPiNPhLDoonxY/bIH9M8ff3d///Mabc9ZXHGbzsGyZe/roUHKbPh2mbBohfVzLJUllzfGtdIvJHqm7rI1vLaIN6p8k094+DuERZ/PlpiwiLDoKj8Ayq3jgdG3v50tXNPnzAf7Ceeqx4Z1w+nYeuyhNRu+Xf72hf+I7wqL0g288OfhrpV+HdEzdduNRfw56/74h79DWPT5bImvnagPfUaeBmHRNbYLmp2TfmjHt59e0fb8tLDI2v30gISw6GGsnx+9VN/hNY/8H4qw6PPZEhMWERZd5G/qbuM8wc8u+vqw6NNPKMKih7F+CIsuIix6IoRFn8+WmLCIsOgif1M/47NpA9Pn7LPCIj+hPj0UJCx6GOuHsOgiWzV9xjcjLPp8tsTXjgmros+vg7DoEouNn3IXRAMjLHpyhEVPzVZNn/HNCIs+ny0xYRFh0UW2CZ7x9eKHhhLZZ4VFn9Vug7DoYawfwqJrwmcK34Sw6PPZEl86Jl70GSEsusQ2wTPuAg8hlMgIi54cYdEzC58pfBPCos9nS3zpmHjRZ4Sw6BLbBIRFn9RuY9aNXSYsWmH9EBZd86JH/g9FWPT5bIkJiwiLLvI/W7QSEOR/Yr/+d2QtL9p3/tBXZf/89j33y39y5NDv3FXBGyrjivzP3Q/+XdvlzqxUzWq0p1ZK+UTzT7wcNR2v1KYcfsQud4s5ncmf395ff91y7ZPKeMLay6uYPqe6d5cxt2wFZkd90fPsBu4F/vM7KGEXjqXygkoc8k5q+hiXtgvxiFY31G42/LXNNJqDfPyZSp+CPSL7JLrOPV+JrWBKeaHhnrpRxVV7bM34Zhbd5ul1haJ9PFvqs/stS8/KI7ehOtzaUsauWnwrVPV2ebenGsXkemfLY/Xsk35oXtHE8N5brn88LVNv6GQ
wufEzkp3fmmdGWHSJ74/m7ve0HaTYTF6/3Y0br6BEkn4Y0qHbqgUVqfgAvbf0WeM2Qe/rnZUNZdZgFBal+VRN9z+cbrhSm2P4MbvcDHY2k/2nkptmPJaXPuyF7Lql0tCqureWsWxhO2SWwiK7fL6KW8mq627RLPMYkBdWIqtmV77YF0q3/a1vKLcy/O3/k810MgezPioVqXhj9jF92LvaOznby2GMM9lTN6ok1eXtjSS6GqqqNKu/d3uyB/t9PFnqyf025aCGz8rZyC9uw9OjYV+FvcmitUvdmKZGN6vz5bHM7f9FK2qhnMPocPOFlLpny2rGP5yccipll+e35rkRFl2St9TpTa73w6bdS/2D0z3z9XOxGfeoAhUv7QP5Uz8I3fNyqbNubhsbdjsDSzbP4KZ+DM9W6hj+iF2u13Iyk/oRT6rrlrP9/yhll20c28CbyteXse3+V3fbG7nndp1mDXeD8zwl8kiVcO2NKoa1Urq+Def3tbM2/L7XduFO5pB0vbTLeFCBipe2j9v/j8bUymTOzSKu7KkbVaI1kHaxDuc3M3fbzq/s1lsoejhfjOn93iw9K+cjn4yi1o+pGlJehaPHo63zmpHz+d96HGw4zXpUi2U5fePVkvQ5J5NTunKMc3Jrnhxh0TW6x8G35Fm/qcst4U9q/XBvPHtvs9tTm+G26h7+jbekVrsCdVOXOotmZ7PxVo6JWfJP33axbucr1S1Kxy6XFWYzGa+UU0YxLMu2ar+Cs8yuZvNljEok3W7YjXpuFiWaV13Eco7ReINKmOBW7OXXShf9BZfrlagtDv98M23O5rByfwrjnaKPRV+WPdvL7SKu7KkbVYZ77GSXBSMvF3bUbVnGez16CJosRjm/34vPymTk56OojW+40yoUw8ojubSvTDi3o7P58nhO08xWomu4rOUZfd/lWLuMs8mdLtnk1jw7wqJrjn0yCox02b8YzSHJsdWapFju/sjnTryLvo2Ab0MlMq9ol2w4f/LhXR4ulzvL/zxuPX9v5mjYkt6dtbwHZ345UcZopTT8wTJv7HJRfjaTPAK/nr+oLtr3dC6WWLZnfHgZjxKpSP5jDJuymVrfcx5cMep8BOl38MMilnEMxmsokeSxVE3k8qPSunF5Nfb+NFEts7dVDqaxt1b2HQ3f/mul9kJ+2amdeA4L96fjDSqRWd7ef3Jk70sStN40ptHZf/zOavjFZrhRRbP81W6xTbmeJZUZ3cylPejd7uM43wB7h5YVtLY57lbKHjwrk5Gfj6KWB6HCbVMq8Cvf5kQjOUZqKTV0tq9yDb9JubO9r31WVYPVyC3DLlghFfGGvdWglqW9cau1L2ox1jY9n5y3ocRhn0RKBJvq2REWXaQ7bHy3NKxAsXO8wl7Sk0pkvv32MpbqD8izXRU2mx+O4ktYbfSiKc+41Jm3W5fwho9mLJkcWd7wsTSWHq9U2E3JLhf1LX0yk2aVlW4b2BctsewHLaOlyxwvUVVqHD0rY++5GLVyuq67vo86XkCJTd9o6lkfu9I+quKL/WYhLVX0vp2/Rcut5eEnRykv1C/DYA4L96fTzttY3n5fEsu2skXfXetNY8ed3ctoAkedG1XatDfx6+SPNc9uZtGtMoKl9ox9cS1VjKLaAKp8er/zDW8nUlWbjdxSo1E0vOqwKXVW/RFjjaRKJF6mnEyjm26KGvYGVpbHM4rxWRELc/ZSXquYgaWTo6GuZUt2dc4m5xlK7Kab6tkRFl2lvSTBebM9fVWml1NCO6bYaUm9ubq9l59KJSLh9vS+6opt/3c68zbqmfvGP9q15KYs5TlKbM5XKuymZJePcc5nso2xWnjvQImNJY1+eeSNebGPLqPXqJa1PwIbUc+qdHTVpjfdKdSk291iyaaJY2c3pbvGN5alCn69bkz/j1jpleFvylKeo8TG03U77Ryqq8396bWrZCzPVHtkK3y6l9vGojvree2eUmKpSr/01sbJJGc3M+7Wax1lPJ2H0Y+i3AB
2sblqWUUH3mc1KnV5rE5XaWNZavp8FK2tdHGz8giU2ORV2Hq0csdtT6qGrWg9rlIwt//7++jbrk6Wx9JVoZRMQ2my+uejbtpHf7TcJBcm50WUyGa35vkRFl12PCGuepwCXl6J8KDybZTbCZ5n1TrpKdyeGmndlOcpca8zb7euVM8hd1PP0+uNG25WKuqmZJf3Hu7MxKq0Y97UrTxkGb1EU8Tv2mxBmgKeuc87bKPNtORxM/yyEjlZ36tCVLop7LPzz81dnGhHatpMSzZ9ejd7mfM53NkcalKJzPKSprFWuwpNY7qzZ3vqRhUrUq9Bdb1nfZzczL3bep08c6/nNXIRv6hEyyfVrHqTufSszEZ+Ooo5q330plVoe7y+r8K5HVaWRze9HImG12e1lZoF8bx9MJZq7ur55HxoSmSWeXJrnh9h0XX5t0p3o0fANbur3a6b+gkOd5o10uy0UljJ221qeWYezq3O6iacz7J9CptnyvLGDYcr1TRRsMt7a3dmYnWOyymVNF0+ZBm9fLNT2kVrhT03S2uJtog3fAzIUkehZriWaps41KXjMVsZH1KzqBOp8Nrw29tieXvNOtWa359AWMnyNs1gOj6Fo1TT2MKeulGlSjhrZDjY6c1c24N1M1Z9tK5Wsr3Y3G/v8vxZmY78dBRzTXUfUteglbq0r+ob1rGrk+XR4ithVKJq1nKOpizZdtzc3Dq1MrmozPTWPD/Cojv2Pwgn9ffnLSty7BJLVjvfcnIB31Rdg7ar9DkQbuHwEazav9dZ1G77LFiymuXG2m0zC1Zpb2NygtRd3JqJ96CEGuwbecgyWol2JO2iteIVqHI90TXR1LTU6DAbNLGrS8f30GbiTcQFBm4N31X9nM/h1uZo5i2WF7TVsWLHgJrGmgm6ZpjXq1QJZ40M761dPbuZg26bXB9FrhE36gb3qW7OEu3C133MR342igU+IiVysm2vXv2dda3PLasxuri2PN5GPRTLqpu1nKNUk5Q6t0otTc5SzXSmt+b5ERbd1ERG3e4pWIFjQ3Q7qd5/zfOYWa1xL9H2bJ8m571pOPc6i9qtmt1Ysn02wlEW7PLeRjj8gl3OXdyaSVPJUv0DHY7j4jK2y+Pi3EO8AlWtwZI2LVvqmFhdaXZX6uuW6IZs/XkHXnx82yq3hu+qqudzuLU54jYtr98jPSt3rFPT2HxP3aliibqENdLdrswqdFeLm7m0B5uUjzte13BNmwbq1rImt05lVujGNuw1W8aTbXNNocy6jnuO57ZbWR7Nvd6CllVXbNqyVLdv60KWqG+8Eod6ctGALevs1jw/wqL7qshIeRG7fmyT7smot9ZgN1r28HEKt6eaap7Pqvt7nUXttrOyZPsUhKMs2OW9jXD4Bbs8eYjPZ9JUslTfYTiOi8sYttFvhUZcq5p4+blk+fuQLHGUqu+DJYImsqB0P2TLtk8+6LMGC6Oiln8+wmpY9nnY5fz+RMLdanknm3Jn5Y7Gm8bCO9tsh+tVrEY9Ibs+HK5d7VfAsvU57FZF8oLXo/Aa8d0YXbF8dRn32CxOndpZtn06G8UCr65ETraNNYUyy+7HlniN8/sxWZ6wVJDVbB9LdU3X47FELrM0uegZsayzW/P8CIs+5IiM4qfA2PViQ9pWOtL+vO9PiqUGxp1E2zN+BqvjxT4PjDuL2m1OrW7SJhxlwS7vlcLhF+xyLm2JgeFMmh4sNTgIPriMPvN2Lu2itQYrYLk+zmEL9VJb4rgb1cXZIOrSPqQBL+Ll+7sfuDd8VxaZzMGuDownXo9BLK/NjFi5wZKv7Kk7VaxGtU5+vdtBsnAzF/bgYODRBhjep3Ku/rntsa762G0YaKbtyXbgljkQTHJTzrO3tDzd1kqCrKYrS3VrUc/LErmMJQb2IQbzWbg1T4+w6IP2P3/dHRzpHwPMF6sN2TxinlRicTd2gu2ZW24GVj169nlg3FnUbvtEW7J9CsNRDlcqHH7BLl96iOVP/nca3d6DpdoxP2Y
Z/bN9LAyPQRmsgOV6Y14iaKGuaoljZtV9GDaRBaUHvIjPyvjfZh67N3xXDmsyB7s6MB5hNe/M8vo9shs99U1j9fyk2Q7Xq/RFwjnsvPxAVabpttqD3cDHG8AbC5a87MU+dmOOZjrgRcajODE6GuJVsMyBuEu/pkRnaXnCPRhkNbfeUt3GrXu0RC5jiYF9iMH+Wrg1T4+w6MP0FNWbuXy2TLkh6wxLHbUtORA8MBJsz/Zhkup4sc8D486idpuTsZmjRKMcr1Q4/IJdvvQQJ+0fly96sFQ75scso3+2j4V20VqDFSiWcbhG9QVLHDOr7sOwiSwoPaAyxQtp6/Wk5ZvDd+WwJnOwqwPj1a/mnVlev0fcyVPfNBYOuNkOt6scZdp0w5scqMp0TVSDa/fxcAMM71N5wT626970sTLy4ShGTo6GeOCWORDvK7s02j5ryxM2EmQ128dSXc/1qloil7HEQLPhlHArt+bZERZ9nN/w4jHo/gb/ptyQwZFyPAqWHAgeGAm25+Apqx4E+zww7ixqt36+mics60Z5ulLDQ0Ls8spDXDQRPbP7ZUu1Y37MMvpn+1hoF601WIFiGYdrVF+wxDGz6j4Mm8iC0rG9g/q2nvxa/d7wXTmsyRzs6sB44tW8M8vr98jm/KlvGgsH3GyHG1U8mX/koNeIR2tUIJRrDZa2Gly3j0cbYNBYfcE+tuve9LEy8vVtaKIm95HGA7fMgWCSG7s0vCFLyxM2EmQ128dSXc/1qloil7HEwD7EppNk6dY8OcKij/Otddzx6mHMqg1hOdqL9rm4aunJE9wLtufgKasehPLzuqjd9mS0ZPsUtKM8X6nhISF2OZe2xGwmPsjW3oOl2jE/ZhmtxOSo7w1WoFjG4RrVFyxxzKy6D7NljkorMda8kZTbuTd8Vw5rMge7OtscnWremeVF86kmnA2WfDDgZjvcqBLt8OHi5yaVGBgsbTW4YB/HG2B4n8oL9rEbVd3Hysg3i9twEyzcZh9pPHDLvLSvrMZwHEvLEzYSZDXbx1Jdz/WqWiKXscRsck0nyeKteWqERQ9g+yDvpv35+pX/KSJP2UWxzeQ5Xrx4Eizdbd+ZYHsOnrLqQbDPlzuL2q2fr0HL9ShnKzU8JMQu59JVYsC736T+UrtND5bq23jEMlqJ7qxoF601WIFiGYdrVF+wxDHA6j4Mm8iuld6Vv6M0Wpt7w3flsCajsqun9ydSzTuzvL6p2V5uGwsH3GyHG1W2jGLVzdm0J8tmBmWqwcX7ONgAww7LC/axXfemj5WRu5VtmKezGRwNcX+WebbAnfMaS8sTNhJkVXdo1HO9qpbIZarESNNJsn5rnhdh0QPYPtAG8n1WnRGWrvaX7xwr4h8t11l6ths7wfYcbNDqQbDPlzuL2m1PRku2LVejnK7U7Pmyy7l0lYjp6Cv+lEHTg6X6Nh6xjFaivT/dorUGK2C53piXCFrwyeaqljgGWN2HYRNZULob0sD+Thoszr3hu3JYkznY1dP7E6nmnVle19R0L7eNhcvYbIcbVTZebHd6p8ImG4MylpsHN97HzQbwxoKCXsx7sY/tujd9rIz8MNuG6v7kaIj7s8xL+8pqdHPLlpYn7DbI8jpKjMZa92iJXKZKjDSdJNduzXMiLHoA2wfaQL5Pqt3U5xRZ9ql8EIKNtiCsFW7Q6ni511nUbnsyWrJ9qKrupis1e77sci49n4mPsGqv6cFS7Zgfs4xxiXbRWoMVsFwf57CFukNLHDOrLs4GcbF0I7+RlGzcG74ri0xGVbe2Kqxled0e8aJVdpPTNDbfU7eqeKVfWvRfxz+VG1u5mfM9eN5OtQGGBcu5NvOWuurKyEsr27CaZDPteBXikZ45r7G0POEeDLKarixVl9jU87JELrM0uaDQ1VvzjAiLHsD2ge8m32b1nrCsekN6se2D7yHPdPulS8I9HD7L1a6911nUbvswWLJ9CstReiNnKxUOv2CXc+n5TKxAXaLpwVLtmAfjuLi
MPvN2LrMTJF4Br+XjLD9X6vw6Ve+WYRNZvbcsMR5yz6vHNe4N35XDmsxhfn8i9bzF8tqOvP2zvdw2Nt9Tt6pYneEy9Jrakfke7EdR83nY1eF9KvO9/ORZqVMLilF07FJ9p5tpx6vguUosieeWLS1PmzJBlnelRFhiU4/HErnM0uSaToxlXbk1z4ew6ON8//g+sG3SbD67HOVtVfL/D2Vr66LtGT/L1fFyr7Oo3fZktGT7FJajnK9UOPyCXc6lpzPxAdatNT1Yqh3zYBwXlzEu0S5aK+y5bss+94eXt7zPpU41u8USfRO7a6V7VmEwSbt2dfiuGpZ9Ho5qfn8i9bzF8sKhNJlNwaax8M422+FGFUu2++WE1R4um1nYg+0oWsVV+9h3WN3v+G41fVjqfOQNqxCO0Zuup9hMe2UVVkxq2NXJ8qhUvd2CrGb7WKousbHcvZAlcpmlyUXPiGVdujVPh7Dokj/R19Ll1rDPzVayy82GtHK/wgfSsrr9OxFtz/hZro8XS1ztLGq3ObXihq+tVDj8gl3eS9epng9QCWl6sFTfxCOW0Yu3JdpFa8UrUN3rwSJ5mb1lSx2917vFmxiPIio9vis9qzBo/tbwXbAMwznY1bP7E6nnLZYXDqXpvCnYNBbOu9kON6pYSp9XLNzMuEg9tsk+Lq4OOvTm1MDSs7Iw8oZVCMfoTSshTfuD7iz30r6yGsNbtLI8Ya9BVrN9LDVZVEvtZepUrOnEXL81T4ew6Iq0i7rb7btA28c+N8+e5TXbyyvl/1U8L3x+x6LtGe/P+km41VnUbvOAxc9UOUr7eLpS4fALdrkpPZ6JX1dCmh4s1Y55MI6ry2gF2hvULlor7Nkz8zC9ibbhtjtLHTOrd8ugiV10vo5L96z84DbeGr6rhjWZw/z+BMJnyvLaoVhm07rljZZ8ZU/dqWIpfV5iFU5rLOzBdhQtu+otLN3vOiVtH5a8MlcrH25Dn40S0kw7XIWcPZx4wG/qqMbS8niy3oNBVrN9LBVu3GNaltrLrExu/Ix0k/hJCIuusE3QbC3tZG0t+1zvJN857Ya0XP+DgO3DlvKu7qpwe4bPcnO8WOpiZ1G74akVTlqd2cfTlRocRTu7fHRhyfFMvDUlXH3rBmN+0DI2czPqv16EUtiz5R2ZYRnvbXg3mt3Slm40pb3D8aBbPstmgLs7w3f1sCZzsKtn9yfQzNtZXjsUy6z79srDJV/ZUzeqWKod3Zn5zQy7tbwjsxlFo9oAK/e7XbvEGyn6mI+8Vo2i5k0p4dTdXjwc9sayr+wrNaxUZ2V5orscZDXbx1LN1vDejswoeT65phO3cGvSm2/93n05wqIL/HbXu1Z5eS95UgnjG6fdkF4w3FRqo6mwPU5tEyWv0zxOYaY/l/uWfFRnTbPhU1g9RN6If3btSoXDL9jlo4vZTOxy+Sj6kIseLNWOeTCOy8toBapWcv/j48Fbrbv2VTraVStVmW40TbrZd1ETW15Ot7vUknVpK5Rzftf/0EJbvXZn+K5udzKH+f3peZ2qxcFQvKQSxgdXFGxWIWzb57Bvh9tVpn8DrWAV2la3ZvYc77YuoW6U6kZxtgG8aN2cd1EsqqWrMqpWPiueUTVUjfzCNrRL5WOYu9tbCBd/0w092Wq3O+TQrl3ya0+vL0/dQpDVTNhSdZGuM0seRRYmF6+LZba5xa0JF+GZEBZd4Dcz8R/5VfwjOnZ90+6kvUC3BZRfP45O/RTbyn9IW7v7Ct6PmsothnvWn4Wj09ud1QXaZi3ZTtr78s/zlQqHX7DLRReTmdjF4+rxc+/2LEt1N+pBy9gV2Cd8NNLKRYoSakappDvcgrFY+piZF1Bio37absrPRWl1WC6UNaCM9Lm7K8VYGjeG75phnc8haPTeM2V57VC85JGrsZRZ4Wibzps9daPK3vEu/XhJXQxNbubRZNGK1lKpTT2KVKNoz0vv4165313OPq9iFJORn4+iZteKIar/IssHEFTvBz/bV1a
+/BFJNpE8sZXlsXQ57zDL6ymRu+36rZbU0t2inU3O10VN7C1Nbs0+mKLh50JYdIVvgsBxf3NGyvGoyf9B7XrLbrTlwr2ha/pV3x58dW0c8hbfKmylVdDrNe17yeJZuN5Z1G7brCXbNrwrJezz2UqFwy/Y5bKL85koaefCH3+8mx4s1c87HMf1ZcwZVqeccNFIIzeylbFW809fqQeTM5ueyzKWcQzFayiR1E3kbjSwrrQ2W/7FQT0qXVz9Z7nuDN+1w6rbaeawX/7oM2VZXSXL3cac2i5v7VGwGa13X85x0+ypG1X2Raic/mtgpzczZSjtCxEU2FSjUIPDDZDrn9zvtWfldOSzUVTy5VQzPhqaZEG9ru+rPOx6aPt9Xlgey6g7CLK8ISX2Je36LSt1GdPJaTLp+nZxzz+9Nft6V3fzqRAWXZJvd6PYs10J/XWzestucsHuQpK3USUsmdU1fEC++4rBJd5vuSEvdxa12zZrybYR70mJ6UqFwy/Y5aqL85l0V/941t6DZfYTf9AydvPdLlqd8eGQj49WM5awWFXGco6x+FCVMNHgc/m+dD+VxHuMrvVrWro8fNcN62wOm+jy+cjqGj4i+9jV6ibdPfXNaFf21I0qoy1zNs3ohhX9xA3WA6lGEbVX9T+/30Eb0bNyNvLpKCrd3miPhnDxXVc3ubzgR9Pz5bGcuocgq9k+lupVdfqc6eTqAvswz27NMcXxyffNCIsuCjZttBt36c6n/9eFEhUMH7VoO8YFdyrlvLfwWfYNW23Iq51F7bbNWrKdtHekxHSlwuEX7PLp4idH/eZRTb82tC72Ep6txOFRy9geFbn/qpGKem4aLr4Hl7blroznKdHeB+NdlfYWgtJ9h13rhX5Ja1eH7/phncwh6cbVXO+plPP+i4+lpu1uL7ejXdlTN6oES+DObsHpzczdNhOsb1A48ELT+/R+90W26zaQ+lk5HflsFKWmoTQc624fVrj40nU0Kpi1i9kMbbo8nqeEC7K8EyVyibbtej0tK2qlEgwmO+qe3pp88XyZvhFh0WXNyXP6RPtOOD6V1I5SnWY/1vs3UD5rKhw+yz68prlrnUXtts1asp1086ROVursKErsctzFrp5JedHatS72Hix/dKMesYzldbtoTXfFdrnncqHi4l4y8+/JC56rRHcfzP4NuSlbiErXQ9rKl8vTHPvjO7i7NnwXDGs8B1eP62ThJXim7HO/R2Z7uR1tvrOVZk9dr+LjLYr82X/74nSuZzczd1uVaVprBj7fAJP7nZRN2HWr05Y8G/mlbViWtZLW3V4nXPxd3dH5Wpv9thj/LarCZHk8VwkXZHkXShwlyp7bd5dnKpFNJhc8I+7s1nidtqfnQVh0w5/f2ta/wgdap/NxMW2QYAukQmeP0G//7fTtseiem1Dbbfwst0eYXOksardt1pLxE6ZEcrpS4fALdrlf19OZ+I3T75VvUnIvlS5FDVp+09bNZdT1qv+ukd3Rs95vgw2X5DdgWMSvKBHcB7Nta8tvW4hLlx32PR7rEC5D78rwXTis0RyyK9s86Z4pr61E5fypb0Z73NlCs6cuV7FE+6pTM+GQD+ObeXQ73oPBwzDbAKf32609Kw/bhppd1d1eK1z8wtV99X/3V0hc/urjEGR5A0qUJTTWoOejTGUyue4ZyU5uzVZlfOO/H2ER8KxmhzFQsJdQv1v8JajEZezB12C38fYueDOERcCz4pWEdfadTfBLcP/FvBKXsQdfg91GwqI1hEXAs+KVhHW2WwiLELLbSFi0hrAIeFa8krDuNCy6/Qc52IOvwW4jYdEawiLgWfFKwjrbLf2Lz/889O1NxB58DXYbCYvWEBYBz4pXEi6w3dJ+LeR/gfr2l0XswRdht5GwaA1hEfCseCXhAv11aKWcf1fU/3iFZezB12C3kbBoDWER8Kx4JeEK2y7by08/XuaP/0SZzQe2EHvwNdhtJCxaQ1gEPCteSbhCXw11PrKD2IOvwW4jYdE
awiLgWfFKwjX++2i1+3+uKGEPvga7jYRFawiLgGfFKwkXNf8Q2BYUfXD7sAdfg91GwqI1hEUA8Dp+53/DavQPbgE4Q1gEAABgCIsAAAAMYREAAIAhLAIAADCERQAAAIawCAAAwBAWAQAAGMIiAAAAQ1gEAABgCIsAAAAMYREAAIAhLAIAADCERQAAAIawCAAAwBAWAQAAGMIiAAAAQ1gEAABgCIsAAAAMYREAAIAhLAIAADCERQAAAIawCAAAwBAWAQAAGMIiAAAAQ1gEAABgCIsAAAAMYREAAIAhLAIAADCERQAAAIawCAAAwBAWAQAAGMIiAAAAQ1gEAABgCIsAAAAMYREAAIAhLAIAADCERQAAAIawCAAAwBAWAQAAGMIiAAAAQ1gEAABgCIsAAAAMYREAAIAhLAIAADCERQAAAIawCAAAwBAWAQAAGMIiAAAAQ1gEAABgCIsAAAAMYREAAIAhLAIAADCERQAAAIawCAAAwBAWAQAAGMIiAAAAQ1gEAABgCIsAAAAMYREAAIAhLAIAADCERQAAAIawCAAAwBAWAQAAGMIiAAAAQ1gEAABgCIsAAAAMYREAAIAhLAIAADCERQAAAIawCAAAwBAWAQAAGMIiAAAAQ1gEAABgCIsAAAAMYREAAIAhLAIAADCERQAAAIawCAAAwBAWAQAAGMIiAAAAQ1gEAABgCIsAAAAMYREAAIAhLAIAADCERQAAAIawCAAAwBAWAQAAGMIiAAAAQ1gEAABghmHR/x8AALw1hQRvhLAIAACEFBK8EcIiAAAQUkjwRgiLAABASCHBGyEsAgAAIYUEb4SwCAAAhBQSvBHCIgAAEFJI8EYIiwAAQEghwRshLAIAACGFBG+EsAgAAIQUErwRwiIAABBSSPBGCIsAAEBIIcEbISwCAAAhhQRvhLAIAACEFBK8EcIiAAAQUkjwRgiLAABASCHBGyEsAgAAIYUEb4SwCAAAhBQSvBHCIgAAEFJI8EYIiwAAQEghwRshLAIAACGFBG+EsAgAAIQUErwRwiIAABBSSPBGCIsAAEBIIcEbISwCAAAhhQRvhLAIAACEFBK8EcIiAAAQUkjwRgiLAABASCHBGyEsAgAAIYUEb4SwCAAAhBQSvBHCIgAAEFJI8EYIiwAAQEghwRshLAIAACGFBG+EsAgAAIQUErwRwiIAABBSSPBGCIsAAEBIIcEbISwCAAAhhQRvhLAIAACEFBK8EcIiAAAQUkjwRgiLAABASCHBGyEsAgAAIYUEb4SwCAAAhBQSvBHCIgAAEFJI8EYIiwAAQEghwRshLAIAACGFBG+EsAgAAIQUErwRwiIAABBSSPBGCIsAAEBIIcEbISwCAAAhhQRv5FXDov+zTeF//keJL5N6lS/v+yE0+OS/lfVE/vt/tnH9zxMODABelL8Q3smDwiJVGlO5O9TC3BGJ/Ldyvjo4uRoWqezcV81D3SXPF32koMg81dCKe177n//5P/+HEA7Az6YD7Y28ZFi0R0VfHRcRFn2iPSp6rrENwyIhNALwg+kkeyOPCYuKOGRABe9QC3M5dqhG83+U+SUIiz5PERV9aDs92iws2nzpHgSAB9Ix9kYeExbNXw0qeIdamMuxQz0aZX4JwqLPo2G5Jwo0FsIiAiMAP5UOsTfymLCo+pV8SAXvUAtzOXZQUr7y9U5Y9Gnq6OOr1mPBUljEHxQH8DPpDHsjrx8WfeUv1AmLPui//48J+m6iD+U+gbWw6Pm+fQOABTrB3shjwiLVGfvIa11NzBEWfZi6S77jNZ7XL7hnPz4sIi4C8APpAHsjXxQWfeSVoCbm4rDoK99GhEUfo67nYdFXrceC5bCIuAjAz6Pz6408JCyavhk+9EJQG3P5XVn/lp4yvwRh0Yfsf4UwCIvqv+z4lV8BTqyHRcRFAH4cHV9v5AvCoo/+sGk1M7f3o7T50lcRYdGHnIVF1Sb7quVYMRjXf//3/+kei2caNgCs0PH1Rh4SFh1fzyjjodS
0WXtXF98sfO2b6CNh0VN8laCxJN8wnn35wi+Dii8Bn2Kt5Cxcq7/ieq5xA8ACnV5v5LFh0acEIWrbLL5XTr92+ESERR+y76P4tuXLz/WX3c/CovrqRrkA8EPo8HojDwmLVOV5wqLtdZTeoV/+7y4QFn2Ieh5Gs/+d1vd/vjjUnTkPi5ovjJ4qoAOAKR1eb+SxYdGnvLHUtnny1wph0UccEcSTRT6nJmFR/X3RT5oYABAWFbQiK46D/1NepWrbEBZ9Lo0lISxaMwuLqpu8tCsA4Gno8Hojjw2LlPFYatsQFn0ujSX5+vEcq/dSYVFZ4HOeEAD4LDq73sgjwqLjrwgp47HUtiEs+lwaS0JYtGYaFlV/uujJNzAA1HR2vZGHhkWf81sEatwQFn0ujSX5+vGo481LhUU/aQMD/197d7YcR64DaHjiOHzhiejo6Jj3f9dJAshMAASZrE0qq/7voru4L7kQlmUJiOzd9UGeERZZC8IiwqKH2MAbwiIAeAv27vogTw2LXnOaWefi+afKv//JF7v+K39te/DPv/Kv/lvl0T8Sf++waJv/voDRYrVYPHE+CwNv3F823X8jhYt02xLW74TgOiyyCYkn7uruH13xtt6/KZoE8HeQN9dHeUJYdJ4L7xIW+ZPKspoj++hlP0LV7McE/hNqbsrI6KVh0VLY4KZpOWYPTA7l4W9ljSueL8t1XM1rZeDqN2UcrI6f22D98nONgkH8+sidkPkxbwqLLKcpdsRKGstqLOccKC15fGMAwD3s5fJBnhoW3XCY3MA6F2sj+KPCspo80S7UGR2H/XHbFJVdveeHRb72qPdBiBIP/V1xhlpJ4+YzX9ZgTLE0sOWOWC1frTz9w7c2Hy7C1xvvhIK/N74sLNpH6m/McmsA4E72avkgTwiLzre+ZTyZdS7WDit/WFhWc2bL2VGe2MUAdVDUdEeQq/mCsMjPY1DbShvL2dSxyaY/+62gcWXzZbne04YsDmyZI1bLVyvO/jooaqbh6013QslflfqiW6GwrMZymn6sq7BIMss1L914ALDG3iwf5Jlh0YteyNa7WDur/EllWc2Zvc10dIx2I/jOsrxiV3dpM6yuuF6an3IRGGyq8fuvhDh5UMtuXNF8WaOwaHlgyxqxWr5av/rZVeqrn7VvuRNqfuT6oluhsKzGcpp+qMuwaGsy2N+lOw8AltiL5YM8ISyyBpt//v3v/N7aG7/hdUw7VGt9+pPKshqXPTwL8xDTs707gtwIS4eT1RULS7OawrKiIkLxe1FIo1pu40rmyyoGbdYHtpwRq+WrdXHO/Cp1s77nThjx6ywv+qiCZTX9SJdh0b/jNS/degCwwt4rH+SpYVH2nMjIOhO3n1SW1bhsXyMJh8r4zDTxgHbdLp1NVlcsLM3PpqxuZY3lXMQL+TpbZuMGmC+rDotuGNgyRqyWr5bDosvB0rTvuBOGfAdlCz85P3HLavqLeRkWdd/J7qw9JQBwzV4rH+TxsGhyrmye8Ia2nsRad35KltXMZ3rwY1jWhFVUboRXhEW+fvcFk40b3oqvlxz7sczGzWe+rDIsumXgi6DGak0WfxkV5Xlfz04s3W2+r+qih7EsT1hW0w90GRbNLN17ALDAXisf5NVh0Q3/pGfEOhJrnfkpWVazeBj6JtcHbjiC3AhLR5PVFStLGyxsZyWN5Vx+tSuNa3mNy58vqwyLbhn44i8qrZafWwyLFqKi1OSOO2HI91XsTtiIUG55TX/tHwqLctgIAPeyt8oHeTwsujyUVs77GetGrPU1iB76w1D/9Xb+p2Z+EMsS//4jJenfV0k14zp6SVjkD8u+fjW6X5r9fMPZj7qxvMb1P19WGRbdOvDGspvyWLeyJpSnCMy+qc1+yOHJ71ecSHN9J4yEhVreKU4u9Gd5TT/Oclikq02LXbr5AOCavVU+yBeERUuHy4T1Ita68ieVZTX5MHSHRyjyh+5Z4IYOX96oa6+dTFZ3JkYIltnEgqaMTywjTig
s1/KEZTVuvfNllcPeOvDGcpt+bRsra0K55Sn/xcn4VajhNDauLBSV80h8g253YlQUiy2z6W/rxbDobBgXa5kA8CB7qXyQx8Miqz/Tv/VvYZ3MhSH8SWVZTToMh03C2u20SUvQTDE4brsTsmJ1Z+LJ7E/Lblctv7GcjbVI0/HL9f1YVuOy58sahEW3Dbyx3OaWsChcudRlCBVco9AmtQplljfj66fF5r8dtGxjmU2a9mYpLIrDWaYoNxAAbmYvlQ/ycFjk399D/Wv/BtbH3PBks6wmHHj5fLdsETprR1t3ylyPUMQPPas7k4a23CZPajB4u0DTn9vo+7GsxjWZL2sQFt028MZym7w0YWXNYMp9Ox+XuKnfdyfUfF+uq/OXsx1SZ5bb9MOshEWp1WAiAPAAe6l8kK8Ji5Z7q1gXc+GI8AeEZTXTc8OvI503/aEVJuXOYjfC0sFkdWfSST9YWuNOYcsx1fwH/VhO41rNlzUKi24aeGOZTRfeNFbW1Ft+MTnXatpocidUfF9TeVGW3fTDLIRFXSO31qW7DwAu2UvlgzwcFm1v8P1g+M++J7n9SdmyduU5t8i6mAtnhB/dsppBtrGCZmG2dSzgRnhRWDQ5sy13szD29bHrep8vaxwWVQYDbyyzKfuxssaVW47oAoV4zc+5P/NO8H3NdDtn+U0/8evr009tvioAuIO9Uz7IE8KizT//dr+RM50WxYm1ynqYC/0PzocXHYbuvKtzx6zuTJ6LZTexyI29MH/fj9s7y2lc7nxZt4VFgyE2ltmU/VhZc5b78KHccv/lIst62Z0w0z8AVtD0hYRFAN6CvVM+yHPCoop/sa8dmAPWw1w4WAbnw/zYuO2vINzqvjYsGpyXYf6WM+Wqu72znMblzpd1Y1hUD7yxzKbsx8qas9xf0z642PgKR7Nn3gm+r6Hqp3dZUdOX3hMW+TblXgDAreyd8kFeFxal3bS8O1gHc+EYGJx6zzwMvy0s8m38ousJjf3AsMiyIh8rHM2eeSf4vkau1hM3oiEsAvAW7J3yQV4ZFvnX9APvaetgLnQ/OPWeeRjWx5Yb4WVhkRvDl7npW85cHcxYTuN2dL6suqehYXXLbMp+rKw5yy1DWFZihc0x+WfeCb6vUvf3y8aKG8IiAG/K3ikf5JVhUTgxBmfDAutg7u8Pi1YPsssDc2nkHxgWDQZ34x379cw7wfdVGe6JlTeERQDelL1TPshLwyL/nl47rCvWwdznhEX1cepGHp7DQR2dWE7j5jNf1jDOqQ2rW2ZT9mNlzVHuL8NgcDfecaWeeSf4vkqjC2vFTV+FsAjAW7B3ygd5aVgU9tOybmftxdrbfnDqPX4Y/tN+SJ8/Z4UVbubxQ8/qiuWDzJ1+59HoJmU5JZl/XoA7YC2ncfOZL8t1N4hMNlcDbyyzKfuxsuYoXwiLqqv++J1w8n3VBlfWSpu+BmERgLdg75QP8tqwaH78LLL2Yu1tPxj2scMw/xrRk1XYuCqvC4t8K8vxecOBu5+6vHMHrOU0bj7zZV2GRSsDbyyzKfuxsuYoX4gE/GXbqzx2J0S+r726n9amnpkVNn0FwiIAb8HeKR/ktWHRU17U1l6sdTI49R44DH3TjtXZuGoLB+o9S2vcKPvZ6LIsJ8q/nStwB6zlNG4+82VNw6LVgTeW2fT9bKysOcr9dRlsYFXF51mW83hYFK/soBcrbPqpExYBeAv2TvkghEW78WE4/jqRsmobV3HhQL1naU1xZJ6Tr8YdfrlGuQPWcho3n/myXOf5qF4feGOZTX/kb6ysOcr9lRlsYFXF51mWM74TKr6vs7q/RoOpWVnTlxMWAXgL9k75IIRFu+Fh6JuVrN7GVV04UO9ZmrAWjbWy1KY4LeexSWhiOY2bz3xZw7DoloE3ltn0i9hYWXOU+2sz2MCqis+zLOcZYVHIrudmRU1fTFgE4C3YO+WDEBbtBoehX8KA1dy4EV4ZFrlh9HB0GZL2rmKTcMBaTuPmM1/WKCy
6aeCNZTb9kb+xsuYo95d0sIFVFZ9nWc5TwqJwbct+rKjpp05YBOAt2DvlgxAW7erDcCEqcn25ERYO1HuWpqxJk9LdsNfBiT9gLadx85kvaxAW3TbwxjKb/sjfWFlzlPvLUzaKV31f0z13wojvy1ePd05xda2k6Ut9a8tqLKfpl/uUpw0APHunfJCvC4ss63bWXqy97Qen3h2HoW8i/v33n3/aJOqVueoLB+o9S1NunNbMJbX8ZNmH/9oCWkEdzFhO4+YzX9ZlT+Ji4I1lNmWEY2XNUe4vQ9kojHfsjr+sluWUd8KQ7ytU9wXV5bWCpi8kLALwFuyd8kFeGxbNj59F1l6sve0Hw95+GPqDZjuI3OjfGxa50dvpeE49j+pjgvhbKFyJy7acxs1nvqyyp1sH3lhmEwuMlTVnuWU0gx23UmFZd9wJY76vWN0yVd+TFTT9tScsAvAW7J3yQV4bFt12wAxYB2LtbT849W4/DC0tfFD03WGRbxhS6awMR3Ysq6MTy2ncfObLqnq6eeCNZTaxwFhZc5Zf32FW2hw1br8TxsJSLU/5e6RYk+U3/bUnLALwFuyd8kFeGxZZX0150i2xDsTa235w6t18GPoGaeRvDovcQP/4uVjxznKbPJ86OrGcxs1nvqyqJ0s3awNvLLMpbxYra85y11t9y/oLdTS7+U6Y8H2l6n5y/QW27Ka/9oRFAN6CvVM+yEvDIn9ilCfdEutArL3tB6fezYehJZs88DeHRa7lv27UNOhsOnV0YjmNm898WUVPtw+8scymvFmsrDnL3Uh1K1/hWJLPtCynuBMmfF+5umWrSWF/7QmLALwFe6d8kJeGRe58eaA/60Csve0Hp96th6E7Zrrj8bvDonOkf93MrXDnltT17srcAWs5jWuxvHF7T7cPvLHMpgxwrKypZ1xuuZUJy7r9TpjxfeXq/i7pVmW5Tb9ewiIAb8HeKR/klWGRf00vRQk160Gsve0Hp96th6Gbf3cG1ceWG2FpwVZX3HaQufHPXvKYbkmWc6qjE8tpnhQWWc6pHnhjmU1/5G+srHHlrrtqC/11OlvdeifM+L666n52eX6W2fTrre+v9Ta33U0AMGDvlA/yeFj0z3//xe9GPvgzwbJ28gOQ/1t7d2sHaq3F4NS79TB09btxvzssck3PQfNJadmbfjZuua6Z5TRuPnVsYapCS25WB95YZtMN0lhZ48r9RS323ErEuaJb74SZ+QSsQMViN0w/iUGh5TT9HhEWAXg6e6d8kIfDIn0XV+eYf7PHA+H4rRALp87oZJsZnHq3Hoaufre++thyLV60tMM51DkVKzpYdmM5h0GkYzmNn49lCcva+Y6OnizZWM5hMPDGbWl1O/lOfbllia6dv0z1deomWN0JM76vvnrYnjg/3zBPfDDv0R4oPxZhEYCnsHfKB3k4LLLq/Xt49GbfWOZmJXiwquK7wqI8z+vjdmVldyztZM2cbkjLb1LvZTCzsZzGt/DLjaOEjTh6smSzOPCmvwCRlTW+ob+qOVYIs3MzufVOmPF9FdXjBvndCFsRtinGUn6GltOkpW4IiwA8nb1TPsijYZF764c3cTis0ivcl/Uv947VFGtv+8Gpd+thOKw/PLZci4UD9Yb7rejNz05Zwcnym7jP8ax2ZZbT+K0OY7m5pI04erJkszjwxpdZyT++hhU1oWHo029U/FW1vuTWO2HG91VVtyJjmcKylNvuuEmhjeU0cfMawiIAT2fvlA/yYFgUz2b79Q7/99/0Yo+nRTxLLXPCKoovDYv8yG4J/8RF+75cycKBesP9VvSWQ5Kijp+o27l8edwBazlN2GrLM/K9ZP90+3D2dPvAm9BdK2phjVuUlTShYdoI+063f/Jgfj1+KMtyijthwvdVVY/T8zOP+/ef/lKZPO2N1haW08TNawiLADydvVM+yGNhUXc01+I7Oja6fn9bRbH2th+cejcfhuGE2k/beJg1Ule4soUD9Yb7rerNig7zc9KO3f/
7b3/uuoaW04St7s/q0t7T7QNv+o1tzjqW0cSGS5MLTW6+EyZ8X2X1OD23rYtPj5+h5TRxDxrfX7h6AHAve6d8kMfCorXTMr2i4/HXv90zqyjW3vaDU+/mwzDOdMhqb1yD14dFeXaW7VnJBXcJLKcJW714gh89WfpCuvaWm1hhKE4NF27DuIE33wkTvq+yeto7y23WHh/fwnKa/sEhLALwdPZO+SCPhUVLp2V+Q8fj/Pr9bRXF14ZFs/vBVbfKGzfCwoF6w/1W9mZlpqoStzo6F7AQFs1OcFd09HTzwE09RnUxckgwmZ1Km3P7nTDm+6qrx6fEzX32+LhJWO3Gcpq8B4RFAF7A3ikf5LGwaOVA6l7Q8ch867AoTtX7x9W3yhtXf+FAveF+K3tb2Egr6/3nDlF3wFpOk/obXuj/XNHZk2X0BgMLy072iViy6UKCi9swb9/td8KY72tQPc7Obew4LvrHFVnlxnIawiIAX8DeKR/k0bBo8mYX1UFhRcryJqyiWHvbD069Ow7D0XG7zePszepu3AgLB+oN91vZW9j6erzR1Wm17eNiWDTqaWt8rvrs6daBxaCRlVqq6UOCaVzUVb/jThjyfQ2qp3VZbjOYdvuzhH3cWN3Gcpp+D/wwhEUAnsLeKR/k4bAongtJ/XOs/VnQv9w7VlOsve0Hp949h2F5bumyLOH7ciMsHKg33G91b1YoBhtZRxpS+Zisa2o5Td7q+M/dd63WuWrX040Dq+EQjSWbruFoek1xE95zJ4z4vkbV42b4Wr71QSrY541UVJbT9HtAWATg6eyd8kGeEBYN3u3b2704vYSVbxaOnXBR1t72g1PvrsOwOG1tEkeJJhs3wsrK1u+3urf5glQRnlhnR4m7TJbT9Fvdb4W1tFQ8qm8b2PR30hnUWEZT3ljFP21vqlvmrjthwPc1rB6n5qfUb5Ot+OxXkspymn4PfFfVqgHgZvZO+SBPCYu2N/LsPOv8Jb/8Q+UvQ5xT2AssuXEj3Ly0qUFvVrqZDJcuzVlzP0bdAWs5TbHVaSuOX4V35Mej+paBd6MhNpbVlGHRNmC6VLG9c9+dUPN9javH6McyVdym888SR76lG8tp+j0gLALwdPZO+SBPCos2/xw/ie6///4dnFsHqTo4s97Pv//pyrZ1/ZXHzX5lFi7MlXMrVnbiroFvGyLZRtTWf9PFcttkOQDwHuTl9FGeFxYBAIAfxUKCD0JYBAAAShYSfBDCIgAAULKQ4IMQFgEAgJKFBB+EsAgAAJQsJPgghEUAAKBkIcEHISwCAAAlCwk+CGERAAAoWUjwQQiLAABAyUKCD0JYBAAAShYSfBDCIgAAULKQ4IMQFgEAgJKFBB+EsAgAAJQsJPgghEUAAKBkIcEHISwCAAAlCwk+CGERAAAoWUjwQQiLAABAyUKCD0JYBAAAShYSfBDCIgAAULKQ4IMQFgEAgJKFBB+EsAgAAJQsJPgghEUAAKBkIcEHISwCAAAlCwk+CGERAAAoWUjwQQiLAABAyUKCD0JYBAAAShYSfBDCIgAAULKQ4IMQFgEAgJKFBB+EsAgAAJQsJPgghEUAAKBkIcEHISwCAAAlCwk+CGERAAAoWUjwQQiLAABAyUKCD0JYBAAAShYSfBDCIgAAULKQ4IMQFgEAgJKFBB+EsAgAAJQsJPgghEUAAKBkIcEHISwCAAAlCwk+CGERAAAoWUjwQQiLAABAyUKCD0JYBAAAShYSfBDCIgAAULKQ4IMQFgEAgJKFBB+EsAgAAJQsJPgghEUAAKBkIcEHISwCAAAlCwk+yDAsAgAA+CyERQAAAIKwCAAAQBAWAQAACMIiAAAAQVgEAAAgCIsAAAAEYREAAIAgLAIAABCERQAAAIKwCAAAQBAWAQAACMIiAAAAQVgEAAAgCIsAAAAEYREAAIAgLAIAABCERQAAAIKwCAAAQBAWAQAACMIiAAAAQVgEAAAgCIsAAAAEYREAAIAgLAIAABCERQAAAIKwCAA
AQBAWAQAACMIiAAAAQVgEAAAgCIsAAAAEYREAAIAgLAIAABCERQAAAIKwCAAAQBAWAQAACMIiAAAAQVgEAAAgCIsAAAAEYREAAIAgLAIAABCERQAAAIKwCAAAQBAWAQAACMIiAAAAQVgEAAAgCIsAAAAEYREAAIAgLAIAABCERQAAAIKwCAAAQBAWAQAACMIiAAAAQVgEAAAgCIsAAAAEYREAAIAgLAIAABCERQAAAIKwCAAAQBAWAQAACMIiAAAAQVgEAAAgCIsAAAAEYREAAIAgLAIAABCERQAAAIKwCAAAQBAWAQAACMIiAAAAQVgEAAAgCIsAAAAEYREAAIAgLAIAABCERQAAAIKwCAAAQPyMsOjX/xpL/I3+bCv4bZ+f47dsyR9LfQuZwS9LvNTzt+9H2O6BX99zB7zB3fc1/shCJ/eelH/JQ/BF8qXl2cOP89Sw6PfvXxqg/O/X7y99Jf59YdGf3zLnX/rC1FfNU1+enxQWvWD7fgLZle+5BQiLdlL+g8Oir3r24gsTeKXnhUV62zpf+AfVvy0san/CMrJJ/vOTfFJYJAN9xCl8C70DvudIJizaSfkPDosk9fJLnV+YwCs9Kyyyd3DyVbfwS8Ki3xv7+GT6LlVti/bNe+bb8w0OJpnB00+E/rK8Yvt+ANuV198DxYPyBnff13iLsOjPdgG+bK/jpf2aZy+/MIGXek5Y5IL55Gtu4peERS/oUvmHXEbYMwiLFrRO7aN5xfb9APsz+fJ7oA1iH3eERTspf/Gd2Xb7RX+C68VL+yXPXvfCBF7qKWHRMCjafMnj+oqwSB5/+/xU9pD/+nOcGvtj/8y9+qlhUXFZXrF9P8D+VFryZaoH5Q3uvq+h9943h0UXU3iueGm/4tnrX5jASz0jLDqjov07rf/80Yen+Yrn9a8Ki3Sy4U2pu/XUl+cbHEwygy8Ii16xfT+B7Mrrb4HxFfmAY+zTw6KvePaKFybwSk8Ii/ao6Fd8MvcY35Iv9YqwSPq0z8+k+5K2pWU+98X2BgeTzODZ17+8LM/fvh9h26sv+HcP1RUhLNpJ+WtfgjKH7wqLXv/s6RYTFeHrPB4WaUhSPRpyP3/Jq1HnYIknKc/fJ/iiE+OjwiJ8I8Kijw6LXu5j7iW8jYfDIr1p69v2z68velr/vrDIEq/zBi8TmQFh0U9HWERY9Epf9MIEDo+GRfpa+O434CvCohd0KWSur/+S8E8Ni6RT+4x3UF0RwqKdlL/2cZfN/rlh0Re9MIHDo2GRBiTf/QIkLOoQFuFrVFeEsGgn5YRFDyAswld7MCzSZ+TrnsmBF4RF+r6zxDNJv4RFd3rZZcGdyitCWLST8tc+7vL2+7lhkYxHWIQv9GBYpPHIt9+zLwiL9Om3xDNJv4RFd3rZZcGdyivyBnff13iHsEiGICwCnuWxsEhfCt/+xSLCot4bHEwyA8KiH668Im9w930NwqKXk/EIi/CFHguLNBy57ZTaf8t+9Wt8Wr4+APvPg/yVfhrSwbrRHyCZwqLyyR28wPbf+u9+5/+e5U1fA/bbnfNPbupIpUQK0txk/jri/gt43fSCYzdz8eLb689vrRj3WedTNda9soQYL16zLXHLRakXNb4so7PpnFpfdsMeH0IbaSSJlZXdPdxFv2LbGclNPWqmJW6ZwvHbyvttC8ZXxE/dKo2WW9+BY4PFqvpm1PEt4VX3+bT/JuzO6NY7SPn+TnMNCxc7UUxsa6F5TrXOxpVdXt9zG/sa6a4cbMA+MzfGeKu0S0t4kp9YkTleFcXVavlx5yUBXHksLGr32vSlkKWnON/Kkrn9P/yOteIn0oXy7YHQpBV2T64qn0qtubNHy1JBP4ed+4Hem+mPz7M6gRSkuUmPLRW67rc57mYsL/cgi7/Lzr01NMMSTt7E6eI1zxLLF2W4KMsJtLvy0sapda9EKW1NQrXUR3K0OfZNel1Z2d3DXe9Y6NFXr56KhSmE4u0MNlbqWZ1A651T970Vyx3egQN
hcmlvhjej5hejpx3azPoXvsJ25Je3niflbV1+pVU8crET5cRiphrtoRS2DzplU00k9pr709JjZ+pnL67FSjVV7KnkV/OWgsSKmsXT5NymYmyg94ywyBLX4tPSVAdpenA3+dnNbwPr10q7J1cVz28ex4rtczB6oPolTR49qxFIQZqbzH9Lpb7TZuVdiC+Wcg+S8dxHreNGXyxeco5JrV2UyaIsI9Duikvbd1SVX+5xtLc57xuZ3crK7h7uascm1yBerMUpnGtLziEPVhJob/vUU2/dcseTL81uuFmZJPtzN+9lv/jcJg/yq+sik/KtG92RQzeZ+U4MFpc6Ff0ylRQWK7DiQ99nXN1+aU21Ad1k9bpry36vNN8SgRQkVlTtSbq9JG/7/1kvlgMDD4VF9gRZ6lL/wG3Cnao53csp3c7d42CsuHtyVf/8Vv20t0Q1z8EDdUPVTVVbCtLcpN6v4u1i5U3/Uojl5R4E/Ta7HjRpiZPk7pWuFi8ZR4/lhNLCp4saD9df2qqjsD1Le5xZG7dxMubKyu4e7mrHJBntLXQwSyxOoepP+e01OsFIB7epdxXicqvBZhsyW+x4Lhst7FZgs7RU/UI4Szej3Sn2Zifllxf+YieGC7fP3mgDpbB4t6aJVHsQqqRN0w79BpSb5GYbdrSR3HLa1UW1orIs9m05bkVWAMw9FBbpDTd6DjO7Pe2vxo9Xpr+TJUPrSa2jkhar0I//pbRavNGs8IgUz+/+uIT5HIvRUkuM7GNrF/vXdNPAgb2X0p6ludlU5D8t88/+snKz1wz7AvW+Ca7bcg+8/e0VL8feQ91cc0NisnhJH1Mqe0wLl9RkUYPL0l3ac21hbr4n7Xy+x5m0+bXPspHKKyu7e7iLfrWXfcO0fynYaJkl1qZgG6e/rHz/DhOV5nFIo6il5Z5XSVLaJl7uSPsoF2uNRzejpLqeJfeckTWy/vcZu3XH28ptj19VIuX6DTAyt32ZYTZXO6ED1Vc53xEjUks7sonY9OuJyEyqKjo5KW660fcu7BfeWxf6WT7mmaYOk/qF6S6WNDx21XejGXtJYwXA3BPCossHUtnj4mrbne3uZM3YnLW0knsq7DbPr4WNpQcPWn5+rVnq2T7u41piRPs4B9J09wgHVZU0t+NJPr8qbLM9h5L5uW50vmd5uQeelPeXw1qkGZkw6OXiY3rlolwtas+xxK6brNZyX1PXGr7rlT3OpE34LmMZc2Vldw8371ernMtqh8DRIO3VyhRy2jay+obWXXlFlparGd1lGw41W6z2fjbV9FG3nGRqY90P+9gn7EbRbsMaMqtRtDozrnZitvB9mpMpKKnVzCaiOd0e+E3QyRw1utG7Tlt8qeXWmXw+SQM3QCZtcrl1NRtXG+p0jRUAc18XFkldf9fu93b/3gm1NMcSG02HfuyJsFT35Kr8/Eoyzce9/bVPSwxtnYbl68iWqEmN9JSnue2Psq+leeE14N5em1Re7oGj6wvl0mQfU4rTStI0rxYvyWMN5YTyRblY1OCy5F50qLjHeb1a52KPk73N1koq6Z+Hl1Z2/3DTfmVVYaGuetqrhSlo537EfoCsvCIry81XZCNVhqNNF7tNPeykjmaJbmwRZ65rj4NL1jGIdhInIFm5Y08rFJvqOrraienC0x0xJLU2s4kUK+wmp3WOdB696uL492zFQotnPJHy2OG+mtBTdwElLexLSmlgYOChsMjuOUvN6ROR6uZMSeZnSvKOOvlJbuzlZKnuyVXp+a368bTcEuuk1XRHpEYaN81N558WoHmWKGgjSwz24KQjpmJZs82iqnC1J1J8Ll6Sx0pXLkonLWowhdSLJlOlnHnPHlub+g6dr+z+4ab9hkSW9mphClIlLi6UV8orsjCWLiStzt+BHWkwKuzE2tJxXFm6QkUNqxISaca6+smkpDzvT2x1uRNSPhwjrWNEauWJaN4+tK3QUiZnprsyjW6102J21QC6F5aoSHm6MOW250xJNoPZAAN
fFhZJ1XRzd4+JJtNdLHlHS0nlpyg9WgvnST0f5/JprUmzSbf1wGluOv9Up1yUo30c5RfVy+VJF/uwUiHWkJzJ5U6Ll+pHcuWidNKiZvPO25eXri0f2mMtXmtVzume4Wb9dtsTpL1amEJIKOlkOMVNeUUWxhpfydRsN19sR/o/utKxw42mWZbI18v41RddjJo5Up5nHbfnaicuFn45BSW15hPRVB5Ip3f0nyql0esuDlIaiyVnNnupkO6JKs+mcm6kJiezAWpfFRbp49JVTU+RpPLdHt5vdT/pxVI+m/H5HcznVL6rrmm/lihJhbTGlXeL1pk84lI+7+Mw6EwWbZ/7Di7XlipI6ljpZFGTy5DLy8uSepFUP0/JXZnOaNOsTXeDrqzs/uFm/c47WH8q9syQUNLJ5ALVV+R6rMHU/R2YzBfb0SlYIl/9RnKOlcnA+crqmFanfezmppOa7I+Ud/1KrvV1uRMXC7+cgpJa9UT2XEn0uy+5R8t0adPokuq72PVzTZepIBXixLVRt+Q0NUndcMcA5qvCovLlWT9U+dENTet+Um56PFQcajCf02WF2l1Ped6GhfkXYnnZx2EwTVm0tenHk9J8dbzUq6RGr1N146IGlyX2MuozNr1nOtqm29SVru4fbtZGE6NLkvZqYQqSiDWkk+EUN+UVuR5La+hnR3pL7cx8sZ00QDdN7e4YSlLdOqWSDhl3alfnOlLeTdrP5nInLhZ+OQUltaYTGXUUty5d2thobTv8BZbOpxdVWsQacUaHNLqk5n0Dla8Ki6RmcYdK/nGDSyLXCs+AfO76SY/J9Ut5OJ/D4Mm7MnjHOVIhjZye54X5F2J52cdhME3J3rvIO3A5gdyrpI6VPmNRg8sSexmtXPOntS6mo226u2alq/uHm7aRRLXY5vanQlrE+Uh53b8qr8j1WFpDPzuSPdgRaTGfjJMG6PY6zVtS/cCSLZ/KJV1dQutgcMtos+udkArDhV9OQUmt6b1brzBOtqsVRx91cYh97c0nDcqJF1lC8o+t1NS0b6DyUFikb5bLB3IzfHTjy0kS+Xb3VbSf7oFIr7jy4QxTGM7nkLpcdfleKNeYpnM9/4qUHx3PJyKFA/sQOuDZg+xI8S46pTElNZ/QjYsaXJbYy+jKxVr3TEfbdMUrXd0/3LSNpjaplrj5qSgusZZXve/K7b4eSz4PDHZkuthOnkJaW1qY1h6QGrrOPHTcv4KUd0+NDqfN5OOA1pgv/HIKSmpNJ1JeyU0cIO1rLBx1cZIKZ410VSrSoLgpixXH4SUxnQxQeigs0idkflMr//QF8SGTRO7P3+uDftLTmJ5cFZ6l4XwO1w+49yf+0sby9WWkQlpjes6v5++0kc+hj47LPg5SOHAMoUlLWLLastHiJTWf0I2LGlyW2Iskqisn+XtfN03HDPZ0paunDZfauH3vfrZQ2quF7voq5X4HZY3rseTzwGhHZovdDZ9EndLRtSYtsScHpIb76FxcwvwQ7Hwz+ThgHU8XfjkFJbW6ifhdkY/VtZb8wYMTR5dE1cUhNZfUfO5SJUzcTzqInUuiWzFw6Qlh0fQpMOlhOC3cyP69O3gg0qu5HC08v8P5HFKXM+EHqYpZx1IhrTG92a7nv/MngDg6nq9QCgcG09CUJU6TxUtqPqEbFzW4LMWrOd9ETShYn86pbLPW1dOGy23CXsWu0l6tdKdNzjo5XSivyPVY8nlguCOTxYrpkyjp476Q1NlF19CTGu6jc3EJ86g730w+DuwdzxZ+OQUlta4ncuuDE0eXRNXFQevv+6idWWJAqoQ+y7uriQWSmE4GKD0UFtktXtyf2SM3sn/vavXuFZBezeVo4fkdzudQvu0r2lU061gqpDXGd8vC/MX+6wm8o+P5CqVw4GgTR5TB8rWZLl5S8wnduKjBZYm9SCJPtAkFi9MJBnu60tXThuvaaK2d/2JC2quV7jR5/JoJS0nRSHlFrseSzwOpnTNe7CYWqtF26FzO0qrtTte
vn+Wj012OTMq7HfTN5OPAMcHJwi+noKTW9USqix0K0qWNo0ui6uKkl8HaS+Ji6lI/9JmmcIoFkphPBqg8FBbpjXf9RD52I/vXmVbvBkyv5nK08PwO53NIXY5or9msY6mQ1pjebNfzb3SGydHxfIVSeHndtA//OfZ3sXhJzSd046IGlyX2Iol8EzWhYG060WBPV7p62nBFG623O+vf/FRsNB1UW+mUV+R6LP/5FqPFVjPf+ClIhh/erUy7tURJKnQ1issRSXm3hb6Z+zhzsfDLDqTW9USqqx0K0qWNo0ui6uLkG1zv+kbqhD7TFE6xQBLzyQCVx8Iie1QtNfHIjezfu1q9ewXcfAAM53Mo3/YdrbVpvzaqdXfdsVRIa0xvtuv578nNr/03VmlKCjfziUjh5RtDx9A+ZKWxxdXiJTWf0I2LGlyW2IskqrWFgpXpZIM9XenqacOVbcJfHh0N0l6tddd9ta7aSa+8ItdjyeervkuzxW7GT6JU0SF1Kq6wnHAgFboXwsUlHK3TN5OPKztRL/x6Ckpq3TeRUJB2Ko4uiYu1SB3dSHdFxqR6qDS8WLFAEle9A73HwiJ9JqZvE6X3a/Ho6ttseiP79+6gH19lUz424fkdzueQuqxpJf97vMqhA6mQ1pjebNfz33feL0HSR8fziUjh9RtDqukmyKewYZeLl9R8QjcuanBZYi+SKK6c1tr7WphOZ7CnK109bbhhm/PQ3Fvc/FSIoxuV6vfKK3I9lny+vgMHusWuPImaI+PrR8lV5YQDqdDdVheXcLRON5XbdqK/ytdTUFLreiK3PjhxdEkUXXiuhXySzAmpFCbuJx3oXbDPTRKrGwucHguL7DG4vLPHj258qUoi38i+yqCf9GpOT64KTYfzOaQuS9pLGKgcOpAKaY1pOtfzt+mFbmLOfCIrq9ucnegnzVXXi5fUfEI3Lmow8aoXSzix1sJ0OmWbta6eNtykzX5k7oVpI9a6a41+2Te9/0rfvFMqt/t6rNFVWhUXq12HAYspSI7cTPLJb+LFpdjUE75sJ+Xhlm785G7ciXyVF6YupNZdE4kDpH2NhUtrkTqtknTVTSmT2qHWcMVxeElcdg90HgyL8iM6ojdycYvG/Jgy/l4f9JOexvTkqvAsDedzWHnApUqsUw4dSIU0cnrOr+evNeK2S9bR8XwiWmqJCanWhpHtCONJ0XTxkppP6MZFDS5L3D6t0q88TuB6Or2yzVpXTxtu2kYL9+25+anYtDazx6JXXpHrsbSGfr6PX6z7uCumcIypTTXTSM7wUmzq2yrvX0fKux2VXJvAzTvhF765nIKSWt1E/KrqFeadTPsaRx91ERw9yP/vmLiOWdymMT+mgGUPhkV6610+CHs9S5zSDR5TJrx35XPXT6jSPbkqPr+S6OdzSl1WtMc4TDl0IBXSGuPcFuYvs0udSPmRN5+Ill6+kaze+f/TwuIlNZ/QjYsaXJa4faO1adN9AtfT6ZVt1rp62nDzNtrEEmmvVrqTZLfAqfKKXI+lNYZrX3EuVnuOw1VTkKxtzP3/jmTlZXj1hOeXYyPl6aaOo92+E+fCm8spKKl110SmD04cfWkt2uSXVu5m1LHanmT1F2vv2MQUsOzRsEjvxO4NuPsdn5eulj5xx2MkqXwjh/dufER36dVcPpw3Pr+py4r2aAkzWKcjFdIa49zqTkIdmV2avZQfHV9MREqvXxk66B8dL1RfWLyk0oTSnG9c1OCypO2TVLc2rXQ0vd7j3mBPr1f2yHDzfhNtYom0VytTkJR9XlRekYWxJHF9B06ci9WeJfNQTcFuY60ei8oZe9ooTzjtX0/K560kcctOnAtvLqegpNY9E9E6x3Bpo9Loksp3Q2ZdVM97QSrHWQ0ult6KcS637CugHg2L7FYcvFC2Qrst07O1S7mSyjeyjmAJfSDysxSqDF4UMXMwn1PqshLfTmrwwDpSIa0xTbjsJNTxn3eSd3R8MZF6H3tWzf7nLCxeUseEri+
K/7yTPLdb5WVJXWud1FFecbk/5RxPZZuVld033Eq/iQyzb1faq5UpSMo+LyqvyMJYWmW4+AXnYrUvyTxUU7B6VXVd+mztZYWLS9jfwEIyj7npdG7YCWkwf7B6Ums+kXsenDR63UVibeS/ljUh1eLEtYOubcqVVF4xcO3hsMjuxepRCH8cSE+Tyg+RJPONHN679kDEfrSGe0okWT5Jy89vGLVWPNfl9CKpkNZo7wlL1XsV6vjPRid8dFz24UjxfH2Njir/jXNeWLykzlYp2ViL5UUNLkvYmo0k40j7UJZa2ePeaE8le7ay+4Zb6TeR7dlbpL1amYKk0s5dKK/I8li54S3OxepwkrmzjUpTkCb6DeX5Mmof44thC73pcmykPO2oDnVmSvKGnTgX3qRtHZFaKxNJdWyJluoubRo9167pVjZpsEpVL01Caa/nRkhyYQAgeTwssgdhU96l+yNi1UKd/FTWN7L2Y4m+TR6oyQ/IOcv8/Ib5bHlHunzwEqlSjTJrJhXSGrXd0VM5dqijNfSzsk04Or6af7GPMkbefa3W/uMXupGC6eIldfZ3eVGuF7WyNRttFlciWa7hUkfJaE8vV3bfcAv9/tGfR72LHWpzS6xNwXZu5V+g7cpuV8bSOtd34GGyWPdRaWE9r7QvO8nNDba6R05fYR8lzCvSCmFN1urs6GonplfZhhju2k5qXUzELr+llGR1kz3SeTJVF7/y7GzgjWXMSL26A38t+l3MaWDR42GRu8f/Zz+D7//5X9e437r9nWxVXI6k842s1Syxd3NWOn/+nGVs8lhnnfP51afIv9Gkkn22Yit1lTypUo3iltSRCuVTfoyS3jwq1MmvAFuMyyr78GyyrobOPzU5es5vMM08aheLl5S7UJKuWiwvas9LlyVt374218zG7jpKi+06iso2m8uV3TfcQr/t89mp1j8WqbUtsTYFreO1H6xphTVtYnX2qkvLtdW4WuUdeIhlYbHy+SwsbkZj2dWu2267e0SXkfbTdXns1mSHrIb7gUo2jm9zsROxLCy8kbRdZ/lJliWttTQR13e/KenSagXXh6S7YdIOSZ2N3+yRsqLNy62230NJr4wARE8Ii9xLqHDepnstDZ2On0vm7uP6RtZ2ljjfRdqNhl+aJ8XKxpI/Zf3RoTTLPZ5xPnsct1fY3xlb4da8frhsKvIK+KO7oFl+TZlUSP2ld0vZSawjia2ODK3rk/GPjq8nYgu2Lw0c16Oc2iZvgdWfLF5Srtkx4Pa5vCiSmC3qmE68LGn7jlo61D670M/KHnfKNs3lyu4a7rpfK4938DGIpi2xOAXrI9JtHLC9jldkbbk22MUduJst1ppeP4nH+nLBxlayjRCGOKta2u4qGUBv0Mn+tGLhW7WklO5sqHonrHBwlffy1n9buSsIpFIzm8i+BVZn4cHpLmrqYh9Gyg577miynlTs7gmbme3KMYzvUDK6hsClZ4RF513eCTdlWSs8GJKTb2R9ACyx2R+I0x99FK1cSIb3S9u557foyA8eS+vnt+vB/s3W7GmXemmN6d2iG5U6iXX2l8/B/n3N0XHZR1StP8/srNV1dbl4yfT9SYaXLsrlojZxVB0rbk2zv8+DsLSFPe6M91QKvHy73TPcRip4sd9qlecYWmqJ1Sloray7K5w4Ce1/cbnV/IdjTRfbFQ6eRJ3DYJC9MDq76MuLt0oi1frbMY9fLW6vVJX5dcVpjbbPirNU/eYHp7+o5S7m66C5o7kGg5rljRqGkZylIYDgOWHRGa0H7iuponitxBqaZ4mdPqiWaNKT2+rLBy1V+fHenlvJCS+wftJ+Opal6qcrLaitRvpM6w60piVMerekN49Kdfr1Sc9Hx2UfSeqj6RvYGvsNuFy8ZluiubwoV4tqpGSnJWlrRL+2WL6yx9l4Ty9Xds9wm6t+0zXYuCG0rSUWp6CVev3lP1kVpRVXl5uW11Tbq2aLTWXjJ9FGrEfpR4gLz+X7KJNLaPXSQvvtnO3
EbOFNvGj10uxpzF31M+8nEuukS6sdhip9aNUvV3uZ7NtJahb3X78rK6cJcO1JYdF2k8Znc6NfRY1ipa6G5lpipw+ZJYR/IPRRkI9SdvDPpnylVcaOI8ZJp+n4p3v4/PpRZCbSY3w8I6ma1pjeLTqr1El+/4RNkJzz06bso5NeYOUyx0UXi5f8uNKri3KxqKa4LHlrRHo56xfbT0t7nMz29GJl9wwnrnZMO96FndKmlliagu6Zq/Ln+JukySyLK7K+XL++zWSYzWSxoSMZWCqnKRxdWKqjczykgzZdjpYh/U1mLTW3XvzUu17FbCdmC9+44rrvjZbGYcrKtz045UU97hqRe2i0F0vMSc1uyU3alXwVNNcSwLqnhUWb8xutf43/Lcv+ot2qWI6jJZbYaX1LGOvl6EMeTvt8sJGO7wOXhHw8bXOWSuV0NGoqi077TPYFt+Ro8U2rndeY3i3pzaP690+eXqtxdFz2Ufit3x2xLWB0ybQnSyTTxbei7mpeXZTpolR3WfqtUefa+qWV+zPqyMz3dLqye4Yz1ztmq8z3qWZbYmUKkhhEAvk6Bt0VuWW513egM3t9aNHVk9jqzPbcjVDWsumGUSbdSVfyyRqOX43TnZi+N/u197R1+3TLRPoq6dKOLuo+3cEgUjrZNkf7sUQy3RUtsQSw7plhEYC/mBwx/TEm2Rwvf7l2Ed/mKmowZQngzRAWAWjkrCr+1K1fCrIE/lLtIr7NVSTSxjsjLALQSPhDWPRDtYv4NldRJjP8OzzgexEWAWimYdHa94HgbbWL+C5hEV8swlsjLALQDL4qpN8Hwh/t/3JyFd8kFJG5EGjjXREWARDlaaX/Xpsz7G8nF/c9wiKJv/mGa7wtwiIAQgKgdHTq14o4w/56chnfIyySqRBo420RFgFQcl5th6f9sBn7NWwb/grtryfX8S3CIr5YhDdHWARA2ZeGOkRFfz+5kG8RFr3PTIASYRGAnf49WsRfd/wEcinfIRjhi0V4d4RFAA7pN2JtQRFfKvoR5GK+Q1jEv87HuyMsAuD93n8l1vR3bAHAj0RYBAAAIAiLAAAABGERAACAICwCAAAQhEUAAACCsAgAAEAQFgEAAAjCIgAAAEFYBAAAIAiLAAAABGERAACAICwCAAAQhEUAAACCsAgAAEAQFgEAAAjCIgAAAEFYBAAAIAiLAAAABGERBv7X/LLEo35vXf2xzwAAvKm/ISz68/uXnNDPOqKx5JlhkfT1P+IiAMB7e/+w6I/ERIJj9SvJlj8nLPotfT3tS08AALzG24dFf/REFYRFX0m2/DmRjHS14QICAN7au4dFPir6n+XhS8iWPycs2r/gR1gEAHhrbx4WWVT06w8H6pfTnbfEY/awyJIAALynNw+L9Dzle1K+wzO3Xvrii0UAgDf33mGRfrGIqOhbPHXvt/iWf6APAHh37x0W6b9g4jj9FrL3hKQAgA/yN4RFlsDXkr0nLAIAfJD3DovkW4s4mb8HYREA4NMQFmGAsAgA8GneOyziZP5GbD4A4NMQFmGAzQcAfBrCIgyw+QCAT/NYWCT/Usz+/fzvX/qdQL8H/57+t/4e/KKC68U62RLtf5lUOVjd//0uRmz5eqLvv35fEmGk9tk3nS/A+tmKLeMgufLpj/7Luf/96iupP7YJdYWtdN68c2xBnnK30DbxO9ZlC9NJF+319/i6poOrrJmWMOMbAgCA7/GEsEgORT3hVHV+WsBgYoWjl+OX5W+HsX0KtLrYD3uTR5TM9uEYV2ocI53T2SfrO+wihD3eUWl9krf93/2q/3oPQoUuDkmlljsRtyD2Vix0c8e6XDhZzUkLjnahO39RdKqWEMOqAAB8m+eERfGM60+58Atfm3DC7r2cta7CohhCNINQ5awn5ftIobmUxA5jb/1ooVhy+jV2Mch8zvMxennL6y1Nvd6+LutVPoewRkn2vtJJf1piic3F0AAAfIsnhEW/+iM9HXJ9eahhvbi4Yjtou2N/Y/X
LsuJI/+MHluxyvu3kzzP0MUY1mh9MM7rIL+1BX745qlSlIc5J+rCimHNxYW5elzXQut2MNNsaVWvY+9OJWGJeFQCAb/OEsOh/cubJ3wn9Of5Wxh9ydjjr3xr92U/0s4a0+WVfkRD69QcLNbrD+OhPuiiH1Iy9pJFsHam11+lqT7+1R+lun9056N6Jjrb/zZUbTNLWk3Sxt9BitQcCv/74bvZe9tK4om7hJ62wb2lfX3NkELswNm/d2WZxXT4RVtRI5l5Fe9inpGNLwUbLLDGvCgDAt3lGWLQ5DzU74d0pZ0f0eeDmaEcq2DcPq/3wlkQ+MG2E84Df46R8pB+zayT7yNmral/tv8co1v3RmVWxVD97ixg254zs2LdU002xxSlHAy3sVuQWlEi56z/XPy9MWset69rTOoCbYRO6sMusiU0L1o7etbkl5lUBAPg2zwmL3GE6OH59DTtxjxp7L1stOXf1CyqNZlpiJ5nxGK2PdGFfgNHqNpKrKDnboZ2zXASwdR7CAS23xEaSGz8jzbHERtvElfw+es1BzUbnZYnCr/R9QtLDOU0dL/Zw17r2DqqrGFvIIkIFN8EYFk2rAgDwbZ4SFoXT1TKPU68/A/dD1hJ7L12tdDIbPWHjkF2mJJt43tpIPlNzpjFNJsV5rDhLHeioo+tNkzloaSqUFaVVTuiAligXet+64mWME9J571khkWlrS8yrAgDwbZ4RFuVoRjL3IzmenTs5JvdD287wPipKJ7Oq8va4w1L7FLpYI0UrjR7YoT+tlZo60uRsIbVzdck76sx7jDGDkRXlZY6l0Kq8MPesKy0h9KhZlkgTSMIS51UBAPg2zwiL8gEXctNZa+Rk3MMTrd9XKkMgrdx9qSHNRFIXM1NFf0WtQMstUU4yL1uq5MBnN4gSpAf7vEA6OZZRLuEqHJmvK0Q2QjL2IQmLAAB/v1eERXrs2XnpP5+kyn7kai/VOSn5MeLoT2cRxrSGXaxSzVezwgRTX515+KCKOGDUYepuJz2sxw7SyTFEsdBH16Wlbp3a3T6GprodN8V2jKoCAPBtXhEWhQNVPvdHsWTb5+7APRQFRZaQ/ONM11Q3sWK+RdZj4YMKccBgm0zqbifZ40lk0skrw6K0w7bEs1xKR6sM2zGvCgDAt3ldWKSHoNYYkBp7neq4loIQcQyP9urgtcSpmG+RtRY+HE0kNQ2L4twyKRwYTyKT6scsilU9vC4tPpprb+cQmt6kUUXcgWlVAAC+zfuERdURKQUh4khn8yl2IokcqpQjFVmD8OHP/ote1dFEUtOwSD6/JixqczondVdYtLyumKFDWKJxfehPRXDCdsyrAgDwbV4SFrlDUGsMSI1hLxspCBHHsG4skEQOVcrWRVYVPhw/uvpwNJFUHivEAfK5n42R0oF5WOSjGZFilrRNj65LR9vbh4QIs4nDhO3YTKoCAPBtvjMs2o/cQS8bKQjxxLBuLJBEH4gUrYusPnzQStHRRFJ5rBAHyOd+NkZKB4qF7vZf5+EdY5Tb9Oi6tL3laEI/72J3/utAOSyaVAUA4Nt8TVgkH4cGvWykIMQTw7qxQBJ9IFK0LrJy+KDp7GgiqTzWrWHRzV8xKYIiN0a5TQ+vyy9KPnaz1nF35/hdWDSuCgDAt/masGh+6I3rSEE4mYd1Y4Ek+kCkaF1kpfDhCEB+/f4tv0QkNZFUHivEAfK5n42Zl9aOiOZXm1TL0ZQUbsptenhdWkE60L7SAE34W7mjvAiLRlUBAPg2LwmLJPeVYVH3VYr94N07kUQfahQjFVkxfLDowf0KstREUnmsPizKQcFBCvu5zuxRkdsHSR+9lFv6+LrOVUnd4jKIM97ZOwvb4fVVAQD4Nq8Li/RATV+hKA162UhBOJmH/cWDVxJ9qFGMVGSFMSwC8RVSE0nlscJ0hkGBmJeWtEkYM+aUW/r4urRG60E+pP6dPdrZL9RkjbkqAADf5hVhkZ64dqDK5/mRV/fSSEE4mUPfXsyPqUM
xUpEVwgf5HA/01ERSeawiLKoW2Gh3lliiLeKmStYxi2JVT1nXnnc9Zwu7LDUN/WJVAAC+zSvConBqy+f5kVf30khBPJklq+8vhUsxdShGKrJ8+KCf49xSE0nlsUIcEDakMy+tSOdpROnkyCtW9ZR17QGe/W9Ge7PENCyKVQEA+DavCIuKkGB6go6rSEE8mQeVdcwjtpBUDlXKxkVWHz7Ix0NqIqk8VowDJDE896Wwn+yYdJ7iqNhJuUvPWJdmVq0z7c0ShEUAgL/BC8IizTzOU0lNz7yyFyEF8WTWE7nrL+VKqo80ipGKLB8+aLF8PKQmkspjxTggBW2J9jcqLVT1Je+YRbGqZ65Lal5MWOrszRfCov5qAQDwxV4QFknemal1Zmdo2YuQgnRelrVz3CHJ/qAt2hZZF+GDBWZHE0nV4YMlRrHcbl7ak+pxR3W8YxblJj1jXVapSb1nMqO9+TwsClXbt2C7fx4HAMDXeUZYFA/IdEDvh3466LZaR055hgspSCdzPr0b7SGPmRpuipGKLB8+9CHIHhgcTSSVx0pxQLcpm19Hupt+s43TL0BpfUsI7f9sUKzqOevaR4ptmz+/Qk4YLG3HrKpNI00dAICv8JSwyB+RdmxaqrGDzh+w0m5+hgspyCdzf3DamC5H0t2RXo1UZIVjWj6f5eev3DiyJJXHinHA3o37KogMEqMGPwsdJ0zrpHM+h9T0Zr6lz1jXOVaeW8zTsY7WcTva51FV678bFwCA13tOWLQdtnLMHb+7NJyZeu61OnWl8gwXUtCdkHt7HfP4gYC+A8noj9ZipCIrhA/WuwQ0fzR4SE0klceKccC5B/p1kmPOUraxJf1KSxoFB1Ys+6mV9dfoH/XTFNUz1nWMXS843QdHX5oOiUFVTfmtAwDgqzwrLIqO41DtMUF0VCrPcCEF/clcDhvaS86oYahZZIXwYT/ET38062gimXWUYImm3INz2G6Upl+A6Tr7pVlHg3JLn7GuY/NT3+UC8vJCIoqjbiwJAMAXekZYdP4FjOq/YbaICdx5W57hQgqK4KDvL42peZY4FSMVWTF8SGO1gaTN0USzLbELcYDIu5TaFLFCXFKQare5tv8fPZZb+ox1bSS/D1v6a+KGj9sxq2orK8YFAODVnhIWxXNuP3eD4hA+lGe4kILyhNQ2u/gtvBvNtcSpGKnI0rmeXfoYRCpKm6OJ5OexYhygjr89FPp3SE4oHWzjwe+njn1+2pRb+ox1bSS/mp0Ougst83aMq9rC8uYAAPAFnhMWbaeZnvm/ugDlYDXkd75blinPcCEFxcncuP4sx9ESS5yKkYqsHD5slXRxRyTTkkeTVtSNpXOzxOH3PucuJhK/9TuEtuK6PNi/p2ifZ5v1MYtyS5+xrr0XSyTDa6LZlhDDqu2ragvLBwDg+Z4VFuFTSDSTQhkAAH4EwiLcRL9YxCUHAPxEhEW4if7VnSUAAPhRCItwC75YBAD4wQiLcAu94pYAAOBnISzCLeSC8w3XAICfibAIN+CLRQCAn4ywCDeQ680XiwAAPxRhEdZxvQEAPxphEdbJ5eaLRQCAn4qwCMv41/kAgJ/tsbAIAADgxyAsAgAAEIRFAAAAgrAIAABAEBYBAAAIwiIAAABBWAQAACAIiwAAAARhEQAAgCAsAgAAEIRFAAAAgrAIAABAEBYBAAAIwiIAAABBWAQAACAIiwAAAARhEQAAgCAsAgAAEIRFAAAAgrAIAABAEBYBAAAIwiIAAABhYREAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAvND/+T//Hz4mAUf0tsALAAAAAElFTkSuQmCC", + "text/plain": [ + "" + ] + }, + "metadata": { + "image/png": { + "width": 400 + } + }, + "output_type": 
"display_data" + } + ], "source": [ "from IPython.display import Image, display\n", "\n", @@ -298,7 +321,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 28, "id": "94517a3e", "metadata": { "id": "UXYSesx4Ifp5" @@ -365,7 +388,7 @@ " # Information that needs to be stored across episodes, aka one simulation run\n", " inter_episodic_data = {\n", " \"buffer\": ReplayBuffer(\n", - " buffer_size=world.learning_role.learning_config.replay_buffer_size,\n", + " buffer_size=world.learning_role.learning_config.off_policy.replay_buffer_size,\n", " obs_dim=world.learning_role.rl_algorithm.obs_dim,\n", " act_dim=world.learning_role.rl_algorithm.act_dim,\n", " n_rl_units=len(world.learning_role.rl_strats),\n", @@ -417,7 +440,7 @@ " if (\n", " episode % validation_interval == 0\n", " and episode\n", - " >= world.learning_role.learning_config.episodes_collecting_initial_experience\n", + " >= world.learning_role.learning_config.off_policy.episodes_collecting_initial_experience\n", " + validation_interval\n", " ):\n", " world.reset()\n", @@ -462,7 +485,7 @@ " # save the policies after each episode in case the simulation is stopped or crashes\n", " if (\n", " episode\n", - " >= world.learning_role.learning_config.episodes_collecting_initial_experience\n", + " >= world.learning_role.learning_config.off_policy.episodes_collecting_initial_experience\n", " + validation_interval\n", " ):\n", " world.learning_role.rl_algorithm.save_params(\n", @@ -508,7 +531,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 29, "id": "daed035c", "metadata": {}, "outputs": [], @@ -603,7 +626,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 30, "id": "632844c2", "metadata": { "id": "0ww-L9fABnw3" @@ -639,10 +662,25 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 31, "id": "13af92d3", "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "image/jpeg": 
"/9j/4AAQSkZJRgABAQAAAQABAAD/4gHYSUNDX1BST0ZJTEUAAQEAAAHIAAAAAAQwAABtbnRyUkdCIFhZWiAH4AABAAEAAAAAAABhY3NwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAA9tYAAQAAAADTLQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAlkZXNjAAAA8AAAACRyWFlaAAABFAAAABRnWFlaAAABKAAAABRiWFlaAAABPAAAABR3dHB0AAABUAAAABRyVFJDAAABZAAAAChnVFJDAAABZAAAAChiVFJDAAABZAAAAChjcHJ0AAABjAAAADxtbHVjAAAAAAAAAAEAAAAMZW5VUwAAAAgAAAAcAHMAUgBHAEJYWVogAAAAAAAAb6IAADj1AAADkFhZWiAAAAAAAABimQAAt4UAABjaWFlaIAAAAAAAACSgAAAPhAAAts9YWVogAAAAAAAA9tYAAQAAAADTLXBhcmEAAAAAAAQAAAACZmYAAPKnAAANWQAAE9AAAApbAAAAAAAAAABtbHVjAAAAAAAAAAEAAAAMZW5VUwAAACAAAAAcAEcAbwBvAGcAbABlACAASQBuAGMALgAgADIAMAAxADb/2wBDAAMCAgICAgMCAgIDAwMDBAYEBAQEBAgGBgUGCQgKCgkICQkKDA8MCgsOCwkJDRENDg8QEBEQCgwSExIQEw8QEBD/2wBDAQMDAwQDBAgEBAgQCwkLEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBD/wAARCARgA9ADASIAAhEBAxEB/8QAHgABAAIDAQEBAQEAAAAAAAAAAAcIBQYJBAMBAgr/xABzEAABAwMDAgMDBQgLCQoJARkBAgMEAAUGBwgREiEJEzEUIkEWMlFhlhUZI0JXcYHUGDM4UllidpGTtNUXJFhydYKhs9M0NzlDc3SDsbK1JSY1NlNjd5fRJ0RVVoSVosHSRlSSlMUoRWRmhaPCpMPE4vD/xAAZAQEAAwEBAAAAAAAAAAAAAAAAAQIDBAX/xABHEQABAwIDBgMFBAoBAwMFAAMBAAIRAyESMUEEUWFxgZETIqEyQrHB8CNSctEFFDNigpKisuHxNEOzwhUkwzVzg6PSU2OT/9oADAMBAAIRAxEAPwDqnSlUr1832a96R6uZBp3h+yfMc4s9ncYRGv8ABXNDE0LYbcUUeXCcT7qlqQeFq7oPoewIrqUrnX98z3Qfwc2oH9Jcf7Op98z3Qfwc2oH9Jcf7Ooi6KUrnX98z3Qfwc2oH9Jcf7Op98z3Qfwc2oH9Jcf7Ooi6KUrnX98z3Qfwc2oH9Jcf7Op98z3Qfwc2oH9Jcf7Ooi6KUrnX98z3Qfwc2oH9Jcf7Op98z3Qfwc2oH9Jcf7Ooi6KUrnX98z3Qfwc2oH9Jcf7Op98z3Qfwc2oH9Jcf7Ooi6KUrnX98z3Qfwc2oH9Jcf7Op98z3Qfwc2oH9Jcf7Ooi6KUrnX98z3Qfwc2oH9Jcf7Op98z3Qfwc2oH9Jcf7Ooi6KUrnX98z3Qfwc2oH9Jcf7Op98z3Qfwc2oH9Jcf7Ooi6KUrnX98z3Qfwc2oH9Jcf7Op98z3Qfwc2oH9Jcf7Ooi6KUrnX98z3Qfwc2oH9Jcf7Op98z3Qfwc2oH9Jcf7Ooi6KUrnX98z3Qfwc2oH9Jcf7Op98z3Qfwc2oH9Jcf7Ooi6KUrnX98z3Qfwc2oH9Jcf7Op98z3Qfwc2oH9Jcf7Ooi6KUrnX98z3Qfwc2oH9Jcf7Op98z3Qfwc2oH9Jcf7Ooi6KUrnX98z3Qfwc2oH9Jcf7Op98z3Qfwc2oH9Jcf7Ooi6KUrnX98z3Qfwc2oH9Jcf7Op98z3Qfwc2oH9Jcf7Ooi6KUrnX98z3Qfwc2oH9Jcf7Op98z3Qfwc2oH9Jcf7Ooi6KUrnX98z3Qfwc2oH9Jcf7O
p98z3Qfwc2oH9Jcf7Ooi6KUrnX98z3Qfwc2oH9Jcf7Op98z3Qfwc2oH9Jcf7Ooi6KUrnX98z3Qfwc2oH9Jcf7Op98z3Qfwc2oH9Jcf7Ooi6KUrnX98z3Qfwc2oH9Jcf7Op98z3Qfwc2oH9Jcf7Ooi6KUrnX98z3Qfwc2oH9Jcf7Op98z3Qfwc2oH9Jcf7Ooi6KUrnX98z3Qfwc2oH9Jcf7Op98z3Qfwc2oH9Jcf7Ooi6KUrnX98z3Qfwc2oH9Jcf7Op98z3Qfwc2oH9Jcf7Ooi6KUrnX98z3Qfwc2oH9Jcf7Op98z3Qfwc2oH9Jcf7Ooi6KUrnX98z3Qfwc2oH9Jcf7Op98z3Qfwc2oH9Jcf7Ooi6KUrnX98z3Qfwc2oH9Jcf7Op98z3Qfwc2oH9Jcf7Ooi6KUrnX98z3Qfwc2oH9Jcf7Op98z3Qfwc2oH9Jcf7Ooi6KUrnX98z3Qfwc2oH9Jcf7Op98z3Qfwc2oH9Jcf7Ooi6KUrnX98z3Qfwc2oH9Jcf7Op98z3Qfwc2oH9Jcf7Ooi6KUrnX98z3Qfwc2oH9Jcf7Op98z3Qfwc2oH9Jcf7Ooi6KUrnX98z3Qfwc2oH9Jcf7Op98z3Qfwc2oH9Jcf7Ooi6KUrnX98z3Qfwc2oH9Jcf7Op98z3Qfwc2oH9Jcf7Ooi6KUrnX98z3Qfwc2oH9Jcf7Op98z3Qfwc2oH9Jcf7Ooi6KUrnX98z3Qfwc2oH9Jcf7Op98z3Qfwc2oH9Jcf7Ooi6KUrnX98z3Qfwc2oH9Jcf7Op98z3Qfwc2oH9Jcf7Ooi6KUrnX98z3Qfwc2oH9Jcf7Op98z3Qfwc2oH9Jcf7Ooi6KUrnX98z3Qfwc2oH9Jcf7Op98z3Qfwc2oH9Jcf7Ooi6KUrnX98z3Qfwc2oH9Jcf7Op98z3Qfwc2oH9Jcf7Ooi6KUrnX98z3Qfwc2oH9Jcf7Op98z3Qfwc2oH9Jcf7Ooi6KUrnX98z3Qfwc2oH9Jcf7Op98z3Qfwc2oH9Jcf7Ooi6KUrnX98z3Qfwc2oH9Jcf7Op98z3Qfwc2oH9Jcf7Ooi6KUrnX98z3Qfwc2oH9Jcf7Op98z3Qfwc2oH9Jcf7Ooi6KUrnX98z3Qfwc2oH9Jcf7Op98z3Qfwc2oH9Jcf7Ooi6KUrnX98z3Qfwc2oH9Jcf7Op98z3Qfwc2oH9Jcf7Ooi6KUrnX98z3Qfwc2oH9Jcf7Op98z3Qfwc2oH9Jcf7Ooi6KUrnX98z3Qfwc2oH9Jcf7Op98z3Qfwc2oH9Jcf7Ooi6KUrnX98z3Qfwc2oH9Jcf7Op98z3Qfwc2oH9Jcf7Ooi6KUrnX98z3Qfwc2oH9Jcf7Op98z3Qfwc2oH9Jcf7Ooi6KUrnX98z3Qfwc2oH9Jcf7Op98z3Qfwc2oH9Jcf7Ooi6KUrnX98z3Qfwc2oH9Jcf7Op98z3Qfwc2oH9Jcf7Ooi6KUrnX98z3Qfwc2oH9Jcf7Op98z3Qfwc2oH9Jcf7Ooi6KUrnX98z3Qfwc2oH9Jcf7Op98z3Qfwc2oH9Jcf7Ooi6KUrnX98z3Qfwc2oH9Jcf7Op98z3Qfwc2oH9Jcf7Ooi6KUrnX98z3Qfwc2oH9Jcf7Op98z3Qfwc2oH9Jcf7Ooi6KUrnX98z3Qfwc2oH9Jcf7Op98z3Qfwc2oH9Jcf7Ooi6KUrnX98z3Qfwc2oH9Jcf7Op98z3Qfwc2oH9Jcf7Ooi6KUrnX98z3Qfwc2oH9Jcf7Op98z3Qfwc2oH9Jcf7Ooi6KUrnX98z3Qfwc2oH9Jcf7Op98z3Qfwc2oH9Jcf7Ooi6KUrnX98z3Qfwc2oH9Jcf7Op98z3Qfwc2oH9Jcf7Ooi6KUqPdANScm1e0ix/UXMdPLhgt4vCZCpOPzy4X4RbkOtJCvMbbV7yW0uDlCeyx6juZCoiUpSiLHXrIsfxpiPKyK+2+1MypTUJhybKQwl2Q6rpaZQVkBS1qICUjuT2ANZGuTPjLbgpozPDND8WubjJxwoyW5rZWQUTVciKnkeikI61/
9Kk10K2oa2xNwugOIaotOtmbcIKWLo2g/tU9r8HITx8OVpKh/FUk/GlH7ak6qNHR0ynuCO29K32NRtM6ieucdoPfcpUn3CBaobtwuk6PDisjqdfkOpbbQPTlSlEAfprFW7O8HvExu3WnMrFNlvchtiPcWXHF8Dk8JSok9gT+ivbf7BY8ps0vHsls8K62ue0WZUKawl5h9B9UrQoFKh9RFcJNg0CJavEVxW1wGQ1Gh3y9sMtj0Q2iLLSkfoAFKP2u0CgdQTPLP4hK32ezurj3dOhPyK710pXyZlxZC3GmJLTi2jwtKFglB+ggelEX1pSv4eeZjtl591DbafVS1AAfpNEX90r8BCgFJIIPcEV+0RK+E+fBtcJ+5XOaxEiRW1PPyH3A2202kcqUpSuAkADkk9hX1WtDSFOOLShCRypSjwAPpJqId2mit63Ebfcr0lxrI2rLcb4wyYsp0q8lS2nkOht3oBV5a+jpJAPAPPB44OdVzmsJaJKvTa1zwHGApGxLNsMz61fd3BcusuR23zFM+2Wme1MY60+qfMaUpPI5HI55rNVTTw4NmOo20fHsvOpOUWudPymRFU3b7U847GjIYDg8wrWhHLi/M4PCeAEJ7nngaZvH3+Ix3WrCNuGieQpVdn8ptjGWXaKoKTFaMpsKgNq7guKB/CEfNHuepV09DmtNZlFhkvgdTE9BN+2dliHOFN9V4gNk9B8zp3yV7rxkFhx5lEm/3uBbGXVdCHJklDKVK454BWQCePhXys+WYtkTrjGP5Larm4ykKcRDmNvKQk9uSEE8Coa30Yji+V7UtSzkuP2+5qtWOTrhAXKjocVFlNsqKHmiocoWCPnJ4PqPQ1z48ET/fX1I/k9F/rNZ7P9tWdRNobi9HH/xV6/2VFtUamPUD5rsDSvm2+w8VpZebWW1dKwlQPSfoP0GvpREpSvmy+xJR5sd5t1HJHUhQUOR69xRF9KV8y+wHhHLyA6pPUEdQ6iPp49eK+lESlKpT4h1v3MWPQrN9SsX1+Ri1nsRbcYsuP2kxpEiIt9DR864KdU6HOHOr8EltPbpIPzqzq1PCbiK0p0/EcGjVXEi5Jjs29S8bh363P3e3toelwGpSFSY7a+ehbjQPUhKuDwSADweKyVcmPBHlyp2catzZsl2RIfgW1x111ZWtai6+SpSj3JJ+JrrPXTUp+G1pOon1K52VMbnAaGPQH5pSq+769Psq1Y273HTXDmZirjkl6stv82K0txUZldwY819QT3CG0BS1E9gEnk1F2aeHLtS090SymfEwi53O9WXG58pq7TshuCn1yWoy1JdKEPJaB6kg9IQE/VXK6pgpvquFmzzMAE/GF0Np43sptzd2EmB81dOlchvCs21aL7iNP86u+smJP5FLtN3jRYbq7vOjFppTJUpI8h5APJ78nk1aHTHZFhuiG6+4IxjHJ920vzbBZrEy23cOXKFDmtTYp8hS3uoFK0ELQlwlXLbnBPT26X0yyoKbtRPD2cQB55c1ztqB7C9uhjj7WExyz5K7FK4vS9DtNpfivL0JfsSxgj9yLqrKzLeaYANqMny0lCwpKPN79IIHHb07V0DyLw69ukyIteAsZbp3eEgmPdsYyiew8yv4HocdW2eD/F5+sVkw46DK+jhMarV4wVnUdRF9Lz+Ss9SuaFp3W7hNi2vUDQndhk6880/vZSqzZi810zGYyl9AeWv1cCFdnULKlp+clZHAVe/WFOot+wtmwaRXJu33TI5LUI38JQ6izwlpKnpqUE8OLCElLYHI8xxBPugmpN6YqU7g23Xyg7o13dFHsvNN9iL9M5G/48LhbhKyCwwbgzaZt7gR50jjyYzslCHXOfTpQTyf0CshVINU/Co24ZJgt6lQ5WVJzlUV2UnLblfpEuVIlpQVByQlavKUkqA6ulCTwTwR2r3eE9qZqLqRtlfRqFcpl0Vjl+kWa2XCWtTjj0RDTS0oK1d1hClqSCfgAPhU0wKmNvvNAJ3EExbrwUVCWYX
aOJHEECb9OKujSlKhSlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJWMyfI7Rh+N3XLL/AC0xbZZob0+Y8o9m2WkFa1foSk1k6q7viuUnObfhO1KwXN6JdNYbymFcnYyh50Wwxfw895PIIBKUpQOQQepQ79xWdTGRgp+06w5nXkMzwBKuzCDif7IueQueug4qrevu2m762bF8i3E3e0LGoN8vcjUzoKeXmrU4kNtwwfXoRBQ0sD98itP8F/Xz7i5jku3m9zemLkDZvlkStXYTGkhMhtP1raCV/wDQn6avD+wplG0/cL9lvr/9zvZ/ZPZPu/bvK8jp6fL6fYOOnp7cenFcbNQcdybYxvDfh2SXIekYBf2Z9sfeIC5kFXS42FlIAPmML6F8ADkqHArbZ306W1GmLU3iBwAAaOwDTxgrOs2pW2YPze0k9SSbbgSXDgHAL/ROfSuDuxb/AISTH/5RX7+rS67iYHmti1Hwmx57jEpMm1ZBb2bjEcB55bdQFAH6COeCPgQRXDrYsR98kx48/wD3RX7+rS6UGln6RY12Ya/5KK7g/wDRtRzcjHwcpo8V/cxqlL1zY24Y3ktxsGKwIkJdwbhPKYVcn5IC+XVJIKm0pUkBHPTyFEgnjjb/ABLtuel23XQTAtQdD8fbwrIbTeI9lcudmdVFlTGXIrqlKedQQp1fUyD1qJV3V34NZ7xWtO9AczzTDISMpl23XC8GNbLNChMtuMzGHH+hkzlLWhMdsOKUEu9RVx1e4sD3ZF1+2hbs92WF4Vpxq9qFprjllx95uZc5VhZnTJM2Ulot+YG3ktoHCVL90KA5WT6ACudjHO2WGe14lzvAIOfASCNJhbvc1u0Au9nw8txIjLibg8JWrbE8/wAe0z2aZVuzyXMc9v1wiQpMe7QL9f1ToplRFq8oRAtIU15xdaSeVK4J4+FRbsDduG/bXnP9TdzqW80gY5BYNrsFwBdtMByS6sJDcVR8vhDbS0jkEkqKiSrvVq9ddqdowbw98v286QxJcgWyzGax5hC5M+Qy+iU6pXSACtwtqAAHHcJA4Aqq/gdzo7d81cta1gSHYtofSg+pQhclKj+grT/PXYxzK22VNQ1nlHMGSPWN0LleHUtjaci59+UtgHoYO/itbybXW/8Ah778r3p5iNzmJ0inToT8vGXH1uRYcWW02ta4yFE+UtpS1lPTxylISeR6Sr4zON2S04DhOq+OLkwcguN4Frfnw5brftMT2ZxxCVJSrpPBQCDxz3Peqx+JTYbpqX4gtxwXEoqpt2uKbJaGGWhyVPuR2uB9XHWCfoFWn8ZO1mybadMrKXS6YGQsxes/jdEB1PP6eK4HEu/R9Oo43DwAdSMQGfLuHFdjQG7c5rciwkjScM+pnsoT0Q2r6+7w9kEd236tvRY2O3KY3juNPOKTFuzoe633pjxUT5nKy21yClHl/DrJEj59tez7Rfwysgia3XAv5liE43LHVw7o6tVkjPyYraoyXEKCVJUAtRT7yR19u45qfPCOIOzGygEHi93QH6vw1bn4lf7iXU3/AJpD/rzFdP6S+wY/B7wYesNMj6ysICw/R/21Sni0c4dJIjlHfWVVjwWZlxynGdYbff7tcJjTzlrYJcludaErblBXQvq6kHj4pII7H4VTDWzSbBcO37zdIMetb8bFmc0ttuRFM19bgYdWx5g89Sy7yetR6urqHPY1crwOf/I+rn/ObP8A9iVVY9y7iGfFAubrywhCNQLOpSlHgAdUbua6MIP6VoNIsWtnj7H5nuVgCR+j9oIzDjHCzvyC6i6z6Iac6D7MdZMW00tc2DbpuOXSc83LukqcovGL0EhchxagOlC
fdBA7c8ck1zV8LPSa86z5znuFNajXvE8fkWNhV9NjWGZ9wY8/hMZEgg+QgqPKykFSkjp7BRNdU93uTY/P24a1YvCvEV+7WnC5j86G24FOxkPR3fKKwPm9XlrI59QOa56eCJ/vr6kfyei/1mufZPtNsqY7/Z/APXRtP2eyMw28/wASz8/moRzqPeth+/OVjmkOVXdNvst5txCZEjlU2FJbZdXGkdICXR0uqTyR8ArsRzXQbxdMQsa9ti9TWGX4uTWS5wYkO4RpLjTiY7rpC2iEqAUk9XPcHgjtVBvEa/4QzJf+fWL+pxa6J+Lf+4vvP+WrV/rq53OL/wBGUHuzxC/Vn5nud62Y0N2+o0ZFk9cLj9dNyq34cOgk/dXoTluOam6q5dGwuDkKwqz2icWHJ0pyMzyuS+sLK2kJSnoaACepSlK57AQNtLv+rmju8OZoLpPqO9ZY99v8/EZUiQz7QyG0OONiX7PyEF9AR1oJ7BXY8pKgbreCZ/vEZ1/Ksf1RmqY6Hf8AClQ//abdP6xIr0C0D9JUqXuvaJ4yGC/QkcuQXCCf/T6tTVrjHCC826gHndbh4mO2y1bWM1wLULANQs0n3zJ/bXp11u91L84zYymSJCX0pQpJV53oPTpHHFdTtm+pOQ6vbYdO9Q8ske0Xm62hInP8cF95pxbKnTx25UW+o/Wo1RTxyP2nR7/Gvn/VDq33hxfuKNLf8myP64/WGxuLtkqA6Ptw9rL60C22kAV6ThmWGeNwFZOq1eJB+4o1Q/yfG/rjFWVqtPiRLSjZRqeVqABgRQOfpMxjiuXbP2Dl07J+3ZzHxVKfA9/879Vv8m2z/WvV1srkl4Hqk/LHVVHI6jbLYePq816uttentPs0/wAPzK8+h7dT8X/i1K0bXX/eR1B/ktdf6o5W81o2u6gnRDUJSjwBi115P/0o5Xmbb/xqn4T8F6Gyf8in+IfFUN8EP/eu1L/y/E/q5rpXXNTwQiP7l+pY57/d6H/VzXSuvS2v9oPwt/tC8/ZvYP4nf3Fcm1/8Nyj/AJ5/+QDXWSuTSlBXjco6SDxN47f5ANdZCQByTwBXNQ/4FD8PzXRtH/Oqcm/Nc7PGvxy1y9BsKyt1pH3QtmUiEw5x73lPxXVOJ5+jlhs/oFWp2VX26ZJtN0qvF6cW5MdxiG2tazypYbR5aVH6eUoSf01RnxB87lb09b8I2hbf3kZB9w57k6/3OKfNhxXyA2VLcT7vQw2XCtQPHU4EDlQ4rpfgGG2bTTA8dwCykIt2O22LaopXwCpDTaW0k/WeOfzmmy22eo45PeC3iA2CeU5HVNovXpt1Ywg8CXSB27aqqu/PUHeOxp9kONaFaDyHrC4w4xdMiRdI8ic5DI4dEaE055qQpPI6+6wkkhCDwoZHw2NyGnGtuiww3DMEg4RccDDUKfYoS1KjpQ51FElpS+VqDikudXWVLCwrqUrkKNu6oXsS09jRd3W5nUrD4qWMKdvhssFxlPDEiYHVOyQ1x2IbXyO3YeYKbNao6k64c0unUFsAdDigDQknMptF6bagza4CNDimeoDZnhuCvpSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJVY8m2I49lGtCNfpW4XWSLmMXzG4EqLdLYlqBHX1f3sy0qApIZAWodKgrnklRUSSbOUqIhweMwpmWluhWKbss9GMDH1ZTdXJgheyfdpSI3tpc6On2ngNBjzefe48ry+r8Tj3ap9qb4U2i+smYS8+1M1k1ev1/nJbQ/MfuVrSpSUJCUJCUW9KUgAAAAAVdalC0OdiIugJa3CMlA2jW0m26E6aXXSnA9b9T02Sc0puEZky2vvWZSlFTi
4azCAbKuTyFhaRySlKVHqqHMI8JnQ3TnOLdqRhmr2r1tyS1yjMi3BN1ti1pdPPUSFwCFdQUoEKBBBIPPNXcpV8RFTxfe362VcIwGn7p00XI7xBNEse3Ob1YemeiuQTZOp67Wy1fok9lLVnhMMs+YlwyuouJWG1o5bQ04CVD3geRWmJ8IzevAb86FqHhYWyOW0MZHOQrkegSTGAB/SK6Q6ubJ9L9UdTout9oyDKsC1EhoShORYvPRHefCUdCQ824hxtwdHuH3QVJ91RIAFe9O3zVq4Rjbcn3iamTIBT0qbt9usdtfWPoMhmD5o/OlSVfWDWNFnh0g0WdcndJJNtY5haVX46mLSAONgBf8A3lGqrZ4Uupeu90kao6Jaz3a43g6czmIbMmfJMp2JIK3m3ovnkkuIBZBSOTwOeOxAqbZOxbDcX1Yn62aA51e9KsnvDbjN0atcaNLtk1LigpZVEkIUlJKgFe4UgEcgAkkzHpHovpzobjCsT03x9NuiPyFzJjzjy35U6Sv578h5wlbrivipRP0DgcCt3reo7E5r/eAgkWm0Hod2ozusWNwhzfdJkDON3UZzoclXvRbZTphpLqLdta7xdLvnWpN7dcelZLfi0XGlODhfs7LSEtsAp93sCoJ90EJPTXx3PbI8E3ZzbevUzUvUKJa7T78KzWeZBYhMvEdKnuHIjjilqHblSyAOyQnk82KpWbgHBrSLNyGg5BaNJaS4ZnM6nqoG20bQsa2rw3rHp7qrqFccdeW4+qxXuVb5EMPrCQXkluG28hXujslwJPxSaz25DbjYtzeGo0+y7P8AMrBj619c6Fj0mIwm4EKQpsPqfjuqKUKQCEpKRyeVBXCeJbpU1PtYx3j5ZdtP8KGfZGWW/wAqs+2PYVpvtNyWdkOl+pOobzF1aDVxtV1mwHoUzpCvLUtKIiFhSCtRSUrT68HkEg6nub8MfRrcnqO5qrIyjIMVv85LSbkq2+U4zMLaQlLhQ4nlDnSlIKgeD0g9PPJNxKUd5y1zs25I3yBwbrmq6u7HtN29DpWgtkzrO7JZ7w6p7IbpBuEZd0vxUgIUJciRHdJSUhI6WwjgJCfm8g61t48OTSrbDnYz7S3VHUxmU6yY02HMn256JOY5B8p5AhJUR1AEFKkqBHYjvVr6VIcQ81BmfhERyi0ZKC0FgpnIf7nnOuapPqH4UGh+qmd3PUvOtXNW7lkV2kCTJmKulsQStIAQEhMABISlKQkAAAJFSjrZswsG4LALFprqTrbqhLstlSkuJjy7Yy5cnkFRQ/KUIPvuJSrpHSEp4SCUlXKjYelVwgMFP3ReNLZfX5KxcS81NTaearbt42NYZtft2RWjSnV3UqPCySOpD7E2VbJCI8npCUS2QYI6XkAcDq6kEH3kK4HEaWXwmND8ez2PqhZtYdYI2VRribs3dBdrap4SysrLp5gEKJUSSCCDyQRwau7SrYjjFSfMLA6iPr0G4KsDAWaG5H19Z7yqv7j9gGnW6jILdftVtVdSXU2eP7Pb4MCZbWIsblKA6tKfYiorcUgKUVKPfsnpSEpEm7dtv1m22YIjTfFs7y3ILBFWVW+NkD0R428KUpa0NLYjsqKVKWVELK+CPd6RyDKdKNOAFrbA5/FHDGQXaZfBKr9r7s+t25CDNx/UTXjVNvGpklMn7gWuTaosJspIKEdoBdcSkgEB1xw8gHnkCrA0qjmh2auHFuSp5o74ZWmegGRvZZpBrprDjlzksezPuM3G0uoea5Culxp23qbWAQCOpJ4PpVvIMd6JCjxJE5+a6y0htcl8IDj6gAC4sNpSgKURyelKU8nsAO1felaFxIgqgaAZCVHGtejLutmNS8Om6qZpitlucRyDcYuOrt7RmsuDhSVuyIjzqeUkpPlrQCCQeeakelZuaHiHZK7XFpluaq7t92A4TthuM2bpDrZqnbo90Wyu4wJMq0yYszyyekLQ5byU9lKHUgpVwSOoVZO+22Zd7RKttvv8+ySJCOlu4QE
MLkRzz85AkNutE/D321Dv6V76Vdzi8Q4qjWhpkKm8vwwNMJmqbutruvGtKM5elmcq9s3m2tSA8U9JKeiAEpHT7vSAE9PbjjtW5XvY1YcwjKtuoO4zXfKrW72kWy4ZehmJIT8UuNxY7PUPzmrLUqoADQwCwyGnZWJJcXzc66rQNINBNHtBLIvH9I8AteORXuPPXHQVyJJHoXn1lTrpHfjrUeOe1a7uj2x4Zus07Z0+zO9Xm0Nwp7dzhTbW8lDjMhCVJSVJUClaeFq7Ec/EEHvUw0pUHi+3fL0y7Iw+H7Fv85quWm20/PcNxdOC5Ru01OyXHG2/ITBPscV5TPp5ZmhpcxKeO34N9BA9CB2qccJwjEtOMYg4Zg1giWWy21vy4sOKjpQgE8kn4qUSSSokqUSSSSSazlKuXEyTrnx571QNAiNPTluSlKVVWSlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREqFc+ve8OJl1wj6Z6f6V3HGUKR7BJvOQzo0xxPlpK/MbbjLQkhfWBwo8gA+p4qaqURV1+Um/78lmiP2ruX6nT5Sb/vyWaI/au5fqdWKpRFXX5Sb/AL8lmiP2ruX6nT5Sb/vyWaI/au5fqdWKpRFXX5Sb/vyWaI/au5fqdPlJv+/JZoj9q7l+p1YqlEVdflJv+/JZoj9q7l+p0+Um/wC/JZoj9q7l+p1YqlEVdflJv+/JZoj9q7l+p0+Um/78lmiP2ruX6nViqURV1+Um/wC/JZoj9q7l+p0+Um/78lmiP2ruX6nViqURV1+Um/78lmiP2ruX6nT5Sb/vyWaI/au5fqdWKpRFXX5Sb/vyWaI/au5fqdPlJv8AvyWaI/au5fqdWKpRFXX5Sb/vyWaI/au5fqdPlJv+/JZoj9q7l+p1YqlEVdflJv8AvyWaI/au5fqdPlJv+/JZoj9q7l+p1YqlEVdflJv+/JZoj9q7l+p0+Um/78lmiP2ruX6nViqURV1+Um/78lmiP2ruX6nT5Sb/AL8lmiP2ruX6nViqURV1+Um/78lmiP2ruX6nT5Sb/vyWaI/au5fqdWKpRFXX5Sb/AL8lmiP2ruX6nT5Sb/vyWaI/au5fqdWKpRFXX5Sb/vyWaI/au5fqdPlJv+/JZoj9q7l+p1YqlEVdflJv+/JZoj9q7l+p0+Um/wC/JZoj9q7l+p1YqlEVdflJv+/JZoj9q7l+p0+Um/78lmiP2ruX6nViqURV1+Um/wC/JZoj9q7l+p0+Um/78lmiP2ruX6nViqURV1+Um/78lmiP2ruX6nT5Sb/vyWaI/au5fqdWKpRFXX5Sb/vyWaI/au5fqdPlJv8AvyWaI/au5fqdWKpRFXX5Sb/vyWaI/au5fqdPlJv+/JZoj9q7l+p1YqlEVdflJv8AvyWaI/au5fqdPlJv+/JZoj9q7l+p1YqlEVdflJv+/JZoj9q7l+p0+Um/78lmiP2ruX6nViqURV1+Um/78lmiP2ruX6nT5Sb/AL8lmiP2ruX6nViqURV1+Um/78lmiP2ruX6nT5Sb/vyWaI/au5fqdWKpRFXX5Sb/AL8lmiP2ruX6nT5Sb/vyWaI/au5fqdWKpRFXX5Sb/vyWaI/au5fqdPlJv+/JZoj9q7l+p1YqlEVdflJv+/JZoj9q7l+p0+Um/wC/JZoj9q7l+p1YqlEVdflJv+/JZoj9q7l+p0+Um/78lmiP2ruX6nViqURV1+Um/wC/JZoj9q7l+p0+Um/78lmiP2ruX6nViqURV1+Um/78lmiP2ruX6nT5Sb/vyWaI/au5fqdWKpRFXX5Sb/vyWaI/au5fqdPlJv8AvyWaI/au5fqdWKpRFXX5Sb/vyWaI/au
5fqdPlJv+/JZoj9q7l+p1YqlEVdflJv8AvyWaI/au5fqdPlJv+/JZoj9q7l+p1YqlEVdflJv+/JZoj9q7l+p0+Um/78lmiP2ruX6nViqURV1+Um/78lmiP2ruX6nT5Sb/AL8lmiP2ruX6nViqURV1+Um/78lmiP2ruX6nT5Sb/vyWaI/au5fqdWKpRFXX5Sb/AL8lmiP2ruX6nT5Sb/vyWaI/au5fqdWKpRFXX5Sb/vyWaI/au5fqdPlJv+/JZoj9q7l+p1YqlEVdflJv+/JZoj9q7l+p0+Um/wC/JZoj9q7l+p1YqlEVdflJv+/JZoj9q7l+p0+Um/78lmiP2ruX6nViqURV1+Um/wC/JZoj9q7l+p0+Um/78lmiP2ruX6nViqURV1+Um/78lmiP2ruX6nT5Sb/vyWaI/au5fqdWKpRFXX5Sb/vyWaI/au5fqdPlJv8AvyWaI/au5fqdWKpRFXX5Sb/vyWaI/au5fqdPlJv+/JZoj9q7l+p1YqlEVdflJv8AvyWaI/au5fqdPlJv+/JZoj9q7l+p1YqlEVdflJv+/JZoj9q7l+p0+Um/78lmiP2ruX6nViqURV1+Um/78lmiP2ruX6nT5Sb/AL8lmiP2ruX6nViqURV1+Um/78lmiP2ruX6nT5Sb/vyWaI/au5fqdWKpRFXX5Sb/AL8lmiP2ruX6nT5Sb/vyWaI/au5fqdWKpRFXX5Sb/vyWaI/au5fqdPlJv+/JZoj9q7l+p1YqlEVdflJv+/JZoj9q7l+p0+Um/wC/JZoj9q7l+p1YqlEVdflJv+/JZoj9q7l+p0+Um/78lmiP2ruX6nViqURV1+Um/wC/JZoj9q7l+p0+Um/78lmiP2ruX6nViqURV1+Um/78lmiP2ruX6nT5Sb/vyWaI/au5fqdWKpRFXX5Sb/vyWaI/au5fqdPlJv8AvyWaI/au5fqdWKpRFXX5Sb/vyWaI/au5fqdPlJv+/JZoj9q7l+p1YqlEVdflJv8AvyWaI/au5fqdPlJv+/JZoj9q7l+p1YqlEVdflJv+/JZoj9q7l+p0+Um/78lmiP2ruX6nViqURa9p/Jz2XiFvkanWuy27Jlhz2+NZpTkmG2fMUEeW46hC1ct9BPKRwoqHcAE7DSlESlKURKUpREpSlEUdasbgNLNDown6oXi6WaB5aXVXBOP3GVCbBUUgLksMLaQrkfNUoK7g8cEc6xppvN256yXYWTS3Nrjk8rzG2nPudjN1dbYKzwkvOiN0Mp/jOKSkcEkgCvfu9YakbWdWWn20rR8jrqrgjkciMsj/AEgVVbwVmGUbbMrfS2kOOZk+lawO6gIcXgE/Vyf5zSh9o+o13utDucuiP8pW+zZTc33nEdmzK6DUrTntYtKmM5iaZK1DsCssmh1TVlbntuTOltBWtSmkkqQAlKjyoAdq1ew7sdtmT56NMce1qxS4ZMp0x24DE9Ki66PVttfzHF+vupUT2Pag8xAGqHygk6Ld9Q8+xjS3B73qJmc5UOyY/DcnTXktlxSW0Dv0pT3UT2AA9SRWC0N1x0/3D6dwtTtNZ0mTZprrrAEqOWXmnW1dK0LQfQg8ehIIIINbPmEfEpmLXSHniLWvHpEVxm5IuhbERcdQ4Wl3zPc6CDwee1abpJftv9l0udmaM3XE4Wn+PLlIck2h1pFtjFolb6i6n3CASVKXyR6kmoaQMZdkADyvcnhl19ZcCQ3DmSetrAcc+ikqoo3D7nNJtr+OWvJ9WLpNixLxOFviIhxFSHVudJUpXSPRKUjknn6OASeKrbpd4lulGoW5LNLRd9RbJjOmmP2duLZJl1kIjC7z/PHmyElfB6ekcIT+9BURyrgWV1sc22ZBgUC8a/P4RLxJLjV0gP5E4x7MpfTyhxkuH3lFKuwTyVBXHBB4oZ8NtUWBg33YiOhIBI5hSI8R1M3I3b4nsCQD1UlWu5Qb1bIl4tkhL8Ocw3JjupB4caWkKSoc9+4INeqvHZ5lsuNog3CyONOW6TG
aeiLaT0oUypIKCkduB0kcD6KjjJd0m37EZ9wtt81Ss6X7Qvy7mY3mSm7ev4pkuMpWhgj4hwpIq9SGvLcuBzWbJcwE35ZKTJ8+Fa4Mi53OYzEiRGlPvvvLCG2m0jlS1KPYAAEkn0AqF8P3n7ddQtW4uieBZ2L/AJRIbkOqbhw3jHZSygrX1PqSlBPA7dBV+ipdxzJcezCxw8mxS+QLxabg2Hok6DIQ+w+g/jIWglKh+Y/CudsJllnxqphaaSguY2Vr6Rx1K+5SRyfr7CoYCdoFJ2RDuctaT8lLiPBdUbmMPYuA+aubuH3OaTbX8cteT6sXSbFiXicLfERDiKkOrc6SpSukeiUpHJPP0cAk8VJtruUG9WyJeLZIS/DnMNyY7qQeHGlpCkqHPfuCDUe6+Q9vMvEWXdyCMMVj0GSmYwcnWwGUyEA8Kb80918EjhPJIJHB54rbZ2Z4XjuE/Lm43232zF4sFE03B5YZjMxCkFKyTwEo6SPoqrSAxxdoegEa8Zk8vSzgS5obqO5nThEdVn6VCo3q7SlDkbisCI+q9M//ABrb9ONddG9X5M2HpdqZj2Uv21CHZbdqnIkFhCiQkr6SeASDxz9FWAJyVSQM1vdKj3VDcFopouplnVHUux49Iktl1mLJkcyXGx6uBlHLhQD+N08fXWX041W021esPym0wzez5NbAvy1v26Ul0NL9ehwD3m1cd+lQB4+FQPMCRopPlgHVbXSlfCdOhWyG/cblMYiRIzanX333A220hI5UpSlcBIA7kntUEgCSgE2C+9Kh6HvB2xTrzGscfWzGA/OdLER5yUW4slwHjpakqAZcPPYBKzz8KlG95BYcatT99yO9wLVbYyet6ZNkoYYaT9KnFkJSPrJqTYYjkmZw6rIVD+vW63RrbbccWteql6mQpGXylRYAjQ1vhISUBbrhT81CS4gE9z37A8Hj1ZFur25YnitlzbIdZMZhWPI3HG7TMXMBRO8twtrU0E8laEqSQVgdI7d+4r462ytrsi247lmvcnA34tvfRcMflX5cdZ81XSUrihfJc6uEHhAPPCTweBQA4hOWIA/MDiloO8gkfI8pUtghQCknkEcg1+1FG5Hclpztj01l6g55c2gstrRabYhwCTdJPTyllpPr8R1K44SnufhzGWyneDhmuemmLxcu1Nxt/Uy+ruMmTYGZbaZTKRJfWhtLIPUEoYCOOe/SkEk+tGfaFwb7sT1/wJO628KHeRrXO1+p5af6KkOJuu0Zm7h5G2Bi9yznMaKZK2TDWIxIaDxaDvoXA0evjjjjtzz2qYaidz9jNbdfI80nB0awXiKuI2pssKvTkdtoqUFBPLiUhtHHUrjlKQnnjgVKM6dCtkN+43KYxEiRm1OvvvuBttpCRypSlK4CQB3JPaoBim0nOLnTM5cIUkecgZWga5D5yvvSoeh7wdsU68xrHH1sxgPznSxEeclFuLJcB46WpKgGXDz2ASs8/CpgBCgFJIIPcEVaDE6JN41X7Sv5ccbZbU884lDaElSlKPASB6kn4CtEga+aJ3VzI0WzVfFZSMRYbk359m6MrYtrayoJL7oV0IJKFdirkcdx6VWQFMFb7So10s3J6D623CbaNKtU7Dkk+3p8yRFiSPwyW+eOsIUApSOSB1JBTyR37it7vt/sWLWiVkGTXmDabXBbLsqbOkIYYYQPVS3FkJSPrJqXeUS6wUDzGBmvfSoiwzdzto1CylvCcP1pxm4Xx9XTHhCV5S5KvgGfMCQ6T8Agq5+FYTd9u1wTafprJyW+TI0vJp7LjePWPzPw06RxwFFI7pZQSCtfoB2HvEA1qPFJmN3TjyVmNNR2AZ/Dmp4pVddqe6rTfVjSfFU3bVrHbvnScaau2RQmJbZkx1pQkyFraR3QEqVwRx27Ct8w7c5t71DyBnFMF1jxW/Xl/ny4NvuKH3lcevupJPArV9MseaeZ4dpHCyya8PYH6KTqVFF73XbbMayCRieQa24hbr1Ff9met8m5Nty
EO88dBbJ6uefhx3rbsr1Q08wXFGc5zLMLZZsffDZRcprwaYIcHKOVq7DkenNZyMOPTetIOLDqtppWExjNcSzTGI2aYpkUC6WGW0p5i4xnguO42kkKWF+hSCk9/TtUazd4+163TBEma3Yw2gvmKJvtJVB80Hgo9rALHIPr7/bvUmzsJz3KBcYhkplpXnt9xt92gsXS1To82HKbS8xIjupcadQochSVJJCgR3BB4rUnNbNIGr7d8ZXqZjX3Tx+A5dLvHFyaKrdEQoJW7IIVwyAVD55B70JwmDx9M+2qDzCR9Tl3Xz1Y1v0o0Nsici1WzaBj0Fzq8tUjrW4708dXQ22lS18dSeelJ+cPpFfxoprZgW4HBGdSNNZsqXYZMqREYfkRlMKcUysoUoIV7wTyO3IB+oVFes2sekeuu0bWS7aX5taMqt8DFrvGlLhudYZeERakhSSAR8CDxweOQTUfeFTcoNm2PWW63OSmPEi3O7OvOq54QgSFck8UpeZ1Vr/da1w/iMfAeqVfK2mWe84tPQT8fgro0qNMU3LaBZ1e/k3hmreNXu6eYGTEgTUvOpWT0gKSnkp79u/FZbN9Z9L9ObnEseY5lCg3We2Xo1tQFyJjrQPBcTHaSp0oB7FfT0g/GhtBOqZyNy3WlangOrOmWqbUt7TnO7JkYgdAmJt0xDy4illQSl5CT1NKJQv3VgK909q2ypIIzUAg5JUfas686YaKsQRnWQlu5Xd0MWqzQWFzLncnSeAiPFaCnHDz25A6Rz3IrWt225GwbWdF7tqbdWW5dw5EGy29SuPbZ7gPloPHfoAClqI/FQeO/FRXsT0IvzdlO6fXWS7ftWdRo6ZxlTE8/ce2uDqYix0HsyCgpKgnjgEI+B5qyajnR7LYk8TkBxIvJsBe+Ss+KYE5uyHAZk8BlxNrXIlQ6z6yyreb5atqmWLt/T5iWJt9tUa5ON/SI3nqSFEd+hbiVfAgGszopuJ0214j3RrD5c6Fe8ffMW+Y9d4piXS1PckdD7BJ47g8KSVIPBAUSDUm1zi3P35WhXid6N51iivZFaiQY1kyJhr3UzW3JKowW4B84gFkgn4sp+irU4dXZRPvmBwMEjpaDbjpBq+W0X1fuCTxEgHreRfgc5HR2lKVClKUpREpSlESlKo3vh3C55kmp+MbINvt4Xb8yzdbYyG8sKPmWe3LBUpKCO6VlpK3FKHBCAAO6wRUklzWMEucYA/zoBmTuVgBBe4w1oknh9WHFT/kG6TBmsyl6a6bWW+alZdbiBcLbi7LTrNtJPAEuY8tuLHPIPuqc6+x92sVmG5vKdKLacp1k2/5VYMUaI9svlrmxLw1bUkj8JKZYWHkNj4rQhwD4+tb7ojopgegGnds0209taYtvgI5eeUAX5sg/tkh9fqtxZ7k/DsBwAAN0uVtgXi3SrRdIjUqHNZXHkMOpCkOtLSUqSoHsQQSCPrq1QFg8hxEb7A/MDqTrwVWEO9sQOGY+U9I+K8eK5XjecY7b8uxC9w7vZrqwmTCmxHQ40+2r0Ukj+Yj1BBB4IrK1zr8KLMLhYst1u25Ga5JseE5E7IsoWoq8htUl5l1A59EktNq4+krPxNdFKscLmtqM9lwDhyP5ZdFUYmudTf7TSQen5i6UpSqqyUpSiJSlfhIAJJ4A9TRF+0qJLxu024WJ+S1cdXbEGYT/ssuYytb8KM9zx5bsptKmW1AnghSwQfWpPgXi0XW1MX22XSJLtslkSWZjD6XGHWSOQ4lxJKSkjv1A8cUFxiGSGxwnNeylaFA180TurmRotmq+KykYiw3Jvz7N0ZWxbW1lQSX3QroQSUK7FXI47j0rxaWbk9B9bbhNtGlWqdhySfb0+ZIixJH4ZLfPHWEKAUpHJA6kgp5I79xQeYwN09N/JD5c+XXcpKrSdVdatK9ELCnJdVs2t2OW9ZUG1ySpS3SOOQhtAUtfHI56Un1H0itru12tVhtsm9Xy5xLdb4TSnpMuW8lplltI5K1rUQlKQP
Uk8VVrcnuW2+6jbdNUcSxbU+w3S4zcPuyoUfzCgTSiOskxlLARI445/BlXpz8Kxr1DTpuc25AJjlf5LWiwVKjWusCYnmp20X1owTX7BI+pOm02TMsMuTIjR5EiMphTpZcKFKCF+8Ekg8cgHj4Ct6qnvhPfuKsW/yldP62urhV2bRTFJ+Bu4fAFctGoajcR3n0JCUrT881e0z0xXDj53mlstEq5EiDCdd65cvj18mOgF13j49KTx8a+OnWtWlOrLk+Pp3nVrvUq1LCJ8NpwolRCfTzmFhLrfPw6kjmsR5slqfLmt2pXhvd8smNWmVfsju8K1WyE2XZMybIQwwwgeqlrWQlI+smoyx/dptuyjIYeLWbWLHnLncldMBp59UdM088AR3HQlDxJI48tSuee1B5jhGaGwxHJS3Sq571d4mGbTtN5NwemxZubXVhaMespWC466eQJDqR3Swg9yTx1EdI7ntndr243TrWfAMVtlt1TsOSZqjGoM6/Q4kxtcpp8tNh9bjaPmcOr4PYcE8Up/a4i33YHeT6RfdIR/2eEO1n5fGbb4Km+laLqBrlpHpZMj2zPc/tNpuMtBdj29TpdmPNj1WiO2FOqSOO6gnj66+mmutmkusUaTJ0w1CsmR+xK6ZbUKUlT8Y88cOtHhxvv++SKDzZIfLmt2pWs59qbp3pZZlZBqPm1lxu3jkJfuc1tgOEfioCiCtX8VIJP0VgbtuI0Pseodr0nu2qFgjZfefLEKzrlD2hxTg5bSQPmKWCOlKiCrkcA8iguYHLqhsJPNSJSlafnOsGlmmbsSNn+oNhsMm4OtsQ402c23IkuLUEpS21z1rJJA90GmoG9NJW4UqKM53WbcNNMubwPO9ZsXs1/cUlKoMmanrZKvm+cRyGeeQfwhT2PPpUpsPsymW5MZ5DrLqQttxCgpK0kcggjsQR8aDzDEMkNjhOa+lKi3UndFt70gupsOo+rePWW5oSlbkJyR5j7KVfNU422FKbB+BWADW443qHgeYYmnPMWzKzXXHFMrf+6sSa25FS2gcrUpwHpT0gHq5I6eDzxUAgtLhkNVJBBDTmVsNKovhniWaUZruqv2Jvai2SwaX45j7zMW7XKQiO1eLqZDILja18e4lAWlAHzh1q7gp4sxk+6HbthSrejLtZ8Ss5u0Jq5QPbLk217TFdHLbzfUfeQodwR2qRdjamjsu5HrEjgoNnuZqPyHwmDxspQpWg5Jr3oxh+MWvNcq1LsNosN65+59ymS0tR5PH7xauAf/s17dO9YtLdW48mXplnlmydiGQH3bZJS+hsnngFSe3PY9qmCSRuz4KJEA71uNK0bFtcdIM3yybgeI6jWK7ZFbUrXMtcWWlcmOlBAUXGx3TwSB3+mvpqDrVpTpW7Fi6gZ5abNMngqiQnXuuXJA9S1HRy6sD4lKSB8arIgHQ5cVaDJGozW60rRNNddNH9YTLb0z1FsmQSIB4lxYske1Ru/H4VhXDjY57cqSO9ZrONRMD0zsjmR6h5jZsctjfPMq5zW47ZIHPSkrI6lfQkck/AVLvIJdZQ3zGG3Wbly4sCK9OnSWo8aM2p1551YShtCRypSlHsAACSTULYjvP266gauRdEcEzsX/KJLchxTcKG8Y7IZbK19T6kpQewPHQVforZb5uS0GxvLLDgl+1Wx6FkGTIYctdudlgPPpe48klP4nXyOnr6erntzVJ2GGWfGqdU00lBdxkrX0jjqV9ygOT9fAH81SwE1msdkcfdrSflcZqHkCk5zcxhPRzgPnYrpFSo7zncPodplekY7qFqnjuO3J3jy4tympYcc5AI6Qrjq+cn059RW54/kNlyq1M3zHri1OgSOfKfa56V8Hg8c/WKgeYSMlJsYKyNKVhb3muG40+iLkeW2a1POJ60NzZ7TClJ+kBagSProizVK1T+6zpX+UvFP/qzG/8At6y9jyjGcnbddxrIrZdkMEJdVBltvhsn0Cigngn66IspSlKIlKUoiUpSiJSlKIl
KUoiiXdt+5d1Z/kbd/wCquVVbwWQFbZcpSoAg5nJBB+P95xas9vJu1ts21XVaXdJrMVleJ3KOhbqwkKdcYWhtA59VKUoAD1JIqqvgp3i1vbfcwsTU9hVxi5Y5JejBY8xDTkSOlCyn16SW1gH6Umo2W9XaP/tt/vTaf2VD8bv7FHGomjWnMrxbLBp1CxuPZ8bvVgD9xt1nHsDMoGA+XG1Bnp9xzy0pcSOOtJUDz1HnYfFv0j010y0v071G02wix4pfrRk7UKPLssBqEoM+S46lJ8pKeelbKCnn5vfjjk1681uduPjR4gj21nlqwJjLHWPddNtkkIP8YhSe3r3FZzxqp8NnQLCbe7JbTJfy5DrbRUOpSERXwtQHrwCtPJ/jD6ayxFmzUXtMHHn/APlPy9FuAH7XUa64wD/t/mPRWR3QYPheqW1XKbtnmKWu8yoOFzrpBdlxkuLhyvYi4HmVEctrCkpIUng9qiTwhUpVs3hJUAQchuYIPx95FTVrFd7W3syy68KnsexOacy1of8AMHQsLtyggg+h5JAH08ioP8H2bFkbP2YzEhtbsXJLih5CVAltR8tQBHw5Cgf012NaGbXtTBYYW/8AcK4QS7ZNlcc8R/sUc7DWGF+ILugQtltSUzpfAKQQP/CKq9/jLac4I3oTadSWcTtbWUnJocFd3RGSmWuMYz/LKnAOpSPcQQkngFPatS2q6hYRo74iu5CHqlldpxRN2fmvxH7xMbiMuATA8AFuEJ5LTgWBz3AJFbx4veXY/km0DFLxZ7h5sa+ZRBl28uNLZW+z7LJV1htwJWBwUnuPRQ+kV59T/wCn7KRo2n6v+YXfTtt20A6l/wDYreWPHbtl22q24rYb+7Y7leMIYgRLm0kqXCedgpQh5IBB5SVBXYg9vUVDO3jGMJ2f6Jx9uOXZaxneYOqnTplmx62uTJkhMlRPvsIClIb6SEl5/oQfioAVndWdRsrwTYDK1G0ueLt5g4FAkW+THAcLIUwylchPqCW0KWv6B0cn0qD/AA+Nx+2zDtsMA3fPbcNRbvOluZFBcWqTf7zc3Hl+WpLKeqRKKmy2ElIUB3HIPVXbt3mr7WG7oO8y50ADnmb3gRu4tmluzbOT03AhoknobDmZGv8AXg03q4S9F8+x99132G0Zg6IUdxXPs6XGEFSB3IA5TzwO3JJ+NYON/wANTI/kx/8AkpNYrwfNScGsFk1CwDIMot1syi85eFwLLKkJbmyR5B6vLZJ6l9Plr6iAQnjvxXqh3i1ueNXJSi4R1EWEw+zg/bxaQS3/AI3b09aufNtVEj7h/wCz+as6G0q4/e/+YLL+MvpzgqNCrVqU3ilsbyo5PDhLvCIyUy3I5jv8tKdA6lI9xBCSeAU9qt7D08tOrG2iw6eX51bduveM2piV0pCipoNMrUjg9uFBPSfqNVe8Z2dEY2u2OE7IbTIk5hELTZUOpYTGklRA9SByOfzj6asza9WsJ0q2sWLVvKbs03j9mxC3zlvNqCi8PZmwhtvvwpa1FKEj4qUK5qDm09n2guyD2/2LWq1zq9ANzLHf3hQV4j25aRolplbdCdImuNQNQ0C02qHbkcOwYKyGlONoT81ayfKbA47lRHzK2/bFoTZNi21S7XG5RmZOSRbRJybKJKOCX5LUdTnkJV8W20p6E/AnqV+MarjsPxGXuO1vyPfnr1PgMuvS3IuG2uVJQExkI5R5qEqPzGkfg0HjusuL9QCbgZ5qJies+S5RtgxK/Wi6O3bBLq9eJUSUl77nreLcaO2ooJAKvNeUUn3gEJPoaio2rS2d4H7aoCeIAaXBvpiPHspYadSs2f2VMgcCSQ0nl7o4d1W/YvqRrhctJpWsEfbhLzu+6iXWbc7rlHyotsVcvpeU03HS28rzG2WUt9CWzwkcEgcGvNtw26bltM98F/1ig6SN4Rphm/tP3XtKb/BkCOpbPWlQbZcPPEkEp6U+6lxQ7Dmo18PTeLiu2C3ZBtS3OyX8Nm43d5K7dNm
MOFltS1cuxnSgEo9/lxCyOhQcPce71XTxfdVj2u+odtwbbfcBkdrtshEzLcoTEc+50CKnumI0twJDsl5XCR08hCOtXPIArq8gqsqULjDA3BpABmNAPha5E85xmm+nWscRJ5gkiOJ0000tYSqPbqcrc1e3n6Q7PbjJWMNfYcyvKISVlKLr5KXnI8V4D5zQMflSD2V1jn5o4vDXM/xHYucbed0ulu9fGbI/c7JamWrRd0tg9KChboLa1fiB5h9xKVHt1I/MDzhzW16Rf7OK+4WOEnk7CVuWudRqhntYTG/MTH8Mq4+6rbLju5vQ+bo69PasBS5Hk2ma1DS6mA+yfcIaBT7pQVIIBHuqPHpUF7xdvuLYh4edyxrLyxmF+06xmNGtuRT4iRLbcbeaSVtElSmgU+70hR90AEmt2tniYbSb9jMW8Y/nU653qelCImMRbTJXdn5SuyYyWgjpKyohPV19Hx6uO9eTepeMpc8P3P7nqbHt1ov1ys6FvQWHOURVuy2y1F6yT5jiEFKFKHZSkqIABAGW1tczZ6sa3P4gCBB3xu4TmFpsrmvr0husPwkgmRu58Y1UYbLtnmgmt+yzB5ureGN5LcLtGeWi4SHVty4LLUx8Nx4ryCFstD3lFCSAtS1KV1E9o18W3SHTPA7LpFfsOwq1Wi5Lvf3LelxY4Q+/GaZaDTbrnznAgIASVEkd/pNWt8NGfCnbJ9Nkw5TTxjRpjDwQoEtuCa+ShXHoeCDx9YqvPjRXKBGxfSGI/KaQ98pJMnyyodXlIbbCl8fQCod/rrv2vybYMNvtW/3i/bXcuXY/Ps5m/kd/abd9NFZ3fuhCtl2qClISSnHiUkj099v0rX/DWgwpeyHTpqTFacS9EuDbgUgHqSZsgEH6iO1bPvciP5Dsv1ORZke1l7FnJLfk+91toCXCoceo6Ek/mqI/DP100mRtV0307czq1fKpMqba/uG2+Hbh5xlPOg+zo5cDflqCy4U9CU8lShwawpiX1ma+S3V4+cKXGKNB4yl1/wCFpUO2/AcM058ZCw2LBMZt1htr1hcmGHb46WGA8u2PdakoSAlJURyeB3JJ9TUu7qcrc1e3n6Q7PbjJWMNfYcyvKISVlKLr5KXnI8V4D5zQMflSD2V1jn5o4jPJb1ah402PoNwYBbsIhK/CDs+q1vENn+MQpPb17iv48R2LnG3ndLpbvXxmyP3OyWplq0XdLYPSgoW6C2tX4geYfcSlR7dSPzA0pPAbspf7OJ3IeZ+GeAOE9lrVaS/aAz2sDY3+y2epGK/Mq4+6rbLju5vQ+bo69PasBS5Hk2ma1DS6mA+yfcIaBT7pQVIIBHuqPHpW66MaezNJ9KMV01n5PKyJ/G7Wxbl3SSjock+WngKKeVdI44AHUSAAOT61Bls8TDaTfsZi3jH86nXO9T0oRExiLaZK7s/KV2TGS0EdJWVEJ6uvo+PVx3qwmnsvNLhh1tuWoVuh26/TG1SZUGISpELrUVIjlfJ61toKUKWOApSVEAAgVoGuYHgZEgniQCB6ZxwnMLMua/BwBjgDBPrv1mNVlb1Y7LklrkWPIrRCululp6JEObHQ+w8nnnhbawUqHIB4I+FctfD60V01yzdXuEx7I8YjXCwYtkC126xvDqtqVomykNLcjftbpaTyGwsEI6iQOeCOrFc1fDPudvm7u9z64k1l0Sry6+x0LB8xsXGVytP0j3k9x9I+mq7OP/dj8D/TDHbTcrV/+Ifxs9SZ7rw66YjjOi3in6ITtMLFBxtGUxGEXONbWEx2Hy45IjuKLaAE8qbCeeB3KQfXvUibjdUMszbfjheh1l0/czuy4LY15VIxkXGPDanXNYIZedVIIbWGEKbWhJ595RIHbto28q8WtrxQ9u7Ts9hCojNvS/1OAeWpya/0BX0E8jjn6R9NeDfPd8t2m758C3gxrHLuWJXaA3aLuWE9iUoW08xz6JWWVIcb6iApTZ/eniuzOHg0C8wPEqid1jh5AOII3G4urV2
k1awaJPh0zG/LF1LZHEZ2W5b4tHtyO7LDbBCx/a07jOVY3c0TIF7dzC1LW2x0qC2QUOBQBV5ax37FArN78Gc9b8M9xvVeGhnM48OxMXoB5D399plMpdV5iCUnqI6uQeO9Sk54kG0qVjke74vqK5kd2uCUot2OWy3SXLrMkr4CI6WCgFKyohPKiE8/jcVoe/qJqTf/AA78ouGfW5hnJHVwbpcIEMdSLe0q4NrTH6h88stqQhS/xihSuwNZbSDT2dzQLY2k8wRl0uY4E5idNmIqbQxxN8JAHA7+th1AyMTfsuZZTtR0ldS0gLOI27lQSOT+BT8ap3jlui7QfFRlWoRWoeIa4wVmEoJCW2ZT6+voT8B/fbSk8D0D6fqqxexbXTSbINu+keCWfOrVNyb5PMwXLPFfD0yO5FaIeL7SOVMIHR89wJSSpABJWkHQvFg0nuOSaHWnWzE0rayXSi6tXdiQ0PwiYi1oDpHH7xaWXPqCFV3bbUFDbv1g+zicD+FxgxysZ4Lk2NhrbH+rj2i0EfibcflHFQ94pF+smmW5PSDWvE7Y5LynC0s3XIAyx1NotqJiBFLyvxStwyG08+v6BVst1eWw9RdFMd03wuamRJ1zlwrFbXW/e6bbISH5krj96iGl08/AqTUb6WaKXjdFto1I1F1WtjUPK9e7emREZX3Frgx2uLU2kkdgFp9oPHHJdqPfC6g6i6kIiZHqfCcah6IW6VgWPtvclRmOvlyW4efRbTCY0cfxefpNZUqWEHY6vunGd0OnE077hrD+InJXfVxFu10tRgG+R7LuXtOG8NAWQ8SPM3cbRoltAw+U7Y8czm6RLddxEWWz9ymnmI7cYEdwg9ZKh8fLSPQkVeCdphp/ctPHNJ5mJW1eIuW/7lm0eQkRxGCekICfQcDggjuCAR371SHxa9FM1vuPYPuJ09tz86fpnMW7cWWEFbjcQrbdRI6R3KW3Gve49A51Hskmrg6Va8adaq6PW3Wiz5NbmrDIt6Zk592ShKLctKAXmnyTw2ps8hQVx6c+hFUYRU2ap4tzjcXzuIGEnhAPIzqSrvBp16Yp5YQGxoQfN1mOgG5UU8Oe9XPDdbtaNiuaPHIMSsDs6RaotwSHm0MtSksut9Ku3luodaWUfN6go8e8edP2p6E6aX/xE9d9Op+NxFYZYxLebxwNJ+50gJmseU06zx0rZQpXWlojo6ko5B6QKmvYJptdM23A6y7zZ1skQrDnFxk2/EjIbLa5sAyApcoJPfoUGmQk/E9f0Vo+zO92d/xQdxCWbnGWZzNwRG6XQfOUibH6wj98RweePoNX2fEa1Dxfb8F2Kc5AETxAjlzlU2iBSr+H7PitwxlBN44EzzVi9SduOju3na1rvH0hxFFhZyXGrtPntIkOuoLiYboSlAcUfLQkKVwlPAHUap/sN3KMo0d082hYXlaMSy3LZt0lSsinR1dMKKqQvhqCFJ6HpjoSoIKvwaCDz1rAbroNu7lRoe1vVh6W+2y38j7qjqWoJHUqMtKRyfiSQB9JIrnboRtSj7mvDqxy84M+iHqbgt6us3HpzDgbdLiZHmGItY7p6/dUgkjpX0nsCrnKk/DVrOeJaGU5GuEOItysY1AjVa1WzSpNaYcXvg6SWg353voTK6e6VaQ4BotizeI6e2Fq3xOsvyXlHzJM6QruuRIePvvOqPJK1En4DgAAUFn7kv2Gm/jVOduHx65nGtTRAXY8lZjl4RITCOltCR6qZT1lLiUcqSpsHpVzU57Bt6UDcTh3yB1EmNWzVfFQYV4t0jhl2eGvdMptB45VyCHED5iwewSpNbZa7po5vAVqdobq3jdivsvBMmlWt23Op6ZDUXhJjy2lBXmNqIUpBcQR7yFDtzxWr2vZXD2EO8pPBzSWiRuzEbhIMXjJha6iWPBEuAO8OEm+/IzvMG63HRvFtMrhmeW6/aUZJabtZ9S4lsXIctikrZdlRQ+lT5Uk8dakOtpUkgKCmzz3J4lyuc+yLSO87ed
8urWi+m2TXG8aX2yysTJaZDvmIhz3Syphlak+756UqeTyACUJ7jkdujFDhLGPZkRYHMXIj0tpEQgxB72vzBvxsDPY34zK5g+Jr7ZqvvA0B27yXF/caa9FlyWgfdWZc3yXFEfEhqOoD/GP0107YZajMtx47aW2mkhCEJHASkDgAD6OKoT4jem2R4jqppJvNxuyybrB01uUdrJ48ZsrdatyJAdS+Ej1SOp5Kj8OtBPbkiy6NXr3qpeoVl0AvmLPW9yyRcgl5Jco7twipYkrdRGZZjMvMqccUY7xWVOoDfQAQoq4TTZzGy4PexvnrBbP8IMciBeytXH/ALjH7uBsd4dH8RE8wTa6mKuXmpTb+5vxbcXx2xAybLpGzFduL6By20qGoyXOT9PtDzbP5x9VWQzzedd4OLzdM8AxprKdfFzp2Ps43Z0qdYiyGXC390pBUf73idJQ6PNUCero5PCljY9le0pjbNh9yumU3RF+1HzJ/wC6OU3rkq8x5SivyG1EcltKlqJUeCtRKjwOkC1EfbN2j3WXad7iPLG8AEk6ZDNRW/ZOoe87ykbmg+aeJIgdTldTBq1qrh2imn931JzueqNabQ0FrDaep19xR6W2WkfjuLUQlKfiT8ByahzItf8AWDTO1YJqNqzjONWnGM1yCFYpVjjofVc7CJoUIzr0sueU+pKgkOtpYR09R6Vq6e8T7z8gybUPdxojoHjuKyMmgWAr1BvFnZltRxMLClJjJWt5SWwEqbX2Ue/mfTxWF3vZ1qVqlkeiW3676TzsTmZZnsG5JW7d4c0uxYZ5ePTHWopCQ6Fkq4HuHj0NRQ+0dTJ954aB+6HAOPXzDgGyLqa3ka8D3WFxP72Elo6eU8cUGwXQWlaLqhrbpjo1bXbnqHkyLahqI9O8lEd2Q+thocuLS00lSyE/E8cD4kVsmJ5Nas0xaz5hY1uLtt8gMXGIpxBQssvNhxBUk+h6VDkUHmBI0+c/keyGxAOv+PzHdZalQ3ug3U6bbTMLgZtqREvU1i6T026JFtEZDz7jnQpZP4RaEBKUpJJKh8OAa3XSXVXDdbdO7LqhgE9yXY78wX4y3W/LcQQooW2tP4q0rSpJHccg8Ejg0b5wS3QweBIn4I7ykB2txxW31y98OFh/WLe1r1r5kAL8q3Pvw4Zc7lkSZS0oCefTpYi9A+o8V1CrndtrszezTfDqXpbnnFrxjWBwXfDbu/7kWU8h5xwQw4fdDoEhxHSTyShHA99PKhbagf3HgczFuZAMckrX2VwGjmE8gTfkCRK6I1rWpedWjTHT3I9Q788lqBjtskXJ8k8cpabKukfWSAAPiSKiSfrVqixp/L3ApYxWLp5C5ni0vRZC7rItCXOlUwyw8lplwtgvBgsL7AJLgJJTCmvt7yrf/eGNv2g8x9jSaFObdznPUJIhzS0oKECAo9pKgoBSlJ5R1BHJ6R72dRr6jfDp2cf6f3juA+PlzstGFjHY6nsj14DeT8L5XWmeDjgN+kY9qXuAyJhaF5zeExoa1DjzUsrccfcH0pLr3Tz9Lavoq4usmuS8DyTGdLMIszOQ6iZq44LTbHXi1HixWxy/PmLSCpEdsfADqWrhCe/JG8ae4Di2luE2bT3CbWi32OwxEQ4cdPfpQkeqj+MpR5UpR7kkk+tUb26ZvqPqJur1z3C2TSe4ZrDg3L5A2J9m7w4iIEWGrl1CRIWknzCGnD0jjlSvproe5j6zaTLMa3+lsDpicRO4EwZhYND203Vn3e53q6T1wtB5wLXVnMD1jy1nXi77eNTlWKVe2scYyu03OzQ3obEuGp5TDzS2HXnlIcbdA94OELSoHhJHBmmqK6F5FlGs3iP6g51eMWfsEfTnCI+KyIjktqT5Mp59L3QXWSUE/tx4B7dPfvVisg3S6a49b5eRu2vMJ2L29SxLyS3Y1MlWxlKFEOOB1tBLjSSDy62lbY4PvdjWYI8Jj3ZkE9MTg09W4ec8VeD4r2DQgdcLSR/NNvkFMFKxmM5
Nj+ZY/b8qxS8RbrZ7rHRKhTYrgcafaUOUqSoeoqv+b7/tBNPdxcXbRk7l9j5DIeixV3EQ0G2x5MlKVMtOOFwOAqC0e8GykFQ5I78TB8QUveNo1USMBqe6BM8FZOo/1+07yDVrRnL9NsVypWN3TIrY5Bj3NKVK8gq46gekhXSpPKCR3AUSKkCq0+IrqRqDpXtMy/K9NJMqHd+qLCXPi8h2DHeeSh15BHdKuk9IUO6SvkcEA1z7SWikQ4SDbvb5rfZw41W4TBF+11qVtj6d7f8AaTO2rzr43n2TWPErlEulvx+2qlKQp5DzilyUo5RFbBWeFyFI6gnkck8Vr3hQFnPNlLeM5nDjXu1RL9cbciFcGUyGDHBbdDakLBSUhbiiARxWD287gNsuP7GoGG4flltfzG8YvMYn49BBk3yfe3I6xIW5HbCnllTnJ81Q6AjglQSnt5PB31KwFrb6vTN3L7UjLHcluMpFlMpHtqmPIZV53k89fl8JI6+Onkcc89q7Q0nadpD7ktbfQnGcuhPQ6LkJA2fZyywxG2o8uvX1tdRl4fWiumuWbq9wmPZHjEa4WDFsgWu3WN4dVtStE2Uhpbkb9rdLSeQ2FghHUSBzwRsGumI4zot4p+iE7TCxQcbRlMRhFzjW1hMdh8uOSI7ii2gBPKmwnngdykH1717vDPudvm7u9z64k1l0Sry6+x0LB8xsXGVytP0j3k9x9I+mv43lXi1teKHt3adnsIVEZt6X+pwDy1OTX+gK+gnkcc/SPprLZT5v0fxwzxlrgZ35R0Wu0j/ncJjh7K3jdll7mtG9/SDZ5PfUcMbAyjJ4IUQ3c3G0PPMx3h+M2BHBKT2Pm8nukcT1vgwjFco2jak2q9WOI/GtONyrhAQWkj2WRGaK2VtdvcKSkAccduR6Eiqe79BkG2nfNphvDctEuZiDzbFtujzCOryloS6y82fgFKju9SAfnFCh8DVh90W6vbxle1HO1Yjq3jl+lZLi0+NbLdbJyJE95bkdXrFSS6gIHKnCtICEpUVccVyVfN+jn4c5qTvxT5T/AC4cPKy6Kfl/SDJ9mGRuj3vWZ9V4/Ce/cVYt/lK6f1tdXCqjPhF6jYXeNr9t07t99ZdyOxTrg9Pt4SrzGW3JBUhZ7ccELTx3+n6DV5q9TbL1Z4D4BefsvsEcXfEqqDeisTRfdvmu8LVjWmztY9f7O3Y7Pb7gny3oileT+BbUo+93ZV0oaBUsuntz86BH88aufi74RecWs97skPJcUcizRcbc5b3bk0mLLUl1TLgS50kstdPmJSr8EDxwATh9GdwuGyPEW1Wu26rLLfZJOLmZaMIOQSEsQLS01I6SGVOENtOuMhKgvsV9S+CeoA+DUPXPS65+KjphqscrjQMKbx1UZrIbkDCt8geROb85p54JStkuL6A6PcKgeCR3rk2Yg/qrvdIdA3AtqWJ1JPx1zXTtIIbtTfeETxILMtwEW5aZKbNR8jb3B+IrYtumSKEnBtMrEcpnWdw8x7ndSltTKn0ejiWg+ypKVcjkK+mpk3nbSbRu50ygYQvJBjV0s1ybuNsuqYYkeQQkocbKOtB6VJV8FDgpQe/HFUv3U5ld9q++bEN69ltz9/04z+1RmJcuEOUPNGOlpxtKjwOvykMvthRAX0kc9iRbEeIztlyC0w29L8ql5vld56WLPi1st0lM+XKWPdaWHGwlkA/OcWQlIBPJ47wxoqbMxsS4E4hrjDics8sIHAAKXOLK5cDYtbB0wloBvlniJ4mVF/ik487i2w2Hjky7P3eTZ59kgOXGSB58pTQKC6v195RT1Hv6mp406zGJphsnxzUdcNtz5N6axLqUcceaWbclwJJ+spA/TUK+KHbMzuGw59/J2Yj17hT7RLvH3PbUI7Thc6XOgKJPQlbgSCT3HftzW66G59o3r5tPxPQu0Z/ap15yXTpNmlW+E97RJgdEEMPLkNt8lgIWeAXOkKV0hJJIBhxqPobXhMvLwRG/wzcRxRoptrb
Li9gNIvuxix6dVpnhVxnM40pyrcRmMj7r53nmSzBc7q+Op4MMhAbjoJ+Y0klRCBwACkccJHEX+JnbZe2bWTTLeHpQkWi+Sbgu1ZAmMPLbuiUJDiUvJHZfW0HW1E9yEo+KQay3hY5ZcdFrvn2zXV1AsOX2a8ru1qiy1dHt7S0JQ77OTwHB+CQ4np56kuFQ7A8bD4jWPSNzWoGlm07AFpn3k3k5DkrrHvpsltSjy/OkEdmyoOOFKTwVFKQPnDnatBq7O7Zo9zDuiAHdAMWLkZWdGzazdo/fxepb1JwxHABRv4xOJ4HcdKtOtXrNjMCNf79emm3bq3HSmU/FchqWht1YHKwnoRwDzxweOOTVv7Dso26O3HCNQ7jgTUzMcXfYvDeQKfcbmzp46XPaJa0KHtCvMAWAsEJ4CUgJ92qveM6m1WXQrTLHIrzTXs2SJTHjlY6/IaiLRyB68DlAJ+sfTXQ/FZ0O54xaLjb5LUmLKgsPMvNKCkOIU2kpUkjsQQQeailhDarqWQqW4DC2OXTM8lFTHNAVMzTM8Tij64LKVzC8XfB8Ut+baJ5RZbHCtV8vWQyGJ11gR0MS5ASuL0KW6kdS1I590q54+FdPa5r+MLcYMfINAmH5bTbjeQypC0qWAUtBcQFZ+gfXVGgHaKH42/G/otST4VT8Dv7THqpb3t7ZNC8f2ZZ8bJprY40+w2w3SJdfZUruJlocSpTzktQLzri+VBalqJV1Hnmtb0Z16yPTDwoLfq/7WX71YMclQba897/S6JrkSITz6hHLfb6EcVNu/wAuEKHsz1UkSZTTbb1gUy2pSgAtbi0JQAfiSSAPz1XHQjTSdrr4RA02xVSJV3l2u4+ysIUCVy49yckNsn6FKLaQOf3wNY1S7wdpDdzD/eCRxj5K9MN8TZifvOHSGntN+63bate9YMc28Y3Ct20mVlDOVWxF3vF6kZdagu/vzE+a7JfS6rrV1+Zxw5yQnhJ9OKwXh8bc9x23vV3URjMcGGOaYZUHp1rt5vcWaYclL48hHQ04o8+QtaFKA4Plp59BWpbAPEJ0kxTR63aHbgMlGGZLgwXa2Hrmw6lmVFQohCCoJPlutj8GpC+OQlJHJ5AtlpXuLj7htQCrRfzJum+Otupu+TPw1tM3SeodLcOF5gSpSW+S466BxyG0jnqJrvqQNodUpXBBA3YTlMWEWjc6AL2XGzEaAp1bEEE78QzibmbzvEk71U/acwwrxUNwramWyhNvlcJKRwP75ifCpL8WHQ4albbV6gWWClV902lJu7K0I5WYKuESUD6gOh0/8lUNaKZ1iGkHira4q1OyW2YvGvVvkCHLu0pEVhZUqK+geY4QkdTYURye/BHrXRqJMxDVrA1vRFpuuNZPAdYClsrQiXFdSpCiAsAlC0klKuOFJIUCQQTw4XP/AEfs5p+01gI4ODiR8Oy7A4M26vjyLz1BaAY6Ktt81S073D+HFdNRc+daFqu+FvC5hCApTNzZSW+G0/8ApBLQnoHxJR9NaD4R2p70rQnIdFsrYNsvul14kMyYshPlOMxX1rc5cB7gpdEhJ59OBUH7PdMdTEa0ZNscyWM47p9pvm3y0ur7nPEplkD2GMR6Ft90Rnyn4hpf11m90enupulG+h+16PtKjQ9zljVj8t1HITFeWttE2QOPx220+dz/AOtVXTjx1TUpC1ceUaAxibyl2Jh3AA5Fc/h4KQpPP7AyTwnCecNwvtnMZhWl29XK3s4pq7vJulv97OJcy7wCU9K1WC1sqZhD6vMSyt36/NT9FRl4V0mRq1juou53OnU3TOctyh63vTnh1LiQWWmltxWSf2toF0+6ngcJR+9FXFe02xv+5Y5pFCjCLYjYVY800gftcUx/IAH1hNUB8Me9XPbbqZqNs01gUmy343JN4x/2pXlt3QdPlOGOpXZzrQhlxIHcgL+KSBFHCNofTbkKYazf5SMXUtAJ33jVTVLjQbUcIl8v4SDhHIOMDdAnIL7+KhjczQ3KtOd
4+lKhZMtt95TaLpIjDoFwQW1ONB8Ds4Olp1tXPzkKAPZI4xvi02XAsz2vYDrzb8Ut8fIb/dbWpNzDCRKMORAedDC3AOpSB0o4BPAI7epqR/Ext03Xx3TnaTp10XHLL7kLV7uTbPvi0WxlpxtUqTx+1oJe5HVx1dBA5JAOs+L3a7Nhuz/A8HgSG0N23I7dDhMqWOtbEeBIb5A9SAOjk/DkfTXI62ymcvFbh5S3HHDFbnI3rqbfaWxn4bsXO+Geg7QdynbB9ke3HJsR05zLK8BZu2VWhi33xV/cecbnTZvltr6pLjah5yApKeltXKEJSEpCU9qqDrnrLjG3/wAVS/6s5hGnybZYsRDi2ITBdddWu2hDaBx2T1LUkdSiEp55JrpdpDPhXTSjDLjbpTUmNIx+3uNOtKCkLSY6OCCPWuel/t+J5h4xdyxXI2YNytV1xR21TYj5Spt4LtBC2VD6Sknt613bWH/rpp0rXqx/I4fkMt1jkuLZS39Ux1L2pzyxtP5nurU7esRsOubVg3Zag3a0ZdfrrEL2NMRQXbbi0VZ96PGSsAqlAp4ekLSFlaSlIQlPTVkK5eab5fe/DA3KSdEdQrnJkaGahSFzseur5Kha3CQnqUfh0EpbeA9R5bvbuD07t9wgXaDHudrmsTIcptLzEiO4HG3W1DlKkqSSFAjuCO1QSx9NtSjZhyH3TqDxnM65m5KtDmPLKt3b940I4RppllCw+fLzVOH3ROnTMBzJHGPKtxnrKYzTqyEh1zgEqSgErKR3V09I455qkev2xTbzZcXs3yqtt1zfVPUPJ7bYzlF6vEtUuRIfeCpT4ZbcSyhDcZuQpKAjhISkd+Oav5VRBl51x8QuLitrX5+OaDY9JlzHE92zfp6Q0Ek+nKGFKA+IUHBWTGg1mti5ufwt8xHAGMMjUicgtHkik52gy/E7yt5wTPIGNVol12D7NXdzmO6V2zRyK1bY+G3HIrtGF4uKi+tUuNHi8qMjqSE/3yfdI57c88VarRXbXoht3YukbRrAo2NovSmlzy3KkSFvlsKCOVPuLUAOtXABA941X57OPk14q6cZvLhaZyrS5uDayvsFutSnJBSn9DT36RVvr9fbPi9kn5JkFxZgWy1xnJkyU8rpbZZbSVLWo/ABIJqWvwbO15MTjk8qjxfkAOyhzMVZ1MCYwwObGn4kr31pmkmcztR8QVl8qExGjS7ncWbcGiT5sJmW6yw8rn4uIbC+3bhY4qA71rvrLqZorkut+CJgYLhKYD68YM63mZer+o8tx3ulS0tQ2nXVICEqS84oKCvc5AOQsOX5ntVyHSXRTO7pbMjw7Lkoxez3SPBMOdbLi0wC00+kLU3IadCSA4lKFJV84K55qWtOLC4QTAAO8zbgbAQbyYMGyhxGGW3AkkjcBfmLzbdIkKz9KUqFKUpSiJSlKIlKUoi07NtGdH9SpjNx1G0pw7KpcdsMsv3uxRZzjbYJIQlTyFEJ5Uo8DtyT9NYmyba9umM3SNfMc0C04tVyhuJejTIOKwWH2VpPKVIcQ0FJIIBBB5qR6UFskPmzUZPbX9tEicq6SNu+mTsxbnmqkLxK3qdK+eeoqLXJPPx55rI5ToHoVnE1q5ZrotgeQS2GURmpF0xyHLcbaQOEtpU42SEgAAJHYAVvlKaQk3laDJ2/6DTMei4lM0SwF+xwnFuxrY5jcJURha+nrUhkt9CSrpTyQAT0jn0FejDdENF9Obiq8ae6Q4VjE9aC2qVZrBEhPFB9UlbTaVEfVzW7UqQSLhIkQo+ynb3obnGZRtQ8x0lxS9ZJECA1c51raefHR8zlSknqKfxSrkp+HFfbLdBtDc+uqr7nejGC5HclgJVMu2Ow5j5AAABcdbUrsAB6+gFb3SqwAIUyZla3immunOCWiRj+D4BjeO2uZz7RBtVqYiR3uRwettpKUq5BI7j0rX8F276EaY36RlOnukGJY7d5XUFzbdaWWXgFfOShSU8oSfilPAP0VIlKtJnFrkq
wIw6LQbDoDoji+SXrMMf0nxWBesiS6i6zmbWyHZaXefNSs9PdKySVj0UfXmsanaztjRKE1G3LS9MgL8wPDELeF9fPPV1eVzzz8alClQBERpZSbzOq0DJNvuguZTUXHL9EcBvktttDKH7ljUKU4ltCQlCApxskJCUpSB6AAD4V9EaC6GNYs9gzWjGCIxuQ+iU9Z045DEFx5HzXFMeX5ZWOTwop5Fb3SmkJxUVfsTtrH+DTpV9jbd/sa2XCNG9IdMpcmfpvpVh+KSpjYZkv2OxxYDjzYPIStTKElSQe/B7c1uFKkEjJQQDmo81F286F6uXBi76maS4rkk+OkIbl3C2NOvhA9EFwjqKf4pPH1VteJ4biOBWRnG8Hxe04/aY5Jag2uE3FjoJ9SG2wEgn4njvWYpUDyiApPmMlK8V5stnyK1ybHkFph3O3TWy1JhzGEvMPoPqlaFgpUD9BHFe2lQQCIKkEgyFHGA7b9AtLLy5kWnWjuI49dXOoe3QLSy3ISlXzkocCepCT8UpIH1Vm830k0p1NXGc1I0yxPK1QgUxlXyyxp5YB9QjzkK6efjxW2UqTeJ0UC0karTsL0Z0f02mOXDTvSjDsWlPILbj9lsUWC4tB45SVMoSSOw7H6BWPyTbvt/zK7P3/AC/QzT6+XOSorfm3LGYUl91R9SpxxsqUfzmpBpQ3uUFslg8bwTCMNsKsWxDDbHY7Kvq6rdbbczFinqASrlptIR3AAPbuBWsYNt40J0zlXKbgGkWJ2GReG1sznYVqZbU+0v5zSiE/tZ+KB7v1VIdKaz06bk0jqovVtZ2xrlGcvbnpeqSV+YXjiFvK+vnnq6vK555+Nb9NxrHLlYF4pccftsqyORxEXbXoja4qmAAA0WiOgoAAHTxxwKyVKG4wnJMjOqjjAdt+gWll5cyLTrR3EceurnUPboFpZbkJSr5yUOBPUhJ+KUkD6qkelKmSbKIGa8V5slmyO1ybHkNohXS2zEFqTDmx0PsPI/erbWClQ+oitHsG3Dbzil3j3/F9BtOrPdIaw5HmwMXgx5DKweQpDiGgpJB+INSLSoFjIzUm4g5KNrtto24X66SL5fNv+m1xuMt0vSJkvFIDz7zhPJWtxTRUpRPfknmtuu2EYZfsXOEXzErNcMdLCIv3JkwWnYfkoACG/JUko6UgAAccDgcelZulRAw4dFMmcWqjfT7bfoFpTdDfNOdHcRx+5kKAnQbUyiSkK9Upd6etKT9AIH1Vvt1tVrvtslWW926NcLfOZXHlRZTSXWX2lDhSFoUCFJIJBBHBFeulS7zCHXCgeUyM1pGmuiGj+jqJaNLNNcdxczyDKXbIDbK3+PQLWB1KA+AJ4HwFbTe7JaMls07Hr/bo9wtlyjuRJkWQgLafZWkpWhST2IIJBH117qUd5hDro3ymW2XwhQolthsW63xmo0WK0llhlpIShttIASlIHYAAAAfVXisGL47irMuNjdlh21qfNfuUpEZoNh6U8vrdeVx6rUo8k/GspSpJJMnNQAAMIyX4pKVJKVAEEcEH0IqKXtp+2aRfXMke0HwdU950Pur+4rHQ46DyFrb6ehSue/UUk89/WpXpUCxxDNTpGi8FysFivNmexy8WWBOtMln2d6BJjIdjuNcceWptQKSnj4EcVH0Xa1tjgyUTIW3PTCPIaPUh1rELehaT9IUGuQalClMji1TSNFq+aaWaYakRosLUTTjF8pjwefZWr1Z485DHPHPQl5CgnngenHoPorHYxoTohhKpSsM0bwewGcyuNKNsx6JFL7KxwttfltjqSR2KTyCPWt5pTKeKbuCjGPte2zxJyLnE276ZMTG3PNRIbxG3pdSvnnqCg1yDz359a9t92+6HZLcZF6vWk2KyLpKdW+9cRa2m5jji/nqMhCQ7yr4+93+NSDSiLA4bgWE6d2k2HA8TtOP29TinlxrbDRHQt1XznFBAHUs/FR5J+JrPUpUkk5qAAMl/DrTT7S2H20uNuJKFoWAUqSR
wQQfUVDh2d7cWbzNv1n04Fgl3IcTPk/dp1nakjkq4cahvNNrHJJ4KSOSamalVgTOqtJiNFyiwTepguxLWbXLRq+aP3aS1IzB+dj7doS0lwtrQlLbLqnVBZbKQ2tKh1nlxXY81f3bEjVC74NL1J1ihu2rJc5nG8mxFxakWOF5aG4sMBXosNIStzsCXHF8gHsJAn6eYBdcljZndMHx+ZkEMJTHu0i2MOTGQPQIeUkrSB8ODWwVamS2kGvu4ANngI03mBJ6amavANQuZZpJdHEzruEm35BVdz/FpOnO9mybhrvZrtMxa74FJxWRMt1skT1W+e3KS+35jUdC3EodbKkpV08dSekkFQ592lul2Uam7hbjus1PsMuzR7bbzj+n9huDfRKhQSSX58hs92n3yVBLZ4Whs8LAPYWUpUU/sw393FHDEST18xA4HKbqX+fF+9E8cIAHTygniM4kKIt3v7lnVn+R12/qy6zG3D9z3pl/JCz/1NqvprJpD/dmxidhdx1FynHbJdoL9uucSypt4E1h0dKgtcmK84g9PIBbUj5x9TwR6tJtMXdJ8Zi4izqDk2S2y3Q40C2tXtEAKgsMIKEoQqLFYUvlPSCXSs+4OCOVcxTt4hPvYY/hxz/cI6pUvgjTF/Vgj+0z0WP19xqwZTp89bL3o5B1NdVIbFvsc2Mw6wZZ5CHXFve6y2jklbnchPUAFEhJ/nbro9E0H0ex/TKM5GcctyHn5a4rflsGU+8t54NI/FaC3FJQPgkJqSaVLfKHRr8vr4bkd5onT6+uZ3pWvZ1p5gup1gdxbUPEbTkVpeIWqJcoqH2woei0hQPSofBQ4I+BrYaVBAIgqQSDIVON1u0vR7FNquoqsDwi5e0WfHpcq2QVX65SosVaQVqWzFdfWyggdSvdR696hXSPxE8k17wnE9tegek10t2oNytjFouV6UGkWyxsJQluRcGg2SohKepSUqCAFlI988BXS9aEuJKFpCkqHBBHII+isFi2n+B4OZSsKwmwY+Zy/MlG121mJ56u/dflpHUe57nn1qWDzOFS7DFuIxehm/LscfK0ss4TB5x8IkfU5KyWpqx2WBZGH33mrfFaiocfcLji0oQEhS1HupR45JPcmqg6CLyXa1i2qWmQ03ye/ZbKze63nGIsG0yHI16jzA2qK6ZgR7OyhJHS6pxxPl9B5HJSFXLpR0ue55PtAg9S023GWj1sobDWBgFgQR0BF94hx/NQjtX2/SNEdPrk1mFwZu+cZtcX8gzC5Nc9EifIJKm2z2PlNg9CfT8ZXA6uBhsgt2Kbf9Lv2OW3nDHbzkdzhy02fHzLcfbiIkrX1zJz7qleRFStajyo8rI6GwpXYWFcSVoUhK1IKgQFJ45T9Y55H89VWX4f9uF6vGQw9225C3zr9KM24uQMzjxRIeI4CilqIlPZICUjjhKQAOAAKpUAqksIhpEGLWFg3kB20uZFqZNPzgy4Gb7zcu5/nusZQ2paUwNENA8V0rg5TGyJWOtPxZc+MsKaXM9ocVIQjgnpCHVOI6T3HRwQDyK0rXnbziutuq2NMq0fs7b1tmwLzfM6kwWUyTHiveYzb4rg/CuOuLbSlajwhtoqHJUpKamXSzTi0aS4DaNPbHdLtc4lpbcSJ12kB+bKcccU448+4EpC3FrWpSldI5JNbXW9R5dW8WZIMjnoe946GRIWVNuGkaehEdDn149RdK8t1tVrvltlWa922LcIE1pTEmLKZS6y+2ocKQtCgUqSR2II4NeqlZEAiCtASDIWg6d6BaJ6SSpk7TPSrF8ZlTwUyX7bbGmXXEE89BWB1dH8Xnp+qvjjO3TQXDW72zi+jmH25vJAU3dtmzsdE1BV1FtxJTwpHV36Pm89+KkSlSb57o6buSjLvPXfzUdWDbht5xS7x7/i+g2nVnukNYcjzYGLwY8hlYPIUhxDQUkg/EGvndttG3C/XSRfL5t/02uNxlul6RMl4pAefecJ5K1uKaKlKJ78k81JNKIsNdML
w694wcJvOKWedjpjoiG0yYLTkPyEABDfkqSUdCQAAnjgcDj0rRcX2s7ccKjXaJiuiWHWxu+RXINx8i1NBUmM4OFsqVxz5ah6o56T9FSnSmpO/Pik5Dcsbj2NY5iNpYsOKWC3Wa2Rh0swrfFRHYbH0JbQAkfoFZKlKkkkyVAAFgo7zLbroNqHlLGbZ1o/iN+v0fpCLhcLSy88oJ+aFqUk9YT8Arnj4V6s20K0Z1JuVnu+faX4zkEzHwE2x24W1p4xUgghCeocdIIBCT2578VvVKgeWI0M9d/NSb58um5YnJMSxbMrFIxfLsbtl6s8pAbft9wiNyI7iR6BTawUnj4du1atpxoBojpDKfn6Y6UYvjMySkoelW62NNPrQTyUF0Dr6efxeePqrf6UFiSMyhuIOS8F9sNkyizTMdyS0Q7ra7iyqPLhTGEvMPtKHBQtCgQoH6CK1vTbRnSfR2HJgaW6d2DFmZqwuSLZBQyp8j08xQHUvjk8ck8cnitzpQWJI1Q3EFaXqJotpJq2iMnUzTjHslXC/3K9cYDbr0fvz+DcI60d/3pFe3BNMdO9L7c7adOsIsmNxZC/NfbtsJtjz1/v3CkAuK7/OUSa2elB5ZA1Q+bNaDle3/QfO7y7kecaJ4FkN2fCQ7PuuNw5chwAcAKcdbUo8AADk+grPYbp7gOnUBy1afYPj+MQnVBbkazWxmE0tQ9CUNJSCfr4rYKUHlEBDcyUrQMp2/aC5zeXsizbRHAcgusjgPTrrjUKXIc4HA6nHG1KPAHHc1v8ASogZqZOS0a56EaH3vH7fid50awafY7SpaoFslY7DdiRCs8qLTKmyhsngc9IHPFe7CNJtK9MjJOm+mmKYoZvAk/cOzRoHncenX5KE9XH11tdKsCQSd6rAiFGOZ7Ydu2omR/K7OdE8Mvd5UQpybNs7DjrxHp5pKfwv+fzUg2ay2bHLXGseP2mFbLdCbDUaHDYQywygeiUIQAlI+oCvbSoHlGEZKTcyc1H+a7fdDtR8ohZrn2k2K5BfbelKI8+42xp91KUnlKSVA9QSe4CuQOTxxW/IQhtCW20hKUgBKQOAAPgK/qlBYYRkhuZOaxcHF8dtl8umTW6yw412vSWE3Ga20EvSgykpaDih3V0JUQOfQGlxxfHbvebVkN0ssOVc7Ep5dtlutBTsRTqPLcLaj3T1IPSePUVlKURK0/UTR7SvVuKxD1N08x/J24hKoxucBt9ccn1La1DqbJ4/FIrcKVBAOakEjJapgOlOmmlkR+DpzgdjxxqWoLk/c6E2yuQoeinVpHU4Rz6qJNeDL9B9DtQburIM90ZwXJbotCW1TrvjsOZIKUjhKS462pXAHYDmt6pUnzGSoHlsFq+K6WaYYLapliwnTjF8ettwSUS4dqs8eIxISQRw420hKVjhShwQfU/TWrt7WtsbUoTmtuemCJKV+YHk4hbwsK556uryuefrqUKU1nVNIWiZRoLoZnEliZmujGC5BIispjMO3THYctbTKQAltKnGyUpAAASOwAFZ3DcBwXTq1qsen2FWHGLctwvKh2a2swmC4QAVlDSUp6uAO/HPYVnqUBiY1TNK13GtO8Fw2732/wCKYna7TcsnlideZUSMlt2e+AQHHVDuo9z6/Ek+pNbFSmRn6+rJmIWuXDTnA7tmtr1IueI2qVlFljOw7dd3YyVSorLnPWhtZHKQeVen75X0nnVdzGl921o0DznS2w3BEK5ZFZ3okR5xRSgPdlISsjuEqKQlR+hRqTaVSpTFWmaRyII75/mr03mnUFQZiPTJU2uWq9nY0+0b0YzLDMsxe7QbpakX6yuY1OkFLFoZ89Xsyo7S0TGlSI8VKVMFfZwc8HkDcP7nea7jNd8Q1dzrGp2KYBpmt6Zi9nuaQ3crvdHEhPt0hkEmM02kDy218Ok8qUlAPTUyyMHuE7WKFqLNmR1W+0Y6/aYEYdXmpkSZDbkh1XbgDojsJTwefn88dud0rbxC53jO9ouLuAcfLPZ
oIvYmcxIxwBo8JvsgBvMZx6lp3gRlmpSlUV0pSlESlKURKUpREpWNk5LjkN9caXkFtYebPC23JbaVJP0EE8iv6k5BYYQaVMvcBgPoDjRdkoT1oPopPJ7j6xRFkKV5IN2tVzQt223OJLQ12Wph5LgT+cgniv4g3yy3N1TFtu8KW4hPUpDEhDigOeOSEk9u9EWrWLW3SbJ9RLxpLj+f2efmFgb865Wdl/mRHTyASR6EpKkhQBJSVDnjmt3qH8U216CYBrpe9bcfsLELP8vZd9pdXPWoupJSX1tMKV0pKilBWpI/m5POobyd4WL7YcdtdriyYc3OMolsxLRbHF8+U2twJclvJB5DSATxzx1K4A7BRBtxTafadAO7ETFuHPjNkI8zyPZF+gE345+isJdbvabFb3rtfLnEt0GOnreky3kstNp+lS1EAD85rAYBqnpzqrDuFx01zS05NDtUxVvlyrXJTIYRICErKA4nlKiErSeUkjvWH1Ysen2pumGS2m/2yw5PBZtsp7yZDTMttp0MOdKwD1BCxyeFDgjvwapl4MM2FbdsuaT7jLYixY+YyFuvPOBDbaRCi8lSj2A+s0pfaPqtdbA0O7uj80qeRtNzfecR2bK6H0rF2DKMZyyIq4YtkVsvMVCy2p+3y25DaVD8UqQSAfqrKUyRKV+EgDkntWHnZnh9st8a7XLK7PEgzHfIjyX5zTbTznJHQhZUApXII4B55BoizNK882fBtkN24XKaxEisJ63X33EttoT9KlE8AfWax2O5ph2XpdXieWWa9JYPDpt09qSGz/G8tR4/TTNOKzNK/h55mMyuRIdQ000krWtaglKUjuSSewFYm6Zph1jtUa+3rLLNb7bM6TGmSp7TTD3UOU9DilBKuR3HB7iiLM0r5x5EeWw3KiPtvMvIDjbjagpK0kcggjsQR8a891vFpsMB26Xy6RLdCZHLsmW+llpA/jLUQB+k0Ns0F8lq+oWtOkmk8cSNSdR8dxzr48tq4XBtp53n0DbRPW4fqSkmtyQtLiEuIPKVAEH6RXLnxe7Rplcsc0v1HwqBjcmfdsjfjyr3akMLcmpS03wlx9ru709I46iePhxXUGB/uGP/AMkj/qFKYxUy45hxHo0/NKnle1oyLZ9SPkvvSsBd9QMDx+5N2a/ZtYLbcHePLiTLkyy8vn04QpQUefzVnkqSpIUlQII5BB7EUzEpwX7SvNJuVuhPxosyfGYemrLUZt11KVPrCSopQCeVEAE8D4AmvA9mOIx783ir+U2hu9PDqbtq5zQlLHHPIaKusjj6qcEWYpWNv2S45isIXLJ8gttniFXQH58tuO31fR1LIHP1V64M6Fc4bNwtsxiXFkIDjL7DgcbcSfRSVDkEfWKZovvSleefcbfa4yptznR4cdBAU8+6ltAJPABUogdyQP00ReilYW75rhuP3CNab9ltmts6Zx7NFlz2mXXuTwOhClAq7/QKzVOKcEpWEyHN8LxFTKcsy+y2UyP2oXG4NRi5/i+Yoc/orKxJkSfGamwJTMmO+kLaeZWFoWk+hSodiPrFM7pkvtSvO/PgxZEeJKmsMvzFKRHaccSlbygkqIQCeVEJBJA+AJry23JccvM6bbLRf7bOmW1Ybmx40pt12Mo88JcSkkoPY9lAelEWSpWBv2fYLi0tqBk+aWG0SXwC0zPuLMdxwH06UrUCf0Vm2nWn2kPMuJcbcSFIWk8pUD6EEeopndMrL+6V5nblbmJrFsfnxm5kpK1MR1upDjqU/OKUk8qA5HPHpXgZzHEZF+cxaPlNodvTQKnLaic0qUgAckloK6wOPqoizFKxOQZbimJstSMqya02Zp9XQ0u4TW46Vq+hJWoAn6hWTZeZksokR3UOtOpC0LQoKSpJHIII7EEfGiL+6Vj73kFhxmCq6ZJe7faoSSEqkTpKGGgT6ArWQP8ATX92e92XIYDd1sF3hXOE7z5cmHIQ80vj14WgkH+emaZL20qtuve8jEtLNaNOdA7HOgT
cpy++xGLsFuAotFtWr3luHn3XXB2Qk+g5Ue3T1WKhXCBcmfaLdOjymgop8xh1K08/RyDxzRvnZ4gykjtE/GOco7yuwHOAe8x8O0L0UrBXDO8HtN4ax665lYoV1fIDUGRcWW5CyfTpbUoKPP1Cs7TMSnBKV43bzZ465bb91htrt7QflpW+kGO0QSFuAn3EkJVwTwPdP0V+We92XIbc3d7Bd4Vzgvc+XKhyEPMr4PB4WgkHggjsaIvbSsFbM7we9TpFss+ZWOfMiBRkR41xZddZA9etKVEp4+PIrO04olYjLMtxjBMdnZbmV+hWazWxovS5014NMso+kqP0kgAepJAHJNZeqM3nJF7xN8atJkPe06WaE9N1vUdJ6mLvf+rpZbdHopLS+rhJ5HLLvPPUOIEvqNpNzMnkBcnpoNSQLTKkwxhqOyEdSTAHU9hJvCslie5HTzKsms+KLiZHYZuTMOyceVfrK/b27y20kKWYxdAJUEEL6FhCyk9QSRyalSqP+JtlrmHzNv12tbvl3iLqZCkxCk8KLaE9LqfzHrQkj481eCrNhzC4aOLewaZ/qjpOsCrpa8NOrQe5cI/pnqlKUqFKUpSiL5SpUaFGdmTJDTEdhCnXXXVhCG0JHJUpR7AAAkk1D0Xdro9MegS2pV6TjV1uSLRByxy0vIscqYtRQhtuWR0qSpYKEu8eUpXYLJIqC95GeXfWjXbBNiWFXR+LFyRSb1n8uI4UuN2dvlwxOofN81KCVf4zQ9FEHY/EojY/h+xHMLJbYUe3w4Tdpg2uOykIQwUTY4bS2B83pSk8cfAVmagZT8d3s4oA1IkBx7yG7yDNs9BTx1BRGcSToJEj0udwI1Mi29K0rRO/XDKdG8Eya7KUqbdsatk2SpXqp1yM2pZP5ySa3WuitTNGo6mdCR2XPSqeLTbU3gHulKUrNaJSlKIlYjFcssObWZGQ4zO9st7j8iOh7y1oC1sPLZc4CgCQFtqAV6EDkEgg1h9X8z/ud6WZZnCU9TtltEqWwj4uPpbPlIH1qX0pH1mv3SPDf7nul2KYQpXU7ZbRFiPr+LjyWx5qz9al9Sj9ZoL4uEdZn4RfmENo4z6R8ZtyK26lKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlEXMnxYtG7LhOWadbtrPiMCai03uNCyiKqMlTU9KHA9HU+njhfUEOtKKueQpsH0FTZv+wjR/WPZHc9QhHtLCbdZod6xS5pjJDjfWWyxGaKR1BLocDXQO3KwePdHFgtxukNv130RzDSm4JRzfrY41FcWOzMtPvx3P8ANdSg/mBrnxsFvudbhLbg23LN7PKasGgl5lXfIVyB7sx9pwi1Qlf8k8qQspPbiO3WTKfjU37HMQQ9p3A+2eJaRIHGNVpUqeE9m152LSN5HsA7g6cJ76Kx3hg5zgmSbPbK/arPabPKxoyLTkXkR0M+Y+xyrz3iACoqZWhRUr4lX0Vk9lmkmGzsZz3XS3YyzYV6xXubOt5tyPYn4tiCy3ES0trpUyXAgvkoI7upPqAap7Ox/UDQbdbqzs0wGHJYsuv7sR6zyGeyLbDkOlUt5I+huMqc1278tI+iusGP2K14vYbdjVjiIi260xGoURhA4S2y0gIQkfmSkCuk1BtAO1xGNsRuNjUB5OAA4YgufAaB/VZ9k4ul8B6gmdZAK5n7d8Vi4R4uef4rb7peJ8ODYZRjuXa5Pz5CUuMRXSkvvqU4sBTiuCpRPHxNfnjAYLhUTL9HMki4lZ2btkN+fj3eciE2mRcGm/ZUoQ+4B1OpSkkAKJABIrMaUPNK8ZjUkBwEmwuIHf8AGEOFyP8AQa/rxknEwl6GXaTyiJEyKWp50/NQB7MrufzJUf0GsqJmnsLnfuerytng+NtQbud/2wru/wByfS/SzT3L4+menWN
Yo1cbVJXLRZbWxCTIUhhwJKw0lPUQFK459OTXO3wr9umEa66F5GvVh+4X3F7dljwj4v7S5Ht65fsscqlSA0pKn1hPQlCVnoRwogEq5HS3N8isEjDsgt7F6guS3MclXBMdEhBdMUtKAeCAefLJ7dXHHPbmqSeCotB26Zg2FAqTmTxI+gGHG4/6jU7OMVauXaMb6Pj09DxCpWMUaGHV7vVk+vqOBWjaZYbadqvitxtItJjIteGZvZFPSLP7QtxlvmI68AOsknpdjlSSSSkLKQeDXUOua2pLzX36HT4eYnlOPpQe/oo2+aQP9I/nrpTU0yXbNTJ0xjoHuhKoDdqeBqGHqW3PoqgeKNh8e5bTswzNu/ZFAn2FmIY7UG8yo8R5K5bTa0vxkLDT3KXFDlaSR24IqHdv+wvTbdHs801vGrOU5Kq8JtgFmk26aWmrRDS85/e7UdQU0rzOOpxxSSsqI4UlKUpqwHiZqCdkepRUQOWIA7/8/j1mvD3WlezHSooUFD7ikcg/EPug1TZ2gsrTo5kfyu+upGpm9VxBpcn9vJb1+G4LRtTdsGrGb7oNKJk+Xa7zoRgFmQh+z3aV5vmzWmXW0uvRynpfd58hSVq5SnoV6HsqA95Of6aYDu40Bzfb5dLLGvar4uyZK9jqUpjyoxkRkezPrZHlOKCXXklBJUkFPIHu1termtDur3iLWzalqXf3LRpfZI6VuWf2gxmciuCoiZDTcpQILrRUsBLJPQoo4IJVWo+JlkensbXDbZhuK3SyNfJbIT7Zb7ettLdsaVKg+WlaEe612bWQk8dk88cVpszi+tszzk6oTxMkyT8I+7ExkqVmhrKzB7tOOAhstA+M/emJUreL1hdonbX5mcrkXVu6Wq52+OyG7pJRFU046UrSuMF+Sonq56yjq91PfgcV69HNlGjWuG2HBsh1oZuuYX67YXAEKfMuDzYsrCoiPJZhMtKS00lsdPfpKnFAqWVc8V7fFomRJGy/IPIlNO9N8tjSuhYVwsSByk8ehH0VOO05Sf2KGlKuRwMItPf/AOk2652gfqu0nc5scJYTbdz571pWcRXoR913o8Z/ly3KrHg45vkNx0czrAb5dXpcDC8hDVuU8sq8hh1slTaefRAW2pQHoCtVe/a5dbdvq1z1H1r1Ohs37CsDuibDg2OzkB63xjwornLYVyhyQpIQQtQJT5hA+anjSvBxjt3TDtboKXgBKvzDfUk+gW08AawXhTZ7B0R1T1S2p6myW7Jkjl4Ei2tS1BoSpDPU080gq+cpSPKWgD5yeojmuyTU2kYva8JpbxdDMR5hs9zqsXgUqTw32fFIPAS6ByJieAvZeHxd9F9PcBb02zXBceiY89eb27FuUO2NCPElrQhCm31sI4R5yQVp8zjqKVcEngcWU8SLcrlO3zQW0W3T24G35ZnMtFngzk/PhMBvqffQfgsAoQk/AudQ7gVCXjTZJYW7BpTjqrvF+6jN8kXByGHQXURvLSnzVJ9Qkq5AJ9SDx6Gs/wCLJp9L1Y24Ylq3p3KZvkXBZ5kzV291L6Uw5DSAp4FBIIQtDRP0JUSewrjkDZji9jxRP4YZPTMcL7l1RO0sj2vDdHOXR8vRWfwPZ9oHY9KYun+Qaa2DI1TYSfu1c7rBRJnXOUtH4WS7IWC6XFKJUFdXKe3TxwKqbtC1AzHbVvVyzYtf8hn3jCZJek4j7c8p1y3p8gSmm0LPcIUwVJUkdutsEAcq5u3tv1Ys2t2h+HalWWY2+m7WpgykoUCWZaEhD7SvoKXEqHH5j8aprguGO62+LDluruOtl/F9LoDdul3FHdl25+xez+zpUOxWlTjvI+HlHn1HPY4Fm3FhyIeHDSALHoYDedlwsOLYcWowkHXESBHUTi5cFHWu2mcWxeKtpri2I5XktlTkFsbkPzhdXpsxhTrUtt4suy1OqbK0N8A9+kq6k8ECtu8SPaJohpNt4f1k0xxmRYcyx++QZP3bTc5T82Up14IWp555xalr6lJ
WFk9QKexHJr+9wS0Hxf8ARkdQ5Fkjgjn092fUy+LIpKdlWUhSgCq52oD6z7WiuFzizYab2mCKjhOseILcuC7wA/bntdceG09cDrr66QbYdK9y+iOI6rbjrbK1DyzL8fjTX590mPITBQ80FBmGy0tLcdKQR7yE9alcqUok1FfhD5HebczrJolJuciXZ8GyRItSXllXkJcckNuJT9AJjpVwO3UpR+Jq1uzNaHNp+kikKCh8kLYOR9IYSD/pFU98J1aFaz7mOlQPOQsEcH1Htc/vXo1QKe3VqTLNwvtpZ7Y+JXnsJdsNOq67g5l+YIK6T1yz8WbBm7NqzovfrFkmQMTcnvjyH0S7tJmw47rT0XynWIzzimmekuq5S2EpPxHxrqZXNXxe5MeHne3aXLfbZYYyCY4644oJShAegkqJPYAAc81yMj9aoT99q6ST4NUfuO+BUw6weF/oBqVhV7cKb+/qNPZckIzK43qTJmSZ3SSlT6FL8ktlQAKENpCU9k9PFaR4T2tuoee6T5to7mF0cl33TaUiJbZM5anFtsPJdS2y4T3Ulp1hYH8UhI7JFXqyLKLDimMXDMb9dI8Sz2uG5PlS3FgNoYQgrUvq9OOB+muePh+2vIdJtv2u+7+djjwVljlwyCyQHUFJkRIaZDyFkeoQtx1aefoQSOxFVDxTFYOswMkxaCCIjcSMWWgKktNQUi27y8ATqCDineMupClLa5oVK0RwXOc73wysGuGU5Vf3npF7vUhiWpyGW0Jbjl18cBJUHChlHwUBxz7ojvwwss51c3DaP45PljBLNfnJmOwlea0IDLkqQ2Q0lfC2gpCWvd4BBSDwDzWe8OTNMU1lwfJNzWtmZW6/aiRrxJYlTbxJbS3jkEJSWm4raz0RGlAqJUgJ6u4JPSa0jw3sqxy7bwNz9+gXmI5b7ndXJsSR5oSh5hVxkdLiSfVJ60cH+MPpFdFNhG0im8Z0jYZQA0t5nXS855rKo4HZ3VG//wCQXOc4nA8hpraMslpmY6Xy2fFktWnGPZ9lUOFcbQt1c2ZeZM+dFYetzpktx5Eha3G1LSFhKuT5fXyn5qeLc4BsMwbbzfNQ9Rtud4vFqyjKMblWu0wrnPMmBBlKHW271LSp1f4VDZ5cUvj3vXngQBkzjY8ajHAVpB+TwTxz8fuU+eKunun1XvGh23vOdVMegNzLpYLWp6E24kqbDy1JbQtYHqlKlhRHxCTXMXCnsbahkWqCRnGJ4gdMv8reDU2w0xB/ZmDlMAz3zUD7dNC8Q0H2+TIu8xnB5eXZZcpr15uF2canyrl5p4bZLzgLkh3pHZDfVxyOkc81o3hEZhd8j0z1N04n3WfJsWK5EY9mQ684h2JFfS5y0hXIW2AW+oAEFKlKI4NbZ4f+SabXvQMbn9Uc7tt4z25vzRk2TX6c2XrZ0vLCIbanCBEZDQQoNo6Uq6ueCOOIz8Hi82pxOuLvt7KBJymG40HFhClBwyAjseDyo9gPXntXVhIrVKbo/ZgEDKzmADjaYNtea58QNJr2z+0zOdw+eV4kcp3LR4GlMGN4td600x7LMos1om2Rbkh5i7vvTyy5b23nmUS31LebC188rSoLSFKCFJPBGR8S7bZpRtm0/wAG1z0Ex5WHZVaMrZj+2xJkhxx8radeS44p1alKWlbI94nkhagSfhstsWk+NZcQFAkY7we/x+5CO1bb40Skja7jySoAnNIfA+n+9JdcT3up7Hs9Rhh1r6/tSM+Xz3ldbGiptNVjhIIy0/ZA5b5+W4KSpey/RXWfSQZnrBaJeZZvkuPJnSckuU172lh91jzEiMhKw1GabUr3Gm0hIAHUFEqJjLwitUbpN2u5Ra8tujsiDgF6ktRnXVlRYglhDxbBP4qVeaQPgFcegq4enzja9DcbeSsFBxSGoK57Eexp71QXwi7Ecs25a04s28ELvF3fghXPzfOg9AP/ANdXXXmjW2xlEeyyw0kPgQPRctCK1HZn1jm4SdYLDN1ldvOsWGb
gmMi1z132+51qbNvN5lRcfZawtd7s9ktLRCG48ZKgWkuk9RdWE9aiRyfhWu6KY3qzpDvwF90L0P1LxzRLN32mLxa7hjsiFChLca4U75ah0NJbeAWlQ+ahSk9k9qeFluTxPSK3ZRtR1rvUTEb/AGS+yX7aq7PJjNOrJCH4pWshKXEuN9SQSOoLPHPFXgzDcphTGVWPTHTK82jMc4v8ptLdtt8xMhFvhBQMibLW0T5LTbfUQFEFaylKfUkWa1lOpSfRuC0AbnAgTPWSTvlxOZUOLntqsq2MmeEExHIQAN1lRTeZpdpqjxKNDbGjAMdTbsrXFk32ILYz5N0ecnPBxySjp4eUoAAlYJPHerE71tQbRtC0Dt2G6A4vbcOumfZA1ZICMdtbbBiqeH98SmWWkgKf6EpQk8c9SkH4CoY3xTI1m8SrbdeLo6mNCAgNl9w9KAfug6O5Pb1Wn+etm8Xu03K66T4vn+GTGJk/TDJ4826sR3A47ARIb/AuuoSeUAuNtgdQHPWD6Vy0y1uxU8Vm+K4H8ONoI5Qul4Ltsfhz8NpH4sDoPOU1M0+24ZdoretOrVs01STfZFsdTCyJ/Tl5V1Nx6CW5T00gvLWpwArKlHqBII47Vs/h7WbXXLNu170e3MWHUDHnLFNRHtM6VIm2me9b1o5S23JbUh0htaVDsrslSUnsAKmrQfePoVrdppbc6h6i47apaoqFXa2XC5sx5FukBI8xDiHFA9IVz0r46VDgg1smkut1t1sv2QzsBYan4LZVIgRciST5V2uAKvPEU+jjDQ6El0chS1KCeyCT0OYWOqU3CcQvuEGzhpwB3EALma4PZTeLYTbfcQQdeJHAlc1/Dr0Ds2vF614wLUrI77ccRjXeM1PgN3F5l+6SErlIYckyEKDi0thKlhHPSpZSpXISAZj3TaFS9l2w+84NoheMoulll5WxOyN+RIHtKbY8Ql5oLaSnobUW2G1EDuFq57KIrG+EO42rO9xAStJJyGKocH4edN71ePWTUrGNP2sTtWZ26HKs+c5C1icj2wp8lBkx3yjrSoFK0rW2hspPA4c5+HBycHGlSwe05tL+I+UgHfJgcrZWW2Jra9Uuya555CDJG4gSefFVtax7aXvn27/cXQSBicC+2OOxLtMRuG1Cn2KW0UqS26hKetLSiC2pQ6kLBJBUQKuogEISFevA5rl9vc2KY/tvsc/dvtbyu44DdMWfZmS7THkKEfpceS2VRlE9SPeWOplXU2pJIASPdPQ7RLKsiznR3CMzy6AIV7vmPwLhcGAjoDch1hC1gJ/FHUT2+HpWwcKzHPbaHXHEjMHWQPQarHCaTmsNxBg8ARY7onlcr16q5m3p1pjlufOgFOOWSbdOD6KLLK3AP0lIFc8vDIwfc1F0dvuren8jTkjUO+yJsqTk7M9yY+plSmyoFhQT0eYXj35PJVV491OOXPLttep+N2Zpbs6filyajtoHKnHPZ1kJA+kkcfpqMNkOT4fp1sGwHMb3c49usdmx1+4XCU4oBLXS86t4n6T1dQ49SeB61hTIpmtVcbBrByBLyf7AtqgL20qbRcucecBoH9xVWdeE6y64eIJoloXqe9iMtzDXm8klpxhqUmO0z1iQ4HvaFKPWURWwOOBw4n6a6Fala9aQaRKcj6g57bLZORCXcRbgsvznIyerl1EZoKdWj3FDqCSPdPfsarhsU0uyTLs4z3exqbZ3oF91QkFGNwJSCHrfYUkBkkH0LiG2eP4raT6Lqxu4X/eE1I/kld/6o7UV3P2XYwCIcA55G4uvHQAA8QVNJrdo2owfKcLQd4Fp5EkkcIKyek2p+Naz6dWPVDDkzBZshjmVDEtoNvdAWpPvJBIB5SfjW3VXjw9v3GGlP+RT/r3akHWzcNo7t1sUHI9Y80Zx6DcpXscRSoz8hx53jkhLbKFrIA7lXHA7cnuOenamtoVnMBsDA7wFhs7nVqTXxcifSSpGr8JAHJPYVi8VyrHM4xy3ZfiN5i3azXe
OiVCmxl9bT7ShyFJP/wBj1B5B71knUFxpbYPBUkj/AEVjUxMBtcacVo0h0EGxXLXZy1rjrNup133MaYuYWp37quWBh3KW5biExlO8toZ9nIIKWo7APJ9FDj1r6+JbfNyuQ2bTnbxnknT6TI1ByNpUOPi7M5MhSmlJaT5vtCyPLK5KT2HPKPXtU1+FNjgwvRvUaxXVAj3W16i3aJcw5wlSFstsJ97n0HAJ7/XXw0fxh7dlvMu+6y4R1O6c6atLxrA1uIPl3SYgrTInNc9lNpWt3pWPUlvg8oPEsYwnZ6ObGtY4/hDQ71eQBz4Kaj3NNeqLOLntHOS0dmgk8laC75ph+h+MYxhjrNyuU8w27ZZrNaYapc+amO0lKi20n5qEJCSpxZS2nkdShyOdcsW6vT2XqJb9KM0sWU6f5VeklVng5Tb0RkXTj1TGkNOOsOLHb3PM6uSBxzXsynQORkW4vEtwEXUm+2r5M2eTZpFgjcCJc2nVKUPNPPoFKCinpPJbbIKSnvXLxD4cjV/UjRPQDThky89ayhnKHpEccqslrZ91cl5Q/a0qUQRz84tcDv082a81ajDUuXug8ASb9G+c8JFolULBSpuDMmNkHiBkevlHEg3yV6KVruoWoWG6VYbdNQdQb8xZsfszIfmzXkqUltJUEjhKAVKJUoAJSCSSAATWI0e1u0s19xIZxpJl8bILP5yozjrTbjS2XkgEtuNOpS42rgg8KSOQQRyCDVR5pjTPgrHywTrkt5qt25ncbleK53ie2/Q2LCmap571OtSZiPMi2C3J6vMnvoHzyAhZQg9iUHnnslVkapFilmcxvxXsxuuZ/g/lVgDKsSde7JeS17MmQ00T2K0lp5RSO/BJ9DUNHiVmU3ZHETxwtLo6x2mIMKScFJ7xmIjq4NnpM84my2bU/bVgCXNOsKzJc3UXKstyqL91L5lTxnPORIaFzpXlMr5ZisrEdLPlsoQnpe6TzyScTrNdkbMta9J7tp1LkQcA1Jv3yWv+Jl9bkCM850+TNhNKJEZSSo9aW+lCgB7vPepuCk5TukddUoKh6eYgE8k+6mbdJHJ/zksQB+h766rxl0F7e3u7wx7FUqlaR6Fzl3CffUDmLd78FIUIsZfo6lstt9Sk8gfhBz7yebUTNSmBaXFzuDQcLpG4tb5R95wi8KtUQypOjYHFxGJpB3y4TwaZsCrzUpSoUpSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiVqOCaUYFprcsou+GWBm3S8yu675eXEEkyJakpSpff0Hu89I7cqUfxjW3UoLGRujpIMdwOyG4g8+tx8z3WpXLSvBbtqXZtXp9jadyqw22TaYE4k8tRn1JU4nj0J90gH1AWsD5xrY7nbo93t8i2SnJTbMpstLVFlOxnQD6lDrSkuIP8ZKgR8DXqpSPLh0/MyfUkpN8Wv5W+AUAWzYjtjs2eL1RtWG5DFy911b7l8bze+ic4tY4WVPe2dauoEg8k8jse1bzrZt+0q3DYQjT/AFWxw3e1MPIkxj7S43IjvISUhxDyVdYVwSDyTyCeeakalQQHNDDkNFIJDsYz3qFMR2cbfcG04n6V4xiNwt9iu/H3UVEv1wjTLgkIUgNvymXkPONdKlDyivy+Cfdr06ObRtA9v8yTM0fxS6437b/ulhnJ7q7GfPSUhS2HZKmlqAJ4UUkp+BFTFSrSSS7U2PEblWBAboFAE/YjtiumfJ1UuGG5A/mKHkSEX1eb30zkOJHCSl72zqTwkAAA8Adh2qe2GURmG47anChpAQkuOKWogDgcqUSpR+kkkn419KVAMNwDLcpN3Yjmo21k276T6/25uzas2a7Xq2N9P/g9rIrlCiOFKupKnGIz7bbigfRS0kj4Gv60g296V6DW0WXSu1Xmz2tKVpbtzuSXObDa61d
Si2xJkONtqKu5UlIPc9+55kelG+ScNpzR3mjFeFBOvGyTbhuQyGHluqWDKl3qG0mP7fDnPRHXmUnkNulpQCwOTwSOoDsCBXmzTYftYzjTmy6VXDS2HAx7HpSplvatj7sV5t5aQlxSnkq63SsJSFFwqJ6UnnkAif6VAaA3CMpnrn8b81JcS7Ec8umXwtyUHZNsp205fgdl0vvunsheJY+kewWWLf7lDhpWCo+atpiQhLrxK1kuuBSz1HlVbLju3PSzEtOntJ8ci5LBxZ5pLAgt5heCWWhyPKZeMousNkKIKG1pSR6g1JlKk+YOB97Pjz3qB5YI0y4clDGjezzb3t+vL990ew65Y3JlgCUhnJrq7Hk8BQT5sd2Stp0p6ldJWg9JJI4NeTXXZVtw3GXVrItTtP25F8ZQltN2gSnYctSE/NStbSgHAPh1hRHw4qcqUd54xXjLhyRvlmNc+Krqx4fO0dqxWvHJGksabCtc5Vy4lzpLzkuSUdAXJcU51vgJ5CULJQOTwkc1N+P4ThuJ46nEMXxOz2ixoQpoW2DBaYi9ChwoeUhITwR69u/xrN0qSS6Qdc/h8LclAAEEaf7+KgaBso0Lx64XKVgsfLMLh3pwu3K1YvllxtVvlrPYlUdh5KEcjt+DCO3YVK2A6c4NpbjLGHae4xBsNnjFSkRYbfQCtXznFq+ctavUrUSonuSa2OlQLDDopNzJUDXnY3tqyHP2dVL3iOQzMwjuNusXtzNr77YypHzOhz2zqQE/AJ4ArZNV9sOjWuNlg45qrZL5kFqt6G0swnsquzTCi2CEOOIakpDzo6j+EcClnnuo1KtKiBhDNBeOO9TJxYtcp4KNMK27aW6c4K9prhMbJrRjjqEtohsZfeOYyAoq6Y7qpRcjAlR5DSkc89+a1vSzZbty0SyR7LtLMNu+PXSSUmU7Hyy8KRK4V1APtrlKbeHVyeHEqHc/Sam+lWxHFjm+/VVwjDg03JXNTxfY0WZnO3eHNYafjv3+a2806kKQtBeghSVA9iCCQQa6V1WzXzYlpxuTyqDleqOo+okh6zrWu0xYVwhxo1tC1JUQylMXq55Qn3lqUs9I5UeKoB9tSeRIa4E8hor/APTe2blpA6iPRZcbINAnEt2ydAymfjLLyZDGJzMsuT1hZWD1J6YCniz0g9wggoHwTx2qcE2e0otIsKLZETbBH9kEMMpDAY6enyujjp6Ont08ccdqxmF4xccSs4tNyzi/5SpKuW5l6ET2hCAkAI6ozDIUO3PUpKlEk8qPbjP1d0EYcx9f6VGzOLVVlxfw3dnGJZyc+tmkUd6al/2liJNmvyYMdznnlEZxZbIB9EqCkj4AcDjPSNi+2OXrJJ13lacodyuU/wC2KcVNfEZMvgf3wlgLCEu8gKCgOyveACveqfKVA8sRplwn69ApPmmdc+Kr7J2F7XpmdJ1Pl4XkL2XofTJTfV5xfjPS4kcJUH/besEDsO/YdvSppl4fjtyxN/B7xAVdbLKhrgSY1yfcmGQwpJSpDq3lKW5yCQSpRJ+mszSoIBZ4Z9ndp2SfNj136qtumfh3bRtJ8yTneLaVtO3Vh7z4Zuc1+czCXzyFNNPLUgKB9FKClJ4HBFZbFdi+2TDNVblrNYdPAzlFyfflB8z5BbiPvclx2O319LLhKlEKTwUE+4U1PlKsCQQd0gdc++qggGRvz6ZdlArGxrbTG1CVqyxiORIzNbpeVfhm999uUoo6Dy97Z1EFHu8c8dPbjjtWc1h2naFa/GInV7GLvkbMDpMaK9lF1ajNLCejrSw1JS31lPYr6eo8nknk1L1KrALQ3QZcFaTiLtSo3t23vTK06dHSi2tZRHxfy0sJhozG8eY2ylBQGUP+1echrpPHlJWEcce72FYPRzaFoBt+uT900exK6Y25LUlUllnJ7q7GkKSFBJdjuyVNOEBSuCpB457cVMlKtiOIvm5zO9VgYQzQaKAtcNiu2PcJkHyu1G06Q5flBKXrnbpb0J+QlI4
Ad8pQS4eAB1KBUAAAQK3DRLbVoht0tkm2aPYBBsIm8e1yQtx+VJ49A4+6pTikg8kJ6ukEngCpNpUM+zBDbAqXecy66iPcDtV0S3OQbXD1dxVdxcsri3IEuPLcjSGAvjrQFtkEpV0p5SeRyARwe9bDguhuk+nGnruleJYPbo2LyUOImQH0GSmb5g6XFSFOlSn1KHAJWVEgAegAre6VAADS0ZHPjzUkkkOOYy4KqA8LjZN8o1ZErSV0hTnmiB92pohpVzz2bDvpz+Lz0/DjjtVh52mmGzMJb05i22RZseYZRGZh2G4SLQWWk+jbbkNxpxCfpCVAH481tFKk3bgOW5R72PXeoI0z2P7atGr85k2l2F3rGrk/0iQ7BzC9IEkJV1BLyDLKHU89+lYUPXtUgat6LaZ66Y9FxTVTGU320w5qLi1GVJeYAkIQtCV9TS0q7BxXHfjng+oFbvSh8wAOQyQWJIzKh+VtX0ru4gQ8rfyzKLPa32pMKy3/ACifcLe242eW1LZedUH+n4B7zAPoqX0pShIQhISlI4AA4AFftKmTEfXNRAzX4QCOCOQahpraPok0+Iy7Jc3seRcV3dvFXrtJXYW5il+Yp0QCvySOslYbILYUSoIB71M1KgWOIZ/XzvzUm4wnL6/1yK/lCEtpCEJCUpHAAHAA+ioV3X6s6X4No9nGM5jqFjtmvN3xO5pt9tm3JlqXMK47jafJZUrzHOV+6OkHv2qbKVnWp+NTdTJzBHcLSi/wnh4GRB7Kpvhz6saYXTbLprptA1Bx17LYFncTJsKbkz90Wuh5wqKo3V5gABB56eOCDW97v9KtE9T9PIjOsenVzzNcCb/4AtlpVIROkT3EkJZaUwpJSFhPvqWQ2lKSpRATyJ3pW20kbS4ucIkz6zbdwzg34LHZwdnaGjQR6Rf55WUUbV9IJehGgWIaW3B1CploiOKlJbdLjbTzzq3ltIWe6koU4UBR9Qnn41K9KVL3mo4vOqMaGNDQomyjbBpTld/vd+lR77bxlRbVkcC03yVBhXsoT0gymWVpSslPuqI4K09l9Q7VJdjsVlxizw8exy0w7XbLeymPEhxGUtMsNJHCUIQkAJAHwAr3Uqg8owjL6+GisbnEc1WveNvTwDa3bLbYJ94Yby7JB/eDbkZyQ3Aj8lKpr7bfvKQkghLYILix08pAUtNedM9821jGGXMc0PuuQZXq5qBdYUORfr/ZXPPus599toOSF8pCGW0rUUMo6UISkJSPUno1SlLyOl978rbtY47zyACp52w23rffp03DmSdL1mxDTjO9MMixnVu1JuOIyYanLowfN5LTZDnUnyfwnUlSApPR73IHHeoa2Pbe7JofYc0vWM4ncsTsea3ludZ7Bcn3HZcKAyylppb/AJilKQ86rzHVNk8oC0IPBSQLM0ozyFzh7wj1B+QjdffY7zBoOhn0j/e+266tQ1H0l0+1ZgQ4GeY8ieq2SBLt0tp9yNMgPj0djSWVJeYX/GQtJPx5rb6/FJCklJ54I47HiocJCkGFVPRPbrhWodrzDL81v+Z5HacjyeeyxCn5PNMebAhL9iYMlLbifagRGUoB0rSUrHINWesNgsWLWiLj+NWaDabXBbDUWFCjoYYYQPRKEIASkfUBXmw/E7HgmL2zD8aiKjWu0RkRYrSnFOKCEj8ZSiSpR7kknkkkmsxVpgBoyAA5wIk8VWJJdxJ5SZgcAlKUqFKUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKItTy7VzSnT+e1a881OxPG5r7QfajXe9RobrjfJHWlDq0kp5BHIHHINYq37h9ALtPj2u1a5afTZst1LEeNHyaE4684o8JQhCXCVKJIAAHJJrXd4mPWTI9ruqUW+WuLObj4ndJbIfaSvy3moy1tuJ5HuqSpIII7giqgeCb
j1jTo9nWUC1RTdnMkEJU0tJL3kIjNKS2F8chPUtR4545NKH2j6jXe4A7nLohK32bGOHvEjlAmV0ipXmlXK3QXo0ebPjR3ZjnkxkOupQp5zgnpQCeVHgE8DvwDXgVmOIov4xRWVWcXtSeoW0zmvainjnnyurr447+lM7JksjNmw7bCkXG4ymo0WK0t9995YQhptIJUtSj2AABJJ9AK1bTDV/TLWmwvZPpXmltyW1x5K4b0iC4VBt5IBKFAgEHggjkdwQRyDzWx3qzWzI7NPx+9w0S7dc4zsOXHXz0usuJKVoPHfgpJH6ajnb9oJovt2x66YLozbUQork8zLk2qeuU+JCkJCQ4paipPCAkJT27d/Ukk3N2LKLc5vPCPVHZDDnN+UWjjPopTrB5vnGI6b4tcM2zvIIdksVqbDsydLX0ttJJCRyfUkqIAA5JJAAJNQNed52Gu7tsd2x4rc7bLU3Eny8ouK3h5UNxphSmYiF89Pm9XCl/veAn1KumY9TdNdPteNOrnp7nMJu841fW0ofQzIKeroWFoWhxs8hSVoBBB9R35HIqrsRph9PXLuR8jz5XVm4RUwP0z7T8Csvhea4pqJi9vzTB7/DvVjurXnQ50RzraeRyQSD8CCCCD3BBBAIrN1pmj+n+nelmnVo0/0qjsMYzZUuRoiGZRkAK8xRd6nCSVL8wrKuT87kduOKzWRZnh+IIacyzK7PZUPnhpVxnNRg4f4pcUOf0VpUwBxwZaSs2Yi3zZrM1o2Va5aOYRfYGLZXqdjdtvd0lswIdrduLXtj77qwhCEsAlw8qUBz08DnuRW4wLhAusNq42ybHmRX09bT7DiXG3E/SlSSQR9YrmH4guBYTh29HbTcMSxK0WV+7X6Mqeq3wm4/tK0XOOUrc6AOpX4RXvHk96hgxbRSonJ7g3l9QrP8tCpVHutn67rpblmWY1gmN3HMMxvcS0WW0sKkzZ0pwIaYbHqpR/mAHqSQB3NeXAtQML1RxSDnGn2SQr7YrklSos6Ivqbc6VFKh9IIUCCCAQRwRXz1I06xHVrBrxpznlqFxsN9jmNNjeYpsqRyCCFJIUlQUAoEHsQKxOiemGmWjenkHTnSSKzHx6zuvNoQ3LMlXnlZU6XHCSSvrJ5BPb04AAFQ33sXCPWZ9IR2TcO8z2tHHPot7pWKcyvF2nFNO5JakLQSlSVTGwQR6gjmv6YyfGpTyI8bIbY864oJQ23LbUpRPwAB5JpmiydK+b77EVlyTJeQ000krW4tQSlKR3JJPYCsVj+aYdlin0YrllmvKox4eFvntSC0f43Qo9P6aZ2RZmlKwma5ri2nWK3PNs1vcW0WSzx1SZkySvpQ2hP/WT2ASO5JAAJNVc4MBc42ClrS44Rms3SoA2jbo7Zuawe8Z8r2K1RnMmm22zQXHkpkqhNJbDSnEk8lxfUpR47Dngc8cmc7rd7TYoLl0vl0iW6GyOXJEt9LLSB9alEAfpNXcC0AutIB7gH5qoIcSBeCR2MfJeylYrH8rxbLYq5uK5LarzHbV0Let8xuShKvoKmyQDXvlzIlviuzp8pmNGYQXHXnlhCG0juVKUewA+k1BtmpF8l9qVi5WU4zCuMGzzMitbE+5jqhRXZjaXpQ+lpBPUsf4oNfa832yY5AXdchvMG1wmyAuTNkIYaST6crWQB/PQ2uUF8l7qV4LJkFhyWCm6Y5e4F1hrJCZEKSh9okeoCkEj/TXvoRCZpSsEvO8HbvwxZzMrEm9KPSLcbiyJRP0eV1dfP6K9WTZNj+GY/cMryu8RbVaLVHXKmTJTgbaZaSOSpSj/AP8AH0qC4NbiOSkAk4RmsnSq87S92Nj3O2XL8virh221Qsqfs1gYecDcmRDbYYKHXEk89bi1rVwB2BCe5BJsNVi0gAnUA9wD81UEEkDQkdjHySlfKVKjQozsybIajsMoK3HXVhCEJHclRPYAfSaxd4zTDsegxbnf8ss1thzun2WRMntMtP8AI5HQpagFcggjgn1
qFKzNK/hp1p9pDzLiXG3EhSFpPKVA9wQR6ivFe8gsOMwVXTJL3b7VCSQlUidJQw0CfQFayB/pobZoL5LIUrxWe92XIYDd1sF3hXOE7z5cmHIQ80vj14WgkH+eq9a97yMS0s1o050Dsc6BNynL77EYuwW4Ci0W1aveW4efddcHZCT6DlR7dPUF3tp6uIA5n6k8E9xz9Ggk8h9d1ZKlY05JjqYyZqr/AG4R1rLaXTKb6CoDkpCueOfqr+mcgsMiO9Lj3uA6xHALziJKFIb59Oog8D9NEWQpWMj5Njct5EaLkNteecPShtuW2pSj9AAPJr6zL7ZLc+I1wvMGK8oBQbekIQog+h4J5oi91K/FKCQVKIAA5JPwrA2nUDA7/c12WxZtYLjcGueuJEuTLzyePXlCVFQ4/NTOycVn6UrHSsix+DHnS5t9t8di1kCc67KQhEUkAjzSTwjsQfe47EUmEzWKzjU3TnTK3G7ai53YMZidJUHbrcWoqV8fBPmKHUfqHJrJYxk1hzPHbblmL3Nq42i7xm5kGW1z0PsrSFIWnkA8EEGqVeK7i+nWSbRLtqZBslguN2an2owb9HYZdfLSpARwiQkFRQUrUOArjvVgNok6Fbtpeksm4TGIrIw+1AuPOBCQTHRwOT2pS87KjnWLHBvcE/klXyPptGTmk9iApppWOiZFj9wfTFgX23yXl89LbMpC1HjueADzXxyDL8TxNLK8pyi0WZMhXSybhNajhw/QnrUOT+aiLL0r4xZkOc0l+FLZkNrSlaVtOBaSkjkEEfAj0NfaiJUVZXuU04xi/XfG4bV+yafjbXn39OOWl24os7fSVf3ytsdKF9IKvKBLvA56OK0LfpuPuG3bRNx/ECHM4zCUmwYyynutMl0cKfCfj5aTyPh1qbB9a3Lb3o3aNvWg0HDpjokz24btzyW4vHrcuFxdR5kt91Z7rJVyASSelKR8Kye8inUqjJnq6JjkBE8wBrF8MOYzMu9BMTzJsOROkGSsUyvHM5xu25hiN4j3WzXiMiXBmR1dTbzSxylQ+P6D3B5B4IqGcA3b4/qNuizbbTYMQuLi8HgCTNyBL6VRlPgthbBRxykgu9IPUeShY4HHJgrwzdVGbBsdyPLsikKFmwa8Xx5kqV2RDbQmUUD/ADnF8fnrc/DW0lumM6S3bXDNYxGY6yXR3J563E++iI4taozff4ELW7/0o+iuosArn7gaDxl48g6eYn8PFYlx8KPeLi2dIaTiPWwG7F1VvqUpWSulKVp1z1UxW1apWTR54y3civ1rl3llDLPU0zEjrQhS3Vc+51KcCU9jyQfTiguQ0Zn5Ak+gJTIT9Zx8TC3GlKURKUpRErB5rnGI6cYzNzHOshhWSy29HXImS3OhCeTwEj4qUSQEpAKlEgAEnis5VGbHki95m+C6WtbvtWlW31xK24oPVHumSKUpCXnB6LDRQ70eoBa5/HNQJfUFNusk8AMz8ABqSBxUmGMNR2QgcybAfMnQAm+Ssxh+4bA8vyuBhLkHI8evN6hu3CzRshs71vVdYzfHmLj+YPeKQpKlNq6XAlQUUcd6k6qP+IVlrmJ627WJlrd6Lr8vilHSeFKjOKjMvJ/MpLoSfz1eCrNh9PGPvFvYAz/VB4g8hV0tfhOrQe5cI/pnkY0kqUpUKUpSlEXylSo0KM7MmSGmI7CFOuuurCENoSOSpSj2AABJJqHou7XR6Y9AltSr0nGrrckWiDljlpeRY5UxaihDbcsjpUlSwUJd48pSuwWSRUF7yM8u+tGu2CbEsKuj8WLkik3rP5cRwpcbs7fLhidQ+b5qUEq/xmh6KIOx+JRGx/D9iOYWS2wo9vhwm7TBtcdlIQhgomxw2lsD5vSlJ44+ArM1Ayn47vZxQBqRIDj3kN3kGbZ6CnjqCiM4knQSJHpc7gRqZFoMvyi04Ril5zO/PeVbbFAfuMtY9UsstlayPr4SajHafuPi7p9Jm9VoOE3HGYztxkwGo0x9L3nJaIHmoWk
J6knnj0HCkqHfjkwLuy1Ay7K9j+nmDWFS3s01wj4/jsdHJ61KkstvSVn+L0pUFH4BdWu0f0ysOjWmGM6XY02E2/G7c1BQoJ4Lq0j8I6r+MtZUs/Wo10eGab6wfcNOAcxdx7Fo68Fzip4lOkW2LhiPI2A74u3FbjSlKzWiUpWn6faqYrqbLyqLipluJxC+v45PedZ6GlzGUIU6GlcnrSkuBJPb3gofDmguYG6ekgfEjuhsJO+Otz8Aey3ClKURKUpRErRtQ9ZsG00n2yw3qVMnZDfCoWuw2mIuZcpoT3WtDDYJDaR85xfS2n4qFZHVDUTHtJdPMh1KyqR5Vqxy3vT5B5AKwhPIQnn1UpXCUj4lQFVo8PvH8j1Dsd/3g6pJL2YapynPuclfKk2qxMuFMeIxz8xBUlSzx84BBPJ5NQzzvI0aJPWwA4m/IAncDLvIwO1JgdLkngBHMkDiLG6a6sYdqrDub2LyZTcyxzVW272yfGXGm26UkAlp9lfdJKSFBQ5SoEFJIrcqpBoNlri/E83CYtbHSbbKx60ypSEn3fa47MVvqI+nh9wVd+pb56VOqPfaD8QekgxwhVPlqPp/dMegPcTB4jTJKUpRSlKUoiViLtllhsd5smPXOd5VwyJ96PbmQ2tZeW0yt5zukEJAQhR6lcDngc8kA5eoiCvlXukUPnxdPsRA+pM26yP+0liB/M99dBdwbvn0BPrEcyhs0u3R6kD5yeEqXagPQbdvj+v2rup2l+M4fcY8TTeYmEu+LfS5GnO+YttSUpABQeptfSOVdSQT7vpW9bg9TY+jeiWa6mvrSlWP2aTJjhXouT09LCP851SE/pqJ/D00GkaG7c7R8oI6k5Xmazkt/ccH4Xz5ACm2lH15Q30Aj98V/TSl5nvJ9loHVzjbsGuPZKnlY0DNx7Bo83clo7qzVKUoiUrT9StVMV0qiWKVlBlqVkl9hY5bmYjPmuvTJS+lsdPI90AKUo/BKSe/pW4UFxI3x1gH4Ed0NjB3T0uPiD2SlKURKUpRErw3y9WzG7LPyG9y0xbfa4rsyW+oEhpltBWtRA79kgntXuqJNzzy52mbeBR1kSM+vNuxVIB7liS+n2s/oiokq/RUGTDW5kgDmTA9SrNwgy7IXPIXPopRtdyiXm2RLvAWtcWcw3JZUttSFFtaQpJKVAFJ4I7EAj416q/lttDTaWm0hKEAJSkegA9BX9VZ0ScOSzbiwjFmlKUqFZKUpREpSlESlKURKUpREpSlESlKURRZup/cy6sfyLvP9TdqoHgn/wC8Bm38rVf1Rire7rnW2dsWrDjq0oT8jLyOSeByYbgH+k1UDwTXmlaC5wwlxJcRlnUpPPcAxGeD+ng/zU2X9ttH/wBtn96bT+yofjd/Yo63KaaN2fxSNLsbw3Lcisa8mtzMh64G5PT5UVx72tp5cdctTnlqLbfCfVKCeoJ7cVm/Ep2kaM6IaIW/W3SWyTsezOyZFDK7yi6yn5UwulXLjzjriip3zAlYc7K5578GstuYWg+LjoWAocps8QHv6HzJ1Sh4vjiEbOZqVKAK8itiUg/E9Sz/ANQNc8mnsFOoyxFVwnWBUbblddEB+2va648Np64HXU3QLYjX/a9jVzy+732FIv8AicS6SX7LeJNteMh2EFKJXHWhSk9SiegkoPbkEVVLwWC4/o7qQZTq31uZYPMW6epSyYqOSon1J+NW22+OIG0nT50rHQMBtxKue3HsCKqP4KTiFaP6jJSoEjK0KI+oxkcf9Rr0HNDdt2toFsI/7i4GEu2PZ3HPF/4FaCxoxpDI8XaZptI0vxR3EzYjJNiXZ45gF02xLhX5BR5fV1kq5456jz61aTfho7hds2eZknFGrhisTD7I6/arbj1xftlvb/ChakriR1IYdSrqV2WhXHUSOD3qB3Zka0eNWlVyeTGTcLAGYxcPSHVqtPCQOfUkoUB9Yq0W+e9We57N9YV226w5aYlmkQpBYfS4GZCVoCml9JPSsEjlJ7j
kc151f/6a0jMNceoc74WXfR/58HUs7FrfjB53Wu+GD1nY/p95ZAXxc+kn059vkVrW2bQi/wCkTWpuq+96dg1yvmSXoqZvd3ksyUtwAkhLSXHwEstKKvcZTx2ABHoB9/D5yn5HeHTj+YMw1TVWG1324ezoPd1TEqU50D6z08fprQvDkz/H9xMDM9x2u2VW++Z9ab041Hbub6BGxm2+UhTZiMLPRGSpSnAXQApXRwVE9XPo7R5tsrRo286AuGXEkXvlPALz6Hl2Zk6vMcSMWfAT3hYrw5sxiwd2G4LSXBpbjenjMxy9WO2hC22IhMro5ZaWAW0qS4BxwOQhH0CvJ4lP7sTax/l1n/vKJXj2MZfi978RDcfklrv0GRap8eVKizEvp8l9gTWvwqF88KRwQQodiCD6GvV4lTrad4u1oKWkFF7ZUrk+g+6UTv8A6D/NWOzXf+jnHM4efvi/QQt69m7eBl5v/E/G6tLvrwW35Ztnz+7Sb3kNvlWDHJ9wiG13qVCbU620Vjzm2VpQ+n3OOlwKHBPAB71HfhMEnZdjpJ5Ju11/rKqmbeItDe1TVpa1BI+R90HJ+uOsVC/hKrSrZdjwSoEpu90B+o+0KP8A9mo2Wz9oA+4z+8qdpvToH9939izk/bztS21aL5DqNrRpbgeQv29yde7zd7pYY02TMkPyFuJabVIQpXJLiGkIBA54+s1WTw29uzOs+rV93sZngdmxyyC4PJwuw223tRITLo9wvttNpSnpZSOhKuOVOdaz7yeT6N0eV5D4gO6q07QtMLm63p5hEszcvu0Y8tuPNHpeVz6K8vkstj0Lq1HuACL+3y8YNt000sdqtVpaiWeBJteNWe2sLCCtyQ+3GZbRz6nlfUfUkJUT8TUbL5QNo+95WDhli6mze4gptPmJobvM88c8PTN3Yqo2pOuFp1a3k5NpLm2BZhm2nOltuZ8zHMfsjlzYuN5e6FF+eyg8ONNJKkIQ4CjrBPBNQ9utwvI06gYLrPsk2xanYNl9hkOIurcHBnrVFlse6pvrbaT0L5IWhYI95KgDzwKyDWqsPZj4oWfTNUlrt2G6psIdRdHEnymUvdC2nyf3iHkOtKP4vJJ7Cr7Z1ul0HwLFflTM1LsF1TISlNtgWi4MzZl0eX2bZistKKnVrUQBwOO/JIHJqlG2z0awPmFzGeOTIO86RugDIK9X/kVaRHlyE5YYEHhv531Uh4rdJt8xi0Xq52t+2zLhAjypEJ9BQ5GdW2lSmlpPcKSSUkH4io43XYbiWZ7e89Yy7GLVem7bj1yuMNFwiNyBGlNxHS2+2Fg9DieTwodxz2NSBhU/JbriVpueY2hi03qXEbfmwGXS4iI6odRZ6z84p56Sr0JBI7Vre4Jh2ToNqPHYbU445id2ShKRyVExHeAKp+kf2NW0Wd0zy5aK2wftaQmbjr/tUt8I7R/SjIdukTUW/aaYvccqt2VTTDvcq0sOzo/lpZLflvqSVp6SSRwe3PapbyXbzqvm2+FOr2qb2PXnRrG7EoWO23F8Otw5hZSFumMsdAdDnmLLyueEdIB5AAj3wg8pxu2bQLi5dL7AhN2vK5qZjkmQhpDHmIj+X1lRAT1dQA59T2FYCzaus7lvETyrQzWW7JawTA2pLWP4jId8qFeLiwtpJdlNngSlcF1xDa+UgJHCeyue6tfa6bGZ4Z4D7MSY33txi646Ntme92WKOPtmBytfhK1/UnUDAMQ8TDR+67e7rambfl0dFlyf7ggJt9yUp51s9RbAaeWkFHKklXCm08kEV6fGfw9i36c4lnEK/ZCmVcMhTb5MFd5lLty2/Z1rSUxFLLKFBTQPUhAJ5PJPbjx7tMuwi6eJRtyt2MX20yWrA9Ct8tEJ5CmobpmL6WFFB6UKAKfc9QFJ7DkVs3jSS4srQLBXY0lp1BzHpCm1hQJTFfChyPoIIP0EVw57HSduqkA8PEb6XMc13M/5tRv/APrB64Heth2UpM+HjpXnt80+1szLLcskZ9Z
nIF4n3Ni4kNXBxoIcbYDKwpLDDZSEoS10EIHCipR6qjTSLK4m5DxItVMb1cgxrzatNbdIg4pYrk0l6HFU3IaZelJYWCgvK6iesgkBYAPAHF9cOUleIWNaFBSVW2MQR6EeUmqI6iWbRbLt8lxzDR3XGx6T6iYTHS1ll1uL8dyLeXnW+gREwXnGvPUlCR5zqXEhP4MAKX76Ot5wbaGe6PEtuJEYunztc34aPn2LF7xFO+8Azh6/K9sot3U6C4vpjv8AtKbbgsW44/iurj7Me82nHLg/akl4PeU84j2ZSCgdLjTnA4HUFH4mrBb0tTZmgOF6Ubb9N7hlTbmeXduzvzIL78+9os7S0e0iO4pRdXJc81KEq56vncEHgiTME0ewrL9ZbdrjqFrHZNSs2scFcGwNWttiJbrQ0rnzHGIqHnll1XUrlxx1ZAPA44HFcfFoj5XgGSaIblMegOS42BX5YlAA9KHC4y8yFEeiV+Q4jn6SB8RXO2KVOjRqHymp5jphLvKOIGUZQSF0matSrVpi+Dyj94NueB1nOQCv43G6b6EahaF3nC9MdlupFgy6LF8/H7vG04djS0zWyFI82WkeasLIIWpZUT1dR7gGpG25WzUXU3Y1esS3baaTZF8xePcY7DeXWrqdlNMxi5FlFL6T1LR1lAc9eW+eeeTU76d7stvOpWAxdRbLqzi8W2vRw/KbuF1YjPwFccqbkNuKBbUnuDz2PqCQQT4bHqyddNL9RMnxS0rOHm3zbfjdyWlaXL10R3A/JbQoDhguEIbV6r6Fq9Cmq7WHU6G0NOZbJ4EZEcdOIyyV9lLalag4ZB1uIOYPDWN6p94P2j+k+X6GXTOMr00xe85FaMzd9gu0+0sPzInlx4y2/KeWkrR0rJUOCOCSR3rpbXO3wbMox2z7Ys1VeL3BgItGWPyZzkqQhpEZlcSOEuOKUQEpJQscnt7p+iuiVd21WeAPut/tC4tmHlJP3nf3Fc+vGSwq0O7eoGeiRdUXWNf4UAJTdJPsqmFtvFSTF6/I55Sk9YR1e6O/Fbdadj+jOsG2yz5Rqy1dcsy66YVFkRr7OuDyF2vmElbLURltSWWWmj08JCD18ErKySax3jHKSnaRHSVAFWV28AfT+Dfqymky0I2uYc4pQCU4FbyTz2A+56K8022PanDMOEcPsybbrrvknatmG9p6+cC+9VY8IvVC8XDatkkPL7s9Jt+B3qUxEdeWVFiCI7bxbBP4qVFwgfAK4HYVrW3nWLDNwTGRa5677fc61Nm3m8youPstYWu92eyWlohDceMlQLSXSeourCetRI5PwrweEVY/lZtc1exZp5KF3i8yoIVz83zrehAP/wBdWM8LLcniekVuyjajrXeomI3+yX2S/bVXZ5MZp1ZIQ/FK1kJS4lxvqSCR1BZ454r0Kgx7W4Oz8NhHEnDiPONdxI1XG37PZwW5eI4HgAXYek/AHRNFMb1Z0h34C+6F6H6l45olm77TF4tdwx2RChQluNcKd8tQ6GktvALSofNQpSeye1eTeZpdpqjxKNDbGjAMdTbsrXFk32ILYz5N0ecnPBxySjp4eUoAAlYJPHer15huUwpjKrHpjplebRmOcX+U2lu22+YmQi3wgoGRNlraJ8lptvqICiCtZSlPqSKbb4pkazeJVtuvF0dTGhAQGy+4elAP3QdHcnt6rT/PWNG9fZGZgPieEExxA/xMi2lb9jtL9SyY4yBPM/51VsNc9pGlGoG3bKdFcP08xvH2Zkd+daGrbbWYrUW6hILT6UtpASoqQhKlDuUcg9qr14Sd4wzLNuWTaPX7D7Qi9YreX4GQRJEFsqnMvKUtpUlKh+EIV5zXCueA0BV9Y13tM2fMtcO5xH5tv8v2yO08lTsfrT1I8xIPKOodxzxyO4rlnrJds22Qb9smvGmmPP3CHrrY3k2eCyn3Den1dLZ49D0TAFkfBEg1Rji2q5hEio2I3ubdoH4gC3srvbjpBwMeG4HobOJ5TPdebZbF0j0
Y8SLUzR63Wy0T7bc3pjGKz3IyVqgSWFl5UZhxY5T0p85olJ7lgDmrba16eYRrxu+02w25Yva7inTe3P5jkEp2Khaz5ivJt0NayOSlTqXXi2exDQPHeq4b1du8zbLo5orrvgPMrJdHrpHN+npB67gqS8Hnn3VepCpZWO/wkkVbPZnDueVYhkG4nJre7EvOsF1VfWmHv2yJZ20+TbY5+HZhIc7fF5RrWiPIxpMmjLSd5EYCNb4sQ/BBzWdU+d7wIFaCBqAbOB5ARzeCFBuvWok/cNvwxbZg/dZMXT6xQzecthRnlNG9vJjmQiK8pJBUwElkFHPB61888J4mDdBs00k1J0cusHCsDsmLZZj8Jydi92scFuBJhy2UlbSEuMhJ8tRSElPoOeRwoAiqO4Fidtg8UvDdfclQuPhWfeVEduS+zDC1xRCeQtXonoPlOnn8VXPwPHSTP8xsWCYHfs5yCczHtVmtr8+Q8tYCfLQ2Vevx544H0kiuSpb9HCqLO8xJ1DwT8BhjhkuhpP67giRDYGhaRl1MzzVJtlOst73kbR8oxTVPJchRk2El2BKutqu8m3TJTSo61RnnXY60KWrstKkqJSstgqB5NQV4a22zGNz+gmfWLV2/XyZjiclWWLfCnuRibiqM3zNeWk/hloT0+WlYUgEuFSVEjpmfwsdIcgwLbHnepOSwHYCtQ3Xp8Bh1JSowGGHEtO8H0C1OOkfSkJPoRXm8FNaFaFZ4gKBUMuJI57gGIzx/1Gu5zA7aa4cP+nTJH7xInrPYjeAuXFgo0sBt4jwDwAMdI7g7iV9d7uhWNbb/AA0rlpHid2ulyttou0FxEm5OJW+tTtwDiuehKUgAqIAAHb6TyayOhmp9t3DaEYJtK0ou1jfmQcFtZze9S0tSU2WMthKFMxY6+fPmHkp6iPLYJBWSvhFbv4spA2VZOCfW6WoD/wDG0VVvL9DM20b0R0U37bcGSxkOO4jaTmFtZSS3cIYjISp9aE/OT0DodHr09KxwUFVc9J4e2sdo9g1Gz/8A87E/uznGnCVtUYWupCh7eBxH84JjidOPGF0c0Y22aKaA2CFYdMcAtVtVDR0quKo6HJ8hRHCluyCOtSlfHuAOeAAOBVI9C9wGlGPbvtbMN3csW6Dmd1yRUPHLrkcZLkRm1IKksQm3HQUxm1IKHAfdQ518k9XHN2tuW4PBdy+l1s1MwaWnokoDVwgKWC9bpYA8yO6B8QTyD6KSUqHY1DebaFbePEMwSfe8sx9NuyWwXe6Y392LY8kXC3vw5TjQQtXTw6hSUocDbiSAHfd4J6qvV8SnXxOGIgEEcCW3By3AaEO3G9KfhvoQ0wCRB4gGxGe8nWRvCk3QLQCwaGZVqNMwmLBhYrmt0iXu2woZ4birMcIfQhIHSlsrHWkJ7AL4HAAqZq57eHMjWHR7XjVnaHl+WvZTimARWJdsnL6iiIt1SC22jqJLQcacKi1yQlTSun1JPQmrOgsY9hlpaI5Cw7QoEhzmPEOBM8zeesyuZ27BGca6eJZprpRgxs7rmm9qbv6GryHVQEyhzKUp5LXvkEIip7fHipc3fajbuNKNuOcZXmN00fbtrltXbF/cuPc0zCqWRHHklxzo6x5vUOrke6TxXmwHFV2jxY9R7xdmilV407iTrWpY+e2FRGHCn8ymVg8V+bwbVP3ba44ZtCxVxxzG8bls5XqPPZ5LcRgAiNDUodg84krISe/voVxwk8cwpmrstKg3/qFxPCXuDz0a304rfxBT2h9Z3uBuWsMaWjq53qvfsk212e7eH5YtK8+RcI0TPWHLzdERHvJeUzIfDraOrg8BTLbIUOO6VEVcmDBiWyFHttvjNx4sRpDDDLaelLbaQAlIHwAAAFeGZa5sTGnLNhj9vtEliJ7NbXJEJUiLFKU9LfUwhxorQnge4HEcgcdQqqOM5nrXaPEGt2kmcaru5DZVacv3tUGHbUW6CmSqUEdSWErcUogN9i44tQ6lcEA8V11KgrbQWts
HTHJrXEDo1sLlpsNKgC65GfNzmgnqT6K4dKUrJaqLdedy+jW27HFZBqpmMS3uONqXCtjaw5PnkdulhgHqV37dR4Qnn3lAd6gTbtrLo1PyvJNx2s2vWllqzLNGGYFusfy0tzhx2xtEqYhqUHuC8tSi68R26yB+LVidatv2km4PGHMW1Vw2DeGShSY0tTYRMhKP48d8e+0oHg9jweOFAjtUC7dcAwXHc3yTbFrBplhF7yTEYjN1sOQPY3CS7kFhdUUNvugN8GQ0seU6R849Ku/JJijON0+1BjlrH70Z/u4otiU1YwCMpE85t0n+qOCmz9lTtg/wj9Lvthbv9tT9lTtg/wAI/S77YW7/AG1Zf+4JoX+RfBPs5D/2dP7gmhf5F8E+zkP/AGdSoXht25jbheLhGtNo3AabTp0x1LEaNGyuA6684o8JQhCXSVKJIAAHJJqSa0qDoloxbJjFxtukeFxJcZxLrD7FgiNuNLSeUqSoNgpII5BHcVutTaOKi88FH+4HP16WaH53qIyoJfx/H5s6OT/6dLKvK/8Ar+mqM+Gjp3ucxnbwM407d0zTFz26ybu67kjNwcnOFCiwCpTCgno5aWoep98n41bbe9jlzyzaVqpZLOyt2W5jkl9ttA5UsM8OqSB8SUtkVr22bOMI0l2HYBn1/uLMLH7DhMWfMeJHzg31LSn6VqcJSE+pUoD1NZscKfjVXHIMHQl5P9oV3tNQUabRmXHqA0D+4qpmcJ1c1z8TXS3TLU17FpbulzCL7MONNSUxGEgCV7/nqUrrKhGST2HvJq/mqO4rRfRtMxrUDPrdBnwYKrk7a2lGTP8AZgFHzRGaCnej3Ve909PY9xwagPYLo7lXtmb7tNWLS5BzTV+cqbEhPoIdtln6+phk890lYCDx+9ba+PIqZ93v7lnVn+R12/qy6jaHP2TYw0iHNDnOH7xuR0sDxBU0Wt2naiQZaS1oPAWnrcjgQt401z+xaq4BYNSMYRKRackgM3GGmU2G3g04nqSFpBICuD3AJ/PWy1Cmyj9yTpJ/JO3/AOqFbLrZuG0d262KDkeseaM49BuUr2OIpUZ+Q487xyQltlC1kAdyrjgduT3HPTtTW0Kz2DIEgd4C59mc6tSa85kA+klSNX4SAOSewrF4rlWOZxjluy/EbzFu1mu8dEqFNjL62n2lDkKSf/seoPIPesk6guNLbB4Kkkf6KxqYmA2uNOK1aQ6CDYrlrs5a1x1m3U677mNMXMLU791XLAw7lLctxCYyneW0M+zkEFLUdgHk+ihx619fEtvm5XIbNpzt4zyTp9Jkag5G0qHHxdmcmQpTSktJ832hZHllclJ7DnlHr2qa/CmxwYXo3qNYrqgR7ra9RbtEuYc4SpC2W2E+9z6DgE9/rr4aP4w9uy3mXfdZcI6ndOdNWl41ga3EHy7pMQVpkTmueym0rW70rHqS3weUHiWMYTs9HNjWscfwhod6vIA58FNR7mmvVFnFz2jnJaOzQSeSs81oRhCr9p3k05uU/N0xtj1tsTXmj2drzWG2Vuqb47uBtrpSrnsFK7dxxI1V01p1Pv0vcXgW22NJy+w2jLLVMusu+Y/ET1uuNE9EVUpQPsrfCFqW42C5yppIKAoqMW7k8t1D2S5bp7qLimpeUZRguUZCzjuQ4zk1xVdFI80FSZESQ9y+2pKUr5SVlJIT24NWDzWLS733ED8RcRfdLreuV1TAKLS0e60H+ECfhf8AzZXdpSlVVlVvcxvi040wlL0mwDNsXnanXRaoEdE66sRrbYnDyFSbjJcUG2g33V5PJcUQEhPfmsloPqZtO0O0wtGn0Lc9plcZMYOSrpcncvt/m3G4PrLkmSv8N6rcUo8d+BwPhX9bk9muCasOHU3B8dsVl1Ts5My23R23NORrm6kc+zXFlSSiQ0580qUCtPIKT24Pt292XQPXLS22Z0dBMGtVz63rderWvHoZXbrnHWWpMdX4P8VxJ4J9UlJ+NRRnA6fatPLSOE56zE2
wqasYmx7OnPWeMezwxR7y3H9lTtg/wj9Lvthbv9tT9lTtg/wj9Lvthbv9tWX/ALgmhf5F8E+zkP8A2dP7gmhf5F8E+zkP/Z1KherC9ZdINSJ79q071Vw/KZsZn2h+NZb7FnOtNdQT1qQytRSnqUkckcckD41uNa5jOm2neFSnZ+HYFjlhkvt+S69bLUxFW4jkHpUptIJHIB4PbkCtjqTGii6oV4wOY3iPohiOkePrV7dqHk7ENTaT3daZHUEfmLy2P5ql7CsS3haW6eWXD7VO0NjWfFrSxBZU7Gu3KWGGgnqWQsDnhPJPbvzUceIXiq7lrLtZyWc0VWaBqIzBlrI9xLj7sdbQV8O/kL/mqSd+GqF8xjSJWk+nMdy4ai6qrVjOPW+OeXeh0cSpJ47pbaZKuVnskqST25rFpc3Znmn7bnkRvIawN/unqtSGvrMD7NaySdwLnYj2b6Ku3hTxch1O1J1y3R5Shkv5TeBbWHWEqSypXWp95LfUSoISFRwASTxxzVvLtuax5ludcMR031Bzaz21xxqVeMcsqX4fW2SHAypx1tcoJIIJjpcHIIBJBFf3oBt5suhm3q06HWqapC2LY6xcLjGHQ49NfSS/IST8etR6efQJSPhX10G0si7X9EIuD5HqdcMjtuMNSJDl6vSg17NEBKwjupQQ02gcAFRAA7cDgDorYaf2bTDKbQAdDGfIZnSZzEQcaeKp9oRLnuJI1AOXXIdMjMjZdI9ZNONdMPaznTDJWbzanHFMOKShTbsd9PHWy80sBbbieRylQB4II5BBO61Sbw3LBebjetc9co9mkWrC9TswNxxWO82Wi9GbcklcpLZ9EOecgA/Etn4AVP2R7sNvWI6vw9B8k1Nt8DN5xZSzbXWXunrdALTanwjyULWCClClhR5T294cxElgiHOAMayWgkdMt6mwxGZDSb6QDE/VlLdYzJ8lsmG45c8tyW4NQbTZojs6bJcPCWWG0lS1H8wBrJ1X3f7jGU5hs/1MsWGsPv3Ny1JfDLAJcdZZfbdeQkDuSWkODgevp8a59oqGlSc9uYH0ei2oMFSq1jsiQtI0Un57vbjSNXs7ul6xbSJ+U7HxXErZMcgv3phtZQqbcpDKkuqQpQITHQtLfunq6x3V9NH9sOjeeYvmuYWrEo+MS79lFzGPXvHibfcLdFiKEJhxiQyUr4UuKt/pUSlZdJUFdR52nSrULG8M2G41n2GvMPw7Jp9HchpZIIVMbiBHldvxzIHQR69XIPev6zzUi0bQttWOY0y2u7ZkizR7HjVjipLsy83gtBPCGk8qUPNJWtXHYE/EgHo2ljdnfUYwHy+Ub3EnMa4paOWKBAgLDZ3OrspvdAxebg0AZHQN83XCSbytD2yZCN6mgNzwDXeS9dLnp/mYtF5kRFJZbvK7c+h5hx1IBSUOcJDiRx1FBI457XFAAAAAAHYAVXbYht4vW3TQqPYsydS7l+Sz3shyEpUFBuY+E/geodldCEpBI7FXUR2IqxVWqDCcJN7TGRdhAcepGltyqwh0uGUmJ+7iJaOx570rWtQdScC0oxmRmWpGW2zHbNF7OTJ8hLSCrgkISD3Ws8HhCQVHjsDWy1g8zwfDtRcek4pnmMWy/wBnmJ6XoVxjIfaV27HpUDwoc9lDuD3BFYvxYfJnxWzcM+bJUywvchopuN1rtmtGcav4LiuA6eqkIwuy3vJYMW4XO5OJLbt1kxluhbKEo6kMoWAv3lLITyBVl/2VO2D/AAj9Lvthbv8AbVXfFNEdM9sGt9m0kv2nuM5BpjqU8+jE5t4tEeVMsN3QkuKtrkhxBW6w6gKUyVkqSpJRyfWrP/3BNC/yL4J9nIf+zq4jw24MvWdZ4+kRHlhUM+I7Fn6RpHDP+LFN5WI/ZU7YP8I/S77YW7/bU/ZU7YP8I/S77YW//bVl/wC4JoX+RfBPs5D/ANnT+4JoX+RfBPs5D/2dQpW52+4QLvAjXW1To82FNZRIjSY7qXGn2lpCkLQtJIU
lSSCCDwQQRXor4xIkS3xGYECK1GjRm0ssssoCG220jhKUpHYAAAADsAK+1SYmygTF1VbUjXHPtXdwEjaloBe/uF9wIqJ2fZk20h520sq46YcNKwUe1LCgOtQUEckgcoPHzuW2jRW46/YhhUzCo+SJseP3DI75cMjcXdp0151SIkTzpEkrWeSqW4ACEhTSSkDpHGn7AbM5iGue5zF8uPl5i9m33TcS92dkWx1Ty4zyOe6mz5hPI7DqFTlpve7OrOdY9Y8iukWBaIVzZxxqdKdS2yzCtbHLy1LUeEpEqTMBP8WqU8IZTe7VmN3UC06Bpc0R+6Sbkq1SS97Ro7A3ofUuaHEcCItCi3DsvvGgO9a37YId9uN1wLUDG3r/AGCDcZbkp2wzGPNLrDLrhU4Yy0MLUG1KIQSAngcg29qk23vGr9uR3b3zepdLbLt+DWK1qxjTxMtpTTtyZ95L08IUAQ0rzHugkDqDv8Q1dmrjF4TMftQZ3wXOLZ44cPEZG6q7D4j8GUjlOETHDFPMyUpSlQpSlKURKUpREpSlESlKURKUpREpSlEUd6taBaZa4wV2nUyFfrnbXWQw9bo2T3SBDfQFFQ82PFkNtOnk+q0k9h37DjTtMNke27Ra6qvOlOIX3F5Li23Hxb8yvbbUktnlAea9r8t5IPPuuJUnuRxwTU60o3yGW2R3nEOuoHyTY9tqy7UBvVXI8Pv03L2XUPMXleaXxMqOpB5R5S0zAWgn8UI4A+AFZ7Vratojrra7ZY9WcevORW60IQmLEkZTdkMhSAUpdWhuSkOu8KUPNWFOEE8qNS1SowjCGRYXjjvUycWLXKeCiy27Z9JLPpwnSS1RMpiYmhBaRb2c0vSehkoKCwlwS/MSz0kjygoI/i1i9Htnu37QG5vXXSDErtjTslSVyWY+U3ZyNJUkEJLsd2Spp0gKPHWg8c9qmelWxHEXzc5nVVgYQzQaKD9dNmG3rcZk9rzLVHDXpd6tLSY7U2HcH4brjKVFSW3C0pPUASSD84cngisrle1PQnM8Ag6VXvD5jeHW5hMZmxW6/XG3QVoSvrHmsxX20vK6/eKnApRV3JJ71LdKqGgNwRaZjjvViSXYtcuijPR/bjpDoLa37FpVYbnaLTIS4ldteyG4zoQ6yCtSY8p9xpClEDlSUhR7jngnmL7Z4bOza1Z4vUFnSCM7LU+ZSIEiY+7bm3SeeoRVLLZHP4igUD4JHarOUq0nEH6jVVgYSzQ6KBbzsZ2xZBrInXW8aax5OUBxp8hcl32JT7SUpbdVGCvKKgEp9U9PIBIJ71+6gbG9tGqmVtZxqHhl8v19jkGNNl5le1LjcLK0hniWAykKJISgJAPoBU80qBYNA93LhyUm5JOufFRpm23XS7UbDG9Pc3Yyi7Y+ltTTkN7MryPakKUFFMhaZQXIHIHHmqXxxwOBXi0n2t6KaG2ybY9K7BerDbLgh1L8BvKbs9FJcSErcSy7JUhDhAA8xICxwOFCpYpSM+OfHnvTdwy4clXbGPD92qYS7LfwzBshsDk8pMpdrzq/xVPkEkeYW5o6uOpXHPPqfprLx9lO3NrKLJmcnFcguV4xua1cbXIuuZ3u4CNJbUFIcSiRLWgkEDsUkH4ipypUglpBGYy4KCA4EHXPio01v24aL7jLIxYdYMGiX1qGpSocgrWxKiqV6lp5pSVpB4HKeek8DkHitK0V2H7W9Ar+1lun2mbIv7HPkXO5S3pz7HPxa81RQ0rgkdSEhXBI5qwFKhn2ZJbaVLvOIdcJX8PMtSGlsPtJcacSULQscpUkjggj4giv7pUEAiCgMXCrxp3sC2q6W6gK1KxDTVLN3TJ9siokT334sN7kkLZYWstpUkklJIJR+L01/esmwra7rxnKdRtRNPVSL6sITLkQ7hIiCaEABPnJaWkKIAA6hwrgAE8AcWEpU/dH3cuCmTJO/Piq/wCo+w7azqjYcXxnIdLokS24cFt2li0vuwA20sguNqLKklYUpIUSrlX
PJ55JJ+2c7F9rupNvtNmzLTmXOtVhYTHtVrRkt1jwIKAkJ/ARWZKWWyQB1KSgFR5KiSSanqlDex3z1381AtlujpuWlYhguE6IYbJhY0L2zY7TDLgYnXufdTHYZbJCGfa3nVISEjgIQQOwHHYVRbQ/Q/a94jz2abg89xeJDu8+8rgs2izXBcSVBisjhuRMCFfhZD4PUpZT08JSlPcKJ6OqSlSSlQBBHBB9CKqVm/hf7Wcuy1/NbJAyfCLlKcU6/wDJW8GE0tajyohtSFpbBP4rfSn6qrd1UvqXkQN4Mi/GQI4KbCngZa8ncRGXC9+PDWLNTPDA2d6VYvMztnPc3wqfbUl62XFm+tl1M0DllDKC11uuFfSEtoPWokBJ5NXEw3EZeeaDY7iOu9jiXufdMchMZLCuDCXEPyiwjzgtPp1eZyeR6KHI9BWo6Q7LdAdGrwxlVjxqZfMki/tF8yS4O3Oaz9bSnSUMn620pPf1qc60MYDTNwYtoInLnN+Q3KgnGHixE87x8ItzKqlZfC82UWTIhkbWki5akOh5qFMvEx+I2oHnjylO8LT/ABV9Sfqq0sKBBt0Fi2W+ExFhxmksMx2WwhpptI4ShKR2CQAAAOwFeilRJw4NFMDFi1VdsS8P7ajhOpLuqlh0xbReFy/b2mHZz7sGPI6uoONxlLLYIV7yQQQg8FITwOLE0pUCzQwZDIKTdxeczmVFWtW1/RTcQmIzrFjNyyCNBUFx4ZyK5RYiHACA57PHkNtFzhSh1lPVweOeK9MHbppZbdNHNIIMXJmsScaEf2AZheCpDAQUeQh4yvOQz0npLSVhBHqmpMpUYRhLYscxv5qcRkOm4y4clD+i+0rQXbzPk3DRzErljipvBlMN5JdH4r5AICnI70lbS1AE8KUgkc9iKwWuGxXbHuEyD5XajadIcvyglL1zt0t6E/ISkcAO+UoJcPAA6lAqAAAIFT7Spd5yC68ZKG+SQ205qMtEttWiG3S2SbZo9gEGwibx7XJC3H5Unj0Dj7qlOKSDyQnq6QSeAK8G4Harolucg2uHq7iq7i5ZXFuQJceW5GkMBfHWgLbIJSrpTyk8jkAjg96lylH/AGhBdeEZ5PZstQ0t0m090WxJjCNNMaYstpZWp0toWtxx51XHU666slbqzwOVLUTwAPQAV+5bpRgWc5biWcZRYGZ15weU/MsklZPMZ11ry1nj0V24IB9FJSR3FbdSpLi52I5qAA0YRl+ea1zUTT/FdVMIvOneb20T7Hfoqoc2P1FJUg/EKHdKgQCCO4IBrMWq12+x2uHZbTEbiwbfHbixmGxwlppCQlCAPoAAH6K9dKgWmNfll8T3Um8Tp84n4DstX1I0w0+1exWThOpmJW7IrJKIUuJNa6glY9FoUOFNrHJ4WkhQ5PBqM7Xsw0RgwIVgubeW5DjtrWhyBjt+yu43C0Rig8tgRHni04lPHupcCwPgKnSlAMJkfUZdtNyEyIK1fO9NcS1HxZeF5OxcxZnUeW5Gtd4mWvrb6SktKXDdaWpspJBbJ6T8R2qPdI9m+3rQe5uXXSLErzjLr60OSGouWXhUeSpHPSXmFylNO8cngLSr1P01NVKDykuGZQjEA05BRbrJtm0c3AtNxNXLFeL7CaKFJt4ya6RIXWnnpWY0eQ2yVjk++UdXf1r7Yrtz0pwnT+VpZjMDIYmLS4vsSrccsuzqWY/JJbZW5JUthJ6iCGlJ5B4PI7VJlKjCMJbFjnx5qZMh2oy4clBelWyTbZoffvlLpPhV3xmeooLphZbeQ1ICTylLzKpRbeSD+K4lSfqr3WzaTo9jV2vF/wAFbyjEbnkMt2ddpViye4RfbnnFqWpbrfmlpSupauD0cpB4SQKmalWJJidFUACeK07TXSLT/SODPhYLYjDcu8pU+5zZEl2XNuElXq9IkvKW68v61KPHoOB2rcaUoTKAQtD1D0VwnUm8WnJ7sLpbchsSHmbde7NcHYM5hl0AOs+a2R1Nq4HKFAp5AIA
I5rIac6WYLpRZ3rLg1iRBbmSFTJ0hx1b8ufJV89+TIdKnX3D8VrUT8PQAVtlKgeUQPrX435qT5s0qm87/AIVy2/8Asic/ryqt7dmrq9a5bNjmxYlxWytMSRLjKkMtPEHoW40lxtTiQeCUhxBI7BSfWqzPbVtcntwbO5NW4PEhk7FhOOJhjT1/7n+xlwuEFv7q+b1dR56vN/0VDLVmOOQxerHtHqQpdek5ozOH0e13wBVpKV4rM1eGLTEZyGfDm3NDKUy5MOIqKw67x7ym2VuOqbST6JLiyP3x9a9tWNiqi4StTuemGI3XUqyatSoj6cksFul2mLIbfUhCoshSFONuIHZYCm0lPPoeT8a2ylRkQdR8xHwJCnSPrf8AG6UpSiJSlKIv5cbbdbU06hK0LBSpKhyFA+oI+IqHLZtH0Ttj0KOLLc5dhtU1VytuMTLvJfscGUpZX5jUFay12WpSkpUFIQSShKamWlBY4hmhuMJyX4AAOAO1V43u6s6YYpoDqRhGSahY7bcjvGIXBNvs0m5MonSy6y422Wo5V5iwpYKQQkjkH6DViKVlWp+NTdTJsQR3C0pVPCeHgZEHsq0bDtWNMMj27aaYDYtQsdnZPasVipnWRm5Mqnxi0lKXC5H6vMSEqIBJTx3H0itg3f6VaJ6n6eRGdY9Ornma4E3/AMAWy0qkInSJ7iSEstKYUkpCwn31LIbSlJUogJ5E70rfaSNqeXOESZ9Ztu4Zwb8Fjs4OztDRoI9Iv88rWUUbV9IJehGgWIaW3B1CploiOKlJbdLjbTzzq3ltIWe6koU4UBR9Qnn41K9KVL3mo4vOqMaGNDQomyjbBpTld/vd+lR77bxlRbVkcC03yVBhXsoT0gymWVpSslPuqI4K09l9Q7VJdjsVlxizw8exy0w7XbLeymPEhxGUtMsNJHCUIQkAJAHwAr3Uqg8owjL6+GisbnEc14b5fLNjNmm5DkNzjW62W5hcmXLkuBtphpI5UtSj2AAFVOv2CSNz2ouPa86zJOKaN6cv/dTFrPdf73kXqX1J6bnNSvjyGOyPKaV76h3UEhRSd53VbbdWNw71hh4duIVp7ZLK6ma5b2sYRcTNmoV1NvOrXIbBSj3SlsoICx18khPTHMTZJuWud1tSNSN+eQ5bjUW5Q51xsTuJtx27i0w+h7yFOJlkpCi2BzwfzH0pRvUD3Wg24fvWzO4aZ+1GFVswtbeRfj+7ffqdcspxXMpSlEStTwTTDEdOJmTzcUiPxlZde3sguTa31LbM11CEuLQk9kBXlgkDtySfjW2UoLGRujpIPxA7IbiDz63HzPdKUpREpSlEWs6i6cYbqtisnDM7sybla5K23ujzFtOMvNqCm3mnUELacQoBSVoIUCPWsHg2hWA4JkcjNo7V0veUSY4hqvt/uL1xnojA8hhpx4nyWue5Q2EhR7q5PepCpQeUyPrT4W5IfMIK+M2ZEt0N+4T5LceNFaU8884oJQ22kEqUonsAACSa5uak+IjtP1hzWXjmpmYXsaaWKWAzY4FqfcTk77ZBD81Y4/vRKhyiOP2wgKd7cN10ppVYOMOOQ+O/ppuzziLSMMDP5bvrlvmAtrW53EdzEzLbhpdDW1gmMIttstjj0AxXVyih5chIRzwG0o9lSkADj3/UEcRfuj2maPa0a22GbYNP7gdSpk22XK9ZNHfktQ7Xa4zySXXveDDkhxDJYaQAVnnrPCUdVXNpWkgVW1W5tIPGRx3E5jdbiqXNN1M5ERwjlwHrfglfhAI4NftKqpVVdbtuenLF8xLDsBN7xM6hZey9eLfYru/Ft77MVC50iQYYUWEOExm0+YhCSVOJJ5qasK0I0ywS/u5jbLI/ccnfb8l2/wB7nv3O5Fv94mRJWtbaO/zGylH1Vs87EbFcsqtOZzYqnLrZI0uJBcLiulpEkteaejnpKj5KAFEcgdQHzjWZozyNgcelgIHCBlxKO87pP+zJMnjfPgEpSlESlKURanqJphiOqUOzQst
iPupsF7h5Bb3GH1NOMzYq+ppYUnvx3II9CFEVtlKUFhA3z1sPgB2Q3Mnl0ufme6UpSiJSlKIov1k0r0rvsd/VPLYEu23nFbbIfayKz3B+23KNGQguLb9oYUlS2+ASW19SDyfdqN9CtquCzNKcQuOq0e95ZcpcdGQS7bfrvIlW5q4SlGU6owioR1rDjqveW2pXIJ5HNWAy/FLLnOL3TDsjjrftV5iuQpjSHVNlxlY4WnqSQRyCQeD6Gss2hDSEtNpCUIASkD0AHoKM8k9I4Zz3kdkecUdZ45R2v3X4yy1HaQww0htptIQhCEgJSkDgAAegFf3SlEySlKURKUpREpSlESlKURKUpREpSlESlKURKVXre7o9ptn237ULJ8mw+2Sr9YsUuEu2XcsBM2Ithlx5sIfTwsJCxz089J5PIPJqofg56SaeZpp5lepeZ4rAv+QW3I24lvl3RoSjCQ2w26lTKXOQ2vrWT1gBXYd+1KP2r3sNsIB6EwOqVfs2McL4iR1AldQaUpRF+EhIKlEADuSa8Nlv9iySF90sdvUC6ROtTXtEKSh9vrSeFJ6kEjkHsR8K+eT4/ByzG7ri1zW+iHeIT8CQphzy3A062UKKVfiq4UeD8DUS7UdqeF7SMFuWDYXkF6vDN1ua7nIkXNaCsLKEoCUpQlKUgJQnntyTyfoANuXYsoEcTNxwtdHWAw5zfgIz72U2157hcbfaIL9zus6PChxUF1+RIdS200geqlKUQEgfSTUT5BuUw217jcV202xbdxyW9wplzuXluji2RmmFONhYH/GOEDhPbhIKj6p5z2vui+PbhdJb/pFlNyuFvt1+abQ5JgOBLzSm3EuIUOoFJHUhPKSOCOR9dUcXYMbBM5dDB9Qeys0Nx4XmMp6ifgZW9QJ8G6wmLla5rEyJJQHWJDDiXG3UEchSVJJCgR8RXorQdCNHMe0A0nx/SPFrhcJ9tx9hbTUme4FvuqW4pxaldIAHvLVwAOAOB8K36tXhocQwyFmwuLQXCCleW4XW12ltt663KLDbedQw2qQ8lsLdWeEIBURyokgADuT6V6q5Y7+tOMZwfeztzu+P/dMP5DkMR6f7ZdZU0LdbuUfhSfPcX0dnCOE8DgDt2qrPPXpUfvuDZ3a/JWf5aNSr9wTz+pXUiZMiW+I9PnymY0aOhTrzzywhttCRyVKUewAHck187ZdLZe7exdrNcYs+DKQHGJMV5LrTqT6KStJIUPrBrVNaNKbFrjpbkelGTTp0O25JDMR+RBcCH2veCkqSSCOykjsQQRyD61ituuhONbbtJbPpHil1uVygWkvOCVcFpU86464pxZ4SAlI6lHhIHYfSeSTb4sXCOOc8osjrARxnhuUlUpSiJSlKIlKUoiUpSiJSlKIlKUoi8tyulss0Ny43i4xYMRrjzH5LyWm0c9hypRAFer1rlp4vmnGM45fdKc3tX3TFzvmQSWp5kXWVJacCSytHS064pDQSVKADYSADxxwBx1Ia/akf4opT89LGc8RbHINPzR/kqBgyIn1I+S/ulKURKUpREpSq+b8cGsWX7XtQZ93Xc0yLBj0+4wVRLpJipS+hoqSpaGnEoeHKR2cChwTwO5rKtU8Gm6pEwJ7LSjT8Wo2nvMd1PNvulsuzKpNquMWayhxbKnI7yXEpcSeFJJSSAoHsR6g16qp54Tv7ivF/8p3X+tLq4ddVan4T8EzYeoBXPSqeK3FGpHYkJSlKyWiViciy7FMQjIm5bk9pskd1XQh24zWoyFK+gKcUAT9VZJ95Edlx9w8IbSVqP0ADk1QDYv8AJjefF1u1O1sxm35Q5fMjXY4TVyYS+LbaUs9TUaMVclgDzOSUdJKwFE9XeqjE9zmsza3Ee4AHUn0PJTZoDnakD0J9AO8c1fq3XK3XiEzc7TPjTYchPWzIjOpcbcT9KVJJBH1isTB1AwO53xzGbbm1gl3hoqDlvYuTLklBHqC0lRUOODz2+Fc490WDMbENvGPbe9Js6yZ6Lqt
nBEyVLlBL0W3cNh6MypsJ6AoFpKlDuoFfPzuKtLqtsC0H1LyPBcus1scwW64LLjvxpOMMsw1SmGlJUlh3pT34KRwse8OVd+9aNDXEOHs4sJ3izSTxw4gOMHK01dLRhPtQSNxuQO+E8rZ3iy1KUqqlKUpREr5SZMaFHdlzJDbDDCC4666sJQhIHJUonsAB35NfWq7b/rPiEzapn99zCE7MbsdmkyYLHtjzTBmrQWmFutoWlD3S44lSUuBSQoA8cgGsq9Twabqm4StKNPxajWbzCmTBtSsB1Ntcy+afZda8htsCY7b35lvkJeYTIbCStAcT7quApPdJI7+tRzku9XafiN+OM3/X3DmLilzynGm7gl9LS+eClxbfUhsj4hRHHxqinhoYRl+v+j8XT6/iXZ9HcSmyXbzFivLZcy+7vuFzyHnEEKERlktBbQPvqICiR2TYnfpoBtfsm1zJ3pGl2JWG7RYoj4s5aLSxFnLuiiBGjseSkLcK1cAt9wU9RI7cjXaQdnGLOwtqcsuZ9kXkEZSs9nIrnDlcidBf5D2jaDO5XDtd1tl8tsW82W4xbhAmtJfjSoryXWX21DlK0LSSFJIPIIPBr1VXrYNpVnGjO1bC8G1ELrV7aafmPQ3VcqgofeW6iOfoKUrHI+CiofCrC1rWYKdQsaZhZ0XmowOI+v8AOaUpSslolKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiizdT+5l1Y/kXef6m7VQPBP/3gM2/lar+qMVb/AHVEDbJqwSeP/Eu8/wBTdqoHgnEf3Ac2HPcZae3/ANKMU2X9ttH/ANtn96bT+yofjd/YtU3FXHXTFvEf0z01xTXfJZcW/wAVu4Q2b66l6Bb33kymVrESOllp3oS31ICk8lXAUojmv6396B5Rtnwi2bodOdwWp0vNbbfIrFxk3e9+c3LS71d0tIQhDaQtIHlAeX0KKentWV3Mf8LhoV/keJ/rJ9Sf4vZA2cT+T65DbOP/AMNdYBxp7FTqtMOFRwnhjaI7HrrkFvhFTbH03ZeG0xxwOPx7KeTOyzWrbpYstx3Pbtg11yDGo15VNtDEVxxDjsQOFsCS04kI6leqQlfA7KT61WHwhc6znUbTDUe+6hZjeckui8pS2qZc5zkh3p9lR2ClklI+gDgD4VZzb4QNpmnxJ7DArb/UEVUjwVCDo9qMAf8A7rE/1ZFd7mNbtm1MAsGiOH2kfBcLHF2ybO85z/4EqLomg+nh8Wu5aZITkLVkk2ly4vLaya5NznJDluDy1qmpfEk9SySR5nBHbjjtVv8AeRiGW6X7TckuOj2ruZYcjCrQ5JYbjzhMemDzQpQemSkuy+eFqCVIeSR29QAKgGO4hnxrXw6oIL2O9LfPbqP3IB4H8x/mq1W/YgbOdWeSP/N14f8A1ya86sS39GsLdGuPUOfHZd9EB23kO1LPVrJ7rXvDbvt9yPZng19v92m3a6Szc3H5c6Qt559ft8jutxZKlH6ya0jbJpJq3rJM1Fzje/iV3E1y9KjY9a5t1dat8GAlJ6vZozLobSOSkeeQVq6eer1Jyfh25PAwzw9cXzG5pWuHYrde7jIS2OVFtmXKcUB9fCTWt7L8quG+6z5PrVrhcvulZoV+ctVlwNp4ps8BltttwOymBx7a6rzOxf6kjpJSkcgJ9DaGh22VWtGQvuALgJHExHKcteCg4t2Zpcc3GN5IxW5DM8QM1jPD+1ayE7idcNuaM2m5RhWIzXJmLSJtwVPciRhJLZYRIUpSlt8LRwCo8FB49TWpeJT+7E2sf5dZ/wC8olfuxhFqZ8RvcnGsiIrcFlElphuKEpaQlE5tPSkJ7ADjjgenFfniUkfsxNrHf/8ATrP/AHlErLZjjf8Ao95
uSWknU+0L8YC2rjCzbmCwAdA3eybdSrX704mesbfc0y3T/VbIMLn41Y5lzSbS3G/vtTTfWEOOONKdQOEkAtLbVyeeTxxWgeFxkmRZbtEsmQZVfrjebpLvF0U/NuEpch90+0q7qcWSpX6TUrbwiBtW1ZJPH/ifdP6suoX8JYg7Lcd4Ppdrp/WVVXZfbrj9xp7vP5BTtP7Oif33ejf8rO2nbPpxp/pte9RtxGZ5vJnx3bjfLzNOoF6jRobBeccQ003FlNoCUNlCQAkknsPUCqyeH5iufbide8h3Bt5JnFg0hxm5Lbx3H5OU3KW1MkgcNocVIfWp1LaeHHOVFJcWlIHSCBm9+Wp2W7qtcrDsH0RnExhLblZrcWiVNM+XwstuEfiMJ99Q/GdLaPnJ4N48YsGm+13R+yYlaIyoGOY+IdpjpQgKdffkPoZSpXHHU4486Co/EqJpsth+saeywb9C75N43BTafMfA1Pmcd2ob1zduFrKs+tG6PB8o3Q3Tb1nGp8zCNP8ACrW3JyB62yZEWXfbk8EFEL2iMPOZYQ2vqX5akKUrlJVxVadxurWEbcdQsM1V2NapX24RXpDjOUYobjcp8GShJSpK1ollR99JcQSDykhJT0nnnftLNTrbt48VHVnFdSpyLVbdTA39z50pYQz5yw29G5WewSoF1oH06+BXRvN85xPTjFbhmubX2LaLLa2S/JlyFhKUpHoB8VKJ7JSOSokAAk1SkcGz0dpmDEk8ZMtO+BaDkI1ur1Bir1aESMgOECHDdJvI1m8WXpxXIIuWYxaMpgoWiNeIEeeylY4UlDraVpB+vhQrSdxNsy+fpDk8vCNRrvht0tlpmT2ZttYiurWppha0tq89pzpQSByUdK/oUK2/CshlZbiVpyeZYpdlcusRuX9z5nHnx0rHUlDgHYL6SOpPwPI+FYrWP/ejzf8Ak5cv6s5Vf0h5KVUtEQD0hTsIxVKYJmSL7/8Aa5s7K9MNad5+1/LYWZ7icwsUVeSy1NzYk1b8y4zyxHPMxxw9aoraekJYQtHUpSiVDpSKmHVPU+/bS8H0X2jXjWyX928iLqchzt5pRk2+zNOKUtTCVeYoOqB8hpaurpDZPHPHH54Mf7lm9/yxm/1aLWl7+80e0D356DbgL8y98mI9vVbJT6UlQbQHnkSSAPVSWpiV8ep4rrrgM2ilRFmvLJ3WZInhNjwsuWiS+hUqm7mh8b7ui3GLjjda1utu223HdOXNStqGu+WW/VWxSY8ll1jIr1MfvDZcSl1DwlLWhZ4UXPgD0lJBB4qf4Gr2qG4Dw+ZOtNt1AvuA5lZsdukq5LtMWOj2qZCQ5yFB5pa2kOeWF/gVNqHmdlcDirf2zI7BerDHym0XqDMs0uMJjE9l9K47jBT1BwOA9JTx3554qDtUNUrJq5tV1mybFGHXLAxj1/gW25kgtXVDUJaXJDH0s+b5jaVfjeWVD3SCeLaSaWzV2agFw/dIEW4HUam+hXXs4FSvRfpIH4gb34jQ5wqmbGtGdRd2u2ZM/VncTqFCx9d8uAEOw3QsTrhIKklb06Y8lxbyRyAhkAJHT1HqJ4GK0tyjV/YXvbse2nLdR71mOmWfqYRaF3Z9Tq4/tClNsOI6iQ24l5PluBHCVJPV0g9PE5+EIQdnMHg+mQ3Pn/8ACRWh7vsZVrD4kGgOB42j2iZicZrIb2trv7JFal+ePMI+byGQBz8XU/vhXoPAp7bRY32XYQ7lgkzyix0GWi42k1NjrOdm3EW88cDvOWpzWP8AGY/8m6J/yllf9lirgbsbzrdYtvuSz9vFlduedFllm3tsoQt5pC3EpddaQvstxDZUUg89xzweODT7xmCPudon/KWX/wBlirJ78d0F12p6CqznGbZHm5DdpzVmtIkpKmGHnG1rLziRx1BKG1EJ5HKinntzXA8gbK6Sf2hy4tpiOuXzC7GgnaWQPc14Offpn8ior1A0EseI7JZuoOpFzv1h1btuIm8ycklZXK+6bd9QwXe
gSC9wep0dAaT7vB6UjkA1ImyvOLvun2a49ctVZ0ybcZ7cq1XGbFluxJD6o76kIeDrKkrQ4UpQSpJHKuT8a1TLNK8Yb2WZbq/qPfjnebXnTmdc3MmvbqXxHdkwFLDcBs/gobXUsJSllKOrtyVE19fCYIOy3HOD6XW6f1lVdZZirbTSqAWDTAyBxOBjpbTIbguXFFHZ6rCblwk5kYQb9b996grY/MyHSje7qBtw1yzPKcgvERCnsQmXe/THm3G2+pwFLSnPLUp2M4lzuk8eUoCrI7qMeTqfr1o5o/Y7xfrbOmypOQ5HKtN5lwlJsENJCmVhlxI4ffdQ2FEdQ4PBFQ34pemuRYLesA3r6aRii/afXGPGvCmx8+L5vUwtzj1SFqW0r6UvgegqbNoGQMa+5fnG7n2J9m35MmJjOLNyUELZtUNHU+Rz/wCkmOPc/wDJCs6BNZjMWdKcXGPYO7zEtkbg6VpXHhPeW5VAMPCbOHDCAS3jCs7EiswYrMKMlSWY7aWmwpZUQlI4HJJJPYepPNRFvI/cpatfyQuf+oVUx1Dm8j9ylq1/JC5/6hVcu3GdmqE/dPwK6diEbRTA+8PiqN7E9rmSbjdpNiYzHXPNcWw6NNuLNsseIS0QS+77QsuSJrq0LLx6yUpb4CUpQDzyo1/Gh2e6zbKN58PaNqhqDdMz0+zQtNWSVcnlrWyJHUmM60VqJa/CpUy42FdPPKgPTmw/hOfuK8X/AMp3X+tLqLd1eFv6w+Jtofi+Ltl+Rh1sjX+/PNjkQ4rExx9PmEfNKulKQD8XU/TXpVRg26mz3XwHcRgkk8omdOgjz6ZxbHVf7zZLeBxwI5zlr1Xgw6Zf8M8XF7S+y5jk/wAkfuO7MRZZV8lyojbi7Z5iuG3XFDjrJUB8Ce3HArJeLxGuGnum+NanYFlmU47f7jkiLdMetmQTY7T7BjOK6VModDYILSeCEg+v01iFvswvGsT7Y6ln2nHuhnzD0+Yo2jsE8+p90/zVmPGsuMBrQbCbW5MZTMfyxL7TBWPMW2iK8FrCfUpBWgE+g6h9Nee8n9V2d2sgf/tI+GfBehSA/WaoOUT/APqBnv6q3GW6ZYzqzpFYm8yk39Rg2duY25bcgn21xbpijkuLivNqdB/erJHPfiuf/hN6EYBqrpbnN0y17KUvwMlEdkWrLLpa2+j2dCveREkNpWrk/OUCfhzxXSqwqSdJLcsKBSccZPPPbj2YVRfwVFA6O6igEE/KxP8AVkV3QBtu1AaNB6+JHwsuFhJ2PZzx/wDAn4qOvFX0bwvBci0gn2B3Iy7keRyxO9uyW4zkj34x/ApkPrTH+er9qCPh9A46BWTTLSvbhY8k1It8vKExLfaX5VxXdcqud2QmOwkuqKG5khxKFcIPdIBPpzxVM/GJUlN20D5UBxkcs9z8OqJV1N0mM3bMttupmL2FpbtxuWK3FmK2jupxz2dZSgfWojj9NcLqjqWw1KjMw6pHRrV1sptqbVTpvyLWz/O5U2001z247k8Qkai7r9Wp7dwvsySq14jEu90gwbDAS4pDKAiEUJefUlPWp5wrPvADpA4rAbItf8sw3d1kG2iBqBfM70qvC5SsSutzW9IXCKGvPbQh50dXT0Bbaknt1pBAHJ5lTwlNdcXzrbhA0kXc2GsowN6RHfgLWA85DceU60+hJ7qQC4UEj0KRzx1Dm1+Y6s45ieZYxp23zc8nyqT0xrZGWkvMw0Aqfmuj8RltIPvH5yylA7q7dzmNoVwG+yRAGhBFieIzJORkk5rkDzXpEus4GZ3QTIHA5RqIXPXW697ibB4keAaRWTXrIbxDukVE+A3dvJbhwFvMSW1uKixUNMvloIU4gLRyogJUruVVZ/QbZrl+iGvl/wBWn9xmY5lZb7bVR3bTf1l55cpa0KLzjqVBtQT0q6AlpJSF9PPA96veuJH34nSL/ILP+pn10mrHZ/Ls7KgzmoOkxHbr2C22nzbQ6mcsNM9Yn4/PelUk8XfNV47tOXi
kVZMvMr/AtaGk/OWhClSFcD492UD/ADhV265472brYtWt7eiuit6vVvhYzgSF5plUmbJQzFjNdaVJS8tZCUcpYQkcn/5oT9NZPYKz6dF1g5wk7gPMT2BWjHmk19UCS1pjmbD1IUt4nobuT0d2y4HpBtkuen+PXWJag9fbrkiZDrwuDoDjpZabaW2T5iljrc6uAlICD6incXWrXrZnuItV531aeRc+N1eULXmhluylW9kkBxdtQSIzQTzyppDDLpCu6uCAeuceRHlx2pUR9t5h5CXG3G1BSFoI5CkkdiCO4IqjPirWFOrGEae6A4hARdtQMpypmRaYTfCnY8Rtp1MiS58W2U+YnqUeB2J/FNa1KtQbQKrRLnOAg/vGCBugbtB1WVKmw0DScYAaTI4CZO+Tvm56K8VuuEK7W+LdbbJRIiTWUSI7yDylxtaQpKgfoIINeisDgWLowfBsdwtuSZCLBaYlsS8fVwMMpb6v09PNZ6rVQ1ryGGRNuSrSLnMaXiDAnmlKUqiulKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoii/XPQv+7vjk7DLvqvm+NY/dYZhXG3Y+q3NIltknq63X4jzyeoHpIQ4lJA447nmNdvOxDD9r8mUdJdatUYdvuMhqTcLXMk2mTElqb9OpK4HUjke6VNqQoj8bsOLNUoz7MktzOfHnw4I/zgNdkPruqw5vsIw3Ptb7duGveteqSM0sy2jbJMaTaEMwkNFRQ2ho28pKB1q5Cwoq6j1E81tG4vaRju5/E7Zg+pGqmfM2S3qafci2t22xxNlNpUkSHlGEpRXwtXuoKW+e4QCKnalVwjAKegMxx389ed81bEcZqakR03fLlZQ/ju3Z/FdH42i9k1x1FYtMKILbFnE2hU9iEGvLEdLpgdPSE+iyguDgcL7Vqm2jZHhW1GRPTpfqlqE7bLq+iTcLTdZFtkRZLqElKVEphJdQeD+I4nngc88CrF0rTEcbqmrs+PPrfnfNZ4RgFPQZcOXw5WyVZtf9iGA666tWTXCLn2YYLmVnZRGNzxqWhh15tHV0nqUklDgSpSeoHuk8EHitjzvaJiedaTO6Kr1Kz+z4zPQfuyIU6G9MvTynA4t+XLlxnnluLUlPUUrSCAE8dI4qdqVnhGDw9M453PreMpWmIl4qaiL8suyhfQHa1i+3rBH9LcfzzL8jw91qQyiy5EuA+wyH1dTvStmK06QrlfuqWpPvq93k8iGME8LbSHTvMbneMX1V1Qt2NXZwLl4tAyBcKHKQCSGH3GAl15ockBJUFceqj35ufSrkkv8Q5xHTjv6qkANwDKZ671U+D4cOjdi1zkaz4flOYYrGmtttzMZsFx+59ukoQEDyVlkJcLCi2hSmurhSh68dqyGs2wXCteNRLPqfn2tOqS71jjiXLL7FKtUdm3dLvmpDSEwO/CwD1LKlHgck8VaClQJGGPdy4clJvin3s+KifVDQF/VzTWRpblOtGfNWu4xnId1kQU2liVcmVnkpdX7CUoHA6fwSW+QSDzzWF28bULDtnxKVgen2qufSsefMh1mBdXba8mI+8AFPNLRDQ4FAgEJKijnklJ5NTlSgEYo94QeI3flu0TOAdDI4FVL0x8ObBtG8ivWW6b696v2i95ESbpcPbLNIkSeVlZ6lvW1ahys9R4I5PBPPA43K57P2clyLG79nO4vWHKGcXvEW+RbVcblbEQHpMdwONF5qPBa8wBQHxBHwIPerBUqWktLSPdiOEXEclDgHYp96Z4zYzzUAbpNkuim7OLCe1BiXC3X22NlmFfLS6hqW20ST5S+tKkON9RJ6VJ5BJ6Snk86Loz4aWiull+tWTZLleY6hzLC4l61RcluCXbfBdT8xxuMlIT1J+
HUVAHggAgGrc0qKf2Rllteu/mpf8AaCH3/LdyStM1Y03d1Xw6bhKs9ybFYVzZdiznrAYaZEiO4goW0VyY73QCCe6AlX8atzpVXNDxhdkrNcWHE3NV821bMsU2qx5Np0y1X1DfsU2X7dKs12ftkiK8/wBAQV8phIdQSlKeehxPPSOeeK33Xjb/AKYbkcEe091TsZnW9TgkRn2XPKkwnwCA8y5welQBI7gpIJCgR2qRqVap9qIff/GXbTdoqs+zMst9X7671RnBvCQ0PxWUI161O1IyLHEP+cMdkXZEeA+Oeel9LKElwfT0lHNWL3E2a047tV1JsNhtsa32234Ldo0SJGbDbTDSILiUoQkdkpAAAAqW6jfXzTTMdX9OrtpxjGfwsTi5DCkW26S3LKbg+qM6joUln8O0lslJUCpSV9j26SOay2oPq7O+k3UEekXWmzllOu2o7Qz6yqNeGHojkOR7Zomcaf63ZlgF0mXqfGuDVuTEmwZiUKSErVFmMutodA4T5jYSSAArq4FXa0a26YJovPveTW2Vdshy7J3Eu3zKL9JEm5TyPmoUsJShttPA6W20pSAB27CtC2kbVc32oYyNPrbrJAybElTnp64kzFyxLQ44gBQakIllKUlSUqIW2v4gcc8iyFdlZ7S/EzcBxyEjkSNLLlpMIaWv3k+pjqAq1bjdieD7pL1brtqhq3qT5FldddtVutsm2RosFThSVdH94lxR9xI5cWtXCfX1rbtU9rWHa3aMvaL6tZZkuUxC83JjXiYqG1cYr7aeltxCo8dpokAqHvNnqC1A889pnpXNgGDw9M+u/nxzXRiOMP1Fum7lwyVTtLfDj0kwTF3cRzLOM61EtSYz0SBbMivC1221ocSpJXFhp4aQ6AokLIUUngp6T3rZNrWyfCdqRmuY7qNm1/juF0xIV4uh+59vS5wXFtxWwlrzVBKQp0gngcDp5PNja1fVDT+16q6dZJptep02HAyW2v2yRIhOdD7SHUFJUgkEcjn4gg+hHFWdUe3E9l3ERz4f5VWsY6GPymeX1uUT718vskfbfkWMx2od5uuobScSx23haV+33CafKb6OPXy+oukj0DZNSbozppadG9KcV0usiU+y41a2IHWkcea4lP4Rw/WtZUo/Wo1Wzap4aOmu2fM28/nZtds2vFuDoswnR0x4tsU4OlbrbIUrl4p93r59OeEg8EXGqwDWNOEyXRPITA6S4k8QNFUkvIkWbMcZiTys2BwO9KxWV4tYM3xm6YdlNtbuFnvUR2BOiuEhLzDiSlaCUkEcgnuCCPhWVpVHNDwWuEgq7XFpkZqsWnuyWXonEkY/oTuS1Hw3GJT65CrGtq2XRiOpfzvZ1S4q1Nc+vqST3PJqWNJtB8D0dcu90sCbhdMiyJ4SL5kd5k+13S5uJHCfOeIACUjsltCUNpHokVItKtiO/SOMbpzjK3BVIB+PCd8ZSq2bndiOlu5nKbNqHcMhyLEMysaEMxr5YJKWn1NoUVICwpJ95BKilaSlQ59SAAMHkfhu6IZng7GHZtlWeZJMXdWLpc8ku969svNxDLbqERlyXUK8tgF5R6G0p7jnnnvVr6VVrQ1uEZTPWZ+NzvVi4uOI5xHTL4ZbtFFyNv8AZbRpc1pHgee5tiNlbYXF86FdET5hZU2Gy2l65Nyi2kJHuhvo6fxeKj3blsZw3ay9OTpTq/qS1brrIak3C2XB+1SY0pbfYdXMEOI5SSkltaCR8eQCLJ0qwcQ8v1OfHhy4KpaC0M0GXBVk3DbC8G3PX+233VLWDUx0WNbq7TCgSrXGjwPMUlSvLAglaj7iB1LUpXCR3qesJxa6YlaBarrnuQZYtISlEu9IhB9KQnjjmJHYSrn1JUknn41sNKhvkbgbl9X5qXed2I5qlmqvhT6B5/qE/qXiOTZZp7dZshUmU3j0pttguqPK1tJUgqZUok89Kunv2SKm3b7tO0o23Rbg9hLd2ud/vCQi5ZJfJftl0lpT81CnSkJSgHv
0pSASASCRzUzUoz7NuBlh8t3JH/aOxOuVVfKPD6w3MNaIO4K968atLzm1raVAuLUuztpjJb5CG0NJt3l9AClApKSFdSurnk82bs0CVa7ZHgTr3Nu77Kelc2ahlLz55+csMNttg/D3UJHb0r20oPKzAMs++ffXejvM7Gc/yy7abkqoOc+GnpJqxI1ByLVXKr7e8szy6JuCb3HKYqrS00CmPGjtcrSW0oKUqDnV19KT7pAIt9SqljXGTnEfA/IKwcQIHP67qlel/h+6v6YW5rFLTvr1KYxSP7rFrgQmmVsNfvGnXXHg0P8AEQB9VWP0s0D070jkTLxYYk+55HdEBFyyS+znLjd5yR6Jdkukq6B24bR0oHHZIqRaVpjcb6+vfNZ4R9ZdkpSlVVkpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKVWrxDtdMn2/bYL/mOEzDDyC4SI9mt0sAFUVx9R63U8/jJbS50n4K4PwrnHojohnmuOyTN9WcExeZmmsL2btR0XuRPcXdosFlthxZiurWD5hWv3uD1KSpXrwBVGvx4yBZkT1LR6YgTwV3Nw4ATd8x0BPrBA4rtjSod2hw9abft2w6DuCU+rOGIi0XAyXkuyOgOr8jzlpJCnfK8vqPJPPPJ55qYq2qM8N5YDMLGm/G0OIhKUpVFdKUpREpUQbtdbZG3fb5mGrNvZiv3K0REItrMoEtOS3nEtNBQBBUkKWFEAjkJPcetbPobkeYZho3hWWagR4jGR3qxQ7hcmorRbaQ860lZCUkkp+d6cnvzRvnDiPdgHmZI9B6hHeXCD70x0ifj8VvFKUoiUpSiJSlKIlK8t0NyTbZarK3GXcAwsxEylqQyp7pPQHFJBUE9XHJAJ454Brnxvz1Y3iaV4zgci76hYrjUXJsqi2x6HhjEpMhTY/CHrnvqC+k8AFDbSOQTyog8Ub5ntZvLR/MYHqVJENLtwJ7CT6LojSvwegr9ooSlatqfqbhejuC3bUbUC9M2ux2ZgvyHnD3UfxW0J9VuKPCUpHckgVCmxPcxke6vT3LNSb7b49vis5bLt1ohtJAVHgIZYW0hxX47n4RRUr6T24AFGfaOc1vuiT3A73R3kDXHUwOxPyVlKUpREpSvhNmRbdDfuE19DMeK0p55xZ4ShCQSpRPwAAJqHODQXOyCkAuMBfelU50e2q2TW9257itc77mNzmZ/Mdu9nsLWRzrfCtVpWeITflxnW1KcLAbUrk9IKuOnkEmt29TSzFrNuo0T26aKzMmx9/KH0P5AqPlFzfUuI7ISgc+a+vp6W2ZCuRwe/f4VLQ41KdFwhzyBG4kTB5QZUOLQx9QGWtBM7wNRz0XVelQHcdjm3W4W92CixZTDUtsoTIj5tekutnjspJMogkevvAj6Qa8WwrErlh23mLbbnkdyvi13+9lmbcJK33nY7c95hklSiTwW2Uq4Hb3jUtgkg6Cedxb5qDIAI1McrEz6R1ViaUr8I5HBqFK/aVxh8Vq1ydCtasbg6U5VlWPwr9YTcJkVjIpy2lSPaXElaQt1XRykAdKeE9vT1rqftemTLjtu0un3CW9KlSMQtLrzzzhW44tUVslSlHuST3JNKP21E1hoY/uH/ilb7KsKW8T8PzUn0pSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIudHjKYHLXoxA1HGfZQmMi7wLWcbTLR9yFq6ZC/
aiz0dXn/i9XXxx8K1jwhdI7hkGly9TGtXs8tEWzZbJbcxm33BpuzTumMweqQyWypRPWASFjshP0VKnjIfuSon8rbf/qpFYLweoj8/aHlsGK75b0jKLg02vnjpUqHGAP6Caz2Rxps2p7BJbcDj5D81bawHu2drjAdY8vOPko+vO8TUjeBvUs22/TDOrxhmmbFzkx5s6wyTFuN1aiNuOPO+1J/CNIX5ZSgII91QUrk+ns3j6raseHrrPg1/0v1AynIcFyiI6q4YxlV7k3dkuR3EB0MvSlLeZKkOIIKV9lA88pPTVYPDItk3Ft/Vix3IGFxrjATe4D7To4UiQiK8lSSD8QUqqwvjjyGC5pBFCh5yRenCPj0n2QD/AEg0ef1elszqZkuuT96cQvwsCBkNFYNFavXpvEACAN0XnnoTqrN7t9QJGoOyWRuQ0f1JyvFn4dlZv9qes9xVF80OraStmShPZfTypPHqlQPB9eaU7K9V99W5rGdSNOMG1nmouKYsWY5kt+nuvO29ADyUxYnYlpx9ZHLv4iWiR34qe4+P3jGfBdXa76w4zKVibswIcBCgzIuan2ex+lp1B/MajrwN/wDd2sH/ACVk/wCuZW/gt/Wtpo+6Bl1I+Q5xBsSsHVC3ZKNbM4hff7P5nkpm8M7G97OL3rObZubeyxePNIaTbTk9wVMfVODh8xUdxa1rLJRySQegkpKeT1VfRQJSQk8HjseOeK/aVD3eIADoI+vrKFLW4SSNTP19ZyuaXiC4xrhkd50m235NrdAyVGpuVt8RI2KNwDEZYUlPnLUl9ZcQnzyek9PPQTz2q9mlmGap4h7RH1A1WgZbDDDTMGPFxlu1CL09ieUPOdYI4HHbjiqpgHWfxXySPOtWimGjj4oTOkp/7XEv/wDhfVV1LVl+P3vIb3i9rml+4Y6Y6bkhLauhhb6PMbR1kdJX0cKKQSQFoJ46hzWgY2drvvlzuk4B6NmeKtW81cj7oaOp85/uAjhkszUYaua5O6TXCBAb0Y1PzUTmVPefiNiRPaj8K46HVKdR0qPqBwe1SfUYat623DSq4QIEPQ7U3OkzmVPGRiVqjS2o5CuOh0uyGilR9QACOPjVXGCLqwEgrQf2Zkj/AAR9xv2La/WafszJH+CPuN+xbX6zT9mJfP8AA23FfZqB+vU/ZiXz/A23FfZqB+vVZVWxaf7m3s+yyDiitueteNCaVj7p5Bi7cSAx0pKuXXQ+op544HunuQKmuoU0/wByt2zzLIOLSNs2tOLtzSsG63+xw48GP0pKuXXES1qHPHA4Se5FTXViLAwqg3KVz88Xz/zQ0b/l4z/qzXQOufni+f8Amho3/Lxn/Vmqt/a0vx0/72q//Tqfgf8A2OXQIeg/NX7X4PQfmr9oqjJc6vGb08+6Gi1k1MezDIEptF2jW1mxIkIFsWp4OqVJW30dangEhIV18BPPA7knTvCN0buWV6WOajR9ZdQbFGsuYuBzG7VcmmrRcPLYjrPtDSmlKX19XSrhY5SkDt61MHjGfuRmf5V27/Vv1hfBmkxom1nI5EuQ0y03mMorccWEpT/esX1J7Cn6PsdoI0gj/wDWfmVO3X8Ab5n+v8lo+8He/qZme5yzbP8AQPK5GKxHb9CsF9yGDwJrkl51CXW2HDyWktBXBUnhZWlQ5AHfMb7F6n7GbRgusGhOr+dyI790NpvdpyjIpd8h3BZaLiHVty1r6FKDbgUWyj1SU9JHNU4sdhuuFeKbDtWUJW3Jb1YD6lOgjrQ9N8xpff1CkOIUD9BFXm8aaQwjbLjMdah5ruZxlIHxITDlc/8AWK52vcz9H0dpb7bnCTzwW5eY2y6rctDttq0D7LWmByx352Bn5K1u2LXux7ltFsf1assX2NVzbUzPhdXUYkxs9LzXPxAUOUn4pUk/GpGvllteSWWfj17iJl265xnYcthRIDrLiSlaCQQeCkkdj8apX4O+P3iy7RFzbow40zesquFwgdYI644ajs9Q+rzGHf5
jV4q7tpY0PIAsQLcwCQuSg44QZkib8j8V8osWPCjMw4bCGWGG0tNNoTwlCEjgJA+AAAFc29Ev/l88WXUbUVX98WnS+3vWyGv1Qh5tCYYA/OpUpYroNqPmMLTzT7Jc8uSgmNj1pl3N3k+qWWlL4/T08fpqjng6YdNXpXnutd8Spdzz3JnAXlj3nGmAVFXP1vPvfpTWVAl20uefcYT1ecIPMeZXrAN2YMFsbgOjfMR6BdBqxuOY5Y8RskXHMbtrUC2wUlEeO1z0tgkqPHPJ7kk9/iayVKhSsfkN+teLWC5ZNe5Ij260xHp0t4+jbLSCtav0JSTXIXTHW/cZ4jm6t/ELXrJlmmuBQWZNzEPGp7kNyNb2lJS2FKbUPNfWtbfKnCoJKlFKeAE10y3Y2m633bHqnabGhbk6TiV0QyhA5Us+zrJSPpJAI/TXLbwVp8OPuPyyC+tKX5eIPeSD6q6ZccqA/R/1VGygVdsLX5NaSOcOv/SPXep2hxpbKHNzc4A8pb//AEfRaf4p2D6hac6rYdieb6kzM6iRMc5s12ubSU3MxTIc5alrQAl5aVhXDoAKkqHV3HJ6XWXXWwbbvD9wnVvII5lptOD2ZESGlfSZctyK0hlkH4AqI5PfhIUeDxVEvG1I/u5YEORyMVV2/wDpt2pd3p43er94VOldwtLbjjFigYxcJ4QCeI5hFnqP1Bx5usmVHD9HVCDc1AJ5ueJ5xed9ytH02nb6YItgJjk1hj5fBZHYrO1Q32Rs81c101aziJBhXJFqsllxXIJdjhQV+X5i3AmKtBcUgLaCS4VfjFXVzWF2m72tTtO91F72f68ZdKy21oyCXj1iv9xIVPZktuqSwl531dQ6AByrlQWpPfp5A3PwU32VbccvjoUPNbzJ5Sx8QDDjcH/Qf5qpBndiuubeKTcLNiqFuTH9UmyhTQJLYZlpU6vt6BKW1qJ+ASa7msaz9IUtnHsOaJHMMvz8xM/JcZcX7DVrn2muMHljtysLLvRSlK51ulKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIq0b0NpWbbu8et+CN6zQMPxWHJbuDkNOLGfKkS0BxIWp8zGgGwlzsgN88gkqPYDE7Otm+pe0KDJxS169WfJ8RuNwVcZttlYYuPJDxaCCWJKZ6g3z0N89bbg908AE81a2lKf2OLB72fHL8h2Sp9rGLTLhn+Z7qpWsuwm25Hrpa90Gh2Ys4JqPbZaZskSIBl2y6OdJQovNJWhaFLQSlakK94Hnp6iVVi802IZNuP1esmqm7HPLJeLfjUdMa3Yni9veiwVgL61F9591bigtXHUlITyEpHUAODcmlRTHh4Q33SSOBNrfVtFLyXyT7wg8QL3UK7odA8r1+0lf0bw/Ue3YJZLm0mNc1HHfui47GQpCm2WeJLKWUgo4PZRI4A6eDzB+0bw9dQ9oGXT79hu422Xi2XwR2rzbJ2DqSJLLSyoeW6m4ctOALWAvhaR1clCuOKu1SrMJpvNRuZz1nv8ABVeBUYKbshklKUqFKrpG2z5tgmvOe61aP5vYbevUyNEbvEa92l2YYUhgFIfjFt5vr6geS2vgdXfqI92pi040+tem+PGywJku4S5Ulyfc7nNUFSbjNdPLsh0gAdR4ACQAlKUpSkBKQBtNKN8jQwZAQOUzHLhy3CDvO4vOZMnnETzjXnvMqUpREpSlESlKURfGa3LdhvtQJLceStpSWXnGvMS24QelRRyOoA8Hjkc+nI9ap9uF2P607mYuPwtR90lrZYxqd90oLVqwBMdIk8cBxRXPWpXA9Bzx3Parj0qI8wdqIPUXB7qZMRz9bFR9pri2tWPTXlam6u2
PMIRjBthuFiX3KeS91D8KtwS3UqHSFDpCE9zzz24Mg0pViZVQIyUR7p9umP7pNHbnpPf7u/aTJdamQbgy0HVRJTRJQsoJAWngqSU8jkKPBB4NVi2teGfmOi01y2aia9ysgwgXJm7LxC2RnI8C4zGiktOy+tZ6kpKGyWgOFFCeokJ4N+aVFP7FxezM/LI87DsNwU1PtWhjsh9fXM7yqt7q9huG7icqs2rGM5M/g+pWPOMPQ77GiiQ1ILCwtkSGSpPWUKA6VhQIHY9QAA1rWDZbq9uvuuKxdzWq2Ns4rijqpItGG2mRHcuT6gEqcdekur8r3RwAlJ4ClccE8i5NKhrQ0BugMxpO+PoTfNS5xcZ1iJ1jdP1a2SxOJ4pjuC4zbMOxK0x7XZrPGbhwYbCeEMsoHCUj4n857k8k8k1lTzwekgH4Eiv2lWcS8kuNyqtAaAG5BVk1k227kdbsEvOm2T7pbBbrFfmyxNbtOnfkvOMdQPleY5cVkA8AEjgkcj4mm3zbBrlt3w2waaYzuJxy4YnY5CnPZJenxTJdaW8XXUB9FwHSpRWoBZQrjkdjxxVm6VDPs5w6x6ZKX/aAB2k+uaUpSiL+VoQ6hTbiErQsFKkqHIIPqCK58ZD4XeW6f66J1w2la1x8BkKkOvpt1wtvtLUPzeQ622eSlxlQUQGlo90ce8eBx0JpUAQ8VBYhSTLCw5Fc8tZvCkyLXjLLVmmoe5e53K+mGpF8usiypWqU71/g2osdDqGojCE9gkdRKionnnta3SLbvH0/0VVoTnebT9Rcc9g+5LbV4gx2Q3A8voEYeSkFSQPQrUpQ7cK7CpgpU2FM0o8pzGn1ftZQZLxUnzDI6qm2kOyrVnahfMpTti1TxxzFsrcQ+uy5napElVueQCEONPxnUFzgKIIUkchKQSSOqtj2s7DMQ2+5ledYctyh7OdS8hekPy73IiiOzGU+sreEdkKV0lRUQpZUSR2ASCoG01Klri04tYidY3T6b4tkocA4EaEzGk74+r3zSlKVClKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKhPeVr1M22beMo1VtMNmVdoaGolraeHLXtb7iW21LHxSnqKyPj08duapUqCk3EfrcOpV6bDUcGhTZSqSbbtssjWzRzGNbNXNwOq97zDMICLwJlqy+Tb49r873ksxWGCG0dA4BBSR1A9gO1fzpdYdxWj++Sz4FqprDe89wa+4fdE4nMuCW23EqaejuOtSPLSkOyEBI/CK5KkqBHT3SNjTLKvgPs6/KWgkid9o3cd+IeHU/FZcW5wSBPK87+G67tKUqiulKibc/jeRXvRvKrjjOpeU4dOstkuFwYfsT7LK3XmmFLQHFraU4Ego9G1IJCj39CK/eEflWT5ltjut/y/I7nfLnIy+f5s25S3JL7n4CN85xwlR/SaUvtXPb90A9zCVPs2sd94kdhKu1SlKIlKUoiUrCZrmeNaeYnds4zG7M2yy2SK5MmynjwltpA5P5yfQAdySAO5r46eZrbtSMDx7UG0R32IOSWyNdYzT4AcQ0+2lxIVwSOoBQ54NBeY0iesx3g9kNonWfSJ7SO62GlKURKUpREpSv4eeZjsrkSHUNNNJK1rWoJSlIHJJJ9AB8agkASUzX90rRtHdYsO1yxSRm2ByXJVlbuky2MSlABMoxnS2p1v6W1KSSknuRwfjUU6hbW89zPdnhm4i2a4Xa0Y/jURDEnGGkuFuQU9fUlJDgQEO9Y6+pBPu9ue3TYA42g2B13AiQY7cbofZcRmJtvIMEfHhZWPpSlQiUpSiJSlKIlKUoiUpSiJSon3R6N5Jr5orfN
MMT1Al4bcroWVN3OOFH3W3ErU0sIUlXQsDpPB/nHIOw6Jaf3jSvSfFtO8gzCZlVxsFubhyLvLBDstSefeIKlHgc9I5UTwkckmjbhxNoIjiIuemSOsWxeZnhu7rd6VoeJaz4TnGpeY6W4zOM26YK1BN5dbILLL8nzSlgKB7uJS1yoeg6wPUECiPiX3LUfAtbdFzY9Y81TZMyyBKZthFwSxb2wxKi9KUtsIbK0kPK580uHsO9GeerTp/fIAPPVHeVlR2rASRyE/BdKqUpREpSlESlKURKUpREpSlESlUM8WMahYToeNS8K1ozmwqXeYlretNuuCIkJcd1tzqH4FtDyiVIB99xQ7qHHHAFs9vT70nQTTeTJeW667iVoW44tRUpajEaJJJ7kk/GlL7Vj3/dIHcE/BKn2b2N+8CexAUgUpSiJSlKIlKUoiUrQ9StaMJ0su+H47kc4m75ze2LHZoLRBdedWR1uEE9m20nlSvrSPVQrOagY1PzLBchxG1ZDLsMy9WyTAj3SJ+3QnHW1IS8juPeSSFDgg9vUetVcSKZewTEjqADHqO6s0AvDXGJg9CSJ9D2WwUqENn+gGW7btJBp3mep8zOJxuT85Mx9K0ojtuBIDDYcWpXSCkq7n5y1dvpm+tHgNMNM/XyyWbSSLiPr5pSlKqrJSlKIlKUoiUpSiJSlKIlKhndroXlW4nRubprh+pEvCrhImR5Xt7CVlLqG1EqYcCFJV0K5B7H1SnsRW+6WYfc9PtN8Zwe85RLySdYrXHgSLtL586a42gJLquSTySPiSfpJPejbhxNoIjiIz6GyOsWgXkGeF8uua2mlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURK1vJNQsUxS+WHGLtcVfdjJpKo1sgMNKeff6Ry450IBKWm091uK4QnkcnkgHKZDfrVi1huWTX2WiLbbTEenS31/NaZaQVrUfzJSTUA7QIt31Ktl03V5zHWL7qOpX3DjO9/uRjbbhEOK2PxS5wX3CPnqcST80Ub5nkaC565Dmb9ATmACd5WzqbD5nkPiQLTIsdSlKIsVlOSW/EMen5Pdm5a4NsZMiT7JFckupaT85SWmwVr4HJISCeAeAa+1ivtmyizQsix26Rrla7kwiTElxnA40+0scpWlQ7EEGvf61WTB7uNv26KToM4otYTqjEk5Ph7RPDVuujR6rlBbH4rawoSEpHASVLAHejbvwHWY5gEkdgSOUXkQdZmMaZ8jAnoc+Bm0Xs3SlKIlKVoN51dtlm1qxvRV20yXJ2SWO4XtqalaQ00iK4ygtqT6kq87kEenT9fYLuDRmZ9ASfQEobAu0EepAHqQt+pSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKizc7oTa9yOiWS6RXOd7Aq8MoXDmdPV7NLaWHGXCPikLSAoepSVAVKdRnq7rjZNG8j0/tuUQ0otWdX048bmuQG0QZS2VuR+tJHCkuLQUc9Q6SQe45qj6YqjwzqQOpNo4zEcVZjzTOMaSegEnpGa5XaT7jt1HhkZENHtccAl3vAVyXFQ2is+WElXK3rbL4KFJPPUWVfE9w2oqJ6f6H636E7qbHbNTtOZ0O7SrEtYS3KZSi42Z55soWhaDyWypHKeUkpWAeFKArfc90+wnVHFpuFahYzb79ZLgjofhzWQ4g/QofFKh6hSSFA9wQa5I6X6eXbZv4pln0i0vvU2Xj19kNMORnFlSl2yUwXS09x2UWSOoKP/o0q+Jrak81qraFbN1mu47j+f5Qc6rBTpurUrBtyOGpH5fmSL+7jdx0rFtVsN25YVlloxjIsuYeul0yO5lpTVitTQVy4hDpDa5DqkKQ2F8pBBKkq7Cq47s9SNRNq9js2teh+8C56hIZujUS/Yzkd2t1zZksuAkONojtNlpPUnpV0AEdYII4IOJ1xuWF4v4tON/3ZrHaLjiuV4zFtcQXqI1IiNrdQ4hpXS6Ckf3w308
8duvmr5K27be0pKlaF6eAAcknGIPAH9FWLA40W1mm+J18/ZcRhg29kCRxlavLRVdSIthbb8TZnnJMcu+rS9ULVrTs7u+qtlYUxEybA588MKV1FhaobgcaJ+JQsKTz/Fqv3g1kDaZcCf8A6L5/+ojVYa/33T+/bYs8e0tt8OJjEOwX+DBEGGiNDc8lp9Di46UAJLRcC+FJHCuCRyCCa1+EQzKk7McgjwFFMl3JLqhkg8cLMWOE/wCnir+IGVNqqU228NpA4Y5AWbmE09npvN8bgTxwQStzx3XmNujyrNUR9wY0t09xS7OWC2/cm5QYl3vsloAvS1PykOFqMCoJbS2hKldypX4tRVp9us1D0F3pW/bRmOsQ1W06zFUdFhvst+NInwXZHKWkuSGAEuHzUltQV8ClQ6e6TgPCUtOiueYBmumOomnWI3jNcbvzsxwXizRpMtUN1KEdlOoKilDrbgI9Elaf31XgyzANs2kdtZze6aR4LbXIkyO1Ach41DEtyat1KY7ccJbCi8pwpCQnvz37AEjRjBRfTJMghs/vYgJ6ybRkbCyh7jVFRoEEEgfuwbemc5i5Wiaz69ZtfNfrFtK0OuUW1ZLOtyr5k+SPRkyvuBbE+gZZX7i5LhKQnzAUJC0EpVz2jjdfjW6fbpjdg1X24ak6j6hzIlzRGyDHryhq8NzIyweHUsNMJU2AsBKvJ6eA4COng1GmG51F0t8YTPLfnkpMNrO7MzbLTJkK6UKWuPEcYQFH4KLC2x9K+B610iuE+DaoMi6XOYzEhxGlvyH3lhDbTaQSpalHsAACST9FYM/41OvME+YndDj5b2gNABHU5rR1q76REgQAN8tHm33JJB4QFz/8Va1ZHlmz2zakXm+ZLjjzblqcm4kh5lMIyZHBWJI8vzXFtElKR5gQCnnp571POynTfIMd0Z08y2frFmd/hXDDLb5VguXsH3PhdcdpQ8nyYzb3uAdCet1Xuk89R71FXinZFasv2JPZTYnXXbddbpZ5cRxxlbSnGVucoV0LAUOQQRyB2Iqx21AhW2LSgpIIOGWfuP8AmjdbULN2gxH2jbbpYbX+dwsq13UATPkdffDhf6zUrVUXVfVPXHMd5uN7ZMZj5LiWn5tC7ve8mtEFPtE0+UtaW0SnG1oYaC0ttlSAHCokBQ5FW6qomYbncz1S3ZvbPNGbuzjCbDBXcMsypcdEmW0hKW1GNAacBaDn4ZtJdcSsAlXCPd97NoxVWNFzcxvAabng2xO+I1WhtTc7LITukjLichznRalqhqtqRtR3f6TadxNR8gy3AtVV/c+Za8jfTMft8kvJZS9HklIdA5dbJQpSh2X9KemZt1m429aRP4ZpfppboVy1K1NuYtNgZm8mLCRykPTX0pIUpDYUCEgjqPPfhJFU+3sadY7p9vL2uN2h+6zZlwv7Dk+43W5PTpktabhFCVLcdUogDk8IT0oTyQlKR2rOb6Mza0j8RHb7qllzpYxWPAERclz9qYKpDzb7hP8AETIaWfqAqaEVWUGPPtVXtJ4AzE+k5wdIERWmm+s5g9mm1wHEiJj1jKRrdTfuA0d3H4To3dtRdJtw2pGR6m2Jpu4JiKRDXAufSoeey3bUMeWB0dZQgdSuUgcqJ5rB6oXXVbXnw5bvm+p6sn01ylrGbhcbva7e2iEqeY6XQlt5D7anW2HghK1NpKFEK6SopPe6LL7MlluTHdQ606kLQ4hQUlSSOQQR2II+NQRuWzLHc32layXHF7gJ8OHjt8ty5DaFBpb7LC0OhtRHDiUrCkFSeU9SVDnkGufayW7NWHCeUDTgbSMu632UB1ekRvid86c8yDn0Crx4VGm+QztuWKZ0xrFmcO3MXa49WMMewfcp7peUk9fVGMj3j7x4eHcduB2r465a8bo8A316Q6KXjUuynD8snwZrkOx2X2LzWFSHG1MPrdcedV+19ylaEq5+YPSt/wDCWIOy7HQCDxdroD9X98qqIt53/Cebbf8Ak4H9ffr0q1v
0hs7NC5s8fLPxAXBS/wCFXfqA6P5o+ZVmd626eTtpwizRsStMa8Z5m9xTZsZgSSfJ89RSFPugEEoQVoHSCOVLSOQOSP6uG2/Wa54X7Y5uy1BjaimP5wuMYxG7OiZxz0fc8MdBjhXbgkrI7lRNVN8XZV2w/VzQPVl9l1yxWO4ueYoAlKH2pMd8g/AFSEnj6eg/RXTS3T4d1t8a6W+Qh+LMZRIYdQeUuNrSFJUD8QQQa46TfE2c1CfNicOQERG6bnjyC6qh8Os1gywg8ySZ7WH+SqgbHt4Oda2z800I1ih2+16s6frfYkyGo/8Ae05DbhZU8WUqTwpDvSFpSUpUFpKenkgRFYtd95k3f7lW3JnUewXpNutTyYCnrWmDa4KHGmH/AGxcZsqekLbQ4UpaU8epRHvJTya8+27GpN58WvWvKsdQfuNY4kpu4PNj8H7Q8mOjyyR26i6l1XH/AKtX0V9tOv8AhoM+/k6f+74dX2c+O+g93v0nuIFhIBy3dN9otFKw8IV6bfce0A6wS3vnr1m68u47N91exfVXT3UK/wC4+6amYhl90VCvNquVsZito6VILiWm2yUt8oWVIKOkpKOD1A9+lza0uIS4g8pWAoH6jXOLxo//ADH0l/lO/wD6lNdGIH+4Y/8AySP+oUpHFQIPuvIHLC0x0JMJVGGq0j3myeYcRPUZrC6gY9fMqw27WHGcsnYzdpcZaYN1hJbU7Ff45QvpcSpKk9XHII7jkdvWqS+GvrPrDqjlWqWC6/apX66ZnhUwQPuY83FZYaZ6ltOOpS2ylSlpdb45J4AUnt3q/dc1dfHGdnXiR4drypxNvwnVuMq2X94noZafPQ08tZ9AEq9lfJP8eq0iG1w1/svBbydm08JggniFapJoFzc2EO5jJw47wOC92QZ/rzpn4hmPbfc+3F5erTvL2RNsiw1ARIWpxtwNR3HfZ/T2hpTfIAUQUdwSTVlN6l41AteC4xadJdQ7vjGb5Nk8CwWUwksOIkKfcBfU8h1tfKG2EPOcp6SOn178VSPdRphlurOjV48Qq1rmx8igZVGu+MN8qBiYrFc8iKsI+ClucSif3qyatNt+1KgbwtabNrXbUdWLab4rHYiI9UfKS5spcmgH4qjxwln6i6qrUWl9NlN/tMdLt+GA8A6yf2bjo71iq4MqOqMyc0gbsQOAxpEkPH7t1mt7GX7gdC9t1zzzSbUe0tOYpAipnyrxZRLuU1SnkMqeQ71pjtq98KKTHUCQeOms5tOveb627M8OvWU6g3uLkuTWl8yshheQJzbhkup8xvzG1tJUEgAcoIA9ACARj/Ek/cT6n/8AMYv9dYr1eHX+4r0r/wAlO/1p6q0x4jK4de7fUOkckd9maRb+96YInfmqS+GbpVkWVaja722z64Z7iyrLe47D0m1OwHH7nw/MSFyVyor3Uv3CeUdPJWrnntxvHi5uzIWpO3F6DFVPlMXuWplguJbVIcS/A6UdR91JUQByew5r1eEyR/db3Kp57/KKOeP/AKZn08Wb/fd20/yikf1mBVqRLqmxXzNP1b9cFFQAP2vh4nw+uK3rc/ptvYsmlmR6/wATdU/Zb5jUNd6ViFitTbVpjxmvfcjpeWS5IUlAJ8x1JCyCOlII4njZPrreNxu27FdT8laZRfJKH4V0LKOhtySw6ptTiUjsnrCUr4HYFRA7Cs1u3/cu6s/yNu/9VcqDfCQ/cYWL/LV0/wBeabOcRrU9Gta4cCXFpub3G/W6VxhbSqalzgeIDQcss+HBXMqoszcC3rvrLnOl9m1uj6YYTp2+i1XG6RJkJi7Xu6K6vNajuykrSwwz09KlJQVqUeykirdVy58OC3aV37WjXXRzWLBsYvWXsZNKukP7uWqPKfcQl91uSlsupUfdV5aiB+/J+BqjB4lbB+64xvILR6Ak9J0V3nBRx/vNE7pn4kAdeqzN+3S57tH3WYpp3M14d1c0lzjyEmRdJkWbcLQ4475KiZLCU8lCuhfSocFCiOA
odVTF4hlv3PYDp3M1s27aw5Fb0WM+ffbCmPFkMiHwAZDHWyVpLZ7rSVEFJKhx0nmb8y0s2uabYzcM5yjR/Tu222zNGS9I+TELqSQfdSgBrlS1K4SlKe6lEAdyKzur+qmIaYaTXTUDNILrltENKE2tbQVInPvgIahJa79Tri1Bvp79yeewJqtWfAgHzNJvz9lpAz3ak84Vqf7eSPK4C3LMgnLMHcD1VRrFrxlG5bAtG8H2562ZRCzi9si55fdCIkhVmt7RKJipiFM9Hmqe4bjpSEdQPV82reXXF9R7Dpq7YcC1ERKyVhtSmr3lsAXAuHgk+Y3GVGTz6AEdk/FKvSuZm3+55t4cm6GHjusNittjwPWuKxJK4nKo9lkqWS2x5q+VERlullzk8dC0ufRXWmSpKobqkqBBaUQQexHFabW7/wBs6tTtixE/uuGbRwbAykGZ1gYUvLVDHXwgR+803DjxMnlAEanmdss1a3ubvsd1AhK1tt+Lpi3dlqVkptDMqTDR5SuIkCGkNtIB4KlvLJUPd6eVckfK5a5bq9hO5DF8E1/1We1O0xzd5LbF3mxg2+ykuJbW4k91tuNKWhSm+taFIUOOCfdzngtf+Y+rX8p4/wDqV16fGEsSs1jaI4BZGfPyS/ZU9Ft7SBy4UrS02rgevHWtr+atXfZ16OETi8MEb8QaDyiZtunOSrwHtrNcYw+JB3YS49covyygLdPGHIVtACknkHKbaQf8x6p4wLUTHNJdnmJ6lZa+pq0Y5gNsnyigcrUlEJohCQfVSjwlI+JUKgHxe2DF2ax4ql9RZyS1tlX08NvDmvbuZx6+ZN4VjcGwNuOvx8Fx6c622CSqOwmK696fAIQpR+pNcheaWzbU5h99t93kz6Zq7G+LtGzCoM2Okfxi3yWw7dF6xbvNPE656i6p5PhFkyN+QcZxnEJDMIQ4Tbim0vSJKmlPPvKKCfnBsAAhHvcDRbduU1h2o7rbPtt3AZi5nGC5v5RxTKpsZpm4xC6stttSVNJSh0B0dC1FPV7yV8gHoEleGFmduy/ZhgzMJ5Kn7AJdnmIB7tutyFqAP0ctuNq/zqr/AOLFjcjNtaNuWHYy2XMkud2lsMpbHLiW1SIYSs8dwkELPPw6VH4V2vYKW3U6LPYc4N5tIMHnkZ+S52PNXZalR3tBpdyI05ZiNba3Wd347id1GiGuemGI41mVpcxfMru0tq2WW0iNPkNtS2kmI9Ifcd5K0uJT1t+UO5BTx6y7i2km+qFuUsOpGXa9WG56ey2Xl3zGIkdcdiDy2oIix2ylXn9Ky2RIUtCz0qJSB7pgrxPv3UG13/L4/r8KukjjjbLannVhKEJKlKPoAPU1z7OQzZm1nZh9QX3CBfpv3cTO20DFXdSGRYw23mct31uEU00ezfW7c/rxq1YM8m5vp3g+ATkWqxQLO39zjcHPMdSp9+WpsuuK6W0LCG1JQEuDkHsT5tueuWpeOb09SNnudZnOzGyWWAm8Y9drohv7oR0Fthwx3XW0pDw6ZHzlDq5b9eDwPXoRr3mO+jOM2Xi+TTcJ0pwuci2NotKw1eb+6rqPW5K4KojPSgEJZ6XfeH4QcGoa0SxexYZ4v2eY5jcRUaBFxblCFvuPLKlQ4SlqW44pS1qUpRJUpRJJJJq2zAirTpuyNNxvmYbIcdxOY1AMWyVdou2q4aObyHmDS0b9QdCb3Wq7ydPL014lWjcL+6tmC5GUPRJMaYp2IXLGFS3UJbgo8jykISEAjzEOEqJKiomr4Zri+tOm2i19Tp7rGu+ZBbo8y5i75ta2ZzykIZ6ksNph+yto7o7LUhzjqPKVdgKh7zyE+J3ttKiAPLgDv/z9+r/6o/72eXf5Cn/1ddcxcaf6Lc5pgh1X0Ij611XQGh/6RDXZEU/UGfrTRVh8L7W/VDX7RHJc51ayp6/XgZZJitvLZbZS0yI0dSW0IbSlKUgqUeAPia/bJrpn263chmejelWZy8M060xCY2RX21tMrul3uCl
qR7PHdeQtEdoKbd5WlJWfLPBHUOND8F4E7YclAPBOZSu//wBKRajzworlKwfcXr5o5lhUxkC5YmFt3stxUWW+28Rz3P8AuhtX5jzXfUY07W2nFhTxAbyGt+AJPExmuGm5w2V1Sb+JhncC935AcBkpZ3WZBuF2RQbXrnp9qjkGoGAonswsmxjLXGpbjKXDwh6PLS2l1sE+5wSQFKQSFA9I3Tc/r/qLM2bq3N7cM+t1jhG0x7sEzLKiZJcQ6622ptK1ueW0tsrWFAtOcqRxyK2DxIXLY3sp1P8AuopAQqBGS11fF4y2fL4+vq4qtrWLXzEfBbk23IGnGpMmwG4obcBCkMSLoHmex+ltaD+muCo536tWk3YWkHmDLTvym+QMLtptH6xRge2SCOUebhnFszdbvoJad326zbJiOTyNxb2nbMmC75M62wETbpeZCXnEqkSnlFAjthSelLLI5KU8qV3CRlfDY3Has6oK1I0a1uvSb5k+md0TDF2KEpckslx1pSVlIAWULYPCyOSFjnkjmpI8OD9xPpb/AJOk/wBdfqtvhl/usd0X+XXP+8Jdeg8Bu3VKA9kteerSIPDM5WiwAC4mku2JlY+0CwdHSCOPW83zXSaqMeKLrJuK0E06tGaaVamwbFaLtdUWZ+LHsqFTklbLjnmCU6tY7+UocIaQocj3jV565++NN+5pxb+Wcb+pyq4NocWtaR95vq4A+hXdswDnkHc70BI9Vs+q+AbzdQtF39YrZuOXgNwtlg+7VtxSxQUuMuNtseb0y5yiHHn3Ej3iEBpKjwEqHKjvOwjc7edfdsCNS9TZkZu743Jl269zw2lpt4R0Jc9oKUgJSS0tJUAAOQogAECpSuX7mOV/INf/AHeaoh4d1gvWVeG7rLjeOpWu6XORf4sRCOepbq7YyEpHHxJPH6a6dqd4FTbGsFmNxDniI53Gi5dnb4tPZXON3ugnhhB5ZqdNvepOpu+eTlOpTWfX7ANLLVdnbLjtrx5TUe43RTaUqXKly1oWtA4WnhtroHJIJPTyrTNZNdtatgus2KNZ7nt01I0VzZ5UcP3tppd3sbySnzOJDSEF4JStKwFg9SQpI4Keo/ngyZhAuu2/IMKDgTcccyd9b7J+clqQ02ptRH1qQ6P801/HjPMRZ23rDbY01512l5pHbgMoHLjnMWQFBI9T3UgdviRTaB+qPp+GJ9gEZ4sWEGeMmRGWQgWVqH/uRU8S3t/w4ZiO15zvMm6lHxBNV9etItBbhq7olneOWu2whDD3m2X2qa6mQ6Gw6y+t0spA8xB6VMKJ7nqHpUhbbdRr9dNoeGap5pPl3q6qxJN4uD6+C9KcS0pxZ7DjqPHwFQhv4sVyxfw0n8ZvKibhaLPjcCUSeSXmnoqF/wD1yTUvbOLnbbLso01vN5WEwIGGsSZSigr4ZQ0VLPSOSfdB7cd6h7QyltTWu9l4AJ0GFx7TdGOL37O5zfaaSQNbt9dFGO0nKMl3i6STdYL1r5lNpyabcJjCbRjc1iNFxpKHFCO17OW1B8lAQ4VSOvr6uBwBUr6T6gZ1pptwvGb7mb+7OvWGv3xV5uRhtxva48WW+GXGmkJQgBxpLfQABz1J+nmqjateH7qNp1kStzPh8ahybO/cGRdfk2h/y0SGXAHOmMpX4N1pQIIjvjp+hXokfXJNzmYbuvDV1fnXzHvuVmmJeTbr+zFbUhp5LUhh1byUHujltLnWgk9JQr4EAZ13ltGoaYwua0S0+7cDFJzE56mbyRKvSaH1qbXHE1z7OGoM+WNLZaWtmpYwnK7juV08i6n53u7l6XSchbXMsuNYtfLZDTaIqifIEtbza3pLxSErWCpCB1FKUp9a17w/94GpGf6p5vti1ryOBk9/xBUly1ZJFQ2j7pR474acC/L4QvspC0LA5KSrq5I5O27CMV23607WcHyAaTYBc7va7eizXpb+Pw3ZCZscBCi6pTZUVLSEOcnuQsGp4i49oDpZnuPWXG9OsUsmV5IJLVv
+49ijMSvZ2myt9xa2kBSGQAlJUTwVLQnuVCup7G7PtDmD2YIjf910m/GdRnZc7XmvQDvekGd33hHpGhG9SfUX7jdweE7aNMZ2pOaF18NrTFt1ujke0XKYvny47fPxPBJP4qQo9+ODKFc8d5kC5ateIdt40UuXUvHLcx8p3o6v2t5xDrzjnUPQ+5CQn8yz9Nc0GpUp0QYxmJ3CCSegB6rokU6b6rhIaCY36AdyJ4KxWN27dVfdP16n5ZqNbsYyV6IbpDwyLaYzlpiICetESbIdSqU44UgJW6060EqJ4SQnv4Ni25bP91OnmS6lZlitpsVvZyORbbGzBLilKitttqPmrWohxYUsgrSEpJB90cVsO97UoaT7VNR8ubkBmWbK7bYaueD7RK4jtlP1gu9X+bWvbIrJYdFtt+j+llycMe/5NZXr0iMGlFTjjgEp9SyBwnoD7aeVEd+kDvxWlIh7qpjygNAG4kkm+Zhrdd86lZ1AWtpAnzEuJO8AAZc3abuAVk6wuZ5niunmL3LNc2vsWz2O0MGTNmyl9LbLY+J+JJJAAHJJIABJArNVC+vOmM7V3N9McVvNlNywSBdpV9yJhYBjvuxmP7xZeST76C8519JBBLQ5qjpNm/63njA0tOUq4gXd/vh1ynTNRlj/AInu169XuLbri/l+O2q4PBiDkN7sDsa1SVH5pS/ySlJ/fLSkAdyQKtfEmRLhEZnwJTUmNJbS6y8ysLQ4hQ5SpKh2IIIII7GsfkOJYvluOSsQyfH7fdLJNYMaRb5UdLjDjXHHSUEccfR9Hbiqlq053K7NJzyduuOr1a0kfWXW8In3UMXXHlE8lMGS4FeZH5PPlqClD4DnqWZxNHldbcfz3c8t8ZmMLjcdvy38s905K1OeZ1YNOMdVlGSuPIhJmQ4P4FvrWXZMhuO0APj+EdRz9XNbDVRcVO5TdVneNXTVrR5WkmmWH3Nm+/cidcky7pkFyYPVFDgSlPkx2nOHClSeVKQngq/Ft1U4SGy7Mk9oETxJm26FEgugZfO8joIvvkaLXdQc7sGmeHXPN8lcdEG2NBZbYbLjz7iiEtstIHdbri1JQhI7lSgPjVN9Uc98SmNao+oUBWk+A2m83WDarTi8xL8+7IcmSUMMIfcS2poucuBS+hQCUpV25HFXlkRY0tKUSo7TyULS6kOICglaTylQ59CCAQfgRVb9TMqb1E3jaZ6IW5zzYmDQZeoOQJSeUpd6DFt7avrC31u8H6EGqtE1Wg6n0Hmd1wgxO4KzjFNxGg9TZo5YiAefBRDIl+KKxqXB0sGrmijl1m2SRfi6i2SvJZjtPtM8LPs/UFLW77oCSCG18kcDmzm3yz7lrPZLo3uWy/C79dHJKDbVYzEdZbaYCfeDhcSjqUVcccJHHB7nntG+FZi3d/EP1JxiS6PNsmndmYioJ/EVJW86R+mQ3z+YVZ+rUz9i12rsXo9wEdAPVVePtXN3YfVjT8SUrG47kdky2zsX/HLi3Ot8lTiWZDYISvoWpCuOQOR1JUOfQ8cjkd6hzV7dPguLYpmTeELumV3zHob7D33Dt7suJCnlHS0y/LAEdtzzFIBa8zzBz82spt6zjA2cZs+itufuFsybD7FDamWW8wXYU4tJQGzJShwAPNKWDy62Vo6jwSD2qGeeY4RxmZjlAnmpf5Inj0iM+c25FR54mGWzcV2eZjHtrqm5WROwrC2pJ4PTJkIS4P0thwfpqw2BY5Dw7BsexK3thuNZbVEt7KQOAENNJQO35k1VrxVozq9qLtzSklq05RZpr/HwbEjoJP1crFW8hPtyYUeS0oKQ60haSPQggEUo/s6h3vjoGNI9XOSt7dMfuk9S6D6Nata1X1IsWkGm2San5MHFWzGrc9cH0N/PdCE8pbTz26lK4SPrUKhjHrxrFlWiadweY6z3LGGplhOTs2HFLFAmxYkMs+ehlZksOvy3fL4Ci24yFK5CQkcGto3EahbeZSYW2nWjK0RZurDC7RAtjbbyn3w
4ehKwttCksnzOAhbnA6h8eDVJ9Rtou63ZBg911U287qrtdcSxBtVzfxe7pWhpERCuVpDSluR3SE8lXCGiRz09+BWRcAHucYbkHZgETikdWk7o0krUNkta0S7MtyJBjDB/m5zrCvntm1Tv2tWiGMal5Lj79nn3ll1S470YxlOJQ8ttD4aUpZbDqEJcCOtfAXwFKACjBniMylYbC0U1jiHomYbqZbAXB2PsslK0vo5+hQQkEVZXSLNJuo+lWH6gXK2fc6Xkljg3V+J3/AOPsocUgc9+AVHjn4VWTxTkmboLiVgYHVKvGoVjhxkj1U4VOnt/NXVVaWbUxsRFRn/cauak4P2d7psWPPdhVyPWv2vxI4SB9Arw320pv1kn2Rc+dBTcIzsYyoEgsSWAtJT1tOJ7oWOeUqHcEA1i6QLLRt4le+q4Zz+710s/kDkf9YiV/Z2VW0nn9k5uM/8AeRL/APhUG5btXgQ94WnuFjXvW95Fxw6+TTc3s5kruMctPRh5TMjjqbaV18rQOyilJ+FWp/t2fx/9t6O/Yv8A4f72K/1Krl+wqtv+E5uM/wDeRL/+FTfguJN4JiduxJrIb9fEW5tTYuF9uC50+RytSuXn1+8sjq4BPoAB8KWhRqs9VX7tunzTVXWK86EbVrHZLnLxThOV5lffNctFocJKfZ2mWVJXKf5SocBaEgoV3IBInrU6feLVptll0x5KlXWHY5z8EJHKjIQwtTfA+nqArnz4X+3p3Ltub+okXWrUTF7hkV+mqnM4/cIjLTqmSEJU55sZxZX8489XHvenrzSmPFquacmNBPGTA6DMxc20lWqfZ02uGbnQOECT10GmfBXu02xXWHH5kyRqbq5bMvZfaSmPHhYum1CO5zyVdQfdKwR24Pp9Nb9Wtaf4ZJwSw/cOVm+S5Uvzlve35BIaelcKA9zqaabT0jjsOnnue9bGFpUVJSoEpPCgD6H660cqhf1SlKqpSlKURKUpREpSlESlKURKUpREpSlESlKURKg7d9tlhbrtNLbptPyBdlZi3+HdnZjTfU6lpoLS4lvnsFlDigCewPrU40qrmh0ToQexkeoVmuLZjcR3EKt9m0o3kabY+1iGB6/4XlttjNBiHMzjGZK7nFbA4SlT8SShEggfjONhR+JNebb7ssh6Y6oXrcHqvnsjUXVW/BSXby9DTEiwG1JCS3FYBV0e4AjqJ+YOAEgq5szStA4hxf71xOsHPvrqdVmWgtwabtLZdtBkq6bxtk+n28DHLfHvl1kY9k1j6/uVfYrIeU0hfBU062SnzWyQDx1JII5ChyoGI9P9gGv6YMfD9Z97Wa5Fg0dIYdx+1+dFVOjDt5DstbynA0QOlSADynsCKvPSqMApzGRvGYnlkrvJfE5jXXvmow1O0lyHIdJX9INJsosOCWqTaHbEVO4+u4+zQ1teUEx0JlMJbUlBIBV1juO3bvG+zTaZm+0XGJWAI1itOWYxLuDtzUw5ijkKY2+ttKCEPic4gI/BoPBaJ9e457WXpVmktc54zdY8bz8bqpALWs0blw0+FlRXXLwyXMj1ika87c9Z7jpXlU99cqa3GYWplb6/21xpbTiFt9Z5K0ELSok+gPFSnojs+yDEcrt+pGvuuuSav5VZgo2U3NBj220LUnpU6xF61pLxSSnzSeeD2APerL0qKX2LQ1mQy4ct3RTU+1JLsznx57+vVVh3lbDsA3csW6+PX2TimaWVvyYF9isB7qZ6ioMvtdSStIUSUkKSpJJ4JBIOk6VbBtU2VwoG5LdfmGpeL25ba2sU815mBLLagUCYtx1a5DY4B8ogDkDkkdjdSlKf2Xsb54Tvjf8AO+aP+0jFy4xun63ZKK9y2gNh3I6KX3Ry7XFdpZujbSok1lkOGHIZWlbSwjkBSQUgFPI5SSAR61oW1TbTrFohZLNYNTdws3MbRi0ZcOw2WDbUQIkdspKQZDgJdldKSQhDiuhHY8KUEFNkaUZ9mXFvvZ/Ceca5o7zBoPu5fly
4ZJVJtZfD3zS+7lVbndv+vDunOSXApVckuWpM1HmeWG1rbClBK0rQkdTTiSkq5PPfgXZpUAQ8VBmPnmORUzLSw5FUe1z8ObLdUZ2DZ7adx99VqViU4TH8mvsRMlL5C0ON+RFbKGYqWnEFSW0J6T1q6yo96lvWrZri+4jRG26Waw5pdr/kFqUqXFy9Udhma3NXyVuJZbSlpLSuenyQOOhKRz1JCxYalC0OYaZFpmNx4bukegQEh4qDMCOnHfmRfRUH0s8OTXfEozWEZbvezeTp3G/Aiw2Tz4Tj8b/0IdU+v2dBHYoQFAgkDjnmrkTdJ8HkaUS9FoVmat2KybK7YEw4o6QzEcaLRCPXv0qJ5PJ57nk1uFKmp9qw033Bz48+55SYUM+zeHtsRlw5Km21rY7q/t2DmGyNzs+fpzHupu0ewW20IhvyHeUkB2WVKdbbJQgraaUAvggkBSgr81g2K6rav7g8V3E3HcZYbZesLMX7jwYuBuKioSw8p0BzquRWvqUtXUQpPY8DirlUqcRLmPObcjxAie1uSjCMLm6OzHWfioz1Z0IxbcBpO/pbrYzEvLctIW7MtsZUIsSk89EiMlbjqmlp57crWDyoHlKimoz000I3TaRYUxpPiu4TFLnjNua9ktF1vmJPP3i2xR2Q0OiWhl4tp7IUtPbgcggBIsxSqxBdHvZ7j/njnFslaZAnTLh9bstc1FWhO3fD9vmJ3OzYbJkzr5fpTtzvWQ3YefLulwc5Jff6SnlIUTw2kpABPB5JUYOxvY1qtjm6i6bsW9xGPyMjvCXGJVucwN0QvZ1NIaDaQLn5g6Uto4V1E8jk8gkVcalWBIqCqMwIHARERlEW5WVSJYWHImTxMzM55353VTd5mybNd4EzH483XC3YrY8ZeclQYUfElyn1PrSgKW6+qcgK46PdCW08BR56vWrLYXbsrtONwrbmuQW293aO2G3p1vti7ey9wOAfIW+8Un6fwhHPoB6VnKVDfI0sGRM9VLvOQ45gQOSVB27razjW7LTu3YLfriba5bL1EukeahrrWhCVdL7Y7j57KnEj4BXQTzxU40qC0OidCD1BkeoUhxbMcR3EFa1d9PMUvGncvSx+1NIxyXaF2Mw0JAQiIpnyuhI+HCOw/NWibUtu9n2vaL2rSi1z03F6K8/Ln3ANeWZcl1ZJWU8njhPQgd/RAqYKVaTic/V0Txgk/Eyq4RhazRuXC0fBQhut0DzrclptctJrPqhacQx+9IZTcVuY05cZjnlvB0BDntjKEJJQjkFtR7H3u/b77V9C83256ZWvSa+al2rLrJYmnGrY6zji7bLbC3VOEOrMt5DgBWoDhCTxxyTx3milQz7PEG+9mpd54nTJUmxnYBqZpjr7mWpmiu5GRhmL59Jck3i2NWFmVLSlbinFNMuPFTaSFLX0O9HUgK44Vx3yW57YXnW5LKMNvDu4OLj8DT4AWCOcUdnygrlo+bKlOzwZDpLLfKuhHPBJHJJq49Khg8MMDfc9nhGXabKXEvL3H3s+M599VDusOkmrerei1x0pd1Xxq0Tsggv229XhjEHnEusOjpIjx1XD8Arp6gSpx0HnkBNa9s42x5dtRwF3TCbqrbswx5El6bCCcbXbpUd50pKwXfa3Urb7EhPlhQKj73HarB0qzTgc5zc3CDyz+Kq4YmtacmmRzSqVbnvDbtur+qaNedG9UbhplnxWl6VKiMrW1JfSOkPpLa0OMulPZSklQVx3TySTdWlULQXB+oyKuHEAt0OaqnpFstza3ZJacy3Lbi8l1em4++iZZ7TLQYtphS0/NkKY61e0Oo9ULVx0nvwTwRuOo+3rUjUrWzDdSr1q3Y1Ytg09dxtmIuYm640uSpBQmQ8+Jw8x9sKPlr8sJQe4QTzzPdK0xGWuHumRz38TxN7DcFnhEEHUQeW7gM7C1zvKr5vP2oHd3p7b9P1ZhbMZRBnpuCbi9YVXGU2tIKeGVCSyGwoFQVyF9Q49COa2rQHS/UzR7TW
Pp/qDq3Fz9mzxExLbcDYVW+WlhCSEofUZTyXulPSlKglB4HcqPepZrXc+VngxmU3pvb7DLvjyS2yL1OeixWgUkeYossurXwePc4Tz395Nc9UFtGoyn73qYgfktP2j2l3u5cBMlcuPCrxDWS5Y/qTk+jup1tsMuJkDMaZaL9aFXC1XBBbUUrUG3GnmXU8q4WhfBB4Uk8Ai9eC7YLk9q8xuD14ziPnGc26KqFYmIdtMC0WBlXPX7LHU44tTiupXLrjhVwfQcDiI9i+03cZtCcyOz3qdp3k9lyufHmynI12nRpMNSOpK1tpVEUl3lKuyCUd0j3xzV2q63kAtc3RoHI4QDHrfdlqsz53PByLieYLiRPpbvoq17yNqOd7tsVY09OsdpxLFmJzNxMdvFFzZjrzaFJAW+ZraOjlajwGgfTknjvJmjGmmW6faZW3THUDMLJmMO0WtizRno+Prt5ditteV0yELkvpcKkBIPSED17HntJFKxaA1rmDJ1zxtHwsrOOJzXHNthw1+KqfgGzTONt2Z36+bV9UbXZ8Wyd/2mfhuU2l24QGH/QOxnmXmnW+AekJPVyAAoq4Txv2BbZxE1bc3B6w5Y3muoSYf3OtjrMD2K2WKIeepqFGK3FBSupfU644tZ6jx08kGcqVZpLY4CBwGXwtytlZQ4B0zrc8dfjfnfNU83ObGNTNy2qGMakT9w1oxwYRIL+PwoeELeLB85DoU84u4fhl8ttgkJQk9PZI5NWpxqBlTNjEHO71aLzcFApdkW21OW9haCOOPJckPqB9eT5nHf0FZqlVaA2n4I9m5jnn31UuOJ/iHOw7ZdlQrTjw3tVtDdT8gvWg26y5YXhmTPdc22tWJmXLDIUopZSp8qa6khSgl/oCkgnse/OXuHh05ZYdyMbXjR7cXfMVVMgph3x6dDTdrpLJQEPrD8kqb6nQlJ5U3w2ruhPASkXepUtJZhj3bDlEQd4jQ23o7zlxPvZ85meBtnmqobvdkV13DZPgWpen+qDmG5xgCkJh3KRD9rS8hDiXG1KAUnhxDgKgeCFdSgRW/uaK62PaV3jD5m4Vu6ZTk8Z6Ld8gu2Neey02tktBEGCxJYajBKSo8kuFSj1L57AThSqloNN1I+yZJHPPlOsZqwcQ9tQZiADyy5xpKq/sv2f5ts/s1xw1jWa15ZjF0nrub0V3FFw5bcgtJb5bfE5xISehskKaV808FPPNenWbZdAzHWC2bkNHc5e061QtoCXrg3BTMg3Vvo6C3LjFSOrlHuFaVA9PHIJCSLL0q7nF7g8m4yPSPhbkqNaGtLALHMevxvzVcsw21ah6+qs9s3M6hWG54nZ5jdwXi2K2d+BFuklvu2qY+/IeccbBJPlICBz3KlcCtk3Q6B5PuB0jm6M4xqBasKsd2aajz1qx1VweLLbiFobY6ZTKGhy2AeUr7dh01NNKq5oe3ARaZ5njv66WyVmuLHB4NxYcOW787qCtumgup23vReLo9F1Zx2/N2Vhxmx3B/EXmFRyt5TivaG0zyH0grUAEqaI7ck1G21/Y3qFtq1XyrU5rX615MnOXy/f4MjDFxi6ovKeKmHUz1eUrqcWAShY4V809qt9Sr43eL43vQRPA599VTAPD8L3ZBjiMu2iVWHens/wAv3g2S1YcvWK3YjjlpmpuSIyMWXOlOyQ2psFb5mNp6AFr4SGwe/cmrPUrNzGvgO59lo15Zdv1Kh2dpPrBO0He0hVq5jbd3etZsisgRh73SIRj+SSIpuHHn8cq8zzOjn/i60nZbtGy7aBjlzwb+6/bsuxu5Tl3PyFYuuDKZkqbQglLwmOJKCG08pLZPPooVZilaYzjfU1dY8dfjfms8AwNp6NuOGnwtyVVnNld10y1ouuum1vUOHhFwyQH5QY1dbWqfZLkSrqKght1pxhXUSoFCjwSrp6UkpO1xtst3zzU+wawbisyt+W3TEOpzG7FabYuDZrU+ojqklt1112Q/7qeFrWEp4HCOQCJ+pVWfZho
b7uXDlu4btIVn+cuJ97Pjz38d+sqAt3W3DOt0en8nSuDqvacRxqe7HfmA4w5cJjq2V9aUh72xpCUFQSePLJ9353B4r2aM6AZxpjoIvQTItTrTkluiWN+x2uexjjlvkMtuIWkF7mW8l3pCwB0hvsnuSe9TjSq4G4H09H58bR8LK2M421NW5cLz8VXrSzRjcjoxpva9LMc1qxHJLfaIqYVvuV+xeQJkJlI4QghmYESEoHASD0EAAFSuK2LQnbBgmiOmV406C15IrK5MuflE+eylKrvJkjh8rbT7qGyk9IbHIA+JJJMx0q7yahcX3xWPEZx1Oe+0zAVGtDA0NsBccDlPrbdoueMTwt9SNKM9uOQ7W911809st3c5ft64a31tN8kpQVJdSiQE8np8xAUB+MTyTabb9tot+irtwyrJc8v+oefXtlEe6ZVfnSuQthB5TGYbKlCOwFe95aSeT3JPA4milGuLBhHLeY55/nqjgHmTz4TySoV1w26q1IzvCNZsLyGPj2oOnzzptk2VEMmJMivJKXokltK0KKFBSuFJUFIKlEA88VNVKrFw4Zi4+vQ7xY2VpsWnIiDy+vzVaNb9pORbosDumM646jtsyFtA2OLjsRxi3WmUlQIkuNuOFcx0gFHvqQlKFrCEpUSs7/pbpRmNiyGPmupuS2i7Xi2WJrG7SxaILkaJCiBSFPOcOOLUp15bTRUeQlKWkJAPBUqWKVZhwThtPxgiecEid1lVwxxi0/MGOUgGN/MpSlKhSlKUoiUpSiJUFaG7bZ+l+sGq2tOWZijJL9qNc21xlpilkW62M8hiKOVK5IBSCRwCG0/XU60o3yuxjOCOhifh2kaofM3AcpB7ZfW+DooAc2uzWd5rW6u05t7HEfxk2K6WQRSpU1wdkLLnVwlAAbVx0k9TQ+BPHw3+5pm2BbUs1v2AS34VzW3FguT2OfMgxX5DbT76SO6SltaveHzeertxVhq81ztluvVuk2i8QI86DNaXHkxpLSXGnmlDhSFoUCFJIJBBHBBqj2YqYp6DTfLi4g8CSQeBWlN+Gr4hzt6AAHmAB2uoAvmA4jh2D6M7fMDiMNWSffYD60tcEPwrc2bg6+sj55deYYClH5xe7+tallkmRqt4gOA/3Pj58HSCx3U5jdWO7KHZ7YQxbVLHZToKQ6Ufig8ngispatumCzddpeOWaRmNrxTCMWZZjwIeVXNiO1JuD61OMsKD/Wy0lmI1y00pKAHEjgDtVhMLwXDtOrC1jGDY3b7Ha2VKcTGhshtKlqPKnFEd1rUe6lqJUo9ySa2D5eKx3udHEgsvwgSBrOlwufBhYaI3NbPD2pHG8E6RvErTNzulB1w0BznS5oJMq+Wh1EEq9BMb4cjk/QPNQisZtC1HVqht1wq/zAtu7wrcizXqO4OHY9xh/wB7yG3E+qVdbZVwfgoVMdQzb9NMm0r1kuGZ6c24T8R1BloeyqzJfQ0u3XPpCRdY4WQlSXEhKX2weolKXEhR6kmlPyuc3R0d2zHQgkE7w3ISRo/zNadWz2MT1EAjhizMBfuqu3uNmWqONa54uvHmc2xaGuBFVf7P90IjjCllYKQlbbjDyFKX0PNq7BagUq7ceLMNF9UNbILeKa15rj8fC1vNu3GwYxAfacu6UKC0syZj7qlJYKkp6m220qUBwXOCQZypUNAaABkDPUmfjdSSScWuU62sF8o0aPDjtRIjDbLDCEttNtpCUoQkcBIA7AADjiqpbh7crWrdtovo3BQX7dgTzuo2TKT3Qx5X4K3tq+hS3uv3T36eTVncquV5s+O3C547jrt+ucdhSoltakNsGS76JQXHCEIHJHKiewBIBPAOhaGaQzdO2L7l+aXFi75/m8tNyyW5MhXkhaU9LMOOFd0xmG/waAe595R7qIFmmaoefd83M+72Pm6AHNVcAKZYPeEch73ceUc5GRUpUpSoUpVf8zsF8kb3NNMij2ea5aomD5BHkTkMKLDTq5EQpQtwDpSo9+ATyeD
x6VYClG+V7X7p9Wub/wCUobsczfHo4O+SUpSiL8ICgUqAII4IPxqvWE7cc50EvOQjbvmFgiYlk1xcuz2L5FbXn2LbMcADjkN9h1CkNq4TyytKgOB0qTVhqVAEHEM4jpu9AhuMJyz6/RPwWo4Bj+oNoTPnai5+xkU+etCm48C1IgQICEg+4wgqceUTz7ynXl88DgIHaqPaJ7Ucz2+79mrvjOt0zLYuURrvesst7kZbbkSC5z7IZa/MUlxxchYDfKUkhlxQAHIHQ+sNYMOxfFpN0m49Y4kGTfJip9yfaR+FlyFdi46s+8sgcAcnhKQAOAAKlpw1RVGgI6ERHzJztGsiHDFTNM6kHtr+QsLzpBzNKUopSlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlK5/wDi+6i6i4rpfiGM6cX272164zpd1uqrZJWy4YMRDYPWUEEthyQ0SPTkDn0rOpUFMAneP8noJPILSnTNR2Ecfhl1yXQClRttu1Sj61aEYPqey6lbt9s7D0vj8WWlPRIT+h1Cx+itqz/MbZp7g2QZ3eVdMHHrZJucjvxyhltSyB9Z6eB9ZrXaB+rF4f7sz0WOzk7QGlg9qPVRVrlvb207drr8ndTNR2I986EuKtMGM7NlNpUOQXEtJUGuRwR5hTyDyOalzD8ptWc4nZs0sRfNtvsBi4wy+0WnCy8gLQVIV3SelQ5B9KrFovsY0Mv2LQNUtbtM4GWajZihOQZBNvK3JIRLk/hVMIaUry0Ib6g2AE+iO/NWPzzM8X0k09vOc5AtuDYsXtrsx4NpCQhlpHIQhI7cngJSkfEgVD//AG9N3j+0M4yETPPnw4qzft3t8G7TlvMxB4cuKyc3Jcft16t2NzrzDYut3S6qDCW6A/IS0nqcUhHqUpBHJ44HI59RWTqCNrGL5Desfd3CanRh8udR2G53lK7izWc+/CtrPPzUpbUlbnoVOrUVc8Djz7hdweQYfqHg23zSeLb5OouobjjjMi4IU5EsttaBU9OebSUqdIShYbb6khSknk8Dgy5rmOFNw85tHHdusMzkIJmLqGua8F4PlF54b+ugzMi0mFP9Krrq6ncJobp/P1axjVWZqJ8mIqrjfMcv9qt8ZqdDbHU/7G9CjtOR3UoClJDheB6eDye5l3SbU7GNZtOMf1Qw2Qp20ZFCRMj9fAW2T2W2sD0WhQUhQ+lJoBiBI0iesx3g9ihsROsx0ie0juttpSlQpSlKURKxloyXH7/IuMSy3mHNkWeUYVwaZdClxZAAV5biR3QrpUlQB9QQR2IrJ1XTc+Zuik6DuvxCM5zjqmIOcwWE8/dfH1OBKnFJHznoql+a2v1CfNST0q4qJAcMVgbTunInhv3C+kGYLgcNzu38Bx3bzbWRYuleS03S33y1w71aZbcqDcI7cqM+2eUOtLSFIWD8QQQf0166sQWmDmqghwkZJSlKhSlKUoiUpSiJSqR+J7q1uI0I0shag6T6pQbDbJt0Ysz0JmxtrmDzGnVl0SnVrA7tccJaSQFfO5He2Oktyn3nSvDbvdJbkqbOx+3yZD7h5W66uOhSlqPxJJJP56U/tGPePdIB5kT8PilT7NzWn3gT0BA+K2ulKURKUqh/ijaybjtBMIsuX6W6qxLHZr5dE2Z2FGsjftrZLK3C57W6tfqW1D3G2yAR3PBJo94ZE6kDuYHqrsYahIG4ntc+ivhSsRiMl+ZidlmSnVOvP26M444o8lalNJJJ+sk1l62qMNN5YdDCxpvFVgeNRKUpSqK6UpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKqh4i+ouu+jehNz1T0f1Ht2Ot2pyLHkxlWJuVKe894NFaH3VqQ3x1p4Hkk9le93HErbTcnv+abatNssyq6yLneLtjkOXNmPq5cfeWgFS1H6SaU/tGvePdIB5kE/AI/7NzWn3gSOhA+almlKURKUpREpSlESlKUR
KUpREpWvZ7AzS54vMiYBlFvx68rSSzPm2s3Btrsf+JDrfJ547lRA79jVOvC7171j17tmqV31jzR7IJ1qvcSJF5YaYZjo8tzqS020lKUglI+HJ47k0Z9o5zR7onpIFupR/kaHbzHWCfkrzUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpWvWTPsTyPKchwyyXhqZdsV9lF3Zb5Psq5CFLaQo+nWUJ6in1AUnn1oi2GlKURKVjshyCy4nYbhk+SXNi3Wq1RnJk2W+rpbYZbSVLWo/QACa/cdvttymwWzJrM6p2Bd4bM6KtSCkqZdQFoJB7glKh2NBeY0j1mO8HsUNonX5RPxHdZClKURKUpREpSvNcrhDtFulXW4vpYiwmVyH3VHgIbQkqUo/UACaq5wYC52QUgFxgZrU7brRpbd9ULnotbc1gSM2s0NFwnWZBV5zLCukhRPHSey0EpBJAUkkAEVutUT8NvBJmc5DqdvSyuIsXLU6+S2LF5o95m0tvHunn4KUlCPzRx9NXsrTCWsZjs4gEjcTeOgieMqsgveG3aCQDvAtPUzHCEpSlVUpSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIleC+32zYvZZ2R5FdI1ttdtjrlTJclwNtMNIHKlqUewAAJr31Q/xg8oyS3bfMawewyXI8fNMqjW24OIJAW0ltbiW1fUXEoVx/ErOoXAAMEuJAHNxAHxWlNrXHzmwBJ5AEn4KW8L3L6s7gmZF+22aS2pzDGn3I8XLc1ur1uYuakKKVqiRGGHXlt8j9sWWweCOOQQJ8xBWXrxyErPWLOzfylXtqLO665DCuo9PlqdSlZHT089SR35+FQfgWg24zT3ELFhOObksch2ewwWIEWKjTpv3GW0BIT1e29zwO6vUkk1YdsLS2lLiwtYAClAccn4nj4Vu8NbIYZHHPnuvw/ysGOc8BzhE9hw3/WmS/qo41g3GaIaBRosnV7Ui042Z3JisSFLckPAdipDLSVOKSD2KgngH41I9Vo0H0MxXOr7lW4bV/Cot6zLJr9cY0D7uQg99yLRFlOR4keO26CGgUM+YpQAKi6TzxWdy6BYRPwsOJnsDwB0sGyd8fEzyt3IHFTJpdrLpZrXYlZJpTndoya3tqDbzkF8KWws+iXWzwttRA5AWkEjvW51WLVrZ81b8gTrVtSlW7TbU63tnqaixw1ZsgZ9TFnxkAIIVx2dSOoHueSElOpJ8Q+7YoyMR1V2o6vW7URr8AbPZ7KmfCnP+gVFlBY62lK/GCVcA9uvipxNOVju/LfyzG6IJjC4XNxv/AD3c8jvmwttByaw3O+XTGoFzZfudkTHXcIyeeuOH0qU11duPeCVEfmrKVCm2LD9RIdqybVTWC2NWrM9R7oi7S7Q06HE2iG0ylmHCKx2UtDSOVkfjuLqa6lzS2xzgTwMXHQ23KA4OuMvq/XNKrDm2CWjcBuSz7Dr02HrNjWmfyYdURyGpl5dW44R/GSzFYV9PvirNSJDESO7KkupaZZQpxxazwlKQOSSfgAKhDaVHevuG5DrNOaUmTqlkczI2escKTbQRHt6fzeysNL/6Q1mWCqSHZBp/qGCOrXOI/Cr4zTaC3MkehxT3aAfxKsfhG55dcfs+o21fMllq+6d3x59hhZ7hhbhafSkfvUPt9X/TCrObtSrIsUxPSBhRLmpGW22yyUJ+cbe0szJv6DHiuJP+PVOdyilbNfEawvcW0kxcL1Pb+59/WkcNocIQzJKuPo/veR9ZSqrhXB5vOt4tihNLS/A04wqRd1KQeUiddXwwyefTkR4sgj6nPrrZjztDaNV2cw7dNMSRycGgcZWb2ig+qxthEt4CoYHVrnX3Qp2SlKEhKQAkDgAegFVF8SO5v3TTrT3RuO8pCdT9QLNYZgSeOqGHfMdH5upLfNW7qmu/gKY1Z2t3N7kRWdT4zThPp1r8vo/7KqoBNWkD9+n/AHtVpw06hGjHx0YVcZhhmKw3GjtpbaaQEIQkcBKQOAB+iue
WP3l/JPGYv0eYorbxvDvY4gPo2kw2Vnj/ADpDn89dEa512K1v4v4zV6emILbeT4h7VDKvRwCE0k8fT70Zz+Y1NIztbC77tTvgPylQ8AbO8N/c7Y2/4V7tT4rU7TXLITyApuRY57S0n4hUdYI/01TvwccimXjafLtMp1S0WLKp0SOCfmNrbZe6R9XU6s/pq4Gq05m16X5hcpCwhqJYbg8tR+CUx1kn/RVPvBwx6XaNp8y7SWlIRfMqnS45I+e2htlnqH+c0sfoqNl/a1/wM/vKnaf2VD8bv7FeulK/CQASfQUJi5RQtuH3TYhoG5aMZbsN2zLPMmUUWHEbG35k6bx2LivUNMjg8uKB9DwD0nirus+/De5ofAgZLqHs7seOWK7Sm4kWVJyVExDTq/mIfdYV0tE/SsIHY/Qa2Tw/WntbNW9at2eXNe03OdkbuKY+Xh1G3WyMArym+fm9SVsg8epQr98ec34r18aTteRp7Dj+133O8jtlntMRA6nHXg8HSUj1P7WE/nWPprM4mspvzc/Bbg8iAOMESTrpGeoDXVHsyDcQni0GTykG27W9pB0J1L3o5dmrMHW7bti2HYo5FcdVdYOUMzXfM6eW0pabWsqCj6k9IA78/AzbqBi0HOcFyLC7m0lyJfbVKtzyVDkFDzSkH/tV9cKs8rHsNsNgnPebJttsiw3nOeetbbSUKPP1kGswshKFKV6AEmtNsYxzX0gbQRO8XusNlqPbhqmxsY3G1lWDw181n5jtExKLd3lOz8XdmY2+pR5PEV9SWwfzNFsfoq0NU18K1Cjt6yWann2eZn98fj/QW+pocj6uUmrlVrVeahbUdm5rXHm5oJ9SoYwUy6m3JrnAcg4gegVXd1u6/ItJNUdM9AtPLXBGVamz2oybxc47kiHaoy3ktF0MtqQp9zkqIT1pSOnlXINaluQ3Aa37MsiwDJszza26g4Nll3Flu8d+yN2+dAWQFB6O4wrpUOnrPQtJ+bx1e9yN73G7lLHgereC6I4Ng9py3V7KFldkTcQERbLHWFBct94JLiU9Lbh8tvhS0tq7p7c1M8WXCswsWlWnd9zzUu45PeJeVpacZbjtwrXF/vdxREaKgFQ7j57zjrnHbqAPFY0TAY7OagBOhBc1uEDgDBO85yIWzm4nuZl5JjUGHHFPGLDcMoMq5e8vWnVnQXRO76saXYzi13asrKH5qrzMkJU2hbqG0lphpADvBcBPLzfAHYGoc0o1g3zbmNv+OZppTGwbEpkmG45Lv2RMLUq6ykuLBRCitBaWWE9IQXXuVKUDwngdSpF8Qj9wzqP/AJGi/wBZYr7eHB+4n0t/ydJ/rr9Gsnxmk5FsdQ78vzlZCpLKTwPaDp6YPzPyWt+H/u3zncZa8zwnV2zQLfnmnlwTBuSoKC2zJQpTiAro5IStK2XEq6T0n3SAOeK2HN9xWb5zuBkbXtvJtDF4sUIXHMMqukZUuNZGldPQwzHStHnyVdaeylhCee4VwoJrh4Zn7rHdH/l1z/vCXXy8LrJF3Dc1uYjZK7/4yz7z7YtLn7YUNzZaXQOe/CVONj9Iq1M/rBoucIxU8ZAtJAA7ScR5Rkpqj9XFYNvhqBgm8Ame+gWH8V+2a94toJZ7PnWYWbN8WnZJHdRdW7SLZPgS0MP9LTjba1NPNLSVkKAQpJRwerkEdD9E/wDeawP+TNr/AKq3VQfGZ/cpWr+WEH+ryat9on/vNYH/ACZtf9VbqNlM0a/42/2JtAirR/A7+8LcX3mYzLkiQ6hpppJWtazwlKQOSSfgAKqppZr/AKrbvcgymfoZe7VhOmeL3BVoYySZavujcb5MQAVqjtLWhphhIUk8rC1K6k9k8kJmTcw1e39u2prOOBw3NeJXZMUN/PLnsrnAT9f0fXVVvBpyG3XLazdLCw4n2yz5VMEhA+cA60ytCj+f3h/mmoojxatQOyY0Ecy6L8h8bq1U+HTYRm5xHQNn1Way3drrBtN1vx7TPdE7Y8lwPNFdFkz
m1W9VvejOBSUrRMj9a2yEFaOoo44SoLHV3SNL8ahaVbesKcSoKScvaUCO4I9kkV9vGnttvk7dcRuDiEm4R8wZainj3+lyJI6wPj3KEfzCtN8UuLeIOyTRWFkPX91Y860tTvMPve0Jtbgc5+vqBrJxNShJzZUY2d4Ja4dsuK2pjw67YydTeY3EAj1z4aKT8pyrxLb5p9Bz/RXE8Ax3G4FrYet+O3FS5WQXGOloEOOpUnyEKWkdQZS4laerpJKqkLYbvWZ3bYhdoeTWOPYc6xVxDV5gR+oMuoXyEPtJWSpI6kqSpBJKVAdz1CrG4P8A+Zdg/wAlxf8AVJrnVscxZ22eJRuMk44x0Y/bzcWJHljhpL709taEdu3PKHuB/FNdrv8AmPoG4IeZ1Bbx4zEZbtI4Gf8ACZWFiCwRoQ63pvz36zYjRnWrUzddkGoNz0+1AtuEYthWQvY3AjtWdufPnOtJBVLkqeV0ttKJ9xtCArgK5XyKkPbtnWr2RDP8e1xax9u9YXkqrSzKs8V2NGmQfY477MkodccKVLDxUR1cJPKfhVONZ9nuvGkuol03UeH9nplRsgecuV0xmO+haJKitSnQ0hRLMpoq6z5SuFoJIQSeAJF0Y3q3Hc9tw1ltNyxBeKap4Zi9yTdYDSFpQ677K8hDzaV/hEELQUqbVypBA7nntxioWbM549prLtOciCXA6iQcsg7KBbsNPFXDT7Ln2I3GQGnjcZ5kZ3vJ2Jap6ybp2L5kWg2a2HAcDtdyftNsv0+wm8Tr4+wel19ppTzTTEYL5SknrWvpJ9z0rQtu28jU5O5u/wCzjcnCsruW24uKsuQWhhUZm5oS0H0hxlSlBKlsnzElJAHSUkc9zGXhl6IbedatsFvuOQ49KnZHZbnNt91LOQXGNwoul1olpl9KEgtOI7hI5KT6nmrRxNp20jSTL7Rq0nBYtqyKFPjxrddpV4nvve1PKDDLafNeV1qUVhASQfX04rq8PwKwY4y2ADqSSBDhuM3AEC8Lm8Tx6bnNsZMcIJsd4jM562UGa974twekm6rB9CpmluPx7TkL0eUgWuY7dJ90juOONoZbUtDLbC1uN9JBQoJ558zjk1v2nd58RO4a73awao2DCLNgNwtD0m33S0pTNZtcnsWWj1rbefcB91wKSlB7qSQOKg/d3/wqO3f/AJjB/rkuuktY7OJ2YVXXOKo3oIA6jMbjlEmdq5iuabbDCw9cz+R3jObKhu2LdVuTzbd1le2nXW74jaZOJsyH2mrdZHG13cNrRwW3FvnoSppwOj3VEp+jg1N+6zULWfCLlp3YtE8gx5m+ZvkbVhTbrtaFywWS247ImBSHkFKWW0dShwQeR3BNVj8SCwXLb/r/AKTb38RiLCLbcWrLkiWhx5zY6ujq4/8ASMKkNEn96irG6aXS2697m71q7a5SJ+Jab2VnHMdkIPU0/cpzbcqdIQfpSyYrPP8AGWKmj9rTpzmwkP8A4byeDwWtG6Z0UVvsqj4ycAW9bQPwEF3EBbDq1rlkOCZNg+hWFNWzItUs2aWthyY2tiBBisIJkXGS22or8sdKghpKgVq93rHBNQVuf1q3ebMbfZNW8kzbE9UMJmXNu3Xm2Ixo2Z+EXAVJLDiH3SQehQCl9XB6QQrnkRnrTBwzJ/FpsmG6wxnX7DesSYttqT7e/DAeU04tsJcZWhXvPJcTx1cFSgODVtLpsP2rXyIq33rTORcIqiFKYlZHdXWyR6EpVJI7VVuN1NtYZkniIDiMMZCwnfJmYgCzsLahpHINHO7QZnPMxutvkmW9OM9x/VLArBqNizynbTkdvZuMQrHCwhxIV0qHwUOSCPgQa9OXzMtgY/KlYPYbbebyhP8Ae0O43JcCO4f4zyGXin9DZ5+r1rD6SWTTTFMJjYXpI1FYxvG35FqYjxn3Hm4zrbqvOaC3CpSilwrB948EEfDgbifSr1sLiTTsDly09FnSxNAFS5Fjz19Vzp2+73N325h/VDFMC09w2Jl
ticYZtolqebtdnbSX0vKkPcqXIfWtCENoASkkKUQlKTzLGI7lNwGkW32Het1GCw16qX7JzjOLWGA6y0q8vOlKY5WWlLbaSFFzqWPxEJPTyocwz4RP/n1uJ/lHF/102vR4pFzs9v1521Kz1LisJTe5KroA+4ykIL8RLqi42pKklLZ55SoEDng1DLmgwZ1RTB4SA4kZwSJE3uZ4K7xDqx0pl5HGBAB3ibxawUubhr9vh0T0xueutt1PwHIE46ym4XfEGsScYipigjzfJlqkqfWUAk8kI6gCeE9kn6P7rtRtZ9lknctt0j2qBkdjjvyLtYrrDVNQVxu8phCkrbIIR+FQrv1JKQQCe0iP7H9r9yhrjycBmyoslspWheT3ZbbqFDuCDK4UCD+Y1tWjmlWhOkVvv+k+j9ggWqIw8ibebSzJekBtyS10pLnmrX0lbbQ93n0APHvAmrgTTqMBgm7eBG85lvOTOsFGkB7H5ge1xHDQHiIUV7JdbdUtz23J7Uq+Ztj0fJZ0uTAbRBsagxaXmVkAONqfJfKkFtfzkcBYA+mo52lbm9ymqm5bPtCNZshw+0ytPStTkK3WJxLt1bQ95altuLkHy0cKaWD0qJDg9Kijalm1p2M7n9dNvOeTzBw/2SRmFicdV2LLDZeCUc+qlRldJ49VR+K0rKYeom2vX3RnfRn8qU1/dZuUj5XxHPmWuPLV+AjfmbhLaPB/HjKram5tXaKVRohj2gcnOkNn+Jrh0veFnUa6nRqUyZe1xI4tHmdH8Jb8tSr4a/Z9rbYdaNLNOdJMlx5lvN5UoXSLcbMqU7DgRGw5ImJcS8j4KQ2EqTwVrT39RWvblN1mUYHr3pvtf02gwYuS6gqS9Iv1ziOSo1siKW4gFthC0ec6S0585YSnhPPIPbN6Duo1d111I3BKWH7PanBp/ibgPKFRYi+ufIQfQh2WegKHqI4rw67bk7ZjuvGH7ftL8KsuS6v3xlbsabdU9MTH4SkKU4+84kearlDa1eS2UlQA5UnlPOTB+xBviOKNSCCQJ0GEBxn2fNNstHGfFIthEToCLExqQSW8YBFzfS9c9w2s+0LVPTSLn+ZW3PsE1BuKrPMU7Zm7fcLW+FNjzmlsq8txHDoJSpHPCCOeSDW7759xesG2bSmVqTp5gOM3q3xVsR5M26XN4LiuvLKEkRG2x5qOSjlXnpIKvmkcmql+KPheT44vRG65lqRdsqu03KHEO+Yy1EgRuCwemNFaH4Mcn1cW64QACsirG+Kz+4my/wD57av641WNZxGzYpuKmHp5LdMRvnxMLWi0O2gNIsWT1l4n+kfRUfOa7+Ipqnovp9qtoFp9i0mHMiRFXUTghE+7SFAec80w6pDbMML6kA9fmq4KwOjgmatVtyeanV/HNrui9ssz+pF2twu1/uNxC37djMEJBU6ttCkqfdJICG+pI95BUeFVteyz9yXpJ/JK3f6kVT3aDkin/FL3Bxcod4u0yNOjwPN7KLLEqOEITz/6lKD2+Ca7qrGjbXbKMpqO4+UezwBMHuBErjpvJ2Ru06wxv8xz4kCetzKy/iXWbcRiG1e/R8pz6yZ7i9zmQGbg8bIm1zrW6JCFtut+U4pt5lSk9BQpIWkrSQpQ5FWn2TfuSNJf5KQP9WKjLxV/3E2Z/wDPLX/XWqk3ZN+5I0l/kpA/1YrHZjNKv+Nn9hWtcRUon91/9zVNvpVQdZd6OX6R4pa9dpVix+Vpfc8uGMx4Ybf+7EqIFPIVcm3gvyukqYcUhjyiVN9KvMSVcC3jjaXW1NLHKVgpI547GqA7w8dseWbktr+0fFra1Gx+1T/lJNtzXJbbgxBw0k88njy2JKeTyT1VVsurU2D3nNHCJlxPJoPc5EAqzoFJ7zo0njlAjm4j00JC33cRvC1Z0Q09sesL2B29m0ZLdmLfY8WkW2TJu81twFaVyX0PNtwVqbSSGvKkKBUlJ79QTbm2TF3G2xLg7EeiLksNvKYeHDjRUkEoUP3w54P1ivS
UpVwCkHg8jkV+1YQARxtwEC3G956ABVMkg8L8TOfCBb4krR9btVLXohpNlOrF5t8ifExm3uTlRY/Zx9Q4CEA/i8qKQVfAEn4VXjTHUPc/rvtvG43H9TsexifcoU652rGm8aTIgobjrcSliQ+655zil+V3cQUAdXISeO9hNcdUcB0a0ryHUXU4pVjlrin2tjyUvKldZCEsJbV2WpalBIB7d+5A5NQdgs7VPcHoU7qNdbijTHBbrZJEqwYrjKG0zXIHlK8pUuapJ8sLSAoNRkNFKSAXDyQOaq4tpVntzaBfRp8x6k2tew3EropNDqlJjsi7qRbsBeTbMawtl2e7nr1uo29J1OhY1bYeUxJEi1zLeuU4zCVOaSlQKXAhxaGlpcQr5qynkj3uOTAOke9bdhqTuN1J0Ib00w+Re8XbegwGIr7zdrgvNSA25NmylkvONAEdKGm0qWVJHSnupP38F39zHkg//bOV/VItavsn/wCEz3K/4k//ALwZruewHbm0/dNNzo44Gn5nlpBXEx5Gxl+oeGzwxEfIc9ZWyyd0m7TbfuiwfSXc7NwrJcU1JfRGt9xx+E5G9idcdDQ6erhRCHFt9SVhXKFghXPIq0O6Lchje2TTc5ndra9eLtcpbdrsFljrCXrnPc+Y0DwelI45Urg8AdgSQDTnxRf3Su2D/L5/r0Kvl4nWRuWbdXtlXkDnRjMC8tz3iv8Aa+sT43mqVz27ISj9BNY0ft6dFhsX1HMngCPWJE71vV+xqVXi4bTD44mR2mJG6YVpU2HeijETm8vUnCHck9lMxzCkY4pFsJ6eTDTO88vhzj3Q8QU9Xfyymqu+Cu87IxXV999gsuOZDEWtsnkoUW3SU8/V6V0jkEGM4QeQUH/qrnF4MX/kDWX+U0b/ALD1TQP21YAWwDp529e8/FUqD7JhJvjHXyv6doz5R0iqje9reNuB226p4BidnxDFjjGY3RDbcuMt+fdH2Gn2kvNpaWlppp1aXUhI/CDk/OB71eSubPiv/wC/Rtn/AJRP/wBagVWmMW00GaOeAfVXdajVdqGk9gt93O6seIRpZg03cFZ4em9kxOyFqRLxBbbs+4tRFuJT1SZHCW1rBUnrSwpISCelS+Oo2T0f3BYtqZt2s24e5luyWeXZXLtcg4vqTC8kKElPV6qCFNuAHjkgDt3rVN/37jbVf/IK/wDWIqpFjeurHgnSV2grDyrVIQ4UevkKvRDv6Ogq5+rms31DToVoF24SOGLEL8LA91dtMPq0Z94lp6YTPO5Cs1imVbndyuCtaraX55jel2O3cOP41An42bvPnRQopbkTHFPoQwHenqDbbailKgStR7VqOzneXnmvN/zzb/qlbbTjWq+ECSyZkFhTkKUGnfIW6GVL56m3CgqSF8KCwR09wMfsy267Z9X9sWn2bHFpkya9Z2otyW1ktzbCZrA8p8FtuSEo99BPSABwRwOOKmrCNtO1vQbUS35hh2GQ7BmGQqftsOUu5zJEiaVNl11AS66sL9xoqJI7dPPIrqfSFGs6k72YI3mfdINuuhmwC5mVDWotqD2pB4R7wIvlxuIuTdQPtJ3SbjdTN0Od6Aa3XnEbZKwRDy/Y7bZXG3boEOhsrQ4t9XQjpW24PdJIWPTvXw3Kblt0WhW6DAtKXcswpjBdR5rLMC+yscdU7B6nw0404BJCVqR1tnq90EOAkDg1pm+aI9tf3oaTbxbS0pmyX19FgylTY90hKfLWpX0lUZZ4+uMDWM32YTku7a2amaoYXdZCsd0EYbiWARVct3K5JUh+7PJI9QyyGkJI/GbVXMyqBTo1X5MJFTiGm5O4HEyOe6V0OpzUq022xgFnAnIDfBDp4X0CuzuWzjNNJtvmR5/jmT2yPf8AG7cJSH5trL7E+QAEpYDKXUlBddUhKeFHpKh2VW4aSnURzTiwSdWJVuey2TCbkXUW+KY8dl9Y6iyhBWs+5z0dRV7xSTwOeKqLp9rOzvTt+gGFsPpkstQ0Z1n7aTy
lLltc8iPHWPTh2enzOk+qGeavRXQ5hpF+K8uIG6G2kc3SP4QRnfBrxUDItAk8zoeIEH+LgvDfLxCx6yXC/wByWUQ7ZFdmSFAckNtoK1ED49garFt/1N103WaYO634lqXYcNhXKZNYsVgRYkXBEdth1TaPug8twOLcX09Sg15QSlSeOfWrPXs2kWaeb8phNsEV320yCA0GOg+Z1k9gnp555+Fcv8x2w7r9jGR3HV/Zbk72X6azlm5zMWWTKKGCOrhcfn++EBPAS8yQ8E+o4BUecODXu8Qw2LH7pnXmMjeIK3LS5g8P2py3iMhxBN99ldrSXLNddWtvc2bfLjj2GamCfdrPIksWpyZBgPxZzsfrbjrfSpz3GwUlTnHJCiCPdNGvC2ha/wCXSdXbxiur9kgSFZLGXfZF8xt26yLk8fOJcS4mWz5RPCueQvuofRwbt7Od1ONbs9JXs4tNjVY7tbpK4F8tZV1pYl9AX1IXwOtCwrkEgHnkHuOTVnwaf9w63fyli/8AVIrdjXM2ipiF/DB4e0z0OccpyWLnB9ARl4nUWfbmIiecZlTzrrupy6zbnsE2jaYsQLZfMtjG4XHI7nCXLagRuh5YSxHStAcdUGF+8tXQnlPIV34wGqW4jWLaxuA0wwPUjK7bneEaoylWxExVpbt9xtUsONN9XLKvKda5fbJBQFcdXft32zV3cjEY3HWHbno3h9iverEuEt6Xe7u1/emOW8o81SnCjh55RQAoMIUgHqRysc1VbxFcNyDF9aNtM3KdQ7zll1n5Srz3ZaGo8VjplQPdjRmUhLSfePzitZ4HUtXAqmzXq0Gm4e8gneCSIA0jKbSQYMyr17NqxaGSBuIEyTxzi9otBCkLxjjqXB0Dg3Gz581b8Qk3aLb7lY2ICg/cHlh1aVuyvN48pPlJ4aDY5V7xUeEgWM2v2vVzH9LsYvepOp2MXbGRiNvdhRIWNrtz0JIjtKCnpCpbqXQlsEH8Gjk+929Kgrxk/wByZB/ldA/1MipZ1Deusfw77q9ZCsTE6TgtlHPUB9y09RHH8XmsWVDQ2Xaqrc2uH9hP+t2i1dTFbadnpnJzT/eB/vfqvPhmpOum6qwz890Rziw6cYKJsiFYbjPx03i4XsMLLa5RQt5puOwVpUEJ6VrISSSnsK0vatvM1EyXX7K9pG4m1WdnO8bW/wCwXe0NqZjXVtoBR5aUT0LLSg6kpIBTyClJHfRPDp0H2560bVcYyC743Lm323PS7beCzkVyY6JCH1qTy01IShHLS2ldkjnnn15qyFk2r7T9GM7s+qdswiJZsqXNRb7bdJF3nPvuyZCCylpIdeX1qUhShwQeEgnsE8jsNMUK2B12xG8kmMLgeJvAgGYAXKHmvSLm2dM8gCZaelpMm0qe68t1ulvsdrmXq7S24sGAw5KkvuHhDTSElS1k/QACf0V6q0XWO56ZnDJuD6nZracdg5vHfx1lU64tRHJK5Lamy2wXCOtzhfYDk+nauaoXhh8O7tOJ0HUrophpcMeWvLX0Uc6fasaz644d/dbwN3C8Lwial6RZlZFbZVym3CG2pQEp4MyY6IiV9JUlPLxCeCe54rD2XUC6b2tqOXxdNbza7PdLyudibl0R7QuAvpWlt+RHK20OltbC1FHUgEKV0nnjqNWpOjPiR7IsNmL0o1LxzUbTLGGnpabJNjpW+3BRytwFl1IcSkJ6iUMyD8eBV6NrWeYxqhoDhmoOIYdBxW3X2B7ULRCjoZYivdakvJQlAA6fMSsg8DkEE9zWhZTqtfF2QBBzvNzzAItAmcrBUD30nsJs6SZGVtByJBveN9yt10/wiw6a4PYtP8XiiPaset7FuiIA7+W2gJBP0qPHJPxJJrYKUqXvL3FzsyqsaGNDW5BKUpVVZKUpREpSlESlKURKUpREpSlESlKURKUpREqJtzu3nG9zek87TS/z3ba/5zc+1XNlAW5b5zXPlPBJI6h3UlSeRylShyDwRLNKq5ocIP0RcHmDkrN
cWmQoUsN73Z2/H4uM3fTfA7hfYzKY7mRfKh9q3yCkce0GMIhfSo/OLIPHPIDgHepdsbV6Ys0JnI5sOZdUMITMkQ46o7Dr3HvqbbUtakJJ54SVqIHxNe6laFxdJOZ+uQ6fksw0NAAyH1zSlKVVWSlKURK/DzweBya/aURc3rdn3iVaw5/lG2/PNLW8cxLIrjJjzcvXaHGhbLItwhxqLLbWI76yzy2g8Kc5XyT8U9E7HZbZjdlgY9ZYiItvtkVqHEYQOEtMtpCEJH1BIAr3UozyUwzW0nUxYTyv1JOqP89Qv5wNBOcc7dABooI3p7a4O6TQm8afIDLV+ikXLH5TnADM9sHoSVfBDiSptR+AXz8BWpbAtDdZNINOLnctwMpp7N765DiqbTKRJXFtsGMmPEYW62ShSkgOElKlD3hySeatJSlP7LHh96J6a84ty6o/7TDPuzHI6cpvzSqveIthN3yHbyrOcaiqkXrTK+W/NYjaBypSYjnLwH5mluK/zKtDXykxo82M7DlsNvMPoU0604kKStChwUkHsQQSCKo8OgFntAgjmCCOkhWYWgw8SDIPIiD6FY3EMntObYpZ8xsUhL9uvkBi4xHEnkKadbC0n+ZQqFNx23TIM6zvCNftJLjbrfqVp264IiLj1Jh3iA4CHoL60AqbBC19DgSrpK1cjvyPbobiuS6CX6ToW9bLhccDddkT8Ku7TSnUW5hSi47apahz5flKKiy4r3Vtno5C0AKnWtX4S4VKdryN44HlcHQ3zBvmyWtNN97QdxG/rmNQdxFq36wR9xWvWnk/SCyaXK05byeMbdf8ivN5hTEQ4a+0hMJmI445IcWgqQkuhgDqJPFTFpPphi+jOm9g0uwuOpm0Y7CRDjlfBW4R3W6sjsVrWVLUfpUa26lVEAEDWJ6THaTHMqSJIJ0mOsT3gdlTva1phuP0V3G5hj2sWtvy8seZWyff7YwqZJfXCLE2O22pSHkhMcrRJWny2iU/gh390cXErSMMwW623MMl1Byy4R5l5va0QojcYK8mBamFLMdhBUAStRcW64rgcrX0j3UJNbvRtqTGHQR6mOoEA8Ra0KzjNRzhqZ/P1mOEayq06a6O6p7Y8jzS3aUYxZMwwTMb29kcSBIu5ts6zTXwkPM8qaW29HJSkpIKVo7jpV61lsb29ZRm2r9u183EXK03C9Y42trEsYtKnHbVj/X898uupSuVKVwPwpbbSngdKeySLAUozyYYzaIHAAQOwtOfFQ7z4p94yeMmT3NyMkqO9w+o8PSPQ7N9RZjwbNlssl6Pz6rkqQUMIH1qdUhI/PUiVAuf4Xf9weqNrxm/WWbbtMcAuTV1ne2MFo5NeGuFR2W0KHKobCj1rWR0uuBKU8hCjWdSn448GYxWJ3DU9BlvMDVXpv8ACPikTF43nQdddwk6L77I9KZujO17A8Ku8cs3Y2/7pXNChwpMqUtT7iFfWnzAg/4tTnSldFap4tQviJ9OHRY0meGwNJk7951PU3VF94OzPXfMtxmJ7qNtWU2KLlNhYYjPwby6ptslorCVpISpK0KQ4pC0HpPHcEk9sTus2a7ttz+kdpkZvqXhz+cWa6NzomPWeM5CsUdooUlzpedDj7z/ACUEKWUoACkhI56jf6lYtbhYGDIHEOBmbdf8LUvJf4msQeIiL9FUvWHRvd7rNtVuOjmST9LXcsyltpm5TG5c6DBtjDa2VJbZQGH1yVq8pRUtSmgCs9KSAK23ahphrvoFt8tekeU2XA7ndMWjOs2qTAyOYlicVvrc4f64AVHADhHUkO88eg5qw9KvPtke9E9Mu0n5ysw0BrG6Ny6xPeB2tCo5s42mbmNvGtufakZu9prdrZqRKVKuLdsvk8SIKzIce6mkuQQl0DzVp6VLRz2PUK8et2xjWXGNxbm6zZzmljsuS3Ja3b1Yr11oiTFr4DxSpCVBSHeApbaunhQK0rB4AvfSqgYcGG2AQOW7iOf5K5OIvLr4zJ57+B5fmqCbkNr
e9vd7pda8Z1PynTLGZEW8sSm7PZPazES2G3UuSZL7gcW44nqCW2mwEcLUVKJ46bcaD4Dnmmem1nwrUHP4uXTrREYhMzY1pEBKGWm0oSjpC1lZAT3WSOfoFSHSrNOAOa0QHGTzAj67ZWVXDEWl3uiB1M/XfO6/laEOIU24gKQsFKkqHIIPqDVMMN2h6sbUtZsi1H2sv49fMIzJQcvODX2c7AVGcClKQuHKQ26n3StYSFpHCVFJ6uyhdGlVAwvxtscuh0P1yVj5mlhyz6jIqsOS7c9QNx+pmJZ1uOjWGz4pgkg3GzYVZ5zlxEueeOJE+UtppKgnpHSy22R3PKyCQffvq2oXHdzpLCwex5ZGsF2s90RdoTsphTkd5aWnGy050+8gEOc9QCuCPmnmrH0qHNDmBmQBnrIM87DoAMhClrix+PWI6XEep6knO6rji9x3xRMQh4NM0z0qg3eHDbhDKU5VLehDoR0h5MD2MOqVwAegvJBPqoCtx23bcsa26YncLZAuki/ZHkc9y8ZLkEtsIkXWe4SVOFI5CEAqV0oBPSCe5JJMu0rQvLnOec3ZnrPS9zETAnILMMDWhgyGQ6R8MpyvGZVZNCMN3WaEYlMwm9Y7hWdWoXObMszkTIn4MqExIkLdEd4OxShaUlw8LSrkAlPSoAGsltx2xT9OM41M1m1Hcs8jMNVJ3nT4FqK3LfboaeeiMhbiEKeUeeXHFIR1EDhI7k2JpVGgDicOHpae8CTmepm7iXSNJxdbx2kwMuwXNdewLdjtp1cvOdbI9UbDFxvIHSp+x3xwpDTZUVJacQptbbyGyo9DgKXACR8SVWV0Z0D13uOWW7VLdtqla8sv1h614/j9hjez2W0vLSUqlHlCFSJHSpSUqWn8GFK6eeQRZOlWYSxobnGU3I5fnmqvGNxcbTnFp5/XDJUQ1y2o7stVN2WE7lbK1pPbY+BiKzBtMnI7i4uW00+44ouOJtwCFL81Q4SlQTwO6vjd+wv3+RamHsntlvt9zUD58eBOXMYQeTx0vLaZUvtwe7ae/bv61kKVDPJTFIZAk9Tn3/0pf56niHOAOgy7f7UX7mdGIG4LQzLtKJgaD16gLEB135rE1v347hPwAcSnnj4cj4149qOh0fbroJielfLK59tieddX2jyl6e6et9QPqQFqKUk/ipTUuUozyYsPvRPSfzvyG5HefDPuzHWPytzO9VG327FnN06LFnOAZSzi+ouKjot894rSzJZC/MQ24tsFbSkL5WhxIUQVKHB5BTqGA6QeKDk1vYwnVzcFiGOY4lIjTrvZYqJN+kMDsQ055CG0LUnt5pIWknq4Jq9FKhjQwFubSZg5TrbjrodVLyXkO1Fp1j/Gm5YDA8HxvTXD7VguIwfZLTZo6Y0ZsqK1kDupa1HutalEqUo91KUSe5r7ZbIzCLYpDuCWez3O8gAR492uTsGKfpK3WmH1jj6A2efTketZmlS8mpJJzUMAZAAyVGNku0/dFtZzXN7xlH9y+/2vPpjMyaIOQXBl+E4hx1RU2ldvKXBw8sdJUjuE+8O9TbvK2n45u50r+RFyuf3IvVske32S6+V5gjSOkpKVp5BU2tJ4UAeeyVDukCp6pVajRUYKZyEAdMu0KzHFlR1QZkknrn3XPfSTQzxWMAs0bS1vXLTuPjNtQIkS8zWlXKZGjDsAwFxwpwpHzUvHgdh1AACrl6L6RWvRrDzj0a83C+3WdJXcr3fbkvrmXae4B5kh0/DslKUpHZCEpSOwrfaVqXuMk5nM6nr68TBOQWYYBAGQyGg+vS8ZlVO3T7HLRuM170q1VkORWoGNPqYyhhwkLnwGyXmGkgD3uXeptQJHuOn6OKkDeZt7/ZLbe8h0vt4itXlQanWR589LbM1lXKOSAekKT1tk8dgs1ONKxLAaXg+7JPUkGe4lah5FXxdYA6CbdjHJaNofpbadE9I8U0rs3SqPjlsZhrcSOPOeA5edP1rcK1n/ABqqRuh2abhrhuqsm7XbDlG
ON3+OyyxNt99dW22FNtFkkdKSFtLZPSpPKVA8lJJPu3wpWlRxqVRXJ8wJPeZ6XWVNop0jRHskR0t+S5+br9l27XcZhGKZPf8AU7ELhn+N3UTGbFb4zkCxQ2VAdXkuOBx953rQ2St1QBSCEpT36t73JaD7xNxO3P8AuOXybpKL1e5EabeLm3PuEOND8pwLTFjRxGfU4OUJJeW6kkqUA2AAauRSqOaHNLIsTMcbfkCdTGcEzo1xa4PGYEdL/mY571C+1LBtZNKdI8b0q1Vt2HKOK2xu2xrlYLzKk+1pb7IK2XojPlno45IWvkj0HPaCt0+xfUbJtcbZuq2tZvbcW1GhFszoty6kxJ6kI8sL6kpXwpTf4NaFJKVjjuk8k3dpWlRxqVBWJhwMyN5z77slSm0U6ZpAeUiI4KiWuGgO/LdRohcNPtUr7pZirrr8NbFtsJllE1xDyCtyZId8zobQkKUlplJUpfTyoAdJsTtV0d1F0K0ms+m2fakQMsFkiNQ4Bh2j2NMRlHPDfWVqU9xyAFkIPCR29amSlGnAHBo9ognmBH12ysjhiLSfdy6/X0Uqs2N7b85c325VubzGXa3cdZxmPYcVYafUuQ2SlHnKWgpARwQ+OxPPm/nqzNKq3yvFQZifUEH0J7qTdpZoY9CD8QEpSlEUL7wNvjm53QW/6SxL03abhNUxLt8t5JUyiSy4FoDgT36FcFJIBI6uQDxwYI26aAb7LDplF0J1W1NwnHsHtMRy2MzrFHXNv0iGQUiOh50BhlsJPAcLanQOw4PChd+lVDAA9ujsxplE84srFxOE6tyOu+O9+aozsW2wbtNq8XI8GvF308l4WblJu0NpDskzbnKUyGm0F/oKYrBCG1qJadWCngDgk0277Ud0OkG6rPtwuRt6X3GBqIuSmZboeRXBL0Bt6Sh4FtSrf0ulAR08Ho6ueeU1ealaB5FRtU3Ibh6EQe4AHCLQsy0FjqYsCcXUGR6kn4yqO7y9pm5jcbrPgOoGFvaa2qz6cSxKtzVzvk8yJy/aG3Sp1LcEpaB8lCelKl8dz1H0Em7uNo6N4uj1ux/Lnbfi2b2ZxU21zYUhyfFivqHC2S4ptlbjLiQnk+WlQKUkA9PCrK0rLAPCFHQEu4yTJPcD/S1xkVfGGcR0Fo9T3VMtIcK8Suy47E0nzvL9KmrLBZEE5igSpt49lA6QWmyENOPBPASt5A+lQWfXxbCtlWuW1ibfZ2V6r2Fy15HKRKm2KFblSluKQFhtRmOFsoVwv3gltQPHY/GrtUrYPIeampEE77zf645rLAMHhjIGQN2lvr0SqQ72dpu5DcvqjgOV4S7pxarPp1MVMhC6Xyd7TPcU8y4S4huCpLQ/AJHAWv1J5+FXepWY8tRtQZtMjmrky1zDk4QeSr9ug06171y283bSrF7LgVpvmVxfY7o9NyKYuLBbDqVfgFIgdb5UlJHvIa6Sfxqwm1fbfn2nu2Ze2TXq2YfdLKiHNt/tViu0mQZkeU44taXG3orPlKT5pAUlaueAeEkVZylMLYe0iz8+kx2k90k+Qg3aZHNc0dP9k2/jahlV1tO1vWHEbjg94lF9MTISoBs+gW6wWlhLgSAkrZV74SOQOABbbQbQjUjHL+7qxuJ1KazvUV+IqBFVDjCParHEUQpxmE0Ep5UspT1vKSFqCUp4AB5nilXa4tAm5AiTnGXwtvi2RKq5ocSRYG8DKf8Ad903UJbydvo3Mbfsj0wiGM3eXUonWV+QelDU9k9TfKuCUhQ6myeOwcNbBoRodjmi2hmP6LR2WZkS32wxbitSeUzpDoJkuK59eta1nv8AAgfCpNpVA0Br2aPieMAj4Z8huViSS12rZjrH5W5neqvbG9nEbaZb88TIkR5c3JsgechPtqKi3aGlKENtRI7L4WtSgO3Kh68VaGlKsXEgA6ADsI/3xVYAJI1JPcz/AK4LBZ5ZJmTYPkWOW9TSZV1tUuEwp1RCA46ypCSogEgcqHPY1CGj1s3c6T6
VWTTfJ8PwTNrnY4DduhXuNk8iG2402gJa9qbchqUCkAAqbKyoDnpSeasZSqgRi/eiekx8SrEzhnSY6xPwCgfaHtka21YLebdc7rFueS5deH7/AH6TDaLURMh30YjpV7wabHYE8E8k8DkAV10D2gbvtsurufwdJsq08Tp9nM8Sjc7uiRImwGgtwtrajI6EqfQl1SeFqLaiEkn1FdAqVYGHh40GGNItA6QIUG7C06nFOs3M9ZPC6oDrhsu3M2PdlC3W7ZMyxqZd5UZiPdIuSrU0FuJjJjuFaWkdK2nEIClBHQpKuekenHx3TbLd22uEbT7Up7U/EbzqHiF19sFpajrt9igtlTbiRG6gt51QcaT1reVypPASEdPCuglKq3yBob7pkcLzA3D/ADGal3mJJ1EHjaL7zH+VVncztl1b3ObUXNMs5ynF06iiWzeGn7dGej2lEptSuI6esuO+X5a1I8xXKirhRSB7oye2HTfdDa8DtWD7krthAsFisn3AYtNiYckPXRkNBlK5z7vupCWxwEMpT1E8qVwOk2TpUiBjEWfmNMo+FvXO6gycBm7cjrnPxv8A4suado2F7yNrOpl5vuzHVjHTiN/e6nLTf3FDy0ckoQ82ppaHOjkhLqFJcI5HA5INotEdBNZhlkTVrdTqhAzLLrW041YrVZo3s9lsfmJ6XXWklKFPSFJJT5q0gpSpSRyDzViqVLHFgAmYsCbkD61zUPAeScpzi0/XZKr3uf2xQ9bsr0+1ENjteRzNPZj8lqwXe4Pwoc4OFpQV5zKVlDiFsoICm1oWCpKgAQoWEpVYhweMwZHAq02LTkRB5FQXntu3EaxYvcNO28WsenVqvjC4F2vbt7+6U5uI4Cl5EOO2ylvzFIKkhxxwBHPPQo9qlXAsHxzTTCrJp/iEEQ7Nj8Fq3wmeeSlptISCT8VHjkn4kk1n6VYWBA1ieMTHaTHM71U3IJ0mOsT3gdkpSlQpSlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpStF1v1gxXQTSzINWMzU6bXYI3nKaZ4LshxSghplHPbqWtSUjnsOeT2Bqr3tptLnZBWY0vcGtzK3qlU/0lzvfnr5hVu1lsMzSXBbFfGvbbFj1ztc64SH4ij+CXKkIeR0FQ7gtp+aQSkHtX10a3T69ZHuuXtw1t0qtGGvwMXlXTz7fMXLYvDqX2Q3JjOLSkpZ6C6Og9SuoEKII6RoGOx+EbOvY8ASeE2yz9VmXjB4gu21xxIA6Xzy9FbuleG93uz43apN8v9zjW+3w0eZIkyXAhttP0lR7DuQPzmvdVVZKUpREpSlESlKURKUr5SZMaFGdmTJDbEdhCnXXXFBKG0JHJUonsAACSTUEgCSpAJMBfWlea2XKBebbFvFqltSoU5hEmM+0rqQ60tIUhaT8QQQQfrr01YggwVAM3CUpSoRKUpREpSlESlKgDeVkm6rGcAtEvadisC+ZA7d227kiQhpxbUPpV3Qh1aUkFfSFHnlIPI47qFXOwxzA7mP8AfBWa3F6+glT/AErHY67e38ftj+SxY8a7uQ2VXBmOrqaaklALqUH4pC+oA/QBX92+92e6yrhCtlzjSn7VIESc204FKjPFtDgbWB81XQ4hXB78KB+NaObhcW7lm12JodvXupVcNZcp3i2zctp1Y9I8LtNw0ompb+VE98NFbXLqg/1KUsLb6GuhTfQk9SiQer0Fj6q3zMx8SONteR0Oqs7yvwcAeF/mNdyUpSiJSlKIlKUoiUrws3uzyLzJx5i5xnLnCjtSpERLgLrLLqlpbWpPqAotOAE+vQfor3UTglKUoiUpSiJSqsb89wmvm2vTCVqXprjOES7PFfjw35F1kSnpaFvHpC0x0Jbb4CuB3dVzyD0+oqZNvGaX3UfQjAM+yd5p275DjsC5TltNhtCn3WUrWUpHZI5J7Up/aNc8ZNIB5kE/AI/7NzWn3gSOQIHxKkOlKURKUr5SpUaDFemzH22I8dtTrrriglK
EJHKlEn0AAJ5qCQ0SclIBJgL9dfYY6PPeQ35iw2jrUB1KPoBz6k/RX0quOgcyVuPyZ/ctkjTnyXiSpEDTi1u8htuI2pTTt3Wg9jIkELSgkfg2hwn56ibHVaCAMVidN26eO8aZG4VZEkDIW/Ppx1zFoSlKVClKUpREpSlESlKURKUpREpSlESlKURKUrTNZtRIOkuk+Xal3FSQzjdnlXABR4C3ENkto/OpfSkfWazq1BSY6ocgCeyvTYajwxuZMLYLPk+NZE9PjY/kNsub1qkGHPbhy23lRHx6tOhBJbWP3quD9Ve9l9mQkrYeQ4kKUglCgoBSSQodviCCCPgRVOPC30Xuun2gsnVHLvOVk+q885FMU7z1CMSr2fkH4qC1u8/+uH0VvurNz/Yw6h27WW3L9n08zK6M2vOoXcMW+a+QiNeW0+jZLnS1I47LCkLPvJJO76Zp1G0nZmAeDjFv5vLO+JtJGLXiox1RmQkji0TfteNRleAbHUr8SpKkhSSCCOQR8RX7VFdKVXLd3lG8HG5eBp2rYbar4xLuikZGqYGlFprlHlpPmLT0NKBd6lp5UOlPcc97FtlZbSXQAvgdQB5APxo3zNxcSO0X5XsjvK4N3ifUjvZf1SlKIlKV8pKpCI7q4jSHX0oUWkOLKEqXx2BUAekE/Hg8fQagmBKATZfWlc0pm9zV7Bd7GQYprznVpgYdgNmdkv49isZTjdwnPsNCPEbW6kPy3+uSgDnoT1IKuhCQSLF3rTzeJr1jovkzW5vQxie35sLHbHZW7jOjNkco9tmurSfN4PvIYCEp9OpRHNBLqbarRIIn1I72m02g6qTDXmm4wR+QPz1i9s1aKlc8NjutO6Gx7rs72la85grOGMbgPTW7u4gKcjqQpktrDoAUpt1DyT0uclKuACO/PQ+rwMDKjTIcJHK4+SrJD3McILTB7A/NKUpVVKUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiVXrfxork+vm13LcBwpsvX0CPcoEbqCfanI7qXCyCe3UpIUE89urpqwtYG+5ziuM5Bj+L327Ih3LKX3otoaWhXEp5pourbCwOlKvLSpQCiOek8ckVnVpiq3ByjnMj1V6dQ0nYx9DX0XMzYZ4kuN6a43bdt+5mNLxp/GD9yrbfH2FhDLaFdKY01vjrZU38wOcEcABfT0lR6Jy8DwfULN8I1vtN3alysdiTm7bNt7zbsedDmNJSpJWnnrRyhC0lJ9R9ZqNd0GxbQvdLb5ErKLCizZZ5XTFya2NpbloUBwkPD5shA7DpX3A7JUn1qhOzDONaNle8gbN9Qrsq54zfpwhNsJcUuO28835kabGCu7Yc90LT2+ceR1IBroZUO01QypaoZg6OOR5Ez1nmVi+n+r0y+ndgzGrR8wPqLBSf4tWoGulmvGnmERHLHBwG/XppbDMec8Zl0lx1srAmDyglplKnE9KUKc5I6z3CUi+GC5FqpEst5vOu1jwrGo9ubEhp6yX2ROZDCUKU84+t+Mx5YSAD26hxySRx3oz4xH/lTQT+Ukv/ALUSrEeJTdr3ZtlOo8mwqcQ89FiRX1t88iO7LZQ96fAoUoH6ia5fENHY3PFzjcL74ZBPz4ZLoFMVdpYw2Ba3+54+uK++nm43VzchCuWV7dcFxqJhEKW7BgZFmEyS2q9OtnpWuPEjtlSGQoEeY4vkn8TkEDxaTb037jrfK2xa/wCBt4BqO0Ou2qjzva7Ve2ykqSuM8pKFJKkglKFA89KhyFjor1+G3Ots/ZTpkq2eX0sQZLDwR8HkS3gvn6yrk/pqrni12yTYtcNveoGKpU3ky7i7EYcaH4RZYlRXGB27nhby+P8AGNdjqYo7YzZs2l2AnXXzDqMsoPVczXmtsz6ws4AuA5XLT0mTnI6Kfd3O/mdtfymwYjJ0WvDgyKZ7PFv11nR49tU0hxC
XnWwytx1fQHAelwMnuD3FeiJu13B3HcRieCMbVL/F0yy1xSIWTyesvmOEqPtbqEAoio7BXkvdLnQQexPTUHeL62h3MdvLbqErQvIpaVJUOQQXYXIIroJqLdJ9i08ye9WoEzbfZpsqMAP+NbYWpH+kCuVtTwtm/WX3wufI3hoFvXTjnaOhzDVqigy2Jrb7iXOHyv04zE0jXrUXU7JMlxXbLiWM3pnEJptd3yXJro/FtYuCQC5EjIjtOOyFo5T1q9xCSeAVGtJ0L3uXbJtervtX16wGJheo9t6lQ3LfOVKtt1SG/N5ZUtKVoJaPWkHnqSD3SodNQN4YmlrWq22529xNd9Tsenx8ins3KBYb0xHjh5XQ4HShbC1dS0LSSSo88fVViG9hGkFq1fs24jJtTNQ7vlmOPsSGbler3GUhQa91CHSI6eUcKKeOQeDxzXQKZo1GNqXBAxcyAZHI5DdmZusC8VWPNPME4ehiDzAud+VrK0dUM8XPPta8L0FTDwuRaLZht+ltWq+TUTHfupILocUIzbQb6EMFLfvr8wqVz09IHUVXzqiXjJ/uTIP8roH+pkVxbX7A/E31cB8117N7Z5O9ASpn29ZTr1atHMcn5npdj02zQ8TtirQxit+XLuk5XktBCVtTGYzDXLZKlHzyARwOfWoa008SbLNYMm1AwzANuN2mZPjKUMWbHVXJtMyXISt1MhyW6QGYrLXQkH3l8qWlIJKwKtloT/vI6e/yVtP9UbqiHhittndVuhdLaetN9WkK47gG4TORz9HYfzV6G0Av2+pSJtFQ8ZBH5rhoEM2FtUC80x0Onopz043tZPb9t2Ya77m9H7rp7IxG6Ltv3MEd5DlyUfLDYYQ+Eq5LjnlkklPKSeeAQPvO3GbklbfRugs+nOAPY19xxkvycN3lruRtnR5hPtQaDIeDXvFHlkDgjqJHeXdy920NsmjN/uG4yPbpODNobM6PNaLgfcCwWkNoT7ynSsJ6Qnvz35ABNQJqDedTtRtnOY33FMXtOlOmzeB3ByzWh2KmXd5ltTCWWgpKSI8BC2wn3AH19J+c2a461UtpVajc2gRuENMzxcRMXtMDVddGmDUpsdk4md5kiI4AGJteJOinjSrX21606E2rW/TfFLpeUXeKXWbK09HalCQhwtusFbziGgULSrlRUOQOQCSAa36J+I3meud+znEcQ25XORlNgkMxLTYWrmklSgp1Mh6fNWhLEVpCkIHA6lFSulIWfTPeEySdlmNAnni63QD/APGVVD/hSf7+W53+UTH9cuFddWmBtTqY9nAXcvYy/mi89DdcrKh/VfFPtB4bzBLhftNo7KV9Id8WqX7JVna9uc0ct+D5JeY5kWOXbLj7VGke4paEqPcKCwhYC0q+enpKQfSZtcdwj+nGdYNo3hVgiXzPtRH5CbXGnSzFhRIzCCt6VIcSla+lKQelCE9SyCORxzVQt4Pu+KXt1WnsoxIIJHqR7ZK/+NTTvz2n6ha3PYpq5oNlf3E1R07LjtrSXwyJjKlBXQlw9kOBST0lXuKClJVwDyOdrw6hTrOFsbmujOGnPnlMaTEG63c3DWqUmm+BpbOQc4fDdxzspDlav644Jq3gWm+o2AYxcbNnkuTCZyaxT5DSIMhmI7I8h2K8hRKlho9Kg7wQF8gEcHW95+867bSsbbvyNEL9kcWVIRBj3Zc6NGtgkrbK0oJStyRyAlXqylJ6SAvmoO25eILfbpqVZNvG9fTNWLagRJ7aLTeXoXkNOzVJU00pxpXZlawtSUvNEtq8zgBAPJ2Lxk/3JkH+V0D/AFMiq7SXU6TXtMguAkZEEgdxPA5E8bbOA+sWuEQ24OhAcZ5GBwzVjs0z/VW47am9StLbVjgyu4Y0zeUIu8p5qHE8yKHnFjobcU6Uc+6g9IUeOVD4048KvNdxeUabZDkECz4nkNrvWeSJmQXi93+Uxc/OcajF9bbDcVxtwhBBT1Oo5V7vugc1cbGv3Ilr/wDZwz/3YKrB4Ln
7mPJP5Zyv6pFrrwCntu0tGTWiOXiRHKw48VyYi/ZNncc3OM88GfO54cFIOs+/O/aW6/YJoMrQi9Qnc0u8OG1eL1PjoZXFdlBlbzDUdbpX6ngOLbUO3KO9WL1g1dwbQvT27anai3X2Cy2hsKcUlPU684o8IaaR+O4tRAA/SSACRQzxD/3dO1v/ACrE/wC82a/vxrL1doWnml9qSXPuLKyGTJnJHPQt1plPlBX+a49wPz/RXI0l2ysdMOdUc2dwxNaDusCTzXXhH625nuhjXRxhxPeIVl7Lqfu81CwZjU3BtIsDs0C4xhPteP5FepX3VlxlJ6my6400GYzjieCEHrCeoBSh349G1fePiG5f7u4y7jk7D8/xJ1TF+xa4OpW9HUlZQpbTnCfNQFjpJ6UlKuAQAUkznjkuFPx+2TrapCokmGy7HUj5pbUgFJH1cEVzURbZWH+NSGcTQppjIbaqVdm2uyVIctRU4VAfS402v/GINdGEDafAIhpxAbwQCR3AM+kLmDy7ZfH1EE7iCQCOdxh9ZU05h4iV3wrc/b9vOSaCX60peZW+twymp9xm9Uda4zcWNEK2ypxxKU8l4gcnq6QCoYbVzfluL275VYL7rvtghY9ptkM4QmpkXIG51wi8jn8L5XLfmBHK/L4APSoJWeCa0XUv/hn9O/5Oj/u+dW5+MykHanaSQCRmEEj6v73lVzeIadCnXNyXQRoR4mHvG7oN/UWB+0PoiwwB3EEtLviNVeyDNi3KFHuMF9L0aU0h9lxPotCgClQ+ogg1j8seyqPjdwewe3WudfksK+58e6S3I0Rb3w81xttxaUj1PSgk8cdueRgtFCVaN4IpRJJxm2Ek/wDNW63SunaaQp1H0gciQubZ6hfTZUIuQCuWXh/akbqs11j16yaFbMKyzI5F1gR769e77Lt7McsrlNttxA1FeKmgEqSAro4SlPqSeLJ7nd5mo+2rVvAMCuemOP3mz6hT0Q4VwYvLzb8f8O0055jRZ6SR5ySOFcH6qhPwmP8Afa3Kfyjj/wBZn08Vf/f22yfyhd/rkGqUTifsbDk7ADyLZj6ur1RDtrIzbjjmIg/VlcLdLq7qNoTpfedV8OxDH8ituNxDMuUSfc3okgo60pJZKGXEq4CuSFFPoeKx2g25C+6z7Z7Pr7G0uuMu43VElSccssyO6+otSXGeEOSlsNns31HlQPcgBR4B8+/f9xzqz/J17/tJrUvC/wD3EOnn5rl/3hIqjAXCqJywkcJxT8ArVYaKThqXA9ACPio60c8SXPNeZeaWPTHbJd7tkllfYj2mypuiG0hJ80PSLhMcShmMhKkICUDqUpRIHPBUn8w3xFdScJ10t2he8DQxnTmZfnG27ZdYU/2iKC4robKzypC21K90uocISeykjuRpPhAgfKzcIeP/ALo4v+tm16/Grxu3SNI9PctbZSLzAyZUCK6kfhC09HWtaQfX5zLZ/PTGKfg1HCQ4MkfjAy4yZvOukBT4ZqPq0mmC0vg/hk332EafNSr4s/7ivJf8q2r+tIqWtq15tWO7PdMb/fJ7EG3W3BbbLlyn1hLbLLcRKlrUT6AJBJP1VBviam5Hw/JJvHV7f12D2rq9fO81rr5/zua1LX2+3aw+D/YXbQ4ttc3DsbgvrQeCGHVR0uD8xTyk/Uo1DidmobUG3IqMA3ThgdJ9EYBtNXZSbA03E8sQJ6qd4+t+5XVnEDqXt40pw04o+hx6zry+8SY1wvjCSQHmY7DKkMIc4/Bl13lQIKkoBr7bOt52M7r7Re4DmNSMUzPFHgxfLDIe83yuVKSHGl9KSpHUlSSCkFKhweeQTqm3DQONmWgOneT2HcnrHDg3DGre43FgZDGRHjHyEBTLafZj0pQoKSASSOnitu0L2VaO7cdULtqjieV5XMyTLWZEOZ92row6iYtxwSHFBCGUFTnLZV2PYdR4+jpdTFGs+k72RI4yMuF9dNwWDXmtRbUb7Rg8IOfG2mu9WLqtHiL5/csC2nZ
a3Y31NXTKFxsYhqSeFdUx0NucH6fK82rL1TbxRORpHpypz/cydT7CZHPp5f4b1/TxXM5gquZSdk5zGnk5wB9CuhjzSxVW5ta5w5taSPUK0um2G2/TvT7GsDtLSW4mPWqLbWkpHHutNJRz+c8cn6zWQyjJ7BhWOXPLspujNts9niuTZ0t48IZZbSVKUfj2A9B3PoKyY9KijdTpFcdetAsw0hst8ZtNzyOEGoch4kN+c24h1KV9PJ6FFsJUQCQFE8H0qdqq1HB9UXcZPVV2akxpZTcfKIHRYe3666oZDhx1Rx/Ra3RMMVENyYkZHlqLVcX4PT1iQI/s7jLSVI94B+S0QCOro78SPpfqPjOruAWTUnD31vWi/RvaYxX09SeFFKkK6SUkpWlSSUkpJT2JHBrm/kG4Tejt40+b043nba7XmWlHsrdgul5trv4R2CoBoFb0d1TYPTx0hbbKlHgdQJ5ro/pXa8Es+m+NQdMLZHt+Ji1x3bNHjpKW0RHEBbfAPfuFckkkkkk8mti1pDnUzLZAB119rcSIi2/csgXAta8Q6CTu0y3gGdd2craqUrGZRfomK41dsongmNZ4L894D16Gm1LV/oSaxe8U2l7shdbMaXuDW5lRtqnuNx3AM1tGkmNWKdmeot+aMiFjlscQhTEYfOlTH1noisD9+rlRPZKVVpWpm863aAzLRYtfNOLnZ7zkcyPFsbONyxeI9x8xYQvoecRGKVNFSPMQpAPDiOjzOTxC3hXQ71qe1qluzzsmTkef5CuAw85ySxDYSlflNk+jYU4lAA7cMJHwr2bnko1j8Q3QLRdID0DCY0nNbonjkJUCVtBX6YrY/wClrRrCypRp1M3EF3ARjMcmi5vJmItGbnBzKr2ZNBA4kHCO7shujVXupX8oWhwdTa0qHJHIPPcHgj+ev6qqssbkmQ2jEsfueU5BMTEtloiOzpj6vRtltBWtX6ADVdbnves+A51j2G606ezcPGa2d28404zN+6MiSEn3YkiOhpKmZa+UhDSFPAqUEdfV2qfM8wiy6jYpOwzI/aDbLl5aZSGHOhTiEOJWWyeD7qujpUPikqHbmqWxI8fW/wAVybIfYblWjRLEG2mgpIU2i4PgEEfAKBlL4+ILI+iop+au2mcjM8AGuJPOcMab5lS/y0XPGYjuXNAHLOdYyiFL113lt4brZg2iupGnqbDc9Q1D7lxmbx7bcYiFlSWnJrDbAYZStSFJ/ByXSCDyOATVlKwU7A8HueVwM7uWHWSVklrZVGg3h63tLmxWlc9SG3ynrQk8q5AIHc/SaztWHsAHOT209Myqn2pGUDvr/hK1vUbUPEtKMJu+oedXVNvslkjmRKfKSo8cgJQhI7qWpRSlKR3KlAD1rZKgXe1oVk+4zQS6abYPkEa15EJUW624yXChl92M4FhtZAJCT8DwQFBPPas6ji1sjhxtNzGsC8arSmA50O+twnScp0zXou2u2qdhwqRqpkOidtsmHw43t8hN2zJmJeGovHPWuMpj2VC+CPwapiTyQkkHtWJ3FYfdN2WimGY3p3IS9hud3a0XK/zlPBlacfH99K6Eq94rWUso6R3HUeewNVGzLc9uZwWzQtKvEX2x228aa3+THtdxv9uUpKVELSpDq3Izy2VKCkBfQnyVEJPA7cV0ysFqs1isVusuOQmIdqgRWo0GPHT0tMsIQEtoQPgkJAA+qtsDSMeYDhH8NyHf02jKemIe5pDTZxaZ62BbyvrYx1+8CBDtcGNbLdGbjxIjSGGGW09KG20AJSlI+AAAArQdxmBRNT9B8+wOWylwXjH5rLQP4r4aUppQ+tLiUEfmqRq81z8oW6UXuPL8hfXz+96TzXLtkuoVJOh+C6NmIpVGEDIhQjsZ1KmasbUdOsuuchT1wFqFtmuKPKlvxFqjqUr61eUFH/Gqd6p74UgeGz+1FznyzfruWef3ntKv/s81cKu/aziql/3oP8wBjpK5aDfDZ4f3SW/ykgfBVG3m77b7tSX
boLGhN7un3akrhwLzPnxo9tdcSElXQGluPq4Ch7q0Nc9+CeDU7ayapXrSnTC46jWnTq6Zeq1wXLhJhW+XGjlpltouOOKU+tJ6QEnshK1n4JNUl8aj/e20t/lS9/qKuxrX+56zr+R1y/qTlcFR7mbHVqD2mkweTWu+JXW1odtVGn7rhcfxYfgq1aKb8dYNx+l8rINF9un3ZyxFwkMuRn7sI1otcZPT5RfmupR576+SfKaTyBwpRQCOratoW9TINddQMw0O1d04bwfUjC+p2XBYkl6PIaSsIcKCeSkpUtHopSVJWFJPFaN4N/7kiV/K64f6mPWhaMe54x+qwR2CrA7yB8f73gmu7A1u1ChoaZd1DARHe+crjDi7ZnVdQ8DoXEGfkuk1fnp3NftaBr9qExpTolnGor7wbNgsUyWySeOXw0Q0n85cKAPz1x1qng03VNwJ7LqpUzVe2mNSB3XMHa1imLa1b7NYt02osqM1hGm14nXkSpXdgP8AnLbhk/T0NtKcAHfqQ3271PupXi56RRslbwTTCG4y/Jc8lWU5XDlRbTCB9HvZ2W1y3x9CShrnkcqA5Nbp4T+mDmB7UYGU3FgpueeXKTfnlqHvqZ6vJZ5P0FLRWP8AlPrqwevOgeme4XArjhOo+Nwpzb8dxMSatlJk294pPS8w5x1IUk8HseDxwQQSK0rsdsdNlDMU2gEZEmPNffpplms6T27TUfXyL3GDnAny23a655b9Y2vaQaeYbY7lqnjmaMZ/keozout7zRJbIuijz0oZS2SllhHdKWkk9PHBJI7ThXOrwYXcsjaYak45cpTsjH7PkyGbWsklsPlo+0hv6jwyogfFXPxrorW1YBpAb7MNI4AgEDoDCzp4vNj9qSDxIJBPWEpSlZLRKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKrBv8AtJNWNVtN8O/uGhTeb45mtuu1tlhwNiHwl1tTy1EHhCetKldj2B7H0qz9Kq5uIg7iD2IPyVg6J4gjuCPmq32bcJuDw7H2rbrDtMzS55Iw0EKm4PIt9ytk9YHHmJLkht6OFHv0LQen6TURaK7WdXdVt28veluNxiLhxhJS3i+JImNy5LAQ15TTslxvlAKUlSuAeorVyQkJAN7aVo12Gp4vvCY4TqBv46aQsy2WGnoc+I3E7t+/VUo8T/bdqrrrguE5Jo7Zher9g94cmKtiXUNuPMupRytHWpIUUqaR7vPJBPHJHFTBhMfVXcXppfcc3H6OW7BLDkNoNrdsn3VTcLg+44khyQpxsBthA7eWjha+odRKeADO9KzDBgdTddriTG6QAe4GvPOI0Ljia8WLbesjsT9Xmi+17TzcRsXcv2kV303u+qGmUy4OXHH75jL8UzYS1gBTUiJIeaUArpSolCilKuogq6vd3Q6A51uM3I4xuB1rxY4nimnTJ+SOKS5LMmfJmKUFmbNLClstAKDZS0lazy2nqI4INtKVoHuxNe67m5HpAPMCwPXO6zLRhcwWDsx1kjkTn2ysufHiK6N7ldwmdadK0k0Dutxt+ntxkTnrhMv1nitT1LcYUkMoVL8wJ4YPdxKD7w93tV5sUvN1y7HjIyjAbti77yS0/bLs/Dfc4KR1e9EfebKe5HzgTwew7VsNKo0BtI0TcSTfeYn4BXcS54qCxAjtJHqSuW9q23b2tiGs2RZBtYxGHqHpzk8jzFWl6Q2ChHUS2h1tTjbiHWwpSUutlSSn5w79Is/pvjG6/Xu/2nIt0ON4/p9h1glNXKLhloliXJu01pQWw7OfStaAy0sJcS0kgqWlJWPdHNqaVamSxrQb4cp0jLmRpMxnndVqAPc5wtizjXfynWPglVi8RTQDNNxm2u4YZp5FbmZDbrlFvEKEt1LXtZa60raStZCQoocURyQCUgcjmrO0rKrTFVuE8D2Mj1C0pvNN2IcfUQV
VrbJm+6qLpZYLXqRtxcsTGHWNi1ritXmI/db+802hptUdpx1liKgBJWsvPEn5qB+NUJ7IdJNz+hevOqud6j7cr0zZdSpqprDsDIbJIcgrMp11KXUe2pKk9LxBKOTynsk89uiNK6DUJrnaD7RBH82ff00WIYBR8AezIP8ALl2VVPEi26agblNvicV0yDci+2W8MXlm3OPpZE9KG3W1NBayEBfDvUnqIHKeORzWg4Wzvr1n24z9D8x0ax3TjoxZ7HpF/ut0EmRdAIxZQhiE12YUsBKVOuOFKepSkpVwEi9NK5/DGCpTOT8+cRPbTLeCtsZxMeM2ZcpmO/VUY8PGx7otD9G5OmGo23iRCtuNSJkqM+1fIarjdnnnQoMsR1rQylKSVqU64+lJHSEgnmsF4fmjG5DQnV3VK+6raEXa2WzUm4tTY82NfLPKRAKX5DhD6ES/MKeJHq2lZ909u9dBKV0GoTU8U54cPS35DtznHAPDNIZF2LqDPxJ78o547j9INzeoW+TTjXzEdvF6l4jp77HHeU5f7I1JnIbkuuOuMtqm9gUujpCyknp7hPNWA1Cf19sW4nEdUsG0iuWQYhIxFy1ZPbkXS3sTorqpIdaLaXJAaddb4UCEuFJClAKJ4qx1Kyp/ZMaxvulx6uBDp4GfyWlT7RznH3g0fykEdbKme4HRTJ94eqWlUoaWXjDrDgN5N4u+Q34RWJchpKkKTBistOuOq6loBUtYShPHKSo9j5vEv0w123CaXQdItHdGLte3GL5Huki7O3e1RIZabZcHQ2HpSXirqdAPU2ke6eCeRV1aVVzA6mKek4utv/5GXxJJs15a81NYw9L/AJn/AFCiLRi2ZletArXp5qJp9dsNu0LGY+PzGpkyDKQ6tMUMrcZXEfdBRyOR19Cu47etVA2I6c70NsTWUaBvaFW161T76q4Rsxn3tpNviIKENLd8hsl2UFIbQpLaS2rkkKKByU9HKVsahNd9c5vEEaG8889xWIYBRbRGTTIOotHLLeFzv3i6RbntYt0OlOq+n23W9yMf00lRpD6pl/sjDs4ompeX5SPbTwkoQACvpPJPIHFWY3M7erVvD0FfwXKbZPxK7uLFwtSp4jvSLXOb6koLns7rjakqSVBQQ4r3V+oUOBPFKxwA0P1c5SXcZJn4gFbYz43jjOAOEC3wJVSNu+abl9EdNLbo1rDt4ynKbtiscWy0X7Fp1ukwbrEb91guKkSGVx1JSEpJWnuE9RAPaszt020ZVZtZc13Wa3IgJ1BzQCLBtEJ/2hiwW1KUJRH87gB14obbC1pAT7p6SQo1Z6lbF7nPNU+0QRPPPhJ19IkrIMAZ4Y9mQY5ZDkNBymYC545tpJuZvPiJY5ujt+3K/uYZYIiLYpBv9iTNeb9leZU6lozunsp8kJKgSlPfgngbr4lul2vO4vSWw6X6QaL3a6vi7Rr3NmyLvaorMdKGHU+z8OSwtToU6OSlJR7p4Urmrs0rE0w6k2icgZ6zi+N/8LUPIqmsMyMPQCPgo90CXlrWkeL2fOMEuOJ3mzWqHbJcGbLhyepxlhCFONuRXnUFskHjqKVdu6RUhUpW1Woary92ZusabBSYGDIWXNzQDSTdxtO3K6rw8O0IYzXFtRLiqXb707fmIEOKkPvOMvPKIW5wlL60rbCOslPu89uZH8RHa9rTrdiem+oWlqLdds+03l+2OwGiI7U1S/JWtbAeXwOl1hJDa18lKiOokDm7lKyDS1lNoN2RB18uX5fQjUumo+oR7cyNDIgqi2ozu+XdFt7zDCsp0GtOmiJVhdQ4z92Wp9xv0wAFEeO3ylEJpTiR1qeUpQT2T69aZR2B6Wa46IaDWnSrWLH8bgCzF9cFy3XZcqSQ8+t1SJDYaDSSkuEBTbqwRx2HqbMVhcvyqHhlhkX+dbLzcG2AAI1otci4SnVH0ShlhClnk/HgJHqSB3qxeKeNwHtAT0mOt8hbcLmaYC8NafdJI6gD5Zm+82Ecr/DVzrUrAc3
1wuuJ6TTs7sDmQst3ZizzGG7rEX5srynWWZC2230EdYUnzErB6SARzxajLdGdS94esWFZfq3gMvA9LNO5RutvsF2kx3rrfriSOlyQ3Hccajso6AOkuKUR1DjhfKYV8M6FqHotmuqbWrGi+pONsZxdI021ynsSnvM8h2Ryh1TTSvLPDyD1KAT2VyocV0sqzAabKLjcta2OBDR6tOU5WO5Khx1awGTnO6gn5ixjjxVR/EawPWnW7RKZoxo/o9dsil3OdClu3Q3W1xIbLbLnmKSPaJSHlL5SkceX08Enq7cHMaaaNZHqbszi7ZNbtN7zhsqPisbH5L78y3y2lPsoAbkR1RZDpPQtttzhwI79u/erQUrMMHh1KRuHmTzAgR0Vy842VBYsEDvPxXL3QnHPEy2YCVo3jOjNo1QwxMpxy0SDc2m2WepRJU06p1C2UKPKlNuo7KJKSOSTcfQ3TvW++ZONatzc2zM5Q3EchWHF7GpS7fj0dzjzllxRUXpTnSlKnOSlKU9KTwo1PdK1D3ZuucpOe7llaVmWDJthnHrzzv8AFKrZ4iOnty1D2nZi3Y2FO3XGxHyWElA5V1wnQ6vgfT5QdqydfN9hmUw5GksodZeQW3G1pBStJHBBB9QRWFVrnN8hhwuDuIMg9CAtqTgxwLhI1G8GxHUWWt6W5tb9SdNsX1AtTqXIuQ2iJcmyk88ea0lRT+cEkH6war5rloFeLnuuwXctPtWW5VY8Otfs8Sz4/Mjodgzgtwl9xh5aPOZWl0BQaX5nU2kFK0ntsOiNukba8tk7e735icKu86RP07uSgS00h1SnXrM4s9kutKK1s8/tjRIHdsirFVu8tdUbtFMReRwMEQeImOdxaCsGNLKZoPM2g8Ra/WJ7g3kKtmttu1E3TYVJ0Vsem9+xDF8hdZayPI8jTHYW3BQ6lxbMOKhxbrj6+gJC3EobQCTyo9qnGdY7hj+nr+N6cNxIc622ZUGxJkglhl1tjojhfxKAQjn6hWxV/K1FKFKSgrIBISOOT9XftWTgPDcwSJzIzygdrxuk6krRp87XG8ZDTj3gTyCq3sbyHdgWc0wHd05Gfyayrg3KA+25DW4YctUlAQsxPwfAXEUpP4wSsA/ACyuRWODk+P3PGrmlSod2hvQZASeCWnUFCuP0KNahpRieR2xd/wA3zhpljJcvmolyYjLodRborSA3FhpWOyyhAKlqHYuOuke7xUgVaqBVbheBBABHSO+8jM3CMPhuxMORn1+G7cFT/ajjOq20zTGZoDddHchyqTabxNex+8Wd2ILfdIj7nW2t5115JiqSSQtK0kgAFAc7A6zjGlG4XSbdzmm4bMMFXqDOzfE24FnVjq2W4tqnBTQ9hdMhxCkMpDSf74IPUApRSFHoq81KkkucKjj5gIngW4T1jXflaQawA0tAsTMccWLtOm7jBEPIwLVLANskvCtNLrBe1Hj2OU5EnSACw7enyt5533x08KfdcUnrHHcdXbmtR2QZRuLueA37Ed0gZczrFLumI8+25GWpyO9HafaS4qN+CK0hzuU/Ap5781Ya5SJUS3SpUG3rnSWWVuMxUOJQp9YSSlsKWQlJUeBySAOe9ahpHhl6xOwTZ+Wvx38nya4u3u9rjkqZbkuJShLDZIBU2yy2yylRAKg11EAqIo0+d7tCB0MyIHKZPADVHDyME3BPM219IG8k6FbufT0qpOxHRTUrB8h1m1c1hxtyy5LqNmD8liM8624tNvaWstHlClAJKnVgDn0Qk+hFW3pUN8ri8ZkEdCQf/HtKl3mbgOUg9gQPjPMBKqFnGR70sK3m2a6uv22ToFeZkGyNxkriBaXZDYRyUn++fOS/1LJHueWOOfhVvaj6/wCKZJmGqdhn3SO1FxPDgq5xPwqVOXK6uNuMpUUj5jTDS3D73dbjqSAA3yoy1Vj9AbjQjXrEgbjCOvTe3eLbwdPWJ4SpBqpuXaGXjFt5691d4sWaZjb0WRu3WaNYJMdabQ55SmnkPxX
VoccbUCVpLKle+tXWgcBRtlSoFnB4zE+oIPoVJu0sOR+RB+IVZtYsNzXdzDtGmdx05u+IadNXaJdchuOQGO3NuTcZwOIhxIzTji0Bawnrdd8vpSCEpXzVl0IQ2hLbaQlKAEpA9AB8K/qlWHlbhG+esAfAAdN8k1PmdiOgjpJPxJ/0AlRpuV1Ai6W6A5/nkp4NfcqwTFsEnjqkLbKGUj61OLQB+epLquOpVpd3QamW3TOIypzTPArs3c8umKSfJvN1jq6o9paPo4hpzhyQRyApKG/ndQGVWn47fBmMVidw1PQZbzAzIWtN/gu8aJw3jedB1PYSTYFZ3ZJplM0i2sad4Vc2FM3Fu0pnzm1DhSJEpapC0q+tJd6T/i1ONfgAA4A4Ar9rorVPGqOqREmVz0qfhMDM49ePVUD8TPRrcXuVhYjhuj+h12uMbGLq/cJN0lXq0RWJHKEoQGULlh0j5xJWhBHbsfhY7Um86pZVtwyC12nQnJhlt7skqyt2J26WdK2XnYpQHlv+2lnyQtRHIWXPd/axU3UrnNMOouoHJxk77gA+gAW4eRVZWGbRA7z8VS/w2tM9cNu+jNz0s1b0XvNpmt3aZeY0xi7WmVHkIW00EsjypZWl0qbIHUkI7jlYqPtL9I9zuPeIRlu5y/bcr6xh+UR37c0lu/2NyZHaLbCG3Vtidwf2gFSUqJAV26iO/RGldBqE1hXOYbh4QQB8AsQwCkaIyJxdQZ+KVVDxAsH1d1xw7GdumlWPzVR8zuzb2S3xSCiDbLZGUlZDrnoVrcKClscqUGj271a+lYuYHwHZAg84v2nPhZateWSW5wRym0893Fc59Mtdt++2/IpGmOqG1C55zh1vQ1BsDuEQuWoERlAbbS042lYdbKUpPD5S4CSSr8Wp/fyvdZuDx2TjNm0oVolZbs0qPMyHIbk1Nu6I6xwsRIDHZt4gkBbzgCfUJUQKszSruPiCKvm3zrz38d+qo0eGZpeXdw5Tl8lpGjGjmC6C6d2rTHTu2GHaLWg8KcV1PSXlHlx95fA63Fq7k+noAAAAN3pSpc4vOJ2ahrQwQEpSlVVkpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURfhSFeoB4PPev2lKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJX4AEjgAAevav2lESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpR
EpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlK1rUnUXEtJcFvWo+dXRNvsVhiqlzHynqISOwSlI7qWpRCUpHqpQFVc4MaXOyClrS8hrcytlpVTsA3P7qNY8dY1O0u2o2tWETQp21i+5miDdLrHBIS820GFtshXB4C1cHsQogg1l9AN6Q1z1wyDRGdpPkOD3jFbJ90LpFvpbEhEoPobU22G1FK2ulxCg526ueQOOCdA0l/h+9cxrYSbfW7NVLgG49LCdLmB6qzVKUqqlKUpREpSlESlKURKVjciuVytFllXGz47LvsxlALNvivMtOPqJA4C3lobTxzySVDsDxyeAaxbY962Tbg9fs/wBGb5pKnC/kHEc9oQ/dBNlLlIkhlaVFtKW0pHJ+aV89iFEUZ53+G3OCegElHeRniHKQOpsFa+lKURKUpREpSlESlKiTdRq1nmiOi151E0306kZrfIDjCGrYyhxfCFuBK3lpaBWpKAeSE9/rABNVe8MElWY0vOEKW6Vp2j2Y5HqDpbi+bZdiEnFrzerYzMm2eR1eZDdWnkoIUAofTwoAgEAjnmtxrR7DTcWOzFlmxwe0OGRSlKrvvD3D6t7frXh8zSnRKdqG7f7v7DOTGbeWIrYCSlPDKVELc5ISpXujoPIPIFUm4G8gdSYVwCZ4AnsJViKV847i3o7TzrKmVrQlSm1EEoJHdJ47cj0r6VYiDCqCHCQlKqNuv3w5ftvz3EsEa0OkyWMxuaIFvv1wu7CIzgDrSHVIYZK3D0h1PAcLRPPPB4NW4qG+dniDKSOoifipd5XYDnAPQz+RX7SlKIlKV/CnWkuJaU4kLXz0pJ7q49eB8aIv7pSlESlRLua1xvm3vS+56mWrS645nGtDC5M5Ea4x4bcVpJSOtxThKyOVf8W2s8Ak8dufntO1vuW4zQfHNYbtYo1mk35UsmDHdU6hlLUp1pI61AFR6WwSeB3J7D0o3z4o92J6zHwKO8mGfemOmal6lKURKUpREpSoCvu4XLcx1ayvQzb9Y8eueQ4Rb2Zl+umQTHmrfFkPd2IaEMIU464oAlSuUpQBx7x5Agm+EXNz0Fz9dMyFMWxHK3qYHqp9pUAbSdftV9bYOXWzWPRWbp/kGHXNFteJ80w55UFkqjqcSCQnoHPClpIWghXvcCf6sREcQD0IkeiqDnwkdrJSoi3G5lrJp1hlzz/TZWCfcvHbVLud1RkaZhdcDKCsJZMcgckJI974kVD+yLevl24rL850q1ewm24hnOHLS4bfDU50usBXlu8hxSj1NudHJB4IcTx9Jin9q4sbmBMcPq/JTU+zaHOyJjr9EK3tK17PP7oHyakf3Mfk98oOpHs/3e8/2Pp59/r8n3+eOeOPjVedANW95ur1ynz8kwXSvHsaseRSrFMlImT35E/2SQWZLkNI93p6kLSlThHKh83gGjPO7CPoWE+oR3kbiPLrcx6FWnpSv5ccbZbU884lDaElSlKPASB6kn4CiL+qU9aURKUpREpSlESlKURKUqudu3F6vS95M/b2/oXcGcGjWv2xvLyh7y1L8kOdfX0+UUFZLISD1dQ559UgPM8MGZk9hJQ2aXnIR6mPmrGUr+XHG2W1PPOJQ2hJUpSjwEgepJ+Ar9J90kDntyB9NCYui/aVXTafuK1e11vuoVs1O0MuGAx8UuiYdsfkoeSJiCpwKQfNSAtaAhCipHukODsO3Ni6RYO3gHoRKGzi3UEjslKUoiUpSiJSlKIlKV8J86Ha4Mi5XGU3GiRGlvvvOqCUNtpBUpSifQAAkn6qgkNEnJSASYC+9KgXQXIMj17vDu4S6TbjAw5an4eC2QOqaQ9CBLa7rKQOPMdfIV5SVchtrggdSyanqrEEAYrHdu58d40yN1WQSY+uXDjrmLQUpSlQpSlKURKUpREpSlESlKURKUpREpSlESlKjXclqizotoPnGpzjqW3bFZpD0Tk/OlKT0R0/pdWgfprOtUFGm6odBKv
SYarwwamFJVKql4aGmeY6fbX7Vds9vNym3nN5j2UONTn1uKjNSAnykjqJ4KkJS6r+M4ea2+5ZVd9vmsloseQXaXO041PuK4lskTX1Oqx+/r6liL5iuT7LJ4V5aST5boKU8IUkJ6KlM06opHPL+L7veQN5gDMLFjxUpmqMrn+Hf2udwncp+pSlZq6UqJtwO4KzaFwMbhC1G95Tm95YsGNWZMgMCXLdUB1OOkK8plHUCtYSojkAJJNRm3uC3PYXuQxnRvU7Q213TGMvSowspxNyW6xBISSoSfORwOgj3uSj3VBQ5+bRn2jg0amOsTHYhHeRpcdBPSYnuCrS0pSiJSlKIlKUoiUpSiJSlVq3x7nNRdsOl8rN8H00iXptpLSHLvc57bUKK866G22wwhXnvuEnqKQEICRz18+7VKlQUxJV2MNQwFZWlVP023V6i59g2H4XgWNW3NtXLhjcG9ZK646YNjx32psONma6hK1BZCh0x2kqcUEkkpHeoR3O6++I7tLFt1TzeZpdk+FSpyIkmJZbc+hqOtXJDay70vpCglQS4FrHI94dwDpUHhPwPsJidJmO02mI0zWdM+K3Ey9pjWIn4XjdfJdHqVrem2aRtR9PcZ1BhwnYbGS2iJdm47p5Wyl9pLgQT8SOrjn6q2Spex1NxY7MWUMeKjQ9uRulKUqqslKUoiUpSiJSlKIlKUoiVV7xKdO8v1M2g5jY8JiPzbhCXEuq4bCSpySxHeS46lKR3UQkFfA7no4HerQ14pd7s0G5QbPOusOPPunmiFFdeSl2T5aepzy0E8r6U9zxzwO5rKtT8VmGYyPUGR6haUqnhPxfUGxVCPDm3+6V5jpti+g2ol4iYvmWNwmbPAMxYai3ZhpIQz5Th91L3SEpLaiCojlPVyQm3c/Re2PbgLPr5bZDMS4xccmY3c2gzyqew48y6wSsHsWlNueoPIc47cVWndv4W+keuybjmumCWMFzp/rfUuOji23F49/w7Kf2tSj6uN8dySpKzUL+HVuf1p071vkbI9w65smTFL8SzuznfNk2+Qw2XPZ/NJPmsLaSVNnk8e70npUAOplT9aqy4RVgngbEOI6E245Cy53U/wBWpeW9Ow4i4IngCB2zN1e3W/cpguh9zxnE7nEuF9y/NZgg47jtrDZlTnSoJ6ip1aG2mgVAFa1AevAPBFajet3R011JxPTnXjS244OM6fMOw3lu5x7lb3ZQKR5Dy2+lbK+VoAJSU8qHfgEjw7n7Btgx/U3TrWzWBq5TM7sMoQ8OtNqccem3WSV9TbTcRB/ClLiuoKJShJUOtXBAqqPiiX3VG9w9Fr3mWIW3FIKst6rfbxcDMuSFcNEqkrbAYbPHH4NtToB7+YfSsqHmcwOvLw07oJAEH715OeloWtUQHYbQwuG+QCTb7toHW8q7m5fdZgO1rF/lTnWOZdco6ulKFWizOPMBayQhDklfSw2SUn3S51eh6e45j2679bfM09tec6U6F5/qOZNqZulzTYYfXCtPU0HFx3ZhHQ4+2FcKbaCykghXSe1YvxWwDsmy7kc8TrUf/wCsaqV9mbDDW0zSZpplCEKxC2qKUpABKmElR4+kkkn6zVKYdUpVXTdrmgdWk/79IU1CGPpCLOaSehA+vWVo2nniDaP6paZws2wTHcrv+RzXnoqcKtUASryl5oJLhUhKvLQwAtB89a0o4UBz1+5X7ofvpsWqWss3QDO9Kcp0zzlqMqZDt19CFCa0lPWehSeOFdHKwOCkpSrhR44qu/g/2+FFyncCY8VpCmciix21BA6kthyZwgH147Dt9Vejc8owPFm0DlxPwbsizR23VJ7FSS7OQQf80kVszC+vQbFquHoXMxCORzlZ1AWUqxm9Kb78LovzG5WL3Kb89JNst4hYvldhy2febi6lmMhmzuxoaiSnqV7ZICGXEpCwVFkuFPPBANaPqv4leO6c9eQ2Tb9qdlGBx3gy9mke1KjWl33uOuM66npeRz2CyUJUfmkjgmK
vGUZZft+iTT7SHEOZNJQpKhyFJKWOQR8Qa6GXzGbFkuMTsQvVsjybRcoS4EmGtsFpbC0FCkdPpx0nisBjNE1BdwcRGhgNPTP58FqcIqtYci0HiPM4dcvrNa1oprVp9uA08t2puml2M6z3DqRw4joejPJ7LZeRyehxJ9RyR3BBIIJo1sf/AOEg3Of8pL/r6K83g7NXPG7vrvpwmQ47Z8eyCMiNyeUh4LlNLUPhypDDfP8AiivTsf8A+Eg3Of8AKS/6+it6WA7ax9PJ1J5HVrSsauJuy1Kb821GA9HH65q7GqGt9h04vFqwyDY7rleaX9tx61Y1ZktGW+y3899xbq0NR2EkgF11aRyeB1HtUSwd9FqxjV+0aKbgtJsg0rvmSdP3Dlz50WfbppUroSn2mOspQoq4TwQQCQFFPI5gPS2Zqfq14iW4BWI6rRsOu2OwotniKlWBu6lVuZWlCm20uOI8seYErJHPJXUk7g/D91H3OfJ9Wqm55Eh3GXXnrc9BwhiK42pzo6+VIk9xy2g/URWdEkspVnDyvuR+6SQI45Z2mRC0qAB9SkDdtp/egG/CbW0vOiupUc66a64jt9wmVnuZ2fJp9uhtqdeFks700toTwCpxaQGmU8qHd1aAe/B7HjfLXFkQbZEhS5hlvx2G2nZBR0l5aUgFfHJ45IJ45PrUQb0wDtL1bBHI+SVx/wBSqs9qeaNN72GcIJ5wr7K3xqjGPESQOUqO8X8QLGdRNMbdnGk+jeeZxerj56l47ZIaZDttbQ8tCFTpAPkx1OJb60t9SlkKHCSPer90h8RfRzVPFLxPfx3J7Nl1jni1v4UYJlXmVKUF9LcVlv3nf2twKJCPL6FFfQnhR8HhTMtNbJsOW20hCnZt1WspSAVq9tdHJ+k8AD8wFQls8t0FnxSNxJbiMgsxp7jZ6ByhS5kYrKT8OeTzx9Ndb6YG1nZtC1zp3YQDbmLXm9+C5m1Cdm8fUODecki/KNI3cVO2JeIDapOt1l0P1Z0OzjTC5ZSQiwSr+hrypqySEJV0EhBUodI6SsBRAJHPNSpuJ3RabbZMaXk+ocHJ5LASlSU2ixyJSPeUUpC5HSI7RKhwA46kn4A1T7xXlGFq5tnusY+XKZyd/ocT2UOJMAjv9RqwHiYJSrZHqX1AH+9oJ7/T7fHrjqVCNkFYZhzmniARfnflwXVTYHbSKRycGnkSSD0tz4qU9ONdrFqJoZZtd4uO31m2XmB7e1bosFy4TggrKEoDMdKlLUeBz0ggc8k8AmoR0+8SvR/U8ZQzhuDZzcLnY5LMK22Ji2pdu96eUlwuFmK2pRabb8sdbjqkpT1Dq6SQDvHh/wD7jXSj/IKf9auqueE3Ei/3WtyUv2Zrz28jaaS70DrSgyZpKQfUAkA8fUPorrrUw3a6lAZAOI4Q5o+f+CuWnUJ2VlY5ktB/iB/L/SsJt/344trNqzctCcu0yyjTbPILK5LNpv6EhUltKQpQSRwQvoPX0lPBTyUqPFZTc9vq0m2spREzCwZfdLnJUWorMGyvNxnXOkK6RMeCI6uAoEhta1J+KarZrslMPxg9HJEUBpyRj7YdUnsV/g56e/0+6APzVkvGqSk6CYMogcjMEcH6P70frnL8VGlVFpdhPR+GeFjMLpYwDaH0jcBocOrS6PTNdBbbNTcrdFuKGyhMphD4STyUhSQeP9Nemoo1P1YuGiuisHO7Zpxkmcvx2YMcWfH4yn5jgcSlJWlCQT0p9T29KgnHPEOy+/5BbLE7sl1ut6LjMZiKlybI6lmOHFhJcWfL7JTzyT9ANbuaHVnUmaGFzUnnwG1amoBUU+LtJjw8226y5b7bLDGSSnHXHFBKUIDsIlRJ7AADnmpJ1J8UPEtP5Dd8j7e9VLrgCnwwnMzaFRLdK5PAXFU8Al5J/FKlI6vgPjUa+LxFjTs127QpjCH48jJJTTrTiepK0KdhBSSD6ggkV0HzLC8dzvDLtgeSWxiXZrzAdt8mKtAKC0tBTwB8OPU
EehAI9K56Ze3ZC9t4e+Bvs3X4LodhNdrXasbfd5nrE6T6vYBrZp9bdT9PL63cLDc2lOIeUOhbKk9ltupPdC0EEKB+jnuCCYtxTdo/rAchm7dtKbjn1kxma5bZN5cu0a2RZctCQpbUTzSpbvAUn31JbQeocKI71VTwdW7gzY9b9Krutcqw2m8sNJStRCPMcTIZf4+jqQw3zx9Aqd9ucjSnTG35DoxsjwqTlkSBeHn75f7ldVt2OFPWlKVNGYUrXJcQhDYDcdtaQAOtxJPUdnhrnyw+UsDm75MG+4AHPfE8cmlzWlr/AGmuLTugTlvJ3bpjhLe3jc1gu5HEbtkWG2+6wrljsxy23qw3BtDc6DMQDy0oBRQerghKurg8HngggUsxPdNr5l/iJ322XnRTIZYw7GplutuFQLrbRJhtOrjOKlvOuSEx1OLHllQQ4rpBQkc9Kict4V67mnW7c+xdnmFyk5QyqT7MlSWS97VPCyhKiSE888Aknjjk18dFP+GL1b/k6v8A1ECoo+faKR0dSc7kcGnc56cbqavkpVmfdqNb0xD/AB14WV4cy1qxTS/TBnU/VpuRiLC2mvMtspTciYmU4PdhoTHUtLzxV7oS2VA8E88Amoi1J3mZPo9ibGqOpe2fNLJgTshll24quNvduERLx4bcfgodJbBJAI8wqSSAQD2qI91+UDIPEa24aUZC5zjtuQ5kCI6z+CduCjISypQPYlKozYT9HUfpq6uouneHasYVdtPM/srd2sF7Y9nmxFrUjrTyFAhSSFJUFBKgoEEEAiqAufS8dkXc4AaQ0gGdZJmL2EG6ucLKoou0aCTr5piNLCJ3m1lBu67O8U1O2Gah5/g93audivmHyJcGU2CA42oD1B4KVAggpIBBBB7iq9bKt0bGmWzrA8PwPSbNNUcpgx58m4W3GIJdbtra50gt+1Pn3W1rT3Q2ApZHfpAIJn/dJpzhukmwfUTTrT+zJtWP2TE5bEKIlxbnQknqJK1kqUSpSiSSSSTWM8LeFCibJsDdixGWVyl3J59TbYSXXPbnk9aiPnK6UpHJ78JA+FXABqV/DMN8kb86kf5+iqPJFKh4gk4ncvZbK3TafvE0/wB2NnvTmN2a649f8ZfTHvNjuiUh+MVFQSoFPZSSULT3CVBSSCkduZ7rm7siQiD4k+5i3xEhqOszHS2nskr9vbPPH08rV/Oa6RUBFSjSrARjaDG7MfJCCytUpEzhcRPY/NRnrVuCwLQyPZ4+SC4XS/ZNKECwY7aGBIuV1kdvcZbKkpAHI6lrUlCeRye4BjXUTeJkWh0WFkuvG3fKsWw+a+iM5foFyh3du3uLPCfa2mF9bQ57dSesc9h1EgVXONlbuXeM2q0ZKvzGMVx5yBYmXfmsqNuDylJB+JLz55+v6qvTrnhNp1H0bzXB73HQ9EvNimRlBY5CVFpRQsfWlQSoH4FINYPqGnso2rOQXRphBIA3yQJnSRaxnZrBU2j9XyyE6yQDO6BIHGDe4j+rtqb7Tp3C1F0rxeZqPFuiGX4EexzYjSpLDn/GpclOtNcAeoKgr4cc9q5wbDNWNR7fuP3I5BatDcqy643q/penwYNytbL1rUJczht5UiS2hZ7lP4JSx7h78cEyr4MWW369bdMkxi6POOwscyZxm3FZJDbbrLbi20/UFlSuPpcNYDwxf3T26b+Uf/8AnTq6/DFLbC1psabiORwGPW/pC5i81NiJcLh7QeYc4T6fnK6I2GfOutkgXO52WTZ5cqM28/b5LjbjsRxSQVNLU0pTalJJIJQpSSR2JHevfSlZkyZVwIEKF903N9xTF9LG/eXqBltrszyB6qhNuGZM/QY8V1J/x6ppvRiv7RN8Wm+7yyMLZxrL3E2jKEtJ4QVJSGnioDtyqOUOJHxXHJq5d5/8b92OO2v58TT3E5d5e47hM25PCNH5+sMxpn6F1jd8OhCNw+2zLMFixQ9eo0f7r2Q8cqE6OCtCU/WtPW1+Zw1iah2djdqF8Li
7+H2HNPCGkgaytWsFZ7tmdYFobycfMHerRwhSnnec2vDNN79qM4+29As9mkXcOJVyl1ttlTg4Px6gBx9PIrBbeMQmYLolhuOXQH7potbUq5k+qp0jl+So/WXnXDVH9sGvDmv2zfCdDrrLU7k3yvteBXVpZ/CrtbK/bFuKHrwYMV1ok/FCq6RgAAADgD0rqfTFNz4uCQByAxT/ABB46jguZjy9rA6xAJPfD6Fru6/a56+K9uD1c0+xayaYYniE62Y5lsxuNcMj9rjgzwnpWqDHbS4XUAjgOLcSgEe6OQSa6FVzj8Zz/wAztIf5VPf6pFYRNSmP32Du4D69ZEhdLPZef3XHs0n69LwVbeLuDm43g92zXVzRrN8Ft1mbbKA4wxeXpafLWpakN2tySptKAj3luhCR1Dk+vEKxvFG0kvmlt+1Nw3TnN799ybk/DYtMWElctyMy00t2fI8srTFipLyU9ayeTx255AthliQrArylQBBtEgEH4/gVVQ3wVLXbUaA5vdUwWRMlZauM+/0DrcabiMFCCfikFxwgfxz9NWaDVqVm5YWgjmXR9c+RWIIpUqL88TiDyDZU/o336Rx9A8H1zu9svUZWojxg4/jbTTbtzmzQ+phTLYKko4C0/tilJT0qQSQVAV/ee7wJuiVzxpWv+jl1wzHMqnJtkW/MXaLco8OUocpblpb4U1yAo9SPMT7p7ng1+bu8K2rog4PqJuEU5F+RF1S7jESA64l+ZLWpspiNRmR1PlSmmj0JHbp7kJ6qq/4rWUaoZftlsl8yHAI2IY65lcNUSHcJnn3pxZjyShTzbXLEYdPVygOOq5I56CCDV1QNOOPfAjQNJaAJ+9c+lomdWUy6GE+6TOpIxXjdYet8ovHr3uBwvbtgz+oOaWjJrja2EFxZslodm9CeUgKccHDTIJWkAuLQCT254PELW3xDMfzDTG251pPojn2fXaaw5LmWOwwxIVaGUurQj2yQkFtt1aW+tLSetfBB44IUc5u1Up7w/wDNXHT1qXgzSlE9yT5bR5/nrH+GBFjR9kuny48dppT4uDrpQgJLi/bnx1K49TwAOT8AK0dSLalamT7BbB5lw/8AH8ozWLagdSo1Y9uZ6Bp+fz4LfNqm7LT3dnhk7J8MhXC1T7NJEO72i4hIkQ3SCUnlJIUhQCuFdu6VAgEcV7LxuGM7VC86PaU4JMzXIcXjsSchdTPZgwLV54JZZcfc5Ut5aQVBCEK4A94p9KqD4Z6UxN126W3xgGoyMgUUtJ7JHE+YB2+oE14NwLO5bYluSzPc9plivy30x1CdZl5LC6VKMNxA4IcUgFbHSSsoe4U2AvpUOQOaeIx3g1HeVr2TycQIBO4mb8hqtMDmmtSb5nMdA4jU8wNOqutpJuCiam5Bm2H3TT/JMRv2nxiovUO6IZdCi+hxaFR1x3F+c2UN9QUACeoADnkVo2Pb79KMv3G23bTjWO5cvIJiZC5Eu52h22MxktR1PD8HJCH1FQSOOW0ghQIJFezajuR0F3RKvOpumRdg5W/ChQcitU09EuO2yp4sFSQSlxHLzoDqfUcA8EdIrJkSUjxpseIABVjJJ+v/AMFv1oGkV2U3jMHuGOM62Mf5WZcDRqVGHKI6uaI5if8ACy/ivbg9XNPsWsmmGJ4hOtmOZbMbjXDI/a44M8J6Vqgx20uF1AI4Di3EoBHujkEmrc4prBkDmL3TI9R9GMxwiLaEM+U2+iPeJE4K5B8li1uyXVFJCeQUg+8OOwPFNPGc/wDM7SH+VT3+qRXReL/uVn/k0/8AVWdIHwX78ZH9LD843a5yTpWjxaY0wz/UR6xz6QBAe23ehpvulzDMsX06sOQRWMMRG9pmXaOmMZDjq3UlKGeorSElo89fSrvx0jits1f3F4ZpJfrDgqrddcnzjKlKTZMXsjaHJ0pKeep5ZcWhthlPB6nXFpSAlXHPSeKZeGMlKd0O6VKQABkPYD/n86m2XK3c68V7W24ZOvzZtltEy1WhLv8
A8zx48iMzw2D6cpBJ4/fqPxNTSisaDRbGwvPQTA5mBllKir9ia5N8DwwdYF/U58FYnOd6L+iN2tCNx2iWSYHj18kJiRcljzo13tzL5HIRJLCutk8An5iueDxyASJP1d1/wTRrTZWq18jX294+IvtqX8etbtxSqOUhQdLjY8ptshQIW4tKTz2PrWsb3cHtWoG0/U+x3aO24mPjsu5xyoAluRFQX2lD6CFNj9BNVI2s5bfMs8JLUGNfXXH02GzZHaYS3CSTGQwXEDk/BJdUkfQEgfCsalQijWI9qmA7mDI7yOUcVqymDVog+y92HkReeUeqnHHfEQxbUDTOFm2k+jOe5ze5vtDq8bskMSJFujtvuNocmvI5aYLgbKkNgrWQeySPerf9qG7/AADdjYLxMxm0XSwXzG5CYt5sd0SkSIqldXSoFPZSCULHPAIKVApHbmMfCZixmNl2NvMR2m3JF0ujjykIALihJUkKUR6npSkcn4AD4VEuw9KYniGboIUZIajmXLcLaeyer7oE88f5yv5zXYWBu1HZ97C6dxaAfWY9eC5A8u2fx9zw3mCSPSLeu9XBybcI23qpL0U00wmZmmXWiA1c70huazChWlh39qEiQ5yfMX6pbQhZ6e56RX20n1+RqTnuWaXXnTrIsRyfC2Ici5R7kqO7HebklzynIzzLig82Q0T1EJ9eCAQQKb7obNuX2dbmMk3gaNY2MzwbM4sZGV2strcMXyW0IPmBHvtpAR1IfSClBUpKxxx1WN2m7o9BN18+4ahYG09as6ZtUe33u0TlcSmYrbq1tlPB6Hmg485w4nuOvhQSSBWOz/asDsyAcQ1BvEbxlfUSdy2r/ZuIyBjCdDlM7jnbfA3qylVb8RjMLrZ9v7OnuOylMXfVHILdhUdaD7yW5bn4bj87SFoP+PVpKprv4Jd1b2tQXu8V3U6O44D6dafL6P8ArVVcIqPpsdcOewHiC4AjqLKwcabX1G5ta4jmGkg9Crb4xjtrxDG7Vilkjpj26zQmIERpI4CGWkBCB+gJFfe73e14/apl8vlxjwLdb2FyZUqQ4G2mGkJKlrWo9kpABJJ+ivZUDb5dMNQtYtsGaaf6XELyC4Mx3GIxdDXtiGn23HI4UogArSggckAnsSAaivVfBqZn89fmmz0mS2nkMlmYG5GHe7QMuxrR/Um8YmtHnN3+LaoyGXmPXz2orshE51sj3gUR1dQ4KQeRUnY3kdky+wW7KcauTNwtV2jNzIcpk8oeZWkKSoc9+4Poe4+Nc1blv3euuGJ23b2NvGb6X27I4aLDMyC2KdiICPdSXUNOtAob7Dq6FujpJ91Q7V0W00wnFdN9P8ewTB2yiwWO3sw7d1O+aSwlI6VFf4xPqT8Sa3LAA5zbtkQd+czu92LDMrEOMtDhDoMjdlEb9Z5DetmpStZ1Nzq3aY6dZNqLdkFcPGrTKujyAeCtLLSl9AP0np4H56wqPbSYXuyAnstmMNRwY3M2Wvaj6+YDpvkdrwWUbjfcxviS7bsZsUX2u4vtD5zykcpQy0ODy68tCOx97tWvvbtdILHkVqwfUGXdMIy69XOPaYWO3uH1TXnn+zKkmKXmVsrIKQ8lwthQKVKCgRUC+F/ZL5nuJZnuy1GX7dmGp97fQ3LcHJYtsdXQhhrn5jYcCx0jtw0j6KxmqkCFrL4qWm+KNxWnoulGLOX+4uBIKhIWVKZSo/xVuRlAfxj9NbNY5lWlSqZuu790YS8xvIAgzrpvzc8Op1alPJthxOINE7gXG3DM7r6UpSqKy8V7vVqxuzTshvs5qFbrZGcly5Lp4Qyy2kqWtR+gJBP6KhuDvD0kRkq8UzZm+4LPcsnykgJyWI3H+6Vt6inzmA244rq5H7S4EP8Acfg+e1SNqhgMfU/DZOEzrm7ChT5MRcwtoCi/HakNuuRzyRwl1LZbUfglZ7GqXwrNatf/ABV7zOukBmfZdEcWjMx0uJ6mk3JZC0Hj06kqkOkfQpkH4VFOX1m09+L
oGtJJ43wgC2s5gg+G0i/dHUlwAHC0k56HQg2Rt+7DTx3U/HtJMjsmRYtfsuZcfsLd6ZjMqnoQCefIQ+uTH6glXT7Q011FJA94cVNVR3dNvuj161ht2vl1wmNJzu0w/YIV2ceeJZa4UBw11+V1AOLAWUdQCiAakSrCMInO/abdYzyE5KDOIxlbvF+k5apWIy3LcawPGrjmGY3qLaLLaWFSZk2SvpbZbHxJ+J9AAO5JAAJIFZeqy+IVpjq1qloAi3aNQEXS+WS/2++uWlSkgXJiMpSyzwogL9/y19BI6vL4HfgVnUdhAPEcYBIBMawL9FoxuIxz4TbKeOS3o7j4jNm+WE7R7UqFiKW/aF5BItUdLLcfjkvriCQZ6Wwn3iTGBCe5AFaTu8wC/biLdphpLj0GRNwrKcjYvGU3aN70ZFoiN+eltSx2/DrLYQfiRz8KrLfN7uL7gYkXazvS0MzrSM5bJixV3CPJdjNOPpeSUBaX2kLbZUsJSf20AEckD3h0hx+x2zGLDbcassf2e3WmIzBiNdRV5bLSAhCeT3PCUgcmtcAgVDkHAt1nDe/WLQLZ5rLGZwCxLSDpEiBHSbybwvXGjsQ47USKyhplhCW220DhKEgcAAD0AAqE96+BDUTa9qDaGQU3C3Wly9211HZbMyF/fLS0H1CuprjkfvjU4VgNQWWZOBZLHkgFp20TEOc+nSWVg/6K5dscRQe8G4BIPEXB5g3XTsjR4zGxaQI4ZQtV23amK1j0GwTUx5QVJv8AY40iXx6e0hPQ+B/0qV1JBPAJA5+qqqeF2/If2R6f+0Eny1XNtHP7wXCRxVrK9DbGgV3hthJXDspPhNB0t2t8lys3paqaiXrejt2evGieUWV+xXVEi3WWVcrY49dlqmtjllbUlTSOry0pHmrR9fA71fUbgZmOYXes61d0dzbBINoIKWnI7N5fko6FrWtKLW5JKEJCPeU50JHI5PHJqnu/P/hB9rv/AD6J/wB4proNmyQrDL+lQBBtcoEH4/glVwioaX6NNQZh1X0IvzPbguwsFX9ICmci2n6g26d+KqVG8UbSS+aW37U3DdOc3v33JuT8Ni0xYSVy3IzLTS3Z8jyytMWKkvJT1rJ5PHbnkCU7NvP0kf23Y9uYykz8fsuSJLUG2PNedcJEsPOMiMw0ju8tS2ldPHAKeFHpHPFavBUtdtRoDm91TBZEyVlq4z7/AEDrcabiMFCCfikFxwgfxz9NYreNkaZniR7cdNbq223jNlVCuEWH0hLHtb8p1IUE+nYsMAfmrrqU8Nens4N6hYJ3S2Xd/jwsuanUxUalcj2A8xvh0D63cVZbN91+oWmmKHUvO9q2bQMKZQl+bMj3O3yrjb459HZEFDvKAAQVAOK6O/VxxUrYTrXp1qRpkxq7gV4fyDHJLBebXbIT0mSSk8Ka9nbSXfNSexR09Q/N3rcbnbYN5tsu0XSK3JhzmFxpDLiepDjS0lKkqB9QQSDXNfwkJN2w/VHXvRRqQ67YMdvAcipUolLTyJD8ckfQVobRz9Plisqf2lR1HXCXNPKJB6EEZblep5KbaumIA/xZEdRfgp/wjxI9F89y7LcPtGJZui4YsEsNW5doK7rdphWtK40WA2VOlSPLJUpzoCfx+kDmvPiXiL4YrVxOkGtmleWaQXCZEXOt0vKS03HfZSlSuXFA8NchC+DypPUkpKgeAYV2CsMHxAt0TxZR5iLjNCV9I5SDcl8gH4c8Dn8wr5+JpboMndptcVJiMu+031uO8HEBQcbFxicIUD6j3ldj9JpQ+0/VZ/6wE8JxX6Ry4ama3kO0x/0pjjGG3WefFTTqt4iEbS2Ozl8vbVqnK08U+20vLn7cmHHUhZ4Q60y6Q4W1cjpLnldXI49RzafD8tsGe4paM1xWeidZ77CZuEGQkEBxl1AUg8HuDwRyD3B7VBfiIMNSNl2qiHkBQTaG1gH4KTJaIP8AOBXn8OCU9L2UaXOPuFakW6S0CT+KmY+lI/QAB+i
ppw5tQHNpbB4OxZ8i3SM1FSWljhk7FbiMOX8yspXPTxostcg6EYbgEMqVKybJ0vBpHdTjcdlfIA+PvvNfp4roXXO/dHbm9e/Eq0V0a6BKtWBW85Ndm/VCFdZf6Vj+MI8ZP/SCsnUxXq06Jyc4Sdwb5iezVq15o06lUXwtNt5PlA7lSPore9vXh17ecexXWDUC02TKrrFTer4ypSpFxmznUgq4ZbCnVIRwGkq46fc9Ryaxjt1tPiawoFpt8+32bRuxXRm5XOH91I79/vrzXV5TLsdha/ufH5JJ8xXmr7cJR61aTMNDNFdQZcq4Z1pHhuQTJraWpEq5WONIfcSlPSkF1aCvsOw79vhxXMPcJogzsY3t6RZtt5emWuzZ7dGoq7I2+taE8yWmpMUcklTLiHkFKVE9KueD2Txu1/6xtTRVHtuAbuBzbI3CLaWyKwLfA2Zxpn2Wku3ke9G6ZPHiF1st8CFaoEa122K1GiQ2UR47DSQlDTaEhKUJA9AAAAPqr0UpVSSTJVgA0QEpSlQpSlKURKUpREpSlESlKURKqf4hls1YZw/TfPNDLNJumcYpncKRbYrDfX5yXmX2nG19wA2sKSlZJA6SeSPWrYUqpBJaQYgg9iD8lYGAQRMgjuCPmq32ze9hFnx9tzWXTzUXT/J22h7RZJeI3GYHXQO/s0mMy4w8gq7JV1pP0gVAO3XQvUXXbe7fd8Ge4BdcGxaNy3jFrvLBj3GcoRRFbfcYPvNp8vqWSr1UpIT1AE10OpWjSG1fGjzAEDcJzPPdeyzcJpmlNjE7zGnLfa65q717frPpJvv053RQNMcizzCbNbmoaWLRDclGKrh9t9vhAPluEP8AmIKuEqVwOex4wfiG3XXTX/TnT3VGxbccxsmL4zkQkphXNjrvr/mJBDrsFjzPZmQW+kFayoqWnlKRwVdRqVmwGmxrQfZdiH82LrfL5rR5D3l5GbcJ5QW9LFUM3057mGvWzWXj+LaBaqx8hyqZAkQLI5ikuTLbitPpcU9IMVDrUcny1cNrcDnBQSkdXabNqmbM4rtUwqHlGHZ3arjiOOwLbdLbKw26pmpfbbCClpj2fzHxynnqaCwAQSRVh6VYeVtRrbYyDygQPTPjuFlQjEWF3ugjnJk+vpxuubfhdQ8501zvVi26k6QakYsM3vEe4WeTdMOuTMZxIckdSXHiz0NEB1B5cKR69+1efckvMMi8RnSjVvHtHtUbnh2Fx4sC7XeNgl3Uy2sPyStSB7P1OoSHkEqQCD36ea6WUqWnC+jUH/TiOOEYRPTNS7ztqtP/AFJnhJkx1+t3NPxibn7fjOh13tcSS4Xshkvx2JLDkV1RKGClC0OpStsnsCFpBHxFWbv28eOcWmWvEtG9TZepnkqjxsRfxOa2pqYRwkvTfL9jEcK4JeS8UlPcc+lVR8YHP8TvF00sxSw3hi63fGr7JnXeHAPtDkFvhjgPBHPQo8HhJ4VwOeK6OYJqJgupdkRkGA5Zar9A91K3YEpD3lLKQrocCTy2vgglKgFDnuKpTaKlB/3S92WcYWCQeNxMaWi6mo7BVZOYaOXtOMEdjmM7yoH2abfntpGiFxl5+5IuOX5JOXkGUOWuE/PcEhzgJYabjoW46Gwe/Qk8qUsjtVa9nj+ZYnvl1o1NzHRnVOx4xnzkv7i3GZgl2DayZqVt+YBHJa6kAq5WABxwSDXS+laB0VxWjJpaBoAQB6ACPWVQtmiaRObg4nUkEn1JM+kLmfus0k3EbZN3J3nbd8Jm5hZb+0lGR2iCw4+sHoSh9t1psFzy3A2hxLqQQhxJKhwB1S7iO67cpuaiN4bpVtnynTQ3JIauWZ5V1IiWlg9nHIjamkGW+ByEAcAK4Kh0g1dOlUptDKYpOu0ZDcM45cP8q7yXPNRtnHM7zv58VjMYx+FieOWvGLc5Ici2qI1DZXIdLjq0toCQpaz3Uo8clR7kkmoZ3uXWWnbZnmJ2XFMoyG95NYZlttsGw2CbcnHHnEBA6zH
aWloDr55cKQQDxyRxU80qtdn6w1zXH2pnqrUHeA5rm+7EdFTnw2J12092qWvAs/wLO8bvuMuXGTNh3PELmwpTS5K3ElkqY4fUUuDhDZUvsfd7VC215/M8X8QjV3VXJtGtUrXiGcImRrRdpOCXdLS1GSytsrHs/U2laW1EFYAHbq4rpfStzUJ2gbQc8Jb3EH4WWApgUDQGRcHdiSPU9lzd8TqDm+omqejrWnWkeo+UN4HeH596k2rDrk/HbQp6KpKW3gx5bxIZWeW1KHp371MW/wDy6VqLtMyXCNPdPNRsgvuWMw0wIMPBrwXEpRMbWtT3VGHk8JaX7q+lR5SQCCDVwKVzmmHUPAOWIu7xPSwXQKmGsKwzAA7SR6kqrmx/MHcN2mYnjma4Jn9hvOG2r2e6W6bhd2RI6g8sJ8hHs/MnkKSeGusgHuBweIA8MaDm+n2qesQ1F0j1HxZGeXhmfZn7rh1yYjuID0pSkuPFjy2SA8g/hFJHc9+1dIqV0OqF9d1d2bgQepBPqAucUw2gKAyBB7ZfEyuaGtj+Z3nxMtPdZbTo1qlNwjE4bFruF5YwS7qaC+mSFrQkR+txCS+nlSUnnglPI4JzfixNZfrFp1h+A6VaVajZTcoN9Tdpht+F3VbDDHsy0p5dLAQVEuj3UklPSoK6SOK6KUrAMik2l912LriDu0jsujHFZ1YZlob0AI7wVp2kuXRcywO0XKPZsgtTjURhmREvdkl2uSy6Gk9SC1JbQpQBPHUkFJIPBNbjSlbVH+I8v3rCkzwmBg0sqI+KJo5qrqAvSTP9OMDumVxcDvj067w7S2HpoaUuMtJbZB6nP2lY90EgkcjjuJluW8Gy5LisiPo7ptqLkebyY5bhWKVh9xtwjSVDhBmSpLTcZltKiOpXmnsD09RqxNKyDfszRPskk8bxN+m5al3nFQZgRwsSRbqdVUjRbaDlWiG0HOtNrPemJGpudWy5zblcmFlDZusiOpDbTazwQhHISFHjuVK7c8Cv/h6aw6yaI6eS9tV32lajz8qg3iTJiyUwEwbdw8R1GbLfKUspSUnhaQ51J46QSAD03pVw4iq6po5obHBpkRujLkqEYqbWHMOLp4uznmuZexZzV/RDdZrNgueaG5dLuOdX9uWLrbIRFmiNpkyHHJC5T/QCwUSOpBSFLX0hPSCe395mNQNs/idX7Wq5aQ5vlmI5zaExIsjGbQuc4VLjsIKQBwnrS7H4KSoHpUFd+3PTGlRT+zNMjNjS3m0iI4WhS/zmrPvkO5EEGVz+36bdNbtcMcwDc3pPiEyx6mYMsyPuAmS29P8AZA950cgp9xUhsgKUykqHLi0pUspHV6tKN+u6vUqBHwaFsjyZGdBIiybncFv26yR3gOC++p5kKaSPnFrrKjxwlRJFX3pRgDAWe4STG4nODnfX45QcS6He8BE7wMpHDT/c1R3TWbLsU2Y5XprKj5jqJnOV2mTHU5ZbBOuJlT33Ap0hLDa0xWElZDaFlICEBI6iDWP8OW83TBdqFkwTP9P88xq+4ime5Ph3PELmytxpctx1CmOWP74JS4Pcb6l8g+725q31KlpLS8/ejkImI7npuUOAc1jT7pJ5zYz9ZrmltAOb49v81f1JynRrU+yYvqCuYxZbpPwi6NMKUqa0touksfgEqQknqc6Up/GKa6W0pUNhtKnRGTBA5Z97o7zVX1Tm4yef0FQ3eZtq1Ux/cRhu9rb9jj2SXrG3GG8kx6KeJU6M2C2XGB/xilMKU0pA5V2QUhXfiVdQN0R1F0ruuOaF6dZ9d8/yG3u26JaZ+LTram0vvIKC7NkyWkR2UtdXJ/CHqIATzzyLO0qmAGkdnd7BJPG+YncfTQhaYyKort9oADhbK28euoVedrmiVj2V7dLdh91XcLtcEuruN9kWa0y7i5InvAdXlMRm1vKQlKENg9HogE8E1VLw+JWbaabhNbMl1I0W1Uxy1ahXI3Czy5eCXZTagJchwIcLcdXlkofSeVcDsRz
zxz0zpW2Mmua7txbHAx8IELHABQ8AZSHTxBJ9ZMr8B5HP01qupGokDTPH05BcMayq+pW+I6ImN2KTdZalFKiCWo6VFKfdIK1cJBIBPJFbXSsyCcloLZqou2TWu/ZPrDqHdc80C1aw+fm17jN2WTd8QmIiItUWIlthL8gILbC+sSHFBZCQXgAomrdUpVrYQ3cAOwjvv3lVvJO/67blRDQjZvd9KfEL1A1Ii2mUxgci1rvtkcShQii4zVlDjSfxetoGWAPVKHUegV3vfSlQ3y02Uhk0QOUkjtMdFLvNUdUObjJ5wAe8T1SqI+LjpNqDqFo5iOTafYzPv7+H5B7bNhwI6n3xHcaKfNDaAVKSlSUhXAPAVz6Amr3UqrmkwWmCCCOYII+CuxwaTIkEEdwR81WqwboLnn+kF1y2ToDqhZLauyrisNysckyLhPuS2lDyY8KKh1wsDgj2l3y0clIHPcpgjwlmMw0n05yTS7VDSrUTFbxcsjVc4Tl0w65sRXmlx2mzzILHlNlKmjz5ik/OHHPfjoZStWuDaj3ge2ADyBxfH043OJbNNlOfZJI6jD8PXhZc5/ErxXWew6+6L7hsPwC8Zti2CPtvSrbbo7kgsSW5QdUpaEJUUBxAQkOcEAtgH4A4bf1lGtu67a6xecJ225rj+OWa9w7m8m/Rwi8y/wAG631MW9nzFhhBd5U6tSSQQUpI6lDpjSsBT+x8HQOxDmSCedxZb4/tfFGeHCeQBHSxVIdW9Usp1U2C320QdBtULfe8gx5qxWi0OYzJlTJjqUMpceLcVLnkMA9XQt8tlYSSE+nOy+Hne52n+0iwYbnmB57j17xFqYq4wLhh10aeKVy3VoLCfZ+ZBKXEnpa61DvyBxVuaVu+oXPqVNXx0iY9SZ+SwawNp06ejJ9QB8AFzZ8POPnWIboNa8hzvR7UrGbbqNc1zLJMumG3JiOsGY+4EuulnoZJQ8k8uFKexBPNWK0113vmI51qjhetOHZtb7HGyua/jWQv4/Ol22Zb3OnlgPNtLCOhfX09XCVJUAknjirOUqjfIxjBk1uDpIIPMRHIq7/O57zm52LrBEcrqjG2bQeLZt4+p+5XBMIumIaZS7MLdbYj1pfhLustfkuSX48JSA6lkKaUU/gx1qX7gIqMb7OzGR4pFp17Y0X1XcwCFbxaXL0MAvHR1GA40XA37N5pQHFhPPT6AnjjvXTWlGeQ04yYCB1kGf5jGUI7zioDm+J6RH9omZlUM8WbTDPNUNEcKzPTjF7rfV4tfE3KVBjQXVSkxnWeA4WOnzB0qCApJT1J6jyBweLA6Qbk5uo+Gqzi6aK6g4pYIcJkOOXWyPuT5Uw8BTcWBGQ6+6ynvy8UpBJACT7xTOVKM8ge0ZEyOBgD4AdeFkf5yxxzaI5iZ+Ppxuuafh3pzjAtx+tN81B0b1Nxu26jXX2yyzLhhN0bYVzMfWEuuBgpZ919J6nClI4PJHFbZuA2/wCqWhG8O1b2tFMNuOYWO4kMZpj1pb8y4JbW0GXnmGfV4KQEL6U8kOI5I6TyOgFKN+zFLBY0xA4jKDvBGfRHfaOql+VQyeB3jcRFtyqZr1rXcNwGjl50k254Zlt4ybOISrO/JueOTrTBscZ8dMh6ZIltNoSUtlwBtBWtSvQEevnzvSODts2C3jQHDrBkuV3aVjE+1MN2KwS7g9PuUlCvNdUmO2vykFxwkFwgBIA5JFW7pVKlNr6b6YtjseQmw4XJ1ubzAAvTeWPY83wXHO1zxsBpbKJJNMfDQm3jTna1EwLULT/PcavuOSLjMlxLnh90ZU4yt8uJUwSxw+ohY/BtlTnIPu1EmypGcY9vm1lz7LtHNTbBj2okmUmx3K44VdGY6yqcFt+asscMBSD1dTnSlPB6iK6U0rc1C7aBtBzwlvcAH0A6rEUwKJo6Fwd2JI9SVWjDdc8iwfWXVfD9WcOzaLigvrczGcl+4M6ZbVsLhxw9G81ptYbCXEqUknhBK1jnkcGL9CNBIdw
34ZTuS0ywqfiWnKbCYCVSbW7bEXu6PBPnOx4zqULDAACisoCVODlPVyTV56VnT+zLHatbhnf5cN+nrB0V6nnDho4zyvit19JCVUXxJrVJt2l+Daxw2VLOlue2bIpRSOSmJ53lOn83K2+fqFW6rBZ3hWPaj4Ze8CyyEJdnv8F63zWT26mnElJ4PwI55B+BANUeXNh7BJaQ4DeWkOA6xCszCZa/JwIPJwIPoVmIsqPNiszIrqXWH20utrSeQpChyCD9BBqqeqNt1QxzeRj+qmX6nZjZtJLTZAiJa7Nbpcy3S5ykuoebnpj9QZ7qbcS462Uq6QlK0qFbxtdyC/4paXdt+pcoqzDT5hMaFJcPAvtiSeiHPa/fcICWnQOShxB5+cnmea1cGtqNqUzIFxxBBFxvvloRBFoWbS403U6mZsY3yDbgYsdWmRnKqbuaVD3h4AjQLTDHbpc4d9uUN275TNs0iJbrJDZeS64409IQgPyVBPlobZ6uPMJWUJ9Z41Il5bp5o1fJWk2Lpv2QY/Y1ixWp0kiS801w02eCCr0Huggq44BBPNb1XmuU5Nst0q5LjyZCYrK3i1GZU684EpJ6UIT3Uo8cBI7k8CsniKTmNMTcnWYgdhl13rRpmo1zhMWA6ye+vIKA9lWuGr2tem9yf140+Xh+b2C5+wzoKoL0PzGltIeZd8h4lbZKXOCCfhyOAQKkvXTTpzVzRrNdMWZSYz2TWOZbWXl/NbdcaUEKVx8Arp5+rmvPozjmQW+1XrL8xg+w5Bmt1Xe5kHqCjAb8ptmNFUodituOy0FkEjzC5wSODUhVfaGNrNLHCJABA0MX9Z4brKtBxouD2mYMg9bdPWM7qnu0HURGie3TGtE8v0/y9jUPE0yra9jcWwyXHJr3tDi0OsyAj2csuBaVeep0Njk9ShxUaaRMar6Qbk9f9R9WNP7k9qHnMOC3hUa0QZM2DOStKumM1LS35aUtKRGS6twoCQ2pZAFdDaVL3GpUNV+bgQYtZ2cbietrAKGgMp+E3IEEdDIB3jtOpUQZq9qzo/tikq09tbud6gY1jrLcVmWpyQu5TUIQlxxQ6gt0k9awgKClcBIPJFYrZxrFqjrTpEu961YKrEs1s90dtN2tyojsX3gyy+255LpK2+pqQ0SlR9efQdhM18ugsdmnXk2+bO9ijuSPZYTJekP9KSehtA7qWeOAPpIrUNGcXv8Aj+MTbtmDLbGRZVdJF/usdtYWiK690pbjhQ7K8phthoqHZRbJHY1IcXVKjnZEdjOnMTO6BvCQG02NGYPcRryMdTwK3p1flNrc6VK6ElXCRyTx8AKpf4a+DZgmHq3rrqHjVyst91LzOVKbj3GKth8Q2VLKPcWAoJ63nUjt+JV06VVnkeX724eUkE/2gKXeZgZxB7AgfGeiVU7DtxW5dW827aM6i6Mi06ZT35MLGsiRBfSH3WozkltXtJUWnvMbYeJQlIKCEg8cHm2NRpItd/zLWqBdJdqkwcbwFh9UR99HQbldZTIbLjQ9S0zHW4gr9FLfUB+1nmG2qA8DbTLPppxKkwWEHeOeeXXXgFJdVHltajafbzsk1T1a1TzGDpu5aWomM2WHbZkuyO9TSA4ZC2Atth5DqXFfhEJUvqSUrIBSLcUoJa8PGkjuI77johgtLTrHoZVStwNhh70ZGGaZYdi91dxWz5JFyG/5ZcrS/BjMR4/V/esJUhCFyHnioJKmwW0J56lc8Jq2gHA4Hwr9pVhDW4RvJ6kAfAAdN6qfM7Ed0dASfiSlRVupzeNp1tv1JzCU6G/YsbnIZJPq+60Wmk/nLi0D9NSrVadabcNzWp9n0FtifacHw65R77qDLSfwL77X4SHZwfRS1L6Hnkj5iEoB4KwKxq0/Hb4OWKxO4anoL8TAzIWtKp4LvGPu35nQdTbhnkCtr2W6eStLNrGmuF3BgszY9jZlS21DgofkEyHEn6wp0j9FTUTwOaJASAlIAAHAA+FftdNep41R1TKST3XPRp+
FTaw6Bcy96EzNc23paLaj4Porqrfca0+kQ3bxcIuB3foTxO8xwNhUcKc6WwDykEHngEmr06gapWVjSa75RDxvNriiZEkxIsCJh11XcHXlNK6U+yez+cgE8DrWhKO/dQqS6Vzuph2zHZjkS4z+LP8Ax6ytw/DtA2gZgNH8uX+fkuefhLMZhpPpzkml2qGlWomK3i5ZGq5wnLph1zYivNLjtNnmQWPKbKVNHnzFJ+cOOe/G8eIltMz7WJWH656Hobd1F04kIfjwisIVcI6HQ8hKFKIT5jbiSpKSQFBaxzz0g3UpW1Z5qubUFnNiCNMIgeme9ZUmikHMN2umQdZMn1yVb7LvNtt4wNMhGkOpSNRhF6F4UvELiiQmf08dBkKZEdLPX385TgSEdzwfdrDbK9u102u6bZTnWqRcl51ndyXf8kRa4r09UYqUotxWkMJWt4oLrhUUJPKlq45SkE2ppUTBc9oguEcAJmAOJAm5yHGZjytYTIBniTECTwBMW1m9o5r7IG8zxPehrdneZaQam2CwahTpLliuE/CLq2y6Fz1OI8xXs/DPKFBXLnSkcHkiniGNZxmu5nRPIMB0d1LyW26cXYTL3MtuF3N1hAE1hwpac8jpe91hR6mypJ5HBPNdKKVFP7LwMP8A0YjjExPc+nWX/aeNP/VmeExMdgqt758oezTaRlmOYVg+eX68ZjbhFtdug4bdXJJUH2+ovN+z9UYABR/DBHUB7vNf14cr93su13ENOMrwvLsZyLGmJLU+HfsdnW4DqlurQUOPtJbcBStJ4QokfEDirRUqWQwv/ejpEx8TKh8uDR92fWJ+AXludyg2a2y7vc5KI8OCw5JkPLPCW20JKlKP1AAmuSVp1f3I6S6kZlvwjbbLlkdp1IuyrfEkTW3ku23H2FoCPwKB1tF5CGgh5Y6AWT2V1iuu9KoAW1PFabgQOpv3AjTXOVckOZ4bhYmT0y7G+uQyhVIwDxK9HdS4UdGG6Zas3i9vJHXZ7dijkl5tfHcFxK/JA5/GUsD4nisxjWg+Z61a7WPctr/YGLA1h8ZTOE4YmSiU5AWs8rnTnUctmQTx0ttlSUdKPeUpNWfpWgIDxUAuMuGkjjBOcxpe6zglpYTY58dYPDtORtZKUpVVZKUpREpSlESlKURKUpREpSlESlK81yuVvs9vk3a7TWIcKEyuRJkPuBDbLSQSpalHslIAJJPoBUEhokqQCTAXppVaV7/tFLXfWYea47qDhuOT1lu1ZhkWMPw7Fcz+KWZB5UEqHdKnEIBHfnggmXNMdaME1kNzmabzX71ZLYtDH3dZZ4t0t89XW1GdJHnFvhPWpAKAVgBRUFBNgC4SPrnu67xvVSQ0wfrlv6bjuW9UpSoUpSlKIlKUoiUpSiJSlKIlKUoiUpSiJSla/n+d4xpjhd5z/M7kiBZbFEXMmPq79KEj0SPVSieEpSO5UQB3NVc4MBc7IKWtLyGtzK2ClRloZN1MyqxyNSdSnHbY5lAblWrGA2gJskDuWUOqA6nJS0qC3ST0pJCEgBBKpNrRzSww7P4cOiq1wcJbl8eKUpUD749aJug22LNM8ss9UO+KiptlndRx1omyVBtC0c/jIClOD/ErGrU8Jhdn8zoOpstKTDVeGDX6nop4pUL7T7DqlD2zYdbtZMtu10zCfajJnz5LnMxj2gqcbQpagSXGkLQnqVyepPfmvppJqZkkTO7zoBqvNbk5fY4qbparslpLKMisyl9CZQQn3UPtr/BvISAnq6VpASsBPRUp+HVdSm4nrGccs+UnIFYsqB9MVRkfnkeR+JAzKmSlKVmrpSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlV83a7lNSNs2Ky9QbLoQnNMUtkZl25XMZO1b1RXHHvKCPILLi1gFTfvJ/f+nYmqve1glys1heYarB0rVdLs3Oo2mWK6iO24W45JZYd3VFDvmiP57KXCjr4HV09XHPA549BUZb
cd5ukG6LIcuxrTdq+tSsOeSiSq5Q0soktqWtCXmSlaiUlSD2WEK7j3fo0cxzahon2hMjlms2vDqYqj2TF+eXdTvSlKqrJSla/n+d4xpjhd5z/ADO5IgWWxRFzJj6u/ShI9Ej1UonhKUjuVEAdzVXODAXOyClrS8hrcytgpUZaGTdTMqscjUnUpx22OZQG5VqxgNoCbJA7llDqgOpyUtKgt0k9KSQhIAQSqTa0c0sMOz+HDoqtcHCW5fHilKqs3vltbm+RW0FWMssRW4Skfdpcolb1x9mTJSylvjhKPLKk8kklXHoPW1NVb5mNqDJ2XePrgpd5XmmcxE9RP1xkaJSoq3Kak5Vpvpo4vTuFGm5xkk6Pj2LRpBAaXcpKilC189uhtAcdVz26WjVcdv8Asp3Q49rbZta9xG6a4ZYu2F6QuxQpktURb621ISnhZQ2lpJWVdKWgCUgcAUp/aOINmixPGJgbzBHcdFTyNkXJyHDKeAmexV46Vo2tGreP6KYDMze+sPzHEuNQrbbY3eTc57yuiPEZHxW4sgfUOVHsDX10nteokHFETtVL61PyW7OmfMjRW0oh2wrA6YcfgdSm2wAOtZKlq6ldgQkG+aY0+O7nFzuETmJHyxx+G/lpxOWRjdKVGWW6/YljucjS7H7Nfc0zNEZMyTY8dYZdegx1fNdlPPutR44V+KHXUqV6pBFf1g2vuG5lmcvTK42684lm0OP7arHchYbYlvReSPPYW044xJbBBBUy6sJI78Ub5suPWM43xrGUFHeXPh65TunTfIUl0pSiJSlQhq3u1wjSi/SMebwTULNpFrSly+O4fjy7mxY0KHUkzHApKW1FJ6+hJUsJ94pAIJguAsVIBOSmCXYrJcLnAvU6zwpFwtZcMGW6wlT0XzE9LnlrI6kdSex4I5HrXvqFcH3iaAao3Oy2LSvNBmF2vSgRb7THWuRBYH7Y/MSsJ9mbR8S50kkhKQpRAMtX+2SbzZJ1qhXmXaZEuOtlqfE6POirI4DiOtKklSTwQFJI7dwRUuBaJj6+uihpDjErIUqJNEdVr5kd2yLSTUtEaPqFg6mk3Ax0eWxdoLvJjXKOgk9KHQkhaOT5biVp546SZbqSMiLg5fX0RkbqJuQcx9fWhFxZKUpUKUpSlESlKURKUpREpSlESlKURKVrGo9lzG+YpJj6f5SLBkMdSZNvkuMJejuOo7hmQ2RyplfzV9JCwDylQUAaweh2r0PWLDnLu7bF2e/2aa9Zcjszqwpy2XNg8PMk/jI9FIX6KQpJ+PAN80gZj4b+U2O6ROYkfLB3/Hd2uOR3FSHXhs9ismPRVwrDaIVtjuvuyltRGEtIW84srccISACpSiVKV6kkk17qURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESvlKiRZ0Z2HOjNSI76Sh1p1AWhaT6hST2IP0GvrVZfEI3J3PbVt9m3zFHQjLMkkpsViXx1Fh5xKit8D4lttKin4dZRzyKpUf4bZiTYAbyTAHUq9NhqOjL5AXJ6BSVnetmhlvuc/TjLbxGvU1hgOXSzQ7PIvRiMHuFS2YzToYRx35eCRx39O9bFhWc6U3hTOKae5Zi8pcS3MT2rXaJjClMQXRyy6GWzyhpQI6TwAeRxWj7StE4GhOiFhx55suZFco6Lvk9xfPXInXR9IW+464feWQpRSCT2SkVWfw0MZsuTasbhtwVltzca137Ln7LZQ2OEJjNurec6PoSfMYPA7DjgelbYMNZ1FxmASSN4IFuBLhB3AmLwMcYfRFZupAAPGTPDytJI3wJte/1KVq2p+cSNN8GuuZxcLyHLHbYz5qbRYIyZE6SeeOG2ypPPHqeCTwDwD6Vk5wYMTslo1pcYC1LcfrFc9HMKts3F7XEu2VZJfbfjtgtslSgiTKkvJSrq6SFdKGg64SPTo71Ko54HVxz8eK5s6JbqLzrlqlJ3DagbftW8mdxR6TZ8Vx
/FbHHm2/HSoBL7z7jkltbk5aeEkqbQEI7JB55Fmv2ZFy/wPdxn2SifrlXAIYMWZvyBAgfE9Y0VSZeQ3IW5mTJ+A6E6qxtKrl+zIuX+B7uM+yUT9cp+zIuX+B7uM+yUT9cqFKsbSsLhmSuZhi1tyZ3G71j67iyHjbLzHSxOi9yOh5tKlJSrtzwFH1FZqpILTBUAgiQtI1P1u0j0XhRrhqrqJYsXZmqKIwuMtLa3yPXy0fOXxyOSAeORzUbfs/Nm3+EJiv9M5/9pXlyHQDR/G4+abgtxtisOe31mNLuMmZeoCJUS12xgLWzDhx3+pDSUNgcqA63HFLUT7wAgvJNENIcZ2fYK7fdHsIj5dqRe7FFdl/J+IJMZ273Nt51pDnl9SA2w642kAjpSgAcAVWnLzBzlo5F5IaPQyfldWfDb6eY9GiXHpI4wdDZWawneLth1HyiDheD61Y3d75c1luHBYfUHH1BJUUp6kgE8Anj6qmStHxrQvRLDLuzkGH6O4RYrpHCgzOtuPRIshsEcHpcbbChyCQeD6Gt4qxiBGaqJm+SUrQcZy+95DrBmmOtPt/J/FoNriBAbHUq5vpdfe5X68JYXE93098msZud1lb0B0Ky3VRLTL820Qim2x3QSmROdUG47ZAIKgXFp5APPANZveGMxn/PDvpvkK7GF78A/wAfQ15FSjVTd21wOpOvWhu2AK67XfLu9mGSM8+69AtqS4yysfFDjye4PxQKsDo9ctQLzpZit31VgwYWXTrVHkXiNCQpDLMlaApSEpUVEEcgEcnuDx2qs761XLxXYzUokptGkalxgfQFc4hRH6FmtsBZtTKZ0cT1Y1zh/U0c1mHh+zPqN1AHR7mtPWHHqrkenYVjckyXHsOsczJsrvkGz2m3tl2VNmvpZZZR9KlqIA78D85ArJ1SzxJchz/Bm9I9SrXgk/MMFw7KvuzldqiJKg4WkJ9lW6AFcISS6oKUCgLCOfUVi5waWgmASBO6TE9P8LRjS6YEkAmN8AmOuSsTaNxekl5uNvtzV9ucE3Z5Me3Sbrj9xtsOc6r5iGJUphth5SvxQhZKu3HNQ1ukwmZr9uH0e0MkwXncUsDr+oGUKU2fJdbjKDMOOT6ErdU4Cn16eTUcNbnNAPEaTj+jmJ6s5lpvdGLqxeJlkkQ0Mu31iPytURLzbikEAgL46ufc6ug9Pu32rXCWObUcMnSOMDyno6/8MGxWeIODmNOYg8JJBHVvbFOYX4AAOAOAKqhv7lyNMrZpxucs4U3O0zyyKm4LR2L1mnER5jKuPUK5aPf0I5q2FVs8RyGzO2VaotvgEN22O+nn98iWypP+kCsK1TwQKw90h3QEEjqJHIreiwVXeEcnS3uInpMqx7D7UlhuSwsLadQFoUPRSSOQf5q+laPoXPkXXRLT+5y1FT8vFrU+6T6laojZJ/nNbuo9KSrgngc8D1NdO0MFCo5n3SR2XLQeatJrzqAe6/ar1uYulzgav7do0G4yYzM3O5DUltp1SEvI+5kr3VgHhQ+o1/EneDco0l2P+xD3EO+UtSPMaxSIpC+Dxyk+2dwfhUE7gdz0/INUNC7ovbNrfajYsxfmJi3HHIzT9xJt8hHkxUiUoOOjq6iklI6UqPPbis2GalM6YmdsQWzhDKgP3X/2lX+pVcv2ZFy/wPdxn2SifrlS7pbqG/qbjJyR/T3L8NUJK433NymA3EmkJCT5nltuODoPVwD1ckpPapAJuqzC3CvBfb/YsXtUi/ZLeoFptkNHXImTpKGGGU/StxZCUj6ya99cubJrRF3WbucrynU7Bs4zPSrSyYuBjOOWDHZN2gu3BLikJmTW2klClEIcWkL+lAHZKuaA4qgpjUEzuA+dwALSTmFYjCw1DpA5k5fAknQDVX8wXcloTqdkYxPT3VCx5DdC246lm3vF5K0I+cpLiR0KA+pRqSq0nTDUi2ahQJRtWD5fjLNsLbKWMhx5+1FaSDx5KHUjqSAOD09h2+mt2rRwiyqDKUp
SqqUpSlESlKURKi3MN0m3bAr07jWV6x4vDu8clL8BM9L8lgj4ONNdSkH6lAVWjxMtw+eYYjCNumk8y6wsi1MkludMtDDj09i3BxLZRGQ375ccKlfN97htQHzuRK23rKtJtMbPYtJNM9AtUschrU3GXPn4JMioeePZUmZJUgcqJ5KlrPbn4AAVFKaoLhlMDiRnyAy4mbWlTUikQ05xJ4DTqc+Wt4VjIM2Lc4Ue4wH0vRpTSH2XE+i0KAKVD84INfeleW63ODZLXMvNzfSxDgR3JUh1XohtCSpSj+YAmj3NYC42ARrXOIaLleqlUpsmlu6XdjCb1mvO5LJdIsavSfa8TxfGGAh1m3q7sPznepJddcR0rLfcAK4BHzRl8e141h2r5HD083jXRjIMPubyYuP6qQoYYY80/NjXVpHIjuH4Oj3T8SeFKTYAzgf5XHQ/DcDw32zsqkiC5lwNR8eI4jS+V1b+la/kGf4Ti2HSdQcgyq1wsbixfbHLo5KR7N5PHIWF88KB+HHPPIA55rR9sGTZXnOksbP8scmhWVXO5Xq2MTBw7HtT8txcFsj4ARy0QPgFcVAEkjdn8hzNz0KEwAd5+Uk8hYHmFLFarqNqnp5pHYhkmpGWwLDAcdTHZXJWeuQ8r5rTLaQVuuH4IQlSj9FbVXPXRnIHdyPibai3vL+Zdo0WgP2vGIDvvNRJQeQw5ICT28xSg+rq9e6P3o4hs1KraLdQ49GiT1yA5zorOhlI1ToQOrjA/M8lbuybkdKLzdLdZ5E+/wBhk3h0MW35SYvdLI1NcV81DLs2O024tX4qArrPwFRr4k37ifU7/mUT+usVnN3u1mbuqxfHMXY1Vu2Fx7Jd0XOR7FH89E5KR2QtHmI4UkjqQslQSeT0ntxgPEgb8nZDqW11qX0QIaepR5KuJjHc/XXPtcHZnE5z6Wv3kdJW2yyNppgXFuhnLtBW8aEZVj+EbStPMtyq5t260WvCLTImSnAopabERrlRCQSf0CvDt11U2l6g3zK2NtkjF3bn5rc/I12WyKgqeccUsIceWWUB1RIX35UQST8e+x7Xf3Nmln8jrP8A1RuqxbNmGI2/Pdc1HaQ2j2+3r6UjgdSi4VH85JJ/TXp1/P8ApCrTO557EW5GR27cFDy7AyoNMA727i/fvYTLd5O2jBc6GmeVaqwYOTma3bvud7HKcc9oWoJSjlDSk9ypI554HPc17Mg3X6A4y7NFzzwri211TE65QrTOmW6I4k8KQ9NYZXHaKT2UFODggg8VV7xcbXBuuM6LRZrHWiRqBHirUlRSvynGyFpChwpPPA9CPQfRV67VjeP2TH4+KWmywolmixRCZgNMJTHQwE9IbCOOOnjtxxXIyXUi7UOLeBhrT09rj0i/S+G1A0ZEA8vM4dcuHWbMdyTH8vskPJcVvkG8Wm4Nh6LOgyEPsPoP4yFoJSofmNVf3bXA6k69aG7YArrtd8u72YZIzz7r0C2pLjLKx8UOPJ7g/FAqHtpF6l6Ab/tWdo9mfcGC3RteRWS39RLdtfU0zIKGgfmoLbykED18pFSU+tVy8V2M1KJKbRpGpcYH0BXOIUR+hZq1OHvoVG+y6XjeC1rnAHk5nUDjCq+WMrMObYbO8OLWyObXzwPJXI9Owr99KVHm4TPXdMtFMxzWICqfBtTqLc2n5zs50eVGbH1qecbT+ms6jixhc0Sd2/h1WlNuNwbMSuVm53HLxiDmM+IJjDKly5GrV3ecdTz+EgMyg1A5P7wogvJ+sPD6a7A4pktqzPF7Rl9ikJftt7gsXCI6k8hbLqAtB/mUKgbUPa7DyfZC9trbabdmw8UYjQ3CPW6R20uoc5/jSEck/Qs1F/hMazPZ9t1d0xvryhf9M567S8y72cENZUuOSD390+a19XlCtqTQxtTZAZFOC3i2Aw9ZAPVY1XeIWbVEY5B4GS5voS3jCmrKf/Hrdxh+NfPgab43MymUPVInzlGHEB+sMonKH+NU6VBe28/KzMNXtXV
++i/5c5Y7ev1BgWlsRE9J+gyBLV/nVOlUb+zZxE/zeYTxAIb0V3ftHcDH8tj3cCeqqbm9wOsHiAYbpk8rzbDpDjb2ZzGD3Q5d5KgxF6h6Ettr8xP0FRq0OR3qNjePXTIpn+57XCfmu/4jSCs/6Emqj7alquW/7dFc5RJehs47Ba5+DXsx7D6vcFWS1xYek6K5/GjAl13GLohAHqVGK4BWFZ7qewB7cy1zuriSOwwjkANFtTYH7aabsg5regDZ9ST1Kqn4UNwmZ3pdqLrbkS/aMizrOZkifKV3UpDbTRbb5/eoLqwkegB4FeXxTb/K0qh6Ma/4+fIvmG5shlD6OynIrzKlvMKI7lCwx0kfQT9NfXwcFpVtHkpTxynLbiFfn8qOawvjSKH7GvFWx85eZxun/wDFJVdW2f8At30jTHsmjH9A+Fljsf8A7gVMfveLPXEr9wpbM+FHnR1dTUlpDyD9KVAEf6DX3rBYGy7HwbHY8gEOtWmIhYPqFBlIP+ms7VqzBTquY3IEhZUHmpSa92ZAPolaXnuqekWjFvN21FzfGsRjTXVOJXcJjUUyXe3UUpJCnFenPAJrxa/awWbQTR3KdW76158fHYKn24/V0mRIUQhlkH4dbikJ5+HPPwquO1baxj+qeMw9zW6mwQc/1Cz5hN1bYvkZMqDZbe6OqNFjxXOWkgNlKiSnkFXA4IJVg2Xl2HJsSeeQ5mCeAHIHZ0NaC7MzA5ZnkJHMnmRvdq3p7D7DMm3Cx6w4DbpdyX5s1+Iz5Lklf75xSWwVnue6ufWpm0y1b011lsDmU6W5nbcltTMhURyVBd60IeSlKihXPBCuFJPBHoRVDbzoTofgvif4pgOM6U4m/juZ4VKnXmwyLPHkQYr6PPKH2mFoKGSfZ2x7gA7q4+ca6EYrhuIYLahYsIxWz49bQ4p0Q7VBaiMBavVXltJSnk8Dk8VdkOpNedZjhDi09LGOiq+W1CwaR6tB73VZN3lxXo7rhoduJgLLDLl++QORqSeA/bbgCW/M+kNOtlwc/E1bOqfeKygI2h3K5oPD9syCzy46vilwSkpBH18KNW4tjy5FtiSHOep1htaufXkpBqKV6Lh915HQta7+4uPMqavlqsP3mT1a4j4YRyC9NKUoiUqFdQdy87Acsm4q1ts1qydEIoAumP49GkwJHUkK/BuKkoUrjng8pHcGtc/ZkXL/AAPdxn2SifrlQCHCQpIIMFWNpVcv2ZFy/wAD3cZ9kon65Uu4FqO3muEHObphmT4Yy35ynoGTw0RJrLbXPLi20OOAJIBIPV6fRUkhrS45DNQLkNGZWSzbPcK03sL2UZ9lVrx+0sEIXLuElLLfWfmoSVH3ln0CRySewBrWMa3A6U5Xe4WOW6/T4lwugP3Oau9jn2sT+AVERly2WkyD0gnhsqPAJ9AaqLs5u1w3s68Zjup1DYVKxXCLgbHp5Znx1RoK+OtyX0H3TI8stEr7kKcPHHQjiwmt+2O9606zabaiyNX7zZsbwGY3c3sXisksXKY06HGnVLDgCSCAk9SFnpBCenqJqWtINPxLYoJ/dabgneYgwN8TmRDyPOGXwyB+84aDhNpO4ndM90pSoUpUY3bcpoxab3PxxGWv3i42lXRcmLBaJ15MBX72SYTLoYI+IcKSPjVd/ET14zmyzMD2taP3R225jq5PbhSLkwopdgW5bqWlKQR3SpalKHUO4Q25xwSCLF47obYdPNDXdE9JpzmINIs71uhXWI0FSWJTjRSZqu48x7rPmEkgk/EduKAudSdVaJAJA4kZ8gJA4mcok2IDajabjcgE8AcuZNzy5rdcWyvG82sMXJ8SvcS7WqakqYlxXAttfBKVDkeigoFJSeCCCCAQRVYrhcV6N+IdbIUdRZsWu2LOCS1zwg3u2AlLvHp1KjcIP09voqU9q+gD223SlvT2fndwzG5PXGVdrheJqChciS+oFZCVLWQOw9VqJPUonvwIV3vrVb9x+068xu0lOdSIXI9fKeS
wlY/NxWwDW7TSwmQSGn+MYesEg8YBWcl1CpIuASP4PMO4bB4EhXKpSlUVkpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKpJ4oeml0ynENMtRW7bIuFhwDMI9wyRhhsuKbtjikB18pHJKUeWOrgdgsn0BNXbr8IBBBHIPqKi4c1wzaQRzaQfkpEQWnIgjoQR81Amtu4OyStMr9ZdB50bUHN7tYpTlnt2Oym5amUqYV0ynloUUstp9U9RBcUAhAUo8VAOw2+RcZ24aNaUaa5BGXleQ3yTfctaYKHX7dAZkurle0oPJZK/LjxR1AKJcPT3SSL2WqxWSxNutWSzQbeh9ZddTFjoaC1n8ZQSByfrNf3DtFpt8mTMgWyJGfmrDkl1llKFvqHoVkDlR+s1anFNznDXD/SSQORJE7442q+Xta06T6iJ5gZbp119dKUqFKq1uKsrG3zUawbscKZFvhy7nDx/UeGwOlm5WyS6GWZziR2L8d1aCF/OKFKSTxVpAQQCDyD6V8ZsGFcoq4VxhsSo7vAWy+2FoVweRyk9j3ANff0o3yswbiY4AxbvJ6o7zOx8BPEjXnEDoEpSlESlKURU+8QPLbhk6tN9puMF03TV7IY7N0LQPLNkjOoclKJHoDwP81CxXg8UO53HBdD8Aziyw1qiYVqJY7s+20OyGWA70D6h19CR9ZFXJcgwnpTM56GwuTHCksvKbBW2FcdQSr1HPA549eKTYMG5R1RLjDYlMKKVKaebC0EgggkHt2IBH1gVDQWtbGYeHzyLSB0De5J1UuhzjIthLY5gyes9gAv4tdwj3e2xLrE6vImsNyGutJSroWkKHIPcHgjsarxjOq2ebl9QcssWl2Srw/TbBbkux3HJIsdl65Xu5tgF9iGX0LaYYa5AU6ULWokdHT84WRqlzNg1G2c6RapYLbMKn5BY73c7jPxHILVIjqXHk3RYaZizGHXEOhxEhxCUuNJcSsEc9BHFQ8+dxaPdOEb3S2ATyJ4E9jNNpLGtJvIBO4QZIHOOIBPMY5Ks70c23ZPuvwbUm+yJrlym5VKtN9fbmQb3axJ8lhDpUjzWnlQmmCl1pafeCQUlPapszvAJW5n+43lDqozOAQ5DObXO3SFK9olyRHSu3sFHT0ltK3VrXyR3bSODzyNOyPRjPtesJxnQ+74tL090isjMBm7szpbDl5yBmIEeVES3GW43FjqU2lS1qcLquAAhHc1aGFDiW6Gxb4EdtiNFaSyy02nhLaEjhKQPgAABW0BhMGQ0jBwDRYnrhgaFs63ykvAMRiDsX8WnQYpI0cBNrfaqe6nRTg3iVaS5rIHlwc8wy6Yt5p+aZUZSpKUk/SQpAFXCqEd2ej181S0/t99wMITnmn91YyvFVKISHZkY8mMo/BDyOps9wOSknsKxxCnUZVOTTfkQWuPQOJ4xC1wmox1MZkW5ghzehcADwU3VU7QjVvVtvXjVuHuJ1ZxCyWKFclxMWxOUtiHNjxEOq8uYFLShTrLjXQQvrcBUVfMKSmrB6UalWLVzArVndgDjTU9spkxH09L8GUglD8V5B7odbcCkKSfims7eccx7Im22cgsNuubbR6m0TIqHgg/SAsHg1ctNOpJ3ERpeL7rRbnZUDhUpwN4PG02/McLqnGead6a7h93OlmaaKWq0yEac3B+85hmNmaQIjxSE+zW/wBpbHRKfLgUVJClFtBV1EdXFXYr5RosaFHbiQ47TDDSQltppAShCR6AAdgK+tGwymKbcpJ6nPlkLdbkkkZc/Gc4A6Ce+Zv0yASqreJldno+0rIMVgJLlzzO52rHbeyn5zz70ttXQB8SUtrq1NVjyO1jcpudsMdhBkafaHTF3CbJHdm45UpIDMdB9FiI2orWR6OOBB7g8ZlgrPbTdkSCfwggu7jyjiQNVo15pA1RmBb8R9n1ueAJ0VgsKx5vEsOsWKsnlFmtkW3pP0hlpKB/2azVKVs95qO
L3Zm6xpsFNgYMgISq4bo/9+bbZ/L6R/3XKqx9aNqDpLY9RcpwTK7rcJseTgN5cvcFuOUhD7qozrBQ5yCenh0ntweQKo2z2O3OaegcCfQK59h7d7XDqWkD1K3mlKURfhHI4Pxqh+zm22fZ7q5q7orqzcYuONZPkHyixG8XN1MeHeYSgoFtp9ZCC82Cnqa56u6iAQOavjXkudptV6iKt95tkSfFWQVMSmUutqI9OUqBBqGy1+Magg8iQe4LR6jWQd5mYTvB6gEfAn46LWMT1h01z3IZuNYNlkTIZNtZ86Y/awqVDjnkANrlNgsB088+V19fAJ6eBzUE6gb4V6e7wsd2v3TSm5Ktl/TDaRkvtCgkPyiEtFDXlkKaDikNKV1ghSj27cG0MOFCt0ZuFb4jMWO0OltllsIQgfQEjsKjLU5+TmedYlpdZ2HXEwrnDyu/yQg+VEiRHvNitlXoXHpTTYSn16GnVH0HMj9pT3TcbxrygX1yT3HzutwNu5JsMhdSpSlKIlKUoiUpSiKk+9zTi747uC0T3bMWiXdMdwCf7DlSIjCnnYMFayUzfLSCpTbZccKyAeOEn05IsbM3KaDRoUCZE1Vx67LupQm3RLNMTcpk1SvQMxo3W659fSk8fHgCpLrHW7HMetEp+dabDboUmUeX3o8VDa3T9KlJAKv00p+Rnhe7iLv5okdxI3cdD/O/xNYA7THxjjw1yAPIB+msdkuP23LMdumLXlta4F4hPwJSULKFKZdQULAUO4PSo96yVKhzQ4Frsipa4tIcMwvFZLPAx6zQLBamvKhW2M1DjN889DTaAlA5+PAAr4ZNjGO5nYZ2LZbZIV4tFyaLEuDNZS6y+2fVKkqBB/8Aj3rKUqX/AGkl95zUMHhwGWjJVTs3hi7QLLlbGTN4HcZceJI9qjWWbeJD9sZd555DC1HqHP4q1KSfQgirVNNNstoZZbS222kJQhI4CQPQAfAV/VKmThw6KIE4tUqgF5xWfss3y3/cFebbKXpDq1FVGvF4jsKdbsFxWtC+uUEglDSnUE+YR0gPHv7ve/8AX8rQlxJQtIUlQ4II5BH0VVssqCq3MSOYIgj6yIHJWMOYabsjB5EGQenwlRBm+5HBUWAwdIsqx/Oc1vTJbx2z2ee3OMh9Q4Q895Kj5UVBIU46opSlIPfqIBhPxKtVMJsG1PLdKspzS0jPL5aIDke0Mkh+Wfa2ut1pnuoN8tOkEnsEkc9qt5aMcx6wed9wrDbrb7Qrqe9kioZ8w/SrpA5P56yNUq0xVYWb8+Q0HzOu5WpPNN4fu+PHtYaXuq37VNwOi0ra/i9xb1KsaY+C4pZ4+SqckhH3IcLCWwmQFcFslba0jn1KTxVaNoGvejzO+bcBd5WfWuPBz252+PjUl5ZbaurgcU30sKUAFEqUkAfHntzXSeldJqYtqO0nUOEfiib9LfNYCmG7MNnGhaZ/Dl/n5Kgvi4yH4eJaMy4sJcx5nUKK43HbUAp5QbUQgE8AEnt37d6tI/ul0Et+HSMzvuplks8eC2TOg3CUhi4xHgPejOxFHzkvg+75XT1E8cA8jmmXimavae32VpphdhyAXO94fnbFwvsOHHddXAYaR76nOlPAPvDtzyavZguTaSavw2dRcGfsmRNsuqit3VqKlTrbiACUBa0haSOodvrrGlLqL8P3yemCmJ5E25jmtasNqsxfdA64nmOYz5HkqnbMtIM51C3Iaib5NSsZnY4zlgVbsQtNwaLUsW7htCZLrZ7t8tMtJSD3PUs8dPSTs2p0U4N4lWkuayB5cHPMMumLeafmmVGUqSlJP0kKQBVwqhHdno9fNUtP7ffcDCE55p/dWMrxVSiEh2ZGPJjKPwQ8jqbPcDkpJ7CpxNpGkRZlO284SC1x4kBxdxNgFGF1UVAfaffcMQILRwEtaOXdbhrnrbg23nTW56qaiPS0Wa1qZbWiG0HX3XHXEoQhtBUkEkq57kdgT8KgKPr9pjvU1JwPA9F769f8Vxma3mm
XzDBfjtsmN/5PhLDyE8rXJKXSkAjpjnvU12h/SvdhouwvJcYjXnHb+yEXGzXJr8JDltK4djup7KafZdSpJ44UlSeQfQ1kNG9BNIdvuPyMX0eweHjlulv+0yUNOuvOPuccBTjryluL4HYAqIA9OKlrTTqE1PdILY3jKeR8wjOAMpUOcH0wKeoIM7jY9YtfKScwpArlLrtcr/4cu96frVYLNIlaeaswpS5UVhPuJmK95xsD060SOh1P8R1SR8a6tVicgxHFMtTDRlWMWm8pt0pM2GLhCbkCNISCEvN9aT0LAJ4UOCOT3rMsOMPBjMH8LhDh9ahaBwwFjhIMHqLg/WhK0bbHhcvT/QHBcZuaVC5N2dmXciocKM2Ry/IJ+vzXXKk+lK2qPFR5cBEnLcsabSxoaTJ37+Kp7ptFOA+JZqtZZQ8pnUbCrXkUEnsHVxFJjOAfSQes1byZEj3CG/AltBxiS2pl1B9FIUCCD+cE1X3dXgN9hXjCtzGA2qRccm0qluvTbfFT1PXSwyE9E+OgfjOJR+FbH75BAHKhU6Ytk9hzXHLbluL3Nm42m7xW5kKUyrlDrS0hSVD9B9PUHsaya0P2cUj7stPIklvTCcPNrty0eS2sag96COYAaesieTgqX7Dba3tYzHUPaXqHJTapTmQvZDhkqYfKZvtsdQhB9nWrhK3G/LR1tglQKj24STXy3pWNjdvrNpdtrwKSi7wcZvfylzudEUHY9oioT0NsvOJ5Sl9xJeCWyeruk8cd6ujlGG4hnFu+4+aYpZ7/AAOrr9lukFqWz1fT0OJKef0V/eNYniuGWxNlw/GrVYrehRUmJbYbcVkE+pCGwE8/oq7XEmm59yzD1wxhnlAJ3kaAwoIjGGWxz0xTijuY3TrCjfdNrjM22aH3vVe1YPJyl2zlhtFuYdLSQHHEo8xxYSoobRzySEn4DtzyPfts1vg7itG7BqxDsMiyLuyHESra+srXEkNOKbcb6ilPUOU8hXSOUkHgelbrmGV2rCMZuGVXrzzEt7XmKbjtKdedUSAhttCeSta1FKUpHcqUBWuaJYze8ZwCP8qYyY19vU2bfrpHSsLEeTNkOSFMdQ7K8sOBvkdj0c/GoZ7+K+UcOHGbk6i29Q8QGYLZzxG/hBgcQTuUV+IXpNlms+1LLsRweG9OvLCot0YgsjlyYIzyXFtJH4yigKKU/FQAHc1ncR3TbebXo7YsqOo9kixGLbHjptSZCV3JEhDYSYYhp5fMhKgUeUEdXI9KnOscjHcfauy783Yrei5uDpXNTFQH1DjjgucdR7fXUNBYHtGTiD1Aj4RyjiVZ0PLHHNoI6Eg/H48lWDbHo7m+Va3ZlvK1jsEmw3vKo6bRimPTAPabPZEdPSp8fiPu9CVKR6o5Xz3UQLYUpViQGhjbBogfW8mSTqSVW5cXOzNz8OwAAHAKoHiXsuZXpfgOjsDldw1C1Bs9qaaHclpK1OOL4+hPSgn89W5PTDiENNKWlhv3UIHKiEjsB9faq04zaxuL3Qo1iUgvYFpCzKsmLverV0vr3uT5bfwU0wlIYSodi4FkH3as5VWgihAsXEu5AhrR3DcXJwVn3q/hAb1kk/HCeLSqs7Od7/7KjJs1xC76Xz8KumKvFbLMmQp72mOHC2rq5bR0Otq6AtHf9sHerTVFWAOP53qvkuqDUd1ux26CjFbI64gp9tLb63JslAPfyi75TSFfjezrUOUqSTKtWkFjDrF+5g9RBjQkhQbVHxlNuFhI6GR0SlKVCJWGzPHxluH33FS+WBebbKt/mj1R5zSkdX6OrmszSqVGCqwsdkRCsx5puD25i6o34cN3xjQLSO/6BasXm14fnGKZHOeuMG7y24ipTDnT5UxkuFIdZUlPAWnke7345FWMb1wRnmQ2/HtDIsbK4wmNm9ZICpVlgRUqHmpbkp92VJUAUobZUoIJ6nCkABUj3bHMev5ZN9sNuuRjnqa9rioe8s/SnqB4/RXvbbbaQlppCUIQAlKUjgA
D0AFbOqF5a5+YAHOAB6xceqzDAyQywJJ5SSfSbfAqrG5HfGduuvmCaRXPSu5XOy5W009MyFEhSEREreU0S22G1B3yukLc5UnhKhVqairXJx/LG7PoxaI7r8zK5bD1ycSg+XCs8d9tyU64r0BWEhhCfVS3uQCEKIlWqM/Z3uZdfeLRbcMp1IO5Wf8AtLWEC3eTPHONBGhVGd3eDO4TvR0P3VZJHcXgdkbcsN+nhBU1Z3Vef7PJfI/a2iuSOXD7qfL7kcirP37cTotYmGi3qHZ7zOlJ5hWqxyU3O4TifQMRo5W45z27hPSOeSQO9SKtCHEKbcQlSFApUlQ5BB9QRWPtGN47j/m/cGwW62+ceXPZIrbPWfpV0gc/pqGeWn4WgJP8xk+swemil8OqeLrAHaY9FidOrvnN+x9V6z3G42Py5klx2Ja0Pea/Eh9vKTJWklBfIBUsNkoSVBIKukqVWXctEVn++Pbbp/DBc+TP3XzK4Ad/KZbQhDKj9HLrZSPrNW3ut1ttitku9XmcxCgQGFyZUl9YQ2y0hJUpalHsAACST9FV520YtcdQNR8y3c5VbpERzM2mrNh0SUgodi41HPLTqknuhUlzl8pPoko+k1ZkGu1wyZ5uoBDeuKDxDXHRVfak4au8o6nzdA2RwJaNQrI0pSoUpSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJWrZ9gjOetWGJMuS40Sz32He3mUthQlmMouNNKJI6QHg05z3/a+OO/I2mlNQdxB6gyPVNCN4I6EQfRKUpREpSlEWAseCYrjeQ3vKLDaUwZ+SLaeuhZcWlqS82kpDymufLDpSQlTgSFKCUBRPSnjP0pTSE1lKUpRF8ZkVudDfhPLdS3IbU0tTLqmlgKHBKVpIUk9+ygQQe4NYzEMOxnAcdhYnh9mj2u029BQxGZB4HJJUok8qWpSiVKWolSlEkkkk1maUyTNKUpREpWKyrJbThmMXfL788tq22SC/cZi0IK1JZZbK1kJHcnpSew9ajDbDup013ZYdcsz02i3qJHtNwVbpUa7RkMvoc6QtKh5a1oKVJUCOFc+oIFG+YkN0EngCY+KO8oBOpgc4n4KZKUpREpSlESlKURKUpREpSlESlKURKUpREpWEzPNcS07xmfmWc5DBsdktjZdlzprwbaaT9ZPqSeAAOSSQACTUOYlr3qvrZHF80N0iYiYi6T7Hk+bTXbci4oHo7EgtNOPuNK55St0sgj0BqAZJA09Oe7hv0UkQJKn6lVrz/c1qRtzlW65bkNO7SMHuMpuGvMcSmvSWLY8s8IE2G82l1tBP8Axja3B6DjkgGxduuMC72+NdrVMZlwprKJEeQysLbdaWApK0qHYpIIII9QasBLcQyy67ju+iqkwcJzz6L00pSoUpSlKIlKhfVfU7JLrqJatvWktwREyq5xPutfrz5SXk45Zwrp84IVylUl5X4NlCgQD1LUClPCpjjMmNGajl5x4tISjzHTytfA46lH4k+po3zNxaacYsfW3MHchs7Dr8Jy7i/KDqvrSlKIlKqHmO4/V28eIHi22zS5+AcUslmN1zbzoiXFdK0KUB5nzmykKjhPSRyp33uR2q3lG+amKgyMx0MTyJBhHeV5p6iPUTHOIlKUr5SpUaDGemzZDbEeOhTrrrqwlDaEjlSlE9gAASSagkASUAmwWFseCYrjeQ3vKLDaUwZ+SLaeuhZcWlqS82kpDymufLDpSQlTgSFKCUBRPSnjP1Wqdv8AdFbJfGmcpx3UHH8TmO+Rb86umMPs45OX6J8mV3UUK/FWptKFDuDx3qVtM9cdPdZJlyGmVzdyG02oIQ9fYjXNsckK55jsvkgPOJTwpXQFJSFJBUFHirNBcLZfCPhp3G8KHEA3z+P5/wC9y3+lRBrvl2b6QiNrNapEi7YfZWg1l1gSyla24HUSq5RVAdf
msdRUtskpcaCuAFJBMqWm622/WqHe7NNZmQLgw3KiyGVdSHmlpCkLSR6gggg/XUN8wJGljw3d9DzGYIEnykA6/R7a9DkRPrpSlESsBiGCYrgbFwh4jaU2yLcpztyeitOL8hMh3guKabJKWgpQ6ilASkqUpXHUpROfpTK/19WTOyUpSiJSlKIlKUoiVj7/AGO35NZJ2PXYPmFco64sgMSXI7hbWOFBLjakrQSCe6SD9dZClQQHCCpBIMhY+wWCyYrZYWN43aotstdtYRGiQ4rQbaYaSOEoSkdgAKyFKVYkuMlVAAEBKUpUKUpSlESlKURKUpREpSlESlKURYHM8HxfUKzDHcxtYudr9oakuQ3HVpZfU2oKQl1CSA631AEtr5Qrgcg8VnUpShIQhISlI4AA4AH0V+0plZM0pSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpRErF5NlGPYZYpeTZVeItrtcFHXIlSV9KEAkAD6ySQABySSAASQKylaFneAyswzjDb9dbjGTjOIuyrvIgOAkybiEJREdV+L0MpVIX37+Z5Sh83kQTGscdw1+t+7NSPr6+uRXq0/1h061QlXa3YZkCpNwsLjbd0t8uFIgzYZcT1tl2NJbbdQlae6VFPSodwTWQyLUXBMSv9hxXJsttdsvGUPrjWaDJkpQ9PdQOVJaSe6iAR+kgepFU92AyJ+s2uGvW7J5xxNoya9t41YUeiXYcMABwj4noDHf6SuvPkY/u0eK3YLMPw9q0WxBdweT6pTOkJ93/ADv75YP/AEf1VZgxOotNi8YnDUDCXH0A5EwVV/kFY54DA4mQ3tiJ5gSr20pWran5xI03wa65nFwvIcsdtjPmptFgjJkTpJ544bbKk88ep4JPAPAPpVHODBidkrNaXGAtS3H6xXPRzCrbNxe1xLtlWSX2347YLbJUoIkypLyUq6ukhXShoOuEj06O9SqOeB1cc/HiubOiW6i865apSdw2oG37VvJncUek2fFcfxWxx5tvx0qAS+8+45JbW5OWnhJKm0BCOyQeeRZr9mRcv8D3cZ9kon65VwCGDFmb8gQIHxPWNFUmXkNyFuZkyfgOhOqsbSq5fsyLl/ge7jPslE/XKfsyLl/ge7jPslE/XKhSrG0rC4ZkrmYYtbcmdxu9Y+u4sh42y8x0sTovcjoebSpSUq7c8BR9RWZPPHapcC0kFQCCJCjzJdwmjmH5UjDclzZiBcVS2LetbkZ8xGJbyepmO9LCDHZeWnhSW3HErUCCAQRXyy7cXo3g0+fAyPLy0bOpKLrIjW6XLi2tSuOBMkMNLZinuk/hlo4BBPYiqXb+cUvGMaAae7dl3FidmerWo4lXCXGCvwz7slbrjiefe6UKfjNpJ9EISO3FWh1R2Y6S6r6IWXQK8y8gteN2WaxcA5a5qW5Mt9AX1rkOOIX5qnFOLWtSh1FZ6uQao3E6nj1DsPA2aSf4cR52yuruwtqBuhaT6uA/mjpfOynSNJjzI7UyG+2+w+hLjTragpC0KHIUkjsQQeQRX1rH4/YrZi9htuM2WP5FvtMRmDEa6iry2WkBCE8nueEpA5NY/PctdwbE5+VM4nkGTLgJSoWuwxUSJ8jlYTw02taEqI55PKh2B/NV6ha0kjJUphzgA7P5rYKr1hV0ubu+XU60O3GSuCxgmOutRlOqLTazIl8qSjngE/EgV5/2ZFy/wPdxn2SifrlQdiW56fE3hahZiNs2t767hh1jhm0M45GVcYoafknznmvaglLS+rhCgoklKuw47wz9qz+L+xyl37F4/D/exX+pVcxvIuRPH7D7cZ9k4n65U/2W5KvFngXddtmW9U6M1JMSa2ESI5WgK8t1IJCVp54UASAQe5qYMSom8L21BV/3zbScYzU6fXvXbG2L2l/2ZxtK3HGGXeeChyShBYbIPY9SxweQeK2fX/EM91FxKFp1hV0kWaFklwRCyO8xX0tSoFo
6Frf9n5PPmu9KGAoA9IdKvxa1BnYTs9Zw84SNAMTVDUz5JlLhhVwPbjq9tJ9oCvjyF+tUBdJOg01P5DTWTNhF7mLDU9h+Z4WgRvU9MvNSGkPsOocacSFoWhQKVJI5BBHqCK8NhyGyZRbzdceuce4Qw+/G89hXUgusuqadSD8elaFJPHxSaqzhd5yzSHSHJNt1ovsiblOP5I1g+HTpKut/2Ge0l+HIWfxjFiuPlR9CIRqz2G4nZsExS0YZj0fybbZYbUKMgnk9DaQkFR+KjxyT6kkn41pDTLm5WjqJvuIBbI3ngs/MIa7O89DEjeCQYO4cVpGr+5rQrQSTEh6u6hw8cfnMmRHbejvuqcb6inqAaQr4gj9FfZ/cZo+3arJd4mUSbo3kVtavFtjWmzzrjMegup6m5BixmVvttqHopaEjkEeoNaN4gUdiTs11WRIZQ4lNiU4kKHPCkuoKT+cEA15vD7xeHYtp2nt1B8+5X6yxps+YsDzHuEBtlBP71tlDbaR6BKPpJ5pTlwfPu4f6sVv6c/S9rVPKWR72L+nDf+rL13ybjGvmjWZYZcNQcd1GssiwWhxTFxmOP+R7C6kgFp9DgStlzkgdC0hRJAA7isdZtzGit6yu24OnLJNtvl6Cjaol7s060m5AevsqpjLSZH/RlXNU929Yhb7p4n+vUOQpRtFoEXIG7b/8zuXMtspblKR6KWgSJBST6Kc6vUAjZ/F0Ubbo1p1lMIlm6WjUO3OQ5KOzjJLMhRKVeo7toP8AmipYQ5lCocqmC27EQ3PgZOWXO0uaQ+rTGbMV98DELcrZ56WvcHUzUPT/AEvxGXlWp18iWrH0ER5MiU0pxo+ZyAhSUpUSD3HHFaXtqzvbVm2IXE7YPk8jHLdcVtTW7HZ1W6OmYpCVqJQWmwpRSU+8Ae3A57cVLTKvOjoUsA9aASPziqMeFIyzGxnWuPHaS201qXcEIQkcBKQhIAA+A4pTEve0/dns5og7/anRVcQabHjVwHdrjI3ZQrCY5vH20ZdqJG0oxvVaBPyuW48y1bm4kkEraQtbgLhaDaeEtrPJUB27V6rtuy0AsXRIu+eKi2x2SmGi+OWicLMp5R4CRcvJ9jPJ+Pm8VVDeni8PKfEJ252KQotMXiBMh3At9lSYfU75rCj69K21ONn+K4qrLb27RbH9nmqtsXBZEWPiktbLIQAhstI6m+keg6VISRx6cCsnVMGyfrRGWIEb8BvG6RlnflfYU8W1fqw1DTO7F8Y6etpyYfYlMNyYzyHWXUhbbiFBSVpI5BBHYgj419KgHYPdZ162daUzrjIW++LChjrWrk9DTi20Dn6koSP0VIeot41it2R4XC0yw+yXazz7r5eVTbjOLK7dbwkHrYQO7jhPIHqAQORwepPVXp+DWNIaGPWJO7iuajU8SkKh1E+k/wClvdKUrJaJSla/muRX/GrSidjeA3bLpbj6WfYrbKhx1tpIJLq1y3mkdAIAPSVL94cJPfiCYUgStgpUA7YtxmW66ZfqvjmV4VAxpeneQNWNqNHmGW4o9Ci4XHeEpUepPbpSAAeOVepn6rRYHeAehEj0KibkbiR1Bg+qUrQ9bdYMa0L07n6hZStv2eM6xEjtOSW46X5T7iW2W1OuEIaSVqHU4o9KEhSj2FRXqdqhur0rwGZrC9i2muWWG0RTdLpYbPImtTm4KR1OLjzXCpqQUI5V3YbCgk8fAVQvaAXHIZnQa36XO4XMKwaSQ0ZnIan69TkrIV+KUlKSpRAAHJJ+ArUNINU8W1s00x7VXC3Xl2fI4Ylxw8npcbPUUrbWByApC0rQeCRyk8EjvWt7q8muWG7atT8ns61InW/Fbk5HWj5yHPIWAofWCef0VG1OdszHuIu0G3JTs7RtD2NBs4j1VLsfvr3iQbwrnbbm45I0J0aeDzduCj7PfLj1qQ048B2WlakOKAPYNN8cDzFc9ImmmmGkMMNpbbbSEIQgcJSkDgAAegqi/g6YbDsO1STlKG0+15P
kcyQ85x7xbZCGUJJ+gFCz/nGr110Pp/q7G0RoASd7nAEk944AALBtTx3uqneQBuAMAfPiSof3g2S0ZDtZ1Wtt8bbXE+Sdyke+BwlxphTravzhxCCPrAqGvCfze85ns6sbF6kOPrxy6TrJHccPJMdspcbTz9CQ90D6AkD4V6PFK1di6Z7Tr/YGZIF4zt1vHoDKT760LUFSFAepAaSpJ+tafpreNg+jFx0J2s4Zhl9jKj3qUwu8XRlQ4U1IlKLnlqHwUhBQg/Wg1ls2dd5yhjf4gS7+058YWm0ezRbrLnfwwB/cMuEqwtKhOVrXk+f6sXvSDQ9mzKXhyGzlWTXdh2VBt8pwEtQGWGnGlSJBAKl/hUJbA79Sj0jK7c9aZGs+LX1y9W6LAyPDsjuGK36PEUpUf22I50lxnq94NuIUhYCiSOopJPHJN84kbp6SBPKSOcgiQZR3lMHfHWCY5wDyiDeylevjNlx7fDfny3Q2xGaU86s+iUJBJJ/MAagPQDXfPdatadXbexa7WjTTB7m1jtmuDbSxJmXJpP8AfnK+ooU2lXbskcco7nvW/bjZ8i17fdS7jEUpL8bEbu62U+oUIbpHFY7RUNPZjXb93EO0juL8lrRYKlcUT97Ce8HtlzChbw/nJOoWJ5zuYviCq6ar5VMlx1r7qZtMRZjQ44+hKAhz/wDC5q1tV68PuGxB2Z6UsxwAldjDx4/fLecWr/So1YWuzaKYo1DRbkzyjk0R3MX4rlovNVvinN0u/mMx0mFo2X62aa4RfkYpeb7Ik35bIk/ciz2uXdp6GSeA6uNDaddQgnnhSkgHg8GvfiuqGB5rZLhkGN5C1IiWlS0XFLrTkd+CtCetSJDDqUusqCeFdLiUngg8cGucuku96Ds91O1RwXdVpllkTIsnzKbeflHDiIeEyKrpQwnhxSCphCEDyy2VgJVwEjg8272s4tguUys83DYrrJcdSbVqzKbW0me10M2uIx5qUW8NK7p8vzlpIUlPbj3e5KudmKrR8RmrZnQOMWIztJGkxpYLapFOoWO0dEbwJuDlxGecbytM2BYPJvyNQd2WTRFJvWsd+kTbb5qffj2NlxSIiBz3AUB1fWlLZq3lee32632iBHtdqgx4UKG0liPGjtJbaZbSOEoQhIASkAAAAcACvRWhwgBjPZaAByAgddTxkqoklz3ZuJJ5m/YZDhCV85MaPMjuxJbDb7DyC2604kKQtBHBSoHsQR2INfSoR3mbgRto2+ZJqdEQ07eG0It9macHKVz3z0tlQ+KUe84R8Qgj41jVqCkwuP8AvcOuS0psNRwaPrj0Wz5/rBozjFxOnOYXaPcblLi+Y5jsG1P3iUYv79yHFadcS1/GWgJ+uvfguoejVybtOKaeZZivMq3quFus9uksNO+xpcKFuIipIWlCXOpKvdHSoFJ4IIqJthWkr2AaEWrN8qW7cM81HbRlGT3eWeuVKekDzGm1rPfpbbUkBPoCVkAc1COz7GrJnviAbjda7Pb2mbZj8kY1CLY9xUpawJTifhypcVajx8XifjXR4ZZXNB+YDi4jIFto4+ZwE+l7Y4w6j4zMpaAN4drwsCY9bXvxPgxLnBkW24R0PxZbS2H2ljlLjagQpJHxBBIqrWwK/wA6zWDUXbrd5bj8jR3LpdkgKdUSs2h1SnYfJPfsnrSP4qUirWVTXbytVu8RPczaI54jS7djs5xI9PN9lR3/AD/hFVnRvXNP7zHd2lrgegxAfiO9Xq/scX3XN7GWn1IPGArlUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlahrDa8gvmkua2XEwTe5+PXGNbgk8EyVx1paAPwPURW31XPfHqJrrpzplZZ+g0yy2q5XHII0C5Xy8pbMO0Q1oWfOeU4C22grShBcWCE9
XwJBGNdoqUzTPveX+a3zWtFxZUDx7t+1/koy2B6q6QaR7NsbsuT5ZbbNfLE/cY97sr7gTdEXH2t0lj2T9uW8oFASgJKlduBWH8Mpb+qmU657qLhDdZVnuWKg24Pj8I1DjgrCP0B5pJ4+Lf1Vv2vGrrGVYU/pht8+T+eay5fa02eRc8eDb0azNOthEidLlo6hHaSCstoWrrUSjpSrg1Mm2rQ2y7ctFsb0kssgSvuPHKpkvp6TLmOKK3nePgCtR4HwSEj4V2Yi6tUrvzjCIykkF0cAGgdcOhXLhDaTKLcpxGc4AOGeJJnpOoUn0pSsVqqtbirKxt81GsG7HCmRb4cu5w8f1HhsDpZuVskuhlmc4kdi/HdWghfzihSkk8VaQEEAg8g+lfGbBhXKKuFcYbEqO7wFsvthaFcHkcpPY9wDX39KN8rMG4mOAMW7yeqO8zsfATxI15xA6BKUpREpSlEVE9TGXtZfFL07w7ylvWjSHF3cilgjlCJb3PQfz9S4hH+KavZXwRBhNy3Lg3DYTKeQltx8NgOLQnnpSVepA5PA+HJr70Z5KTaQ0LjzLnEz2gdEd5qhqHUNHQAD4yeqUpSiJVcMG/d66p/yBxz+sS6sfWjWjSWx2fWLItaGLhNXdcjs0CySIyynyG2orjq0rTwOrqV5xB5JHujijbVGuOQxerXD4kI69JzdTh9HtJ9AVvNKUoiUpX4SEgqJ4A7mhMXKKt+J4f8pt8GfZu5LDttxDH7PCZjD5qbtJaeK3j/HRFKUD+LIP01ZGq97Mfa8jxHNtYJ6VeZqPm92vEUr9fuey4IcQfm8qKkj6lVYSgEMYDuB5E+YjoSR0hDd7uZH8vlkc4nqqp+I3qxpzi22nPdPL9l9uiZPklhULTaFOcypnLqU8ttjuRyDyfQcGvjsc1+0YTtPw+2v6kWRiXg2Kx15Gw/JDblrQglClvpVwUJ6u3Uexq2VKhksDx96P6cUf3X3xopfDi392fXDP9tt3Fcxtu+4nRS1eIhrnnt11GtELG8pgRYtmu0hwtxZzrfs6VJbcUOCeUq4+ng8c1nvF01d02uum2PaXWrL4E3K7NmkC4XG0xll2REjpjPErdSkHoHDrfHPr1DiujdKNGGnRpj/p4euF2IeufyU4pqVah9+ektwn0UV27c5t+f06/umNauY2cXjSW7a9dDMAZbllsLDCie4c6SD0Ec9/SqdeFbq/pqzK1OweVl8GLf8AKs9nXWy26QotPz4qm+oONJUB1DpQskDuAO9dGaVZpDajn7xHctPxb2txWeE+E2mNCD2DgPR3fsuZO5rcPordd/2guaWrUS0zrBiLcti+XOM4XI0BxanEhLjiQQCDxz9HPfirF71dwOix2o5hFZ1Jsb7+c4nOGNtMSQ4u6hSS2CwlPJWOsgEjsPjVq6Vk6ni2X9VJ1cZ/Fn/j5rZtTDtI2kDINEfhy/yqc+HzrLhk/aNi2EYXeIWQZ1ieNSpErGGZCWpvmIec6EKS5wEBalNpCz7vvjvX3w/PdYoe83GNHnM/m5AG8MkZDqNFdQ17BAkOkCIzDSlALPQsgAFRK21BSypXvVb+qb6XaRa741qtrFdJ2PTIOQ6kZmy+nNESYq4cPF4/SWW2ElZeMgt+YyGy2AkkLUeEjq6TU8bavEcMw4ndJlscPbxXn2LLnDPD2bANIHGBBns0tEavvop/1Njbj3rvFVo1e9NodrEfiSjJ7XPkyC/1HuhUeQ2kI6ensUk889/o072Dfd/9Fegv2fvP67W46naKHUy7xbt/dc1JxT2aP7P7LjF+9hju+8Vda0eWrqX34559AK079iYf8JnXr7ZD/Y1g2wutXZr6RoG+b2lr2zKtCix1p80NWC8BfRz36eZnHPHpzU6jngdXHPx4qCo21IxpDUj9kprs75S0r6HcwCkK4PPCh5PcH4ipZzDILxjFmFxseDXvLZPmpa9gtL8Jp/pIPLhVMkMNdI4HPv8AV3HAPfi5IDVUAlyqpsU/38N
1f/tHP/Zdq5FUv2vY7r9pJqjrFlGZ7bMpFu1Ny0Xy3rg32wvLhMkrBEhKp6OCAsE+X1+hAB7cz3uWyDXrGNLZN625YxYL9lseUypUW9PhpgQwSXlgqcbSVAAdisduojkgA0LhSoUy7RjAeBDWgzyOZ4FXDTUrVA3VzyOILiR3W16maW6f6x4m/g2puLxL/YpLrT7kOSVBJcbUFIUCghQII+BHxHoSKhvdvnUuzabvbddHrEm8ahZ5aXLJZbLFACLdb1o8l2dJPzWI7SCQFK4BX0pHPfjdck1J1Vm7erfqPpPpenJ81vlkhToFiVcGIrLT8hlKypx19xtJbbKiSArqVwAOOeRVPTjIvEY05RcbizsfsN3yW/OiRe8guGd21Uy4ugcJ6iJACGkD3W2UcIQnsBySSqUgXPoVMp83HSJ5WJ3WF7hTqENZWZnpw1mOempz4232zaMt7e9CMP0eTchcHMdhKbkSkpIS7IddW88pIPcJ8x1fTz3445rcc7w+06g4VfsEvqCq3ZDbZNslBPr5Tzam1EfWArkfXWsbfbdqNbNIbA3q4ytjMZXtU+8R1SkyRGkSZTr5YS4lS0qQ2HQ2npUQEoSAeAKkSr7S3xi9tW8zMZXziFSi7ww00ybRE58J4rnHtl1Ruuw/AM72162RmLdfselTLvg0+essWzI473HCWnz7vKXPfWjnrCVqHHKTU95xrqrbpd8Rumomu3y5ZyiUu33CxwrZDXIZdVGddZetsaG37UpBdaSz0Orf589B608Emx2QYzjeW21dnyrH7beYDh5XFuERuQyo/SUOApP81Uo8QvTprRfSfD9WdvGkOP265YNm9vyCaix2Jlglhtt4dToYQFFrrUlKvoC+frqr6xGF1U/caTwkNmN8ZneJsrNpA4m0x94gcYJid05DjBlZfE9Bc73Na4Wzc5ugx9WM4vi6kowLAp60l5klYKZlwHPSl5S+hXk9z1BCVdkcKs1rlqIjSXRzNNS1IStWNWOXcWkK9FuttKLaT+dfSP01UnbrrLuD31ZrYcnzDS0aeaS4dLbvDjSnHXXchurXeM2lxxCOplpzh4hKeOpCAVKPHFotzOm1y1g0Az7TSyrQm5ZBY5MWF1q6UmR09TSSfgCtKQT9Bqm1tfT2RzGCLOgDO49o/vOMnlGkAW2ZzKm0hzzIlsnTPIcAPWczJNX9j161Z0l2yQsouOhV1vb2UrmZreb+vIbcx7aZJLvnqS66HBwyEfOHPYn416/Dsj5/km3LVHVuzMR4uRanZbfr/Ykz1qSwHVpDbKlqSlR6A8lYJAPZPoazVym6haq7acR2zaYYlkmO3262CBjuW3S62aTBjY1CaYQ1NHmPISmQ+sJW22hkr5C+slKeFG02neBY3pbgti07xCGItnx6C1AiN/HoQnjqUfipR5Uo/Ekn41012sL60eyRgbH3ZBJEaQ1gB57lhSc7BSn2pxun70Gx4kucSOW9a3t80etuhGkWP6awXhKkW9gu3Kbx702e6S5JkKJ7krcUojnuBwPhWyai4wnNdP8AJsOUQBfbPMtvJ+HnMrb/AP5q/i2XDPns7vVvuuOWuNiUaJFVark3PU5LlyVdXnpcY6AG0I4SAeok8/zbLWO0N/WWuDveBHyy04cFrRJoOBbm2DzOfXjxlVf8Na+ruu0DD7NLSW7hiz9wx+eyr5zL8eW4OhQ+B6FI/nrYt7mV64Yfoe7ddBLnDtN9cusSNMvEuOHm7VAWoh2UpJbcAQk9AUsoUEIUpXA6eoYHCbQNtW5XI8cfSWMB1snm92aSezMDJujiVDUfRPtKEh1vnjlSFoHfirOVas47SBVNiYJ/EIxDlM82kHIhVpAbO40wLCY/CZwnoP6gRoVB1y1d0JTo/bY+s+pmn+bKctbDVwRHdiz0XmX5QDns0RHUXlOL6ultCCe44ArVvD+0PvGiOjFzYvVkfsC8uyWfkkWwvq5ctEN4oTGjLHwcDTaCoeoKuD3BqwkHE8Vtlxc
u9txm1RJ7vPmSmITbby+fXlYAUefz1lqnF531AILhHCJB+IEbhYZlRh8jWEyGmeMwR8CZ39EqB95e5i5bU9JWtSrXp3Ky9166MW5UZuQWGo6XErV5rrgQspT7nSPd7qWkc1PFazqNmrOAYlMyE2+TcpY6Y9ut8ZBU9OmuHpYjoA9CtZA6j2SOVKISCRlUnD5TeRxm+XXLqtacYrjfw0z6Zrx6O6l27WPS/GtTrVb5EBjIoCJfscj9tiuHlLjK+w5UhaVJJ4HPTVb/ABT9KMp1T2uufJO2Sbk/i18iX+XCioK3nojaHW3uhI7qKUvdfA+CDVk9IsOm4Bprj+J3SQ2/cYUQKuDrfzHJjii7IUn+KXVrI+oitvrTaGBzzhgQ4EaixkcxbqFTZ3lgBde0HQ3EHkb9FCL243TZvTq0o0eukDNsgutqbGN47ZJTb0l8+UAgupB/vZlHbzHHelKOCD73CTUvYdfJ+H7dvkdYr4iNrBqHqLJF3hEIXcLYhuQ2Jr77CwS2luMy6rqWnjrdQPVQ56J26w2O0PyZNps0GE9MX5klyPGQ2p5X75ZSAVHue5r6M2i0xrg/do9siNTpKQh+ShlKXXUj0ClgcqA+HJqwdFZ1U+9n/MHR1iHam0RCzwRRbSHu5fylt+UyOszNvXVPdn0RWXbo9z2sTY64MjJYWKQnh3Ss29goe4PxHJbqeNwGqruk+nkm42OCq6ZZeFi0YtaG+7txurwIZbA/epPLi1eiW0LUfSvPtn0WY0D0esun65YnXUeZcL5P9TNuchRckvE+pBWohPPfpSmqUrVHVNzS3q4tJ7NF/wAQ3rSpemKe8g9Gz8XERvwu3KUqUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESv5WhLiShaQpKhwQRyCPor+qUReeDbrfa2PZbbBjxGeSry2Gktp5PqeEgCvRSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREr8IBHBr9pRF8IMGFbIjVvtsNiJFjpCGmGGw222kegSkcAD6hX3pSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIla/neJJzrGJeKP3eZbodx6WZq4hCXXopUPNYCiOUBxHKCpPvAKPSQeCNgpUEA5qQSLhfKNGjw4zUOIyhlhhCWmm0J4ShCRwEgfAADivrSlWJJMlVAAEBKUpUKUpSlESlKURKweb2FWT4rcbD93ZtmbmtBt+bCX0Pts9QLoQv1QVICk9Y7p6uRwQKzlfhAI4NVcJBAUgwZVS9jt5x6SrKsttqU2mBqjfp92xLHojCkR4tjt3lwkyvLSOlpTywFqUeOtSk+p5q2ta5iWnGn+BLmOYThVjsK7gvrlKt0BqOXj1KV73QByOpSjx6AqUfia2OtCZDRuAHYQOsASdTJgZKsQ5x3knuZ/1wWEzLDMX1Ax2VimY2dm52uYE+aw4SCFJUFIWhSSFNrSoBSVpIUlQBBBANZaNHREjNRW1OKQyhLaVOOKcWQBwCpSiSo/SSST8a+tKqLKc0pSlESlKURKUpREpSlEWAk4Jik3NImoU20Ik3+3wl2+FLecWv2RlaupzyUE9Dal9gpaUhSglKSSAAM/SlNI+t/xTWUpSlESlKURKUpREpVetX95eL6U3KwWpWm2cz13/ACmJijcyXZX7VCRIed6CtLspCFPJCQtSS0haF9PzwCDVhaN8zcYymOsA/Ag9UPldhOefqR8QeyUpUZXDdBtotE+TarruI0yhTYbq2JMaRltvbdZdQSlSFoU6ClQIIII5BFRImFMGJUm0qKv2WO1j/CW0q+2Vu/21P2WO1j/CW0q+2Vu/21SoUq0qKv2WO1j/AAltKvtlbv8AbVt2D6oaaanR5UvTbUTGcsYgrS3Kdsd3jz0MLUCUpWW
VqCSQCQDxzxUwSokBbPSlfN59iOkLfeQ2lSkoBWoAFSjwkd/iSQAPpqFK+lKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSvmH2C8qMHkF5KQtTfUOoJJIBI9eCQe/1GvpREpSlESlKURKUpREpXz89gvmMHkecEBwt9Q6gkngHj145B7/VX0oiUr5h9lTyoyXkF1CUrU2FDqSkkgEj1AJB4P1H6K+lESlRtmOrlzgZe5pzpvg72YZPEiNT7i0Z7cGDbGHSoNGTJUFkLc6F9LbbbiiElRCU8E6fh+6qO7rGxt/1f0/nafZvc4qptkQ5Obn228sp56vZpaAklY6VcoW2g+79PAo3zkAazHGM435HLcdyO8gJOkTwnKd2Y7jep5pSlESlKURKUpREpSlESlKURKUpREqEt1md626TaX3/AFW0pfwuXGxa2OXGdar9bJTjshDfKlqakMyUBPCPRCmzyR84c9ptqG95P7lHVr+SFz/1Cq59qeadB9RuYBPYLfZmipWax2RIHqoa2H7m9x+7jH52ouTxNOsexe1XY2tyLCtU52bMWlpDi+ha5fQyAHEAKKXO/Pu/GrW5kxm0iwvI09utjt96BCmXbzb3pkUgeqVIaeZWOe3vBR4/eq9KpD4MH7ly/fyxl/1WLV+q79qYGuDG2s09S0H49FxbO8uBc69yOxIVSNqOuO7TXPLcyiagWvTLHLLgGSvY1cxAt09+VOksHl5LClyghtPSUkOKSr5w9w8Vbeqx7LlFq67hHENKcKdX72QhHHKiGY/YckDn85FaDfvEfu1k3Kp29zNueWQpjMd5xUZ15iVdJzxjlyM1HYjKcZAWenlxT5SlPUVdPSSMGuxNpt95zGu5ksDj/gDkAti0h1R2gc4cgHFo+Uk9SrtUqgOpfiCbmdvOTWm7bjNpCMbwK9SRHamW6/Nz5MfnvwtxoqZU6Egny1Bvq4PB7E1cS+626Z47pCvXW6ZPHRhabU3eE3FPJDkdxIU30J9VLV1JSEepUQPWoJApmqchY8DuKQTUFMZnLjyW0ZG7kbNkluYjCtsu8BH96M3KU5HjKXyP2xxttxQAHJ7IPPHHbnkVF2Sbr9aNwGtGruCarW3G7Y3gDrUBiHZGXPLS+mQ+06ouuqK3OfKHB4SOPxRUgYfq7um1PxGNqthOj+G2zG7i0JtpsV/vchq9XGGR1NuKW0ypiKtxPBShXmAdQ6lD1qq/hW3xzJtym5LInrTLtblzuaJa4MsAPxVLmylFpzgkdSSek8HjkGr0Wn9ZLHj3HGDoREW6nkc4KrWcP1bG37zBI3E3v25jKQumlKrfe91tyyjcfcdrWidis8/Jsftpul/u99lONQYKR5f4FpppJckO/hm+febSnk+8SCK+Onu6rKGdy8zajrXilotmUu2z7sWO72OU65AukfpKiktupDjLgShzsVKB8tXf0Kqs+0wlvvAkcQJmOx5wYlWf5MU+7E8JiJ7jlN1Ym7Xu0WGO1KvVyjQmX5LMNpb7gQFvvOBtpsc+qlLUlIHxJr21zK35ah7h2d5WiGAJaxpvHTkUK64zam7o+G7hKRJS2l64ueRy0eTwlLaXQhJUeVE1e+JqHk2B6Z3rUHcXGxXFW7El2VKXZbq/PioiJSkhRW6wysuFRUkICDyekAkniqscHUDXNhLh0bHrJNtBEqXNLawoi5gHqZ+QF9TkpGpVXso1y3by9OntadOdDMO+SzUBV3j2S+X2SnIJsAI8wL8plksMOKb94NFxZHoT1e7W77T902E7s9Mhn+Jwn7ZLhyDCu1pkOBbsGSEhXT1gALQpJBSvgcjnkAggaBpJcNW5jUTYSOduBsVUuADTo7I6GL58r/BTVSvwkAck8AVXqy7iM+1uv9+tm2bFMfuGP4zNXbJ2YZLNeat8qaj9sYhMMIU5ICOR1OlTaP3vX2NUmThGefTf6jrAzKtEDEcsuv1PQE5BWGpVU9CN7NwznWrMdtGqunqbBqNiCJDyE2i
UZMG7tNAKJYLoQptSkLQpKV9ik8lQI4rU1+I7cFbkblt8Vt+ySBcrXDfUiDKlsOXS4zuhCo8ZlplS2EJWFhZdLykpRyo9ISTUg4sOG+IYhGoG78s5tEqCIxTbCYPA8e+eUXlXMu67m1aZrllYZeuKI7iojbyultbwSehKiPRJVwCfoqCtm2S7rsmwm+yt2OJ2+x3tm8ON2tEZDSFOROkclSWlqT0hfISrnlQ7nnso6bopuu3H5Y5qJD1W2g5PZrhh7rPsEK0utKM0LDhKEvSnGmHukISS40tQUFjhPcc5rZTvEl7v7Xm1/TgSMXiY1c2YESMucZDzoU2VFTquhISeR6JSePpNTTF3EX8oPISLjictbKHmwBtDo5mDY8IvzVi5d7tEC4wLRNuUZiddFOJhR1uAOSC2jrX0J9VdKe549KjLdRetesf0WvN022WGJd85bcYESO+lCyGS4A6tCHFBC1hPJCVH6exPANE06j7qb74oa7BcrbhU+/4zYZbNnsS75KZtEOE/HQ6T7SIqnFvFKklavJHUocdkpTVvtwu5/OttGjiNR8z0Qm5BOix213ZVguzJtMB5x3y0IVIfCJKgVFHvJikDq78Vk8g7OyqTAdf+ogDrF+cTqtWAtrmmBMfkD87b81LGj07Ue56W4vcNXrVEtuZyLYyu9xIigWmZRT76RwSPzgEgHkAkVuNRtty1Wm636H4jq1c7THtUjJoHtzkNhwrbY5WoBIUrgnskd+BWhWXcRn2t1/v1s2zYpj9wx/GZq7ZOzDJZrzVvlTUftjEJhhCnJARyOp0qbR+96+xrorSKzmEQbyBpe/QEx2GZWFGDSa4GRa51tbqc+5yCsNSqs7fN6U3UPXTKtr+rODMYxqHixeUly3S1ybdc2m+klxorSlxslC0LSlQPKSTyCOmtmtWv+c6s6q5xpvobZsaELTeS1br7fMgffUl64LSomNHjsAKKUdJCnVrHvdglXHNZi4aW3DhiHLKe9uds1c2LgbYTB57vnyvkFYClRDoVrBnmomQ55hepOnkPFb5gdwjQHfYrmqbHuCHmfNRJZUppspbUkjhJBI7gnkEVL1SRkd4B6ESFE/XJRPuW3CY5tu01eza8Qnbpc5khFssNmjq4fulxd7NMI+IHPdSuDwkHsTwDC+sK9w+j+3W7bj8m1YuLuoNhajXubj8ZLKceaYLzYetqY/QVLSG1qT56ll0rHUFAe7UNZ/qhiGs/ifWbHc6y+y2jB9DITstr7qXBqLHfu/SgqILigFOJdcaHHrxGP11I3iUbitKnNoOYY/h+peLXy65C5CtbUW2XiPKe6FSELcUUNrKukIbVyeOO4+mudz3N2YV2+04+XgJhv8xkmfdgZEzuxgO0Ci72W+1xJEkfwjKPenUCLgYDmFu1CwbHs8tAIhZFa4t0jpJ5KW32kuJB+sBXFZ+ot2sWOZjW2vS6xXBKkyYeI2pt5KvVK/ZkEg/mJ4qUq7tqY2nXexuQJHquPZnmpRY92ZA+CUpSsFslKr/q1nuS5PuE040L02v8qI9bJRy/N34TnHs9pZQpLER4j/75eWkdHqUtk+lWAo3zMx7yY4xae8jmDwR3ldh4A99O0HkQlKUoiUpSiJSoQXuOezHVTJdG9FbRjeQZBhqUfd1d5yP7nNx1qA9xptpiQ+8U9SQtXlobSpQT1lQKR8cD3QNXDWR/bvq1hasH1BMM3K1sN3AT7be4g6uXYcry2lKI6FktuNIUAk9jweDfPGHUSOIztvtflfJHeSZ0z4c92cc7ZqdaVCW57X/ONvOIys9suiE/N8ftcNcy7zY18jQhAQlQHdtwKccHB5JQk8D1qMcn8QBNn0EY3B2XR+VdceiQra/fFqvAZREkzA0RDjrDCzJdbDyCtSktNp5Cevr5SK4wQTugHhNh0JtOUxvCthMgb5jpE9ROWfYq3dKifJ9zOmGFaAwdxuXyptqxm4WuJc47DzH9+uGShKmo6WufeeV1Acc8epJCQTW
k5Luj1LwDTpGtOoe3WbbMFS01Lmqh5C1LvVuiOEdL8iD5KGwB1JKkokLUkE8jsas/7Mua62EweB3E5KrPOGlt8Vxx5DNWOpUBYPr9qpmuXYLIhaNMydPs/tkq5Rb/AGy7+2KtjaGg4x7aQ2GkLe56QhC19Kvx1cEDRXt+F6xTXq36G6v6AXXCXrja37sxOXfo9xU+yhLnlJZZjIV5jjrjflJQFhXUoDg8jmSCHYDne3KZHOxtmoBBGIZW9YjpcXVt6VXnVbczqRophburGoOgLkfB4b7Kbg5DyVqReYDDi0oS89CDIZIClJBS3KWRz9AJraLfrrdtRmUTdAcKh5nbAy067erjeTarZ1ONpcDLTiWH3XXQlSesJa6EE9JX1BSUwLyRpnw57uvJSbROuXHlv+jkpeqsu6zXXczpRqHpxjuh+hZzex5HM8q+zvY5D/sw8xCfL62VBMb3CpfmuhSO3p7qq3Pb9uZsuuN3zHCJuLzcUzbT+eLfkFjlvokBpSioIdZfRwHmldJ4V0pP0gAgnRNzG8vMNtOUWa33vb3cbnjWQXePZrdkScijNMvPuBJILCUOOo6eV/PCeroPBqWj7WnrJBG505CcryNfmh9ioNQCDvbGZjh8L8VaKlaZqtl+dYVi5ven2lsvPbil3hdrjXWNAWlroUoueZIISeCAOkcqPV2HY1Bm3ze9L3H4Qq8YJo5M+U33RkxFWV28oDEOOyED2qXLLQDKVrWpCUJbcWooV0pISspq04yWtzFzy3/647ihGEBxyP18uvUK01KgbF90L6Nemdt2reApxDL7pbFXaxvwrt907Zdo6errDb5ZYcS4ny1noW0OyFd/m8zzVo8odofkYPqCFE3LdR8xI9EpSlQpSlKURfKU287GeajSPIeW2pLbvQFeWojsrg9jwe/Fc292+4beXtgyl+bjOrEPOsaxhFok5KZWLQ4pYM52QGWeWgT0qTGIKgUlJdb49ea6U1WDGdLbLuN0v1xXkSUriap5Bc7dCfUOfLi29KYEN1P+K7FW8n61fXWTy9hL6ebRijQwQMMbzM9Ny0ZgIw1MnHDOokEz0A9VP2nmcWPUzBbBqDjUgPWvIrcxcYqueSEOoCgk/wAYc8EfAgiq8blF7ssez/DoGi+uFljMZ5kP3IZs9zxeO8LZHRFcfekpfB63QhLCiUqA5K0gEVE3hM6pXmJiuabVc9UpjJdL7q+I8d0+8Ii3lJdQPpDb4X3+h5NWbkf+N+7KIx+2RNOcPckq+hM+6yOhH+cliC5+h7666arWGsx1P2HeYbojFB5xhnOcrrnpue2k9tT2m+U75nCD3IdGULddPLJm+HYzIb1Q1Oby+chxchy6KtLFraZZCByny21FISCFK6lKJ79zwKxZ3H7eAeDrzp1z/KmD/takJ1pp9pbD7aXG3ElC0LAKVJI4IIPqK046KaNE8nSTDOf8gxf9nWZJJkrQAAQFjv2SG3j8vWnX2pg/7WtvxnLcVzW1pvmG5Nar9bVLU2mZbJrcpgrT85IcbJTyPiOe1YH+4no1+STC/wD6gxf9nWyWPHrBjEBNqxqx2+0wkqK0xoMZDDQUfUhCAByfp4qRGqgzoqj+JP8A+R9DP/a7Yv8A+7VyKo5v2yS56iL02sGA6Yam5BJwzUi33q8Lh4Jd/IaixS4HVtvLjpbfHJHSWlLCh3BI4NXAwjO7Nn9teullt2RQmmHfIW3e8en2d7q6QfdamstLWngj30gp55HPIIqKN6Lo++T0wUxPKQRKmraq2fuAdcVQxzggrY65Yb6dA3tqWutq3pabYRbL9h9ynhOY4/MhNvxUPPHhxZQtJSlD/JIVx7j3B/GArqfWHy/EsdzzF7phmW2pm5Wa9RXIU2I8nlDrSxwofUfoI7g8EdxWbw5rhVp+03Lcd4PA/wCdFdpaQadT2XZ/IjiNO2qjPSnHNses+n1k1MwTS/BptlvsZMhhfyfh9bZ9FtLHR7q0KBSpPwKTW1q0K0RWClW
jmDkH1Bx6J/s6506JZXknhn7opm3jUu5yH9Hs/le149eJB/Bw3FqCUPE+iSD0tPjtxwhzsPXqclSVpC0KCkqHIIPIIrd2Co0VqXsu7gjNp4hYtxU3GlU9oeo0I5qDc+2O7S9SITsPIdBcRjrdSR7Vabei2SEk/jebG6FEj6yR9VQDoLtjm7C9zDLGL5BLu+lOrLarMgzOPaLVd2gp6I28pICVpWkPtocAHKlhJHPBVfGvjKhQ5yENzYjMhLbiHkJdbCwlxJ5Soc+igQCD6g1SmfDeHt5HiDY9dx0MaWV3jxGFjunAi46TmNR3X0WtDaFOOLCUIBUpSjwAB6k1XDRm5Obns7m68XcKcwLGLlItmn1vUT5Mt5lRbkXpxPotalhbbHPzEJWoe8vmvfv41Jm6WbTNQcitMhTNyl29NnhLSeFJdmOJj9ST9IS4pQ/NUkaF4HD0w0awnT+Cylpuw2KHDUAOOpxLSfMUfrUsqUfrJpSuX1PuwBzMknm0AfzTmAlSzWs+9JPIRbkSf6YyJW9UrQM61OuNiyaBp/g+LJybLJ8RdxVEen+xRIMJKuj2iVI8twtpUv3EJS2ta1BXCelC1Jw+2vcLj25TT17ObFapFret90lWW5QHnUvezzGCAtKHUgBxBCkqSvgchXcA8ijfPMaX6TE8gbHjbNHeXPl3Ej0E8rqV6VEOKblsRzLcXl23Cy2a6OXbC7UxcrlcylHsYW75fDAIV1dYDqT3HHZQ+HeXqC7Q8ZG44iY+SGzi05jPsD8ClKUoiV85MmPDjuy5b7bDDCFOOuuKCUIQByVKJ7AADkk19K5/eKdrZlZawvaNpjLcZyHVOU01cltKIWmC48GW2eR3CXXOrq/iNKB7KNUeXS1lMS5xAA4n6k8ArtDbueYa0STwH1A4qVbJuU1I3QZjdMT2ptW+1YVj8ow7zqTd4hksuvAd2LXF5Sl9Y7EuuHoAPPSeU9UiTNveYu24uQtz+qjF/Sjqbua5FvWyHfgVQhFEdSOfVHSDx26h61umjWlOMaI6YY7pbiEVDNtsEJEZKgkBT7vHLjy/pWtZUsn6VVulavaxvlYZ46njw4AaZybrJjnP8zhHDdwO8751ygWVW9tW57M75q5lm1bX+NbY+peHo9pi3O3tlmJkNvISUyUNEny3OhaFKQDx7x4A6SBaSuZer13WrxltOWccVxKj2mNEuPl/FKokpawrj/1K0+v1V00qGHxKDKpzOIHiWmJjiI4TKl48Ou6kMoa4cMQmOhnpCUpSoUpUbbhNZoGhOl9xzp6Aq53JTjVuslrQrhy5XOQoNxoyfj7yyCSPRIUfhUk1UTXuadRN9ehGjjx8y14tCuOfz2PVK320qZhqI+lDiVEf4xqseI9tKYxG/AAFzo44QY4wpnA11QicImN5yaORcQDwU46G6X3DT/HXrxmdyF5z3JlIn5Rdz386SR2jtc/MjMgltpsdgkE/OUomSqVoGrusuPaRQrO3Ot86837Jp6bVj9itwQZdzlkFXQjrUlCEJSCpbi1BKEjkn0Bu50nLcAB2AHwCq1pAueJJ7kn4lb/SoRyPcBlml0iwy9bsBx3GbLkdzj2eLKt+XpnyWZT6whpLsd2NHKh1EdXkKeKRyojpBUJupFp4x1sfmO6TeOv12KUpX4SAOTUKV+0qC7TuZk6m5be8V294GjOI2MSDDvGRTbsLZZWpY7mKxISy+5JdSPneW0UJ5TyvuK+Om+8LA9RNaX9vKcXyW151a7Y/cL1FkR21RratpxKFMqfSv3yrqQtC0pKFIWg8gnpBvnIDdRI4gCZ5Recst4R3kknQweZMRznTPPcVsevunWQZDZWtQtMXUQtR8PbcmWGR6InIHCnbbJA46474T0kH5q+hxPCkg1n9E9Wse1x0wsOpuNBTca8R+p6M4fwkOSglD8dz6FtuJUg/m59DW8VUPaJOVgm5bcdoClXRboOQRcxtLP4rTdyZDj6Uj4JC/L7D6TSld7qW8Fw5giR
1Bk7sMjMpUswVNxAPIzB6OgD8V8hEs7h8HyU2pGselLYb1DwphcmG2CQi9QU++/a5AHz23UglBPdt3oWnj3ud20n1MxnWPTmwam4hILtqyCGiWyFfPaUey2lj4LQsKQofApNbZ69jVRNjc5WG6m7g9vQV0wcNzVV4tDPPZmFckl4NpHwSlSSePpWaUrudS4YhwggOHXEDwIJzcSlSzW1OIaeoJHYiOOIDQKbrTt407suvd63HwU3QZffrQ1ZZfVMJiFhHRwoNcdl8NIHJJHu9gCSTWzUC0zt02+zT2dgDJcw7QN5+TkGRtj+93rqspV9zmV+ji0+W31gEhPUsHggA7HvF3DanY3kUbSPAdBtY8hssloOZJf8AEMdkuKUwpPIhw5QQUJWsHhx5JJbHKUcLPU3hdH9wuoGSZFp/onpns41R0hxePdG13C5XjHXI0Bi3MtuOuMla2uAt5aUJK1KCiVk8lR5ps/mqU3N9w+XgZPmP7rZJjU3yADlfyU3h3vC/KBYcSABwHH2bsUpSiJSlKIlKUoiUpSiJSlKIlKUoiVEO76I/O2s6sRozZW4rD7qQkDkniMsn/QKl6vJdbXAvlrmWW6xkSYU+O5FksrHKXGlpKVpP1EEisdppmrRfTbmQR3C1oVBSqtqHQg9iqG+C6+05thyJlKwVtZlK6xz3HMWKRV/a5waS6HbrPD31IydjSnTVWr+kWTyRJEODcWY1zgKTyG1dDhBU4lJ6FdIUlYSk8oPYT5J3AbrNR4i7BpTtLvWIT5afKVkGeXOLFhW0nt5vszKlvSSPUJSB3457dq661UV4ewaARkQQAIM20zmOK5qdM0ZY86kzoQSTpfXKJ4L17J0lydr1PR3Zk6w37y1/BXQiOhXH5lJIqvEkD79TF/kuf+6lVd3QbR+36G6Z23AYl0eusttx6ddbo+kJduNwkOF2TJUPgVuKUQOTwnpHJ45rn3qhds7sXi/ovGnOKRcmvETHG3BaX5yYZmM/cxXmobeUChDvRyUdfCSQASAeRmyKdaiyZwtLZ/DRc2d8Wnkrul9Ks7LEcXeq0xum8c9VbbxFMctGSbM9TWbuy2sQLWm4xlLHduQy6haCk/Akjp/Moj41zo1uynJ7Z4XW33GLo8+iBfMhlKl8kjriMPyVMIP0p4UFAfxE/RV59dbdrvvHxFnRGzaR5HpbiF3ksLy3IMpfhCT7K04HDFhRoz7ynVLUlP4RZQjgcfGtr3Q7McW1u2xQ9A8QXHsbuKsxnMWedBLUd6O2W0IdIBUULQpSVKAJ5V1cEjg5EOpse+Jl1MxvDDJPWQBqY3Qtmua97GkxAffdiEAfEndbWVYXHkwUWC2ItYbENMNkRw383yugdPH1cccVzv8ADXAG7rdOB/8AP9z/ALwl1ue3fUTxBtM8RtmhucbU2ckn2JlFtt2Vu5XGiwRFQOlpckpDinAhIHdHC1JABQFck4XZLtv3daN7gtTNQszsOJNY/m93eVPlS5im5MlKZbjgkRI7PmdCVeYshDykkAp+jv0iDthqA+UteATxwxP1ymCuSC3YvBI8wcy34ZlTBcdQdLIW6HIcQ28aUWTI9aZtvQcuyBxfssG0RE9AT7ZISlS3HD+C/AtI6lFKQtSenkV1yG3Zha/F60vbzfKYt8uj2MOuuPQ7d7DHaSYs/hpporcV0Dg91uLUSTyfQD6YrpDvL2qbx9R9RNNdFWdUMV1KmSHw993I8EJS6+X2y464SplTSlKSrqQUqHofTj1ar6N707PvP053TRdKrHnctq1+xS7RZLmmFCtZKH2vZ1yZBK1dKHw4X/LAUrqAQAADjsx/49Q53n90lrxhA0FxfiZNwFvtAlu00xqPL+9dtyd9jbgLWJX9b+f3f+1z/KET/vFFb94wd9uVo2oRYUNxxES7ZXbolw6DwVMJQ890n6utps/nArE79NBNw+Y5zoluG0vweJk2S6fvtOXmxwZiQkOpeafHlLd6CtrrS4gnjqAKVdPrxMW
pWkOou8bbnkuD604fbNOp978mTYLc1P8AulJtchn3kPSn0BLaytfYttp91snlSlK4TnDm7GGAeZlRziN4xNIjfIGi0Dm/rfiH2XMaAdxwuBnUQTr0XtxnbaxkmJWq62zc9rW9a7pbmH44byOKW1sONhSQB7Lx0lJH6KyO2XaNpDtLk363aZ3y/vu5UlmRKjXe4NPkiOVAONpQ2gj9v4Ue47p9PjUnQS4+KPttsbOhg2+2XPrNaCWLLdJV4Ybbjx+fdSJHnJ6mR6pQ4lLiR29AEi5ugumWpFkkXPU/XbILdd9RMkZbjvtWtCkW2ywUEqbgRArlRSFKUtbiveWsjnkJTXW7CXuqUz5T3jMCM5nPQQbzAPIwFtNtKpciOVtd2WWuVokj2brcgvGK7aNUMhx9xbdxg4pcnYzjZ4U2vyFjrB+BTzyPzVEvhby7JK2UYMmzqbLkd24tTgk+8JPtjpV1fWUqQe/wIqz+R4/assx+54vfYqZNtvEN6BMZV6OMuoKFp/SlRFcxNP8AQ7xAthOeXzHtv+G2/VLTe+zDIjx5EltKUk9kLWgutuR3wnpSpQ6m1BIJ54HTz0nYKr2uycGweLS6x5h1uIC3qjHTYW5tcSRwIAkco7LpS1p/p/DzZ7UZnDrGzlk2MmA7ekwWkznmB3DRe461J4SO3PokfQKoBhrba/GlzJS0JUUY0FJJHPSfuZEHI+jsSP01ZzRzE9w2UXZvWfc3AtEO8WmK6nG8Hxx1LjFtWtBS6+88450PTFp5aSfM8ttC1cEdaiK5Y3pbujtniHXrdXL2zZAMPvEM2sR05FYTPaaERphLpb9u6CepkEpC+yVHuSODekMG1MLsg144CRYTlc/FVecdB8alvMw5pNs7AeltF0Pkf7nd/wARX/VXOfwY/wDzQ1f/AJUs/wCqXV9cuyTIrPiSrxYdOr5kNzeZ9yzw5UBmQhakEgOOSJDbIAVwlRS4rueQFDvVK/DP0Z3FbcTmuMaw6IXS2RssujFxjXSNerRKjxilC0rS8luWXR6p4KEL+PIHxihatVnVgHXGD8Apqn7Jv4gemFw+JC1nH/8Ahpr/APyZH/djFWD8S/8AcSal/wDNoP8AX49Qtrjo1uP0w8QC27q9H9I3NRbHeLWiBOgx7mxDWyv2b2daFrdPuDhLbgX0lPqDxUm7v8e3KaxbWLzpVbNFDdcxy5Edx5NpvFvbttpaTLQ95Dj8uS04+6ltpKVLQ0EKUrkcAVz1Gl+w06YF22PPGT2gzOXpO9Ihm2F5yOE9A1oPWREZ+sYHSzILxivhPMZDj7i27jB03uDsdxs8KbX0PDrB+BTzyPzVtHhby7JK2UYMmzqbLkd24tTgk+8JPtjpV1fWUqQe/wACKzu0HAc5te16z6Ba46S3PHnrXZH7JcPaLhb5cWew6pxJDSosh1Y5bX360J4PPBNVG0/0O8QLYTnl8x7b/htv1S03vswyI8eRJbSlJPZC1oLrbkd8J6UqUOptQSCeeB099Z7f1ysdHxBG8OcYO6Q7vC4qbCdlojVhMjm0C3IjsulLWn+n8PNntRmcOsbOWTYyYDt6TBaTOeYHcNF7jrUnhI7c+iR9Arn5uB0P3V7VNcMp3TbR3FZRjWXSVXHKcXDRkq83qKnepgEKdb6ytQW0Q631KHHTyTabRDDNwOZZFE1e3RJsVouluZW1juH2JwuxbQpxPS7KkPFSg9KUglsFKihCFLCe61cYPR/IdedJ7tqBYNQ9Ecju2MS8wvF3xe62KXBluGHJluOhp6OZCXUcqUpaVBJHC+FdPHfmLXNqNLTDgDGozHlOhmZjhvW4cHMIIkEidDkfMNbRE8dybLd4mmO7GDerpZ8a+TGewGo6Mjtb3CnFoR1Jadbd4BdaBUtPcBSCeCO4Js5VUdtW3TIbHuU1U3S5NiIwxOcobgWjHFPMuSG2B5SnpcryFLaS6840lfQlauOpXUeTVrq1cQ5jHRBIBI3Hd8ORss2gtc5syAbHeP8
AcjjE3zXOLw09NcX1LzLcDq3qBitpvsq65w/CZ+6cJuSGSlx15zpDiTwT57YPH70V5t7+leFay7o9GdqenGF2G18OryXLH7XbWY6mbeCAA4ptII/BtvcA+pdb+kVN+kuE6h7T8u1asNg0kv8AmtjzjJHMrxd6zPRUtpekoAehylPOo9mCFpTw4QpJQeRyodNb3ts273fTq+5XrRqtcYd41U1CfD94lRSpUW2xU/tNvilQCvKbSEgqPBWUjt7oqlGPsD7tNrCRvcGjyx+K50t+8FpVJmtHtPc6DuBJ838thrJ4GJtmTrLjVqD9wnw7Xb4qEt+a+6llppPZKR1KIA+AFajozgOS6e4xMt2U6rXnUCXcbpJuaLnc0oSWmXiCiO0lBIDSAO3B47ngAcJER72Jln0907ybW7MHYl5jWDHnLXjGPyoocZF8mLLSZRCiQ450qaQn3eUID/B988b1tI0/uek23XAtNcguS5V6stkYNwQ471OR3Xup0tEeoSgqU2n6m+3pSmcYe85iB3JPfytPJwlVeAzAwZGT2AHbzEcwdwKl+ox3Ex9wMrTSe1tsn4xEy8g+Wu+trUjy+k8hkj3Evc8dJdSpv98B6iTqVR7MbS2Y5K7HYHYolc5NmTW7pqz5ZbcVn6ORM/YuxVnbWZ2+8uZGuaefLclLbf8ALW0UftKmuGun5oB6qsd7N4if/wA+tuf/ANS75+sVkdYNOcotWv2mOu2mlielzTMVimZNRilPtNjkJUpD7vJHV7M+lCwe54WR6VPla4sbA7IixGloy4REbrtkxKpGF5Gc3nnOfGQeYg2mFXL2bxE//n1tz/8AqXfP1iv0RvES5HN6258fH/wXfP1irGUqqlea2i4i3RReFRlTwwj2oxkqSyXukdZQFEqCerngEk8cc16aV/K09SFJ5I5BHI9RRxNyAoAiypZjeS6M23dfnzu07RVzNtUpifIzXIXr05BsNqUpYK0OPqDv4VTjYKkMMqJKFdx0r4jzX46kNeIdtkl6hDG2Z73trbSLEX1IS174Ulbj3BX3UeCEpHBPatw2k6W6v7Ksg1CwrLNK75muO5VfFXi05VjSo8l10EFPlS2HHUOtq4IPVwpPUV9+O9f1r7pFuFznXbSrdhZ9NJEtjA7n7McIRcIabqLarkqlKcU6I5fUpavwKXSAlLfvElfTNCKdTZXaNLSf3ZBkAaAExGcSclNcY2bSwZuDgP3oiCTqSBymG5qdN8f7kLVv+S03/sVidg2PWmDsv0utrcFlcaZYkyn21oCkuOPOLcWVA9jypR9a0HV3CdwE/bjnWl+MaH3G+37VJ29XF5SL/bGI9i9tkqLcd5Tz6S44lrpKvJC0dXICiOCd22ZM604FpVhujGqGhV0xpWMWYQnb393bZMhvLbPugIYfU8CoHn5nA4PJ9KUhapOZFPuA/EOMEgTrmFFU+xGQL/UsDTwkAnhkVpXimaV5FqXttiSMU/Dz8YyS33Ru2J567l1FUdMdpA7rdKn09CB3UQQOSQK/rcBua0v1M2vZHhOAT3b/AJ3mdgXZIWGQ463L0xMkN+WW5EQDzGA2VKKluJSkBB4J7c7bvsyFcXTqwYvh18da1OnZDBumDWmPFVJdudxhOpe8tbYICWAkHrcWUoQCCTWis6u+KQywh2XtN04kqCAVtsZS02onjuB1SSB/Ofz1iGirSqUn+y53/i0GDlJyIgkQDqti406lOq3Not/MSJG6cjabjRTXs80lyLQ3bXgumGWyEu3mz29RnJQvrSy886t5TSVDsQgudHI7Hp5Haq16+2yFdPFe0EZnx0vIZxeVJQlQ5AcbE9aFfnCgCPrAqa9u+7e7ap5vd9GNW9I7rpnqbZIAua7LLkpksT4nUEl+K+AlK0hRAI7jv2UrhXEI6i4duqy3enp9uYtu1m8NWHCrO9aXre/ldkEyT5iZILiOJRQOPaE8Aq79J9Oa6XPL9rp13ZSXSMrteBB/FbeDmuYMDdkq0W54Yg5
zLT6i+5Tr4hCQrZjqqFAEfcUHv/y7VZDYnbIVp2gaTxYEdDLa8bjSFJSOOXHeXFqP1lSlE/nrVd4bGuerW368aV6f7fbzOuma2RpMh1+/2lhqzvF1KlsPFUnlxaQj1a60HkcK9a921uTrfprt+s2nmcbeb5CvODY/Hhx0MX+0Pt3p1s9PQwpMnhtXTwol7oT8Ao1Sn5RVn9z+nxJjfmOc2laP83hR+9/Vgj4HlrCiHa4oo8S3c20nshUG3qIHoT0sd/8ASf569Xix/wC9xpP/AO0q2f6t6sXorhO6bT/d1qlr/ftr13XY9RmmIzEWNlVlVJgpbLQC3AZQSrlKCSEqJHoOazfiA6fbjdfGsZwrS/QG4T4WIZVFv5vMnIrVHYnoabUOhptcjzU8lwjlxKeOn0IINRS8lHZGnNvhzww1AT2F+OitU81banDJ2OOMsgev+Vdx7/c6/wDEP/VVDfB8tkJjRfUO6tR0iVLz2a087x7ykNsMlCefoBWsj/GNWnm6j6rI07YySNt0yN7IpElcZzGvu7aEvsN9KuH1SDJ8gtkgDhKyv3h7nY8Vs8O7TncZt4x676aaqaCz4UXIcklXv7uRcgtUiPDQ4wgdDrSJBeJ6mgAUJV88cgAE0ojDWqTqwDriafh+WazqGaTI+9PQNePiQsfubUW/E221rQelSrXPSSPUgiSOP9Jq9c6dDtkKRcrjKaixIjSn333lhCGm0glSlKPYAAEkn6KoXrPhO6fP93umG4Gx7XruixadsPxXokjKrIiVNDhdBW2BKKUjhwEBRBPHB4rN+Ihqtl0zRvANHYmN3ywZBrLe2LXcbUwW5k+Nbm1JVLaT7MtxDiz1NJ4bUoFKlDnvVWFw2VjG+0XuAn993lJ1jU7gtXgO2pzj7IY2SP3WnFHHdvUqXPd8Y2AyNdLfprJf0khzWo7mRP3LyJsiKp9LBuEaB5KvMihagepbza1IBUlBHHNiGH2ZTDcmO6lxp5AcbWk8hSSOQQfoIqg2+zXDHLRs7u2k+KaZZ3Yfuy3bcXsyLrjUiDGCQ63wylawB1eUyoBI7nirtYRAcxTTywWy8yUtrs9lisS3nFcJSWmEpWpRPoPdJJrTy4XuHstIAJzNpM6Wsf4uAWXmlgObgSQNLgDvJH8PNbHSsNiGY4vn+Nwcwwu+w7zZbkguRJ0RwLZfSFFJKVD195JH6KzNQQQYKkEG4Wnax5snTbSjLs8I5XYrNLmsp/fvJaUW0D61L6Uj6zXx0RwhWm+kGHYM93kWezRY8pXxck+WC8s/WpwrUfrNQBujvm63PozWn+mu1aXcMejZFBl3O43DLLRHF1gQ5aH/ACmGvPK0JeLSfec4ISSCjk9rKYFkOS5PjbF3y3AbhhtzcWtLtpnTYsp1oA8BXmRXHGiFeo4Vz9IB7VFO7HO3kZ7gDfqXEHXyqalnNbunLjFumHl5lzs3XtO7Od/+A7o7a2qPiOon/gvJugcNhzhLUgqA/wDVll8fStpRq5m211vKpGo2raHEvNZhl8tqA8k8hdvtyUwGCk/FJVHecH/Kc/GsPvo27P7mdu1+wKzRGnskiFF1x/zFpbHtzPPDfWogJ8xCnG+SQB1gn0qRtCNOkaR6MYVpolCErxyyRIL/AEkEKfS2PNVyPXlwrPP102fyUnMd7tm/hcQ7+ktIHB3FK/mqNe33ru5tEf1BwJOpbwW90pSiJSlKIlKUoiUpWKyrKcewjG7ll+V3aPbLPZ4zkybMkK6W2WkDlSifzfD1J7DvUOcGAucYAUtaXGBmq0+JRgOlGdbaro1qK4ti7xH0fJFyIx50528ue6xFYbHBc84+4pA/F5V+ICJI2hYRqnpzt2wzDtZb6m6ZRboCW31fOVGa55ajLXyfMU0jpQV/Hp+PHJ0DRLFsg3F6hRd1uqlpkQbFAStvTHGZieDCiL7Ku0hB/wDmp9PBQD+1tkcckgi0VWY00WuBsXkEjdGU/vb91m5h01e4VXNi4bIB3znH7o0
0Jk5QlKVruXZ7juFSLDCvT7ntWS3VqzWyOyjrcfkLSpZ4HPzUIQtalfipSTUagb4HU2CnQndfoLlVb8Vjr/YtNK/4lOXWUyPo8vzj6/p6auDG6DHaLfzehPH5uKgfffppN1X2n6hYtamFPXJi2i7QkJHKlPRHEyAlI+lQaUkf41b9oBn8PVPRLBtQYT4dTfLFDlOEHnpe8pIdSfrS4FpP1g0o/s6jdzgejmgD1YUq+3Td+6R/K6f/ADHZRnu0yWy7ctFtXNe7U6+MnvlpjW1h51zq6HwkxoiGhx7qULfW6R35UpZ+Nf3sS0ojaFbUMOs9wSGJ8+AcjvLi+x9olJ85XWfpQgoR/wBHX1326B5PuP253vTvCn2E35uTFulvZkOBtqU6wvqLKlnsnqSVAE9grp5IHJEX7sNyGo+EbQ8inQtGr9iVyVZW7NOlX1+I1HgOvpSwpEfyXnFyXPfV5ZQkN8DqUodPScTUNGlWePbOEDkASO7zfkN4nXAKtSiz3fMTwNh6NFuZ3LE+GNGez6frbuZuDZLuoeavswVrHcQo5UpAB+j8OE/9H9VWW1aZ3ROXiGdCrhpYxavZ/wC+05bDuLsgv9R7tmK6hPR09PYjnnnvWu7HNM16S7U9OcRkxixNVaEXKahQ4UJEomQsK+tJd6f82p1rr2ikKLhRbkwBv8ow+puuajUNUOrEe2S7uZHYQFXL2bxE/wD59bc//qXfP1ins3iJ/wDz625//Uu+frFWNpWS0Uf6RN6/Nw7j/d5m6fyJRcb+53yRjTWWw3wevzvanFkq56eOngcc81QnVa0rvHjN4BHyEEw2LTHl24OfNPlQZLiOn8z6Vn84rptVYN321zKNUskwvXrRifBt+qmm0pMi2pmqKIt1ihfWqG8sd08kr6Ven4RYPAV1JB2CvSqnJpvGnlIkDUiZQtx0KtIZuaQOcgx1iFZ+sdkOQWbFLDccnyG4MwbXaYrs2ZJeV0oZZbSVLWT9AAJqpLGYxtRsuv8Adty21rUiRIisRIdhx6Rizl8tsb8CDJcYdYC4peU+VgSFqQry0tcFPvCtK0zw3Vze/pnBwzNskcxTRzHLzOtNwhNTFPZFkfscxaWosxzuiM22hLSV8LcWtSConukiC17gWsid+YAyknhIsJMmOKkFrYc+Y3akxMAdDc2tK1Pw/MJv24bdBqPvtyq2vx7NLmSrdiaZCeFOBQDXWn6Q1HQlrkdipah6pNWd3ZazZVjdywjQHSi4iHqBqtcTAiz0oC1Wa2NjqmTwk9itDfIRz26uT+LxUs6VSdO28XVi+lsONEsGJS3cdaYisluOy5G4S422fxwlRKSoc8rSvkkg1UORgFw1s8TLOJM/Lsgx9jTXCLdFtkmzPttPoXLAWrhTjawEqDr4PA59O9WhjnUdnYPIB1LWtL7/AIjnwNt6rLgKtd/tk9A4uDP6Z7i62DW2yYrti1T27y9M4q4N0yXMBi98kKdU5Lv8GQ1w65OdUeuU4l0tuBbhJSo9uAeKudVAp2mk/UPxD8Lw9jUDLMvsGi1pVk16fvktmQmJc5PIjR0FtpsJUQllzggnhJ9OKt9qllUxhEPCsJ1Mw3Gs1ukmOqDHvi0POPxw6C8GowcQ44otpcCSOQCPq7Sw4qTSc3OdHASG9pa51tDIzUOGGqQMmtE87u74XNEnlot/qmo5T4r59q9FaRf3tz9Pt3vcf/XVcqqibhIatOd8GgmtTw8u1ZGxcNPrk/6JQ88lTsNKj/GdUoD/ABaqz/kU+bh1cx7R3cQOqs+9CpyB6Ne1x7AEq3dVG3xaGbgM7yjTnWrbLeLavNNMpE1TdpnutoRJRJQhKukuEN9RSkpKVlPKV8hQIHNuap5oDpLetquf6nZNkmF6i5pec/vbk9V8ti40+FIih11bCAz5iHmXkh0pX5iSnlI6F9PaqRNQXiJIPEQI6gnorTDDaZsRwMyekeqjDTvcNk2q2v8Agmhu/ja7b7Bl8WWu6YXdmkvJie3
tp6uQguOIcB8vspLi0haUgpHYjolVdoumWY62a+YlrhqPhzmJWDTeNMTi1lnPsPXKXOlJSh2ZK8ha2mW0oSkNtBxSuolSunjprI7y4W6SfpK0ztJmxY+YC6MKklxcVDqoXSvrDSpX4EK6/LJ6uD0hQHfsdHPw0m4h5pMgcTAJGhiCeAE3BVWsxVSGm0CJ4AkgHUTlxJ0gqeKqn4mWtN80V2qXybi8pyLeMols43EktqKVsB9K1OrSR3CvKbcAI9CoH4VM23fMMtzzRXFMoz5mO3ksiEWbv7Nx5S5bLi2XVo6e3SpbZUOO3Cu3atB30bcrrub0GmYPjMiM1kVrnx73ZhJV0svSWeoeStX4oWhxxIPoCQT25rLa6djTJkSAYv5ZGKN9p5rTZX+YPyMGJtBi07rwti2h6UQtFdt2B4GxFQxJj2dmXcSE8Fc19PmvqV9J61kd/gkD4VXLYWWdUtye4/czIKVxZuQJxa0SD2SYsb53B+tDcU1PS851s1Fw9nC8U0qvmBX+ZCTFul8yERlQ7Moo6XFRksvLVNcB58vpCW/RS1jjoVAm17QHVfAdF7btcynTufblJzJy85Xkin2FW6dbmpKH0COtLhcdXIDLDRSUJKEeYV8EAK6i7xNsfVNrEDd5nNB6Bs8wTGS5Wgs2RlM5yC7fZriOpdHWJzV8KpppjyfFI1iMc/ghp/aQ/wAenmdUbp/T081cuqibPYJz3cNuK3DJHmW675GxiNnf/FdYtjQbdWg/FKl9HBHxSaxpf8gH7rXnuMHxeOy2qfsHcS0f1B3wYVbuqaaAcnxHdypjf7nFnx0Ocenm+yt//wC1XKJABJPAFVF2LQVZlnuve4rpKoWd5s5bbO8fR+320KYQ6k/FKlKUPzoNKNtox7mP9S1vznolX9gRvc0diXfBsdVbulKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlEXzfeRGYckOBwpaQVqDbalqIA5PCUglR+oAk/CuaUl3UA+J2xuMb0M1SVp83bvuSq6fI24dX+4Cz5wY8rzejzDx8zq479NdMaVDfLUFTdPqCD6FHeam6nvj0II9QF5bXco94t0a6RG5TbMptLraZUR2M8EkcgLadSlxtX0pUkEfEV6qUqTwQcUpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpRFEG5bQH9kBjeNWtrIGrXKxTJ4GURUyopkw5jsYq4YkNJWgqbUFEchQIPB7+lbXpvp9Kw03q+ZBekXrJsnmJm3e4NxvZ2lFDYbZYYaKlltlttISlJWoklaiSVGt0pRvkBA1v8B/4jsjvMQTpb4n5nvySlKURKUpREpSlESlKURKUpREpSlEUA7gNEs6v2qGAbhtJXrZKy7T1MyIqyXZ5TMW72+UjpeaS8lKvIeA5KFlJTyeFdhWxta45miOGrjtm1PaugHCojH3IfaUv+JIE4NFP0KUpPb1A9KlulQ0YW4dL9JuY5m/MlSTiOI5/GMp6W5RuCrnp7o9qVmW4xW6DWC2QMYetdgXjeNYxDmJmPR4zjhW7ImyEANqdUSQG2ypCQfnKI5NjKUq0w0NGQ+ZJPcknqq5uLjmfkAB2AA/ylKUqFKUpSiJSlKIlVQ3VY8xa9ze3LWLJHG4+J43dbvbbnOfUER4EmXE4iOOrPZCVOo6OpRACikc9xVr6+b7DEplceSyh1pwFK0LSFJUPoIPYioEte2oM2mfkfQmNxuhuxzDk4Ed1UzK7F+zB3E4dMtfEzSDSGYq8vXRHvRcgyEdmWYy/mvMxwOVuJ5T1FSOT34lTcpqxl+k2EXnKrBb7QxbrHZJl4uF1vMdyRFK2wlLEFtlt1ta3n3FhIV1hKAPRRUBUwoQhpCW20JQhACUpSOAAPQAVGO4jRh7XTB4OItZC3axAvtuva0SIpkRZwiPB0RZDaVoUppZA54UCCAe/HBq4HAKbbCbn8Vi7jAiBuaG3vN2kYzUcJtYcBc
DqZk73E2sBtWmM2bc9OcYudzxiPjkybaIkmRaI4AbgOraSpbCQABwhRKfQelbNWuYLjF1xe0PsX7JX77dJ8x2fNlrSptrzHCPwbDRUryWUJCUoR1HgJ5JUoqUdjrao4OcXARPpw6LGmC1oaTMa7+PVKUpVFdKUpREpSlESlKURKUpRErnDuX3X4RnuvzWmOoeI5/L0kwKWJFwiWjHXpAyi8Mr9xt7qKB7Eyoc8d/NWn0KeDXR6lVgh7X7r9dD0zHGDorSMBbv+GvfLlKp2jxQNCG0Jbb0y1fShICUpTh6gAB6ADzK/T4omhgHI0z1hP1DEFf7Wrh0qyrkqDZz4s+O2+C6nTnbRqpe5wSQ391ram3x+r4EqbLyyPq6Qfzeta/sPyvXfdfuNyDcRrzbJNrt+D2xVsxiz+xuxYcKRMPDi2UOe8tYabUFuKJUfMSOQAAOjNKmmQx2MiTBA4SInsSoqS9uAWEieMGY7hfikpWkoWkKSocEEcgiq56UWlzbDqDcNHp7amtOcxuj1ywecEnybZNfUVyLM6fRsKcKnY/PAUFLQD1AA2Nr8ICuygD3571DfK7F0PEf7AIPyJBl3mbh6jgf9Eg8DoYIqvuJ0RzTJNy2muufmZnesTwiKpX3BxmVHQ8i4pdUtLrrb7rYcZcSpKF+WVL/AAYSU9KiR9M80Z1C3a53jcnWDGHcN0nw+cm7MYxMlMv3PIp6AfLcmCOtxliOgE8Nhxa1dSurp5HTaWlKf2YaB7pLhzJmeMHKcoG4I/zkneADyAiOEjPfJ3r8SlKEhCUgJSOAB6AV+0pRMkpSlESlKURK5cQ9m3iJYXqlmuA6R6rtYrpbmGQy7s9d2bizy2zIWVKUhrgyW3wjhJDfQFKSPf494dR6VUNGMPN7ERoQYMHsFbEcBYN4M6gibjuVqOk2mWOaOac2HTTFEOfc2wxEx0OOnl19fJU484fitxalLUfpUa0rJ9D8lj6xTtbdKsutNhvt+sTdgvbN1tLk+NIbZWVx5KEtvsqDzfUpPBUUqSQPdI5Mx0q7yXv8R2d/UEH0JCo0BjcAyt6EEeoBUeaMaK45oxZbjGt02Xd75kM9y7ZDfp/SZd2nOfOdc6QEpSB7qG0gJQkAD4k133IfI207jMAxW8uuW+ztzZGquQzH3OXrtMt6ERbbb4vPd1YdWkhhHJ9OBys83Lr5uMMOrbcdZQtTR6m1KSCUHjjkH4Hiovja8afL2Y3YSGkDLygRCm2FzTrPr7U8wSCc7zmvja5j1xtkO4SLe/BdksNvLiyOnzWFKSCW19JKepJPB4JHIPBNaPr1o7aNddMbrp/c5SoEl/ol2q5Njl223FlQXGlN/HqQ4AexHI6h8akKlQ8B8xb5boO8aFSwlkHP59OO5RnobqVeczsT+M5/b02nUHFvLhZJbuCEKd4IRMjk/tkZ8JK21j+Mg8KQoCTK/OBz1cDnjjmv2rudiMnP67clVrcIgZfXdK1nUZebnEpkTTuNGXfp3TEiyJLgSzB8w9KpSwe6w0klflp7rKQnsCVDZqVRzQ4QclYEtMhYPCMStmBYfZsLsxcVCskFmEytw8rcCEhPWs/FSiCon4kms5SlWc4uJcdVVoDRASlKVClRHuAzzKIFqZ0s0pR7RqJmTLka2K6SWrRFPuPXSSR8xpkKJTz3cc6EJBJPG06O6WY3orppYNMcVQr2CxxUs+csfhJLxJU6+4fitxwqWo/So1uXA56uBz6c1+0b5QRqc+kwOknmTugA7zEbh88z6dBzMwxuIy7KrlARobpO6oZzmkZTKpqUlTePWtZKH7k+R83gdaWU8guO8AdkrIkDTLTvGtJcAsOm+HxBGtGPwm4UZP4ygke84s/Fa1FSlH4qUTWzcDkq4HJ7E1+0b5QRqc+kwOknqTpAB3mIOgy6xJ6wOQA1klSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlVv8QDWzVTQHbjdNQdIbah69Nzo0R2YuKJCbbHcKuuSWy
Ck8EJSOoFIKwSD6VZCqv78dw+sG2DTqPqjguO4dfMfTJat1ziXhMn2kOvFQQtstrCFI7AEHvyee49Ma58kTFx8cuuXVbUBLpibH4fLPosT4a24TWbcZohcMr1njIfmwLuuDBvCISIoubAbSoq6EBLZKFkpKkJCT6ccg1CGue/fWDLt49q2m6C36BiFvRkDGP3LI125mfLXIKgJBbbfCmkpb95IBSSpSSeQDxUzbMN1mrm6vQDMc0smGYZaMqsdyXarJBK5LVsWoMNOJ88grcSnlwg9A9AO1cr8IlarL8QluXbYeMOahK1ElqDEh98Wk3H2lzrSXAnzvJ6+rg9PVxx2rrcMf6QpUnCGkAkDX2d28EkjeQuVpw7DUqNMukgE6e1v3RAO4FdENr+8/V2/bzMu2pZ3Lk5fZ7XIuEeBf5dpZgXFhcQe8p9EZCGS0spUEkISeVN9+/FX/rSNK7TmiLA1kGrGN4db87mBbd0exoOLjuNpWfJAeeSl1fuBHIV2B547Vs2RT7ra7FPuNjsLt7uEZhbka3NSG2FynAPdbDjhCEEntyogCs3HBTa11y0QSNTvgK4GKo5zbAmwOnCSo33K7kMN2w4JEznMbZc7mm43WPZ4UC2pQqRIkPckBIWpI4CUKUe/wAAPjUrtr8xtDnQpPUkK6VDuOfga5mbss/1Z1t3SaF6M3LQK4wZ2OT15nIx05Fb313Jlo9SCXkL8poAR3h76gT19h3HPQzTvJM0yiyuz8504k4XORIU0iBIucacpxsJSQ75kdSkAEkjpJ5936xSmMVIvP3jHIQOvmxdkqHDUDBuE8zJ/tw91tNK+E6Q9EhSJUeC/NdZaU4iMwpAceUByEJLikoCj6DqUkcnuQO9UwsHiI5Fdty2Qberjt5v8G5WKC+WYEeY1OuVwnAtKabHl8RmGi24panVvFCQOSoehqDif4YzgnoM/wDWeSsRhZjOUgd/rPJXWpVGNQd+OvO3vUfFo25rb1Z8TwLMJaosa523IBcJFv4KQovqQOhZQFJUpICeU8lJV0kVuerO53dFDxGfqfojtni3TB7ZEcuX3RyK8Jiz7jCQnqVIYgJ4cbQUAqT5hDik8Hyx2BguAZ4hyvJ3Rv3fPTIqQ0l+AZ6cZ3b93DVW0pUR7WNxOP7o9GrTqxYra5bFynHYlwty3Q4Ycto8ON9YA6k8FKkq4HKVJ5APIrT4O5/KNXtSsl0z20YnZr4xhboi5Blt+nOMWqPMJI9ljtsoU5KcHSrqIKEJ6T7x5HOj2uY/wyL59N/K4vxG8LNrg9niDLLru52NuB3KxlKqVct6eWaIaw2XSDdjgNpxuNlR6bDmVgnuSLRJX1BJQ8h5CXGClSkhRJUE9aSfdPXU566654Ht505nal6gTHUQIqksxo0ZIckzpK/2uOwjkdS1EH4gAAkkAE1QkBgqaG3XKN8zaM+6uAS/w9c+m/lxyWw55KzuHjEuRpva7HPvqU8x2r1NdixfQ91LaacWfh2AHPPzhVXvDu3Xap7qoGot51Oh2SCvHrtFgwYdpjqbaYQptZWCpa1rWSUjuT+YCt8TqNu8ViJ1ImaK4W3bFRTOXiTd9kKv6I3T1dIeLIjKkhPfyeAnn3fMB71V3wV5CZmK6vy0IWhL+QxHAlY4UkFt08EfT3q1Fp8Sq12jQeRxAfCfVVqOHhsc3V0cxhcfyXSilK1fUvUzB9H8LuWoOouQR7NYrU35kiS9yeSeyUISOVLWo8BKUgkk8AVVzg0S5Wa0uMDNbDOZkyIUhiFL9lkONLQy/wCWF+UsghK+k9lcHg8HseKpPoHqLuLu++3OdFso10kZphOAWJEmYDj1vgBc2Qlny2iWG+v3C4s/P79B5HrUzR9x2dpws6v3zQm5WXT1DQmvSZl2R93GLeRyZrltQ0pKW0p99SPaC6EAnyyR01CnheRJObWzV/cnc2lCVqbm0pyMpY7+xsFRQB9QU+tP+ZVqTSK5nJrXEjiYaJBy9onKZbGirUc
DREZlzQD3cb62bG7zcVeSo9151xwfbvpjd9Us+meXAtjfDMdCh502Qr9rjtA+q1H9AAKjwATUhVw58WfKtdZWv7GI6q3i0psEWGbjjNps8h1yOxDcdcbS6/1oTzJX5RKzwQAQlJ4rnrPILWNsXa+veMv8LekwEF7shp9ab12QkXXJ9RNJYmRafXtjF7xf7TGuVvlTYInoiF1CHQlxrqQHPdJSeFD15HpxXOrZR4hu5PW/dHY9HtRpuNu2aWLimV7Fagy4VMR3VpKV9R4HUgfD0q4+0uVuLf04sresFo0+iY4jF7d9wXMelzHZjifITx7Sl9CUJPl9JPQT73Pwrj1sgzaZp9vMtmS2vEbtlNxaeu7EGz2tAMibJcYeQ22FH3W09SgVOK91CQpR7CuzC2n+kH0/dhxAzMiQLZzlbkuQOc/YPE96W3yzEm+74XX+gmlcutXfEq3h7a9YYOMa96GYTbrNPZbnot1vkuuyVQlLKeW5iX1tKcSUqB5bA5T6AEGug991v08xvRdWvl5u5j4gmytX0SSn31R3W0rbSlPPdxXWlIT8VKArGQKRrT5RY8Dx7HsVtB8UUdTlxy/Md1v1Ko3oduu3UbtMXzTVHRewae4ni+MyXIlpt+QwZlwm3d9toOKbceZkMoYHSpvulC+FL47gE1IGz3ei3vA00v7+P2i345qHjiAzNtkxa34SXVpV5MhJSUrUwpSVAp5CkkEcnso2wm4i4AdGsHI/V96qSLGbEls6SNPqx0VpKVy6t3it632XcIdFM90pxWQm3ZI7j9xOPMzZEp9bTymlCKhTnvrWpPCAR6qHPFe/cd4he97bhl9juGoe37CsfxXIvMetsCRLcmTHGWynrbdlMP8AloeSFo5/BcDq9FcVRrmua14PldkdMp+CuWlrnMjzNzC6bUrR9EdWrBrppRjOrOMsusQMkgplJYdIK2HOSlxpRHYlC0rTyPXp5reK0ex1NxY7MWWbHio0ObkUpSlVVkpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESqVeLz+43uP8oLZ/21VdWqf+JVpvrfrjo5H0e0a0jm5M9cJ8a4y7p92LdDjxEsqV+C6ZD6HFrUSD2T0gfjc9q59pBcwADVvo4E+gW+zkNeSdzvUEKL/BP/AHPma/yvX/U49Ur0z/4VJj/2rzv649V7PDa0n3G7XdNc4wnVLQK7tuvylX+2OQb9Z3/bnfLaaMMASwG3D09aVLKW+ArlST0hVYcV2cb38f3bx9yb2219yGjM3cmctqMrsvm+S5JU4poKMrjqCVkc+nIrvLx/6lRre61rZP8AJ/8AyfohcQYf/T6tL3iTA/n/ADC7LUrE4teLnf7DFu14xS541MfSS7a7k7FdkRyFEcLVFeeZPIHI6XFdiOeDyBlT3HHPFYERZagzdUT21f8Ayx+IrrzrIv8ADW7BIcfCrYs90pcBCXek/wCNHeP/AEv11etK0L56FpV0npPB54P0VSjapp3rZtos+pWAK0muF+y3J8ynXe3ZEqTHRZpMZ5KAzJkveb5qAkhZU0ltTnJISk89QtZpbgLWm+GxccVcnLnPW69PulxcR0rnz5DinZD5H4vU4tXCfRKelI7AUp/sKbdzG8y4+Z3ZxdPG17xNT9tUdvcf5R5W9wBHCTunba5s6R/8Mpqd/J9z+qQa6PzpD0SE/KjwX5rrLalojMFAceUByEJLikoCj6DqUkcnuQO9c+NOdJNzGP8AiE5bufvW3LIG8PyOI9bmG2r9YlzGUeSw2h1bft3T3LHJAUSAr48UoW2trjlgeOpEDulW+zOAzxM9HSfRfDxrkpOiOn6ykdQ
y7gH4gGI7/wDCrvZQhLmil3bWkKSrFpAIPoR7Iqqj+J1o1uA3JYvi+n2j2i90u6LHdzdpV0fvFqixljyChLbaXZaXSeXFclSEgdPYqB5qxV2yPVCdt/lR0aEZP8rJVmXavuD907P1pkKilHml/wBt8nyOs8c9fmcd/LrnqNLti2hozLrDUywD4hbtcBtezu0AM8PPPwVN/C4u16tOw7Vq6WFThuVuud6kQAjuoPptbCkcD6eoCtm8Fq5wZm3jMIiVpVcWcweelknlag5Fj9ClH1PPSv8AmNbN4ZWjmuW3vTe/6Ua0aNXKzput7durNyTdrXMhlpcZtstuJZlKdCuWu3DagesckcGsVpptv1l2M68ZRlujeEydRNH86IcnWO2zGGbtZXUrUptTbchaEPpR1rQOlYJQr3uCkFXe5zW7S4k2fTY0HcWwYO6ct0i64WNcdnAGbHudG8OkW5TO8zZf14y2O225bW7XkMhpHttlymIYrn4wS608haQfoPuk/wCKPoqC9z+b3+9XnY1jGoLzn3OlRLFebul8npfkLchtqU4D6kICvX/0ivpq1OtmkepG9694limYYHdtPdJscuaL3d0XuRG+61+kISUtx2mIzrqWGQFLCluLCj1dkdga9W/3ZfK3O6aWJOnMqJacywZxT1hDivJYeZUlIXGKgPwf7W2UK9ApHB4CiRzUSdmIquEjxWvjXC1uH4mQM/LOoXTVA2j7MGPs3Nni4z8LE5X4FWxkf7nd/wARX/VXOHwYu2P6y8f/AESxv+w7UqaRayeIHc8ci6Z5ztWhwsmjMCDIzW45JHRbAAnp9qcjNBa3l/EoaXwtXxbB7ad4a23HdRt5k5UNRcZxi2Y5lc9ua+mVcCu6JU2HAlTbTAW0Eq6wSFuBSePStaTMNaoZsWQDv8wPwGv5rN7i6i0EXDgSN3lcPidOsSJv/XPLdJc7huC8Q3SXbBJKnMPw9CMsvUM8lqVIShb481PopIQ20gc/+mX9NdDaqhqTotl+Bby7Ju3w7EpuV2i4Y85jeTW22qaNwiEAeTMZbdWgPJ4ShC0JV1gDkBXPAzYQ2vTecgSeoa7DPDFCu4TRqNGZEdCRijjhnnktu37ajo0t2j6j5A26G5Mu0qs0QfEuzCI44+sBxSv82spst06RpPtc02wh1tLMxFiany2z2V7RJ/Du8j6lvcfoFQhvT0q3C7rdMorWMYQ7Zcex+8w7qMXuTzCbvkKG1KDqiUuFmMEoUS20pZUsklXllKUmftNrXkmW6l3LVnIcRuOMWyFZWsbxu13Ly0ywyXPOlyXG21rS0HFpjoSkq6umP1EDqAq1EEB+LNxHRrWkjnic6OESciFWtfAG3ABPMuIB7NbM6zAzUvVxS8aH91Dj/wDI2J/W5Vdra5leKtsx1v1r1Cx3VvSHF3MnYjWZNmuECK62mSwpt5xxDqULUPMSoPEe7yQU9xwea5a4OOmdzv8AxcPiV00SA2oN4/8AIH5K/ei3+8fgv8lbZ/VG64s+Gl+7/wAe/wCVvf8AVZFdJdukzeVA0vg5JqxpkmGrEsbasVjwazTojc2+PgNN+2zHpDoZY6ENjpbDgI6nSQT0INL9o+zfehoFuasGtOU7dZc21RJEz21iHk9lLyG5LTjZUgKlgKKfM54JHPHHIr0ZB/Sb6uhDvWY+R4TeDIHAAR+jvC1Bb6C/5cdJC/rxuAP7s+nh47/Jh3+tLqSN6VxusPwotHo0BbiY86Ji7E7p9C0IKlgK+rzEN/pAr+PEq2u7qt0Wslpu2mGhUx+w4zajbGrjJyC0s+3LU6pxTiG1ygtCB1ADrAUeDyB2qy2LbfMj1t2NQNtuuGCTsGvVsskSyIW7MhzgiTDbR5E5lUZ5xJQVJHKFFCvnp44IUeBjCdgezXGHRqQHOPwI7rte4DbmP0wFs7iWtHyPZVZ8LCwa+ZhoDf4WlGuWOYhbbfkr7cm3T8OF1eW65HZV5vne1NcJUOEhPT26D379rC7LvDz
u20bU6+ahOazt5QxfbU5bn7e3YDBAWp5t1LvX7S5z09Ch09P4/r9NSNvukniJ7DNRL3bcI0SRnVhvhQ1KajSUvQJnllXlSG3ErSthYClD8IkdlEKT80joXpDC3KyGrrrHrxFQ1ezbVx7LpzjE5HssVHIWS8886lmRMcUlKQtSw22nkAjrUa7nPaXePT+7B3+zERmZ3DKdFxhjgDQf96RuzkHdbec+MrkzjH/CvD/2wyv+8F1aTxwwPklpQeO/3Run+qYqKbPs63uW/d+jcw9tskKg/LhzKl2xOV2XzgyuUXi0Fe19PWEq459ORVgPE20L3L7phguP6UaD3ORCx5t6fLnTL9aI4L8ltrmOlCpfUVN9BSpXzSr5hUnhR4Axw/R+z0vea4SN1m/kfohduIfr9ar7pBg7/a/MfQKm7wxf3EenP/J3D+vyKtLVZ/D5wvVjSrbvZtI9XtMZ2KXbF1voQ+5c4EyPPbekOuhTZivuKSU9YCgtKfUFJV34sxXobW4OrOc0yCZXDswLaYacxPxSlKVzrdKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlEUe61Z1qNpxiMzL8E03tuYs2qHImz4j+QG2yQ20nq/AAxnUOkgK5Clt+g46ue1dNqO/vPd3l4usPT/btDttusCo/3UuN0zLoQyHirpCEIhKU4vhCyBwB27qTyKtTqV/vc5V/kSd/qF1zi8Dv/wA2tW/+f2j/AFcmmz+arVDrhrQRzLo7JtHlpU3NsXOI7AFdM729eo9plP45boc+5obJixpkxUVl1fwSt5LbqkD6w2r81VawbeFrlnWumS6ARNrES333EW25F3mzc5SILDDvSWnEuIhKUsOJUCkJQTxzyBweLZ1WLSNCBvz19WEgE47ifJ/6F/8A+ApT/bCbiDbiASP8o8xScRmMPq4NPxVnE9RSCoAK47gHkc1+1WLW/wAQTRfQzUS0aXZFaMqcvV2uDMMvyLS7b4EdtTwbXIMmUG0utI5J62Q4k8fOHrWj6neJvYNOHW7/APscNV5+ALfSyjMXbOqFBkgngOR/OADiFfilam+r4CqhwcA4ZExw+r9VJaWktOYE8VdWv5cWptta0tqcUlJIQnjlR+gckDn85ArUNJtWsE1twC16madXpFxsV2aK2nSOhbaknhbbiT3QtKgQoH4j4jg1Gdu3VSdRbzerft70lvGo9txyUuDcb+3cotttapSfnsRXn1cyVp+JSnyxyPf7g1ZzS1xpnP4cfhwyVWkObjGX1bnnbOx3LQLVvky+5bx7LtRu+h7+LLmNPyZU65XdmS+pkRHH2lNtxuppPV0Dk+av4jgEVbyuVth1Bj6k+MFi98Tjl5x+ZFs7sC4Wq7sJalQ5TVtkBbaulSkLHcKStClIUlSSD3q/OuW5XAtCpuNY3eI9wveWZpNTb8dx21pbVLnvFQTzy4tDbTYUpIK1qAHPbngijb0aZObpFrz5iBG+QBlzR9qz2jIBp5Wkzug78lLNKrnlW79ekmbYpiev+lNywiDmsr2C0X1m6x7
lATK5H4GSW+lbJ95PfpUnvzzwFEYfxD9e9VNBNArtkGluHypEiW2IknJPaY6GLGh1aWg55alh115RWAgIQUpPvKPu9Jzqv8On4md467uGYz3jetKbPEfgy16b+ORy3K0LTrTyA4y4lxB54Uk8g/pFf3VV9hOome3zQnTfG71o5k1vtTeNMrRlcu4252JNUByFJbbkKkgrJJHW0PQ9XFSHle4t5OU3jAtG9Mb5qdkOOFCL0i2TIkKDbHVDqSw9MlOIQXiOD5TYWpIPvdNb1meDUNPOJ9DExmOqxpP8Vgfll68cj0Uy0qu+g29TBdZdRL1orf8AFb3gOpVgK/a8bvflqW4lABUph5tRQ6AkhXwJSepIUnlVWIqsWDtCJB0I4K03LdRY8EpSouyPcXp/aMql4Djce85tlVvAM6z4xC9scgA+ntTxUiNFJ+CXnUKPwBqpIFlaLSsvjWc3XItWMzxBliL9xMUh2tkvBCvOXcZCXXnUFXPT0pYMUgcc8uHv8K3qqn6Ebh8Vxuzy8v1VxvJcLY1SyqZdrdfLxGZVa30PLDEFlUqM66hhXszDCR5xbBUD0lQIJteCCAQeQfQ1aCGtJ3CeBgEjoTHKFUnzuHHuBYEcDE85WlaxagX7THA7nmePaeXLMXrZHdlOwIMyNGUllttS1rUt9aewCT2QFqJI4Sai/ZJumuO7jTK86kz8PjY0iHf37VGhMy1SSGkMsrCluFKepRLp9Egdh2qW9Wf96vMv5P3H+rLrmN4bu6CxaHbZ7rjlt0/zLULMLhlM2bHxzE7U5OlIjCPGT7Q+UghloqCkhR5JIPAPBIpRcDUqtfo1pHMug+g6XVqzSGUnN1cQeQbI9T8F1kpVVNs/iH6Vbh83k6U3DGr7gWdMFwIsl9QlJkFsErQ2scHzEgEltaUq4BI54PHo3Mb8LDtZzK1YpnukGXy2L+SLRdYD0JUWYUlKVp5W8lTaklaeQsDsQfTvVyCC0fey48lUGcX7ufBWjpWEgXm/S8WF6k4lIi3MsqdFpXMYW51DnpR5qVFrk9u/VwOfWo+28bgRuEtF5yG36dX3HLXablItCJVykRXETJLCyh7yfJcX1ISocdfzSeeCeDSJcW6gT0y+JHdJGEP0Jjrn8j2UuV/C3Wm1IQ44lKnD0oBPBUeCeB9J4BP6DX9KJSkqCSogc8D1Nczrxuk16yTxH7LiNz0ayD2PC7ZP+5WFxLpbvbH/AD4hJmvOqkCN1ltQPT5p6E+6OVFXNWnFUFPUgnsCfrvkCrEQw1N0epA+fyzICtZvI3T5DtSwE57B0dn5bbgtuO9OF1jxIsV51RS2lYPW+ruBz0tdPcDqBPaUNEc9m6p6PYXqVcoDEKVlFihXZ6Mwoltlb7KVlCSe5AKuBzVVfE5vV3yLYRdb1fsSn4xPlXO2KetM99h5+KRMACVrjrcaJIAV7qyOCPjyK8mku8dvTzbPp1ZdNtEc81UlY5htqTfZOOwSYFtcTDbUtlckghbyU8FTbaVFPPvFJ7UpOAp1sebXtA5YSTlaJ15XSo046WDJzXE88QA9NOavXSoZ2tbqNO92OAvZvgbM6C7b5PsV0tc9KRIhP9IUAeklKkKB5SoevB5AIIHszrcDHsmYStMdN8EvWouawIrcy4Wu0Pxo7NsZc58tUyXJcQ0yVgEpbBU4oDno471Z7TTdhdn/AIn4X5KrCHjEMv8AMfG3OylqlVs0q3vYnmWsUnbxqXgV90z1IZT1x7TeHmJDE5PR1j2eSyoocJRyoDgAgHpKiCBZOkeUO0OSTct1CVi8lul0s1jl3Oy41Lv85hHLFtiPsMuyFEgcBb60Np9eSVKHYHjk8A/zleV45g2N3LL8vvMW02a0R1yps2SvpbZaSOSon/qA7k8Ack1CVi3R5rmePI1GwLbRmt6wR1JfjXNU2FFnzoo9ZMa3uuB1xBAJQFKQtY46UnkCqEgyJiMzu5nIa57juVwIgx/n5npvG9axtF3p33c/qdqNg9z
0vThreBeSwpl64iXKXILzrbqXFJSlCeC16J6vj7xq1VcyPCov9uyrcjuQyezrdVBu1yTOjF1pTay07NlLT1IUAUngjkEcj0q8WoOvkDF8u/uY4Nht5z/OhDFwesdncjtCDGUeEPS5MhxDMdKjz0gqK1ce6gjvV/8Ao0S4eZzQSONye3oAqH9tWaD5WugcoEdyepKlWv4ddaYbU884lttAKlLUeAkD1JJ9BVatP98WNXfWhO3fV/Tm/aXZ/KSlduhXaRHlRLgFAlAZlMKKFKVwrgccEgpCir3agHxT9w+sGFOYnpHjmGXC1Ytk10Y9rvftkbrvflLaWqEwhLhW23ytAWp0I6j7o93qKoFywD3yADpnB7bs9M1P3590Se0+u/LXIFdFqVo2nGfZblsG5Tc30kvunyYHQWxerhb5HtKCFFa0mJIdCQngc9ZT6jjnvxGVz3W5LfbPMyzQzb1lupWLQFPJVfYs+Fb403yiQ4YTchwPSkgpUApLYSojhBVUOIZnz5Dju6o0FwkcuvDerDUqE9u+7vSPctgl0zbBn7lGdsHKb1Z5kRRuEBXSSAWWusuBQSrpLfV1FJHHUCkR5C8STQi56o3zTC3WbMVScdgPypb0iyuxnnpCHG224UeG4BJckOLdSlKFNoPPH1kWcMLsBziekTP5b9EBluLSY6zEK19KqLpR4juB6jWrUG5XjSLUbHXtO1oNxgmyuzZQbIdJLjbKT7OpIZX1h0pSnt7xr0adeIrp1q1gs7KtN9M86yS8t3F6FDxe1QEyrk4yhKCJUjy1FmIypSykKcc7lKunqIIEctwPQ5fX5FMs98dQrZUqpmk/iNaVZrecrw3UrE8h0yy7EUhcmxXpnzZMoFaUJbjIaHmPPFS2wGko6ldaSjqHPGNy7xHLdppmNkt2re3PUzBsTyCWIcLJb3EaaQFHj3nGUqJQAD1FPUVhIJ6TwRUgYi0D3ojcZyvkoJgOJ0z324Zq0uoGQXjE8FyHKMfxyRkFztNskzYdqjnh2c822pSGE8AnlagEjgE9+wPpUYbP9btSdftJBneqWlknBLuLk/DRCdbebTJZQElL6EPALSklSkd+eSgkHvwN51T1ew/R/D385yxF5kWxhlb5Nos8q5L6Ep6yoiOhfQgJ7laylA+KhWm7YN0uGbqsHvWoeEWG8220Wm7vWpCbi2j2h/y2W3C4G2lL4583gJBJ7fXxUM9qprAH8N8+sxHVS/2WRaSY/etl0zU0UqqUvxH9DoGtUjRe62TLrNLt0Z6ROnXm0uQQkpbC2mmYy/75edd6kBCA0CorT0hXPFYWf4kuOYhqdY8H1c0E1H07suTyBHtF/wAhhJjtukqCQtxrnltIKk9XClKQFAqSKM+0LQ33suP1GSO8gcXe7n8fmrNatZbkGBaZZPmmKYnJye8WW1yJsKzx+fMmvIQSltPSCTyfgASfQAnitK2n6w6ga6aM27ULUzTWRg97lSX2F255DrYcbQrhL6EOgLShXfgK/ekgkEVsusmteFaF4hIzXN2b67b4zTjy/uTZZVwUlCACpSyyhSWkgH57qkI/jVgNsW5DFt02nUjU7DbHdLXakXWRbGWrl5Yfc8oIPmKS2pSU89fp1H09aU/M58XgDpfPrIEJUs1hyknrbLpEypcpUIwNyU7O8qyfG9EtM5uaxsMnKtV5u67pHt8ET0AFyLHU51LecRyAo9KUAkDrrLaRbgomsOM5febPpzldruuFXSRZLhYLmiK1OXOZYbdU02Q8WSD5iUpWpxKT68hJ5qAZbiG7F0tcDUXGW8bwpIg4Tvjre3A2PY7ipVLrQdSyXEhxSSpKOfeKRwCQPoHI/nFQfvD1y1L2/wClDWb6WaUyc8uzl0YhOQ2W3nExmVhRU8tDIKyOUpQOOAC4CT8DTzb5un1/zze5qpc8h0UyC+TrBZV2GPidpu1uSbEw3MR1dbsiQ2y6tSx7621KJURx7qRxbjcBvO02204nCyLU3GMybmTGGVf
c+DZXH0svuI6hHXN7Qw4OFApDxPukgEd6q69FlWYxQf6jA4yBeJiVZtqz6UThkegk8IJ1zhTFg19uuT4XYckvuPSLDcrpbY0yXa5B5dgvONpUthZ4HvIUSk9h6egrOVrGmudQ9SNOMa1Iiw3IEXJLRFu7bDywVMNvNJcCVKHYkBXBPp2qLYe6KdqDJuf7HvSK9akWqzyHIcm/IuMW2Wp6QjstqK9IV1SVJPIKkI8vn8etq3lqubEETbOL/AZSsaXmptdMgxfKbfE5wp5pUFbad4Ome5l6+2HHoV1x/LMVdUze8du7aESoqkrLalJUhSkOICwUkg8g8cgcjmdaqQQAdDccQrTcjUWPApSqzbhd/mj23bK7dgmTWPLJd7ucpEZn/wAEOw4KQXEoU77XJCG3G09QJUz5gHoeK0nVTxLrBp1zkVq28ao5DgLTwaXmiLSuJa3wTx1xnHUgOoJ7JUooCj6EjgmgcCA4ZExP1zVy0glpzz+uyufStF0V1q0+3Aae2/UzTS7mdZ7h1IIcR0PRnk/PZeRyehxJ9RyR3BBIIJ0P9lRFzHML9g2gunV21Jm4q97Ne7lFmx4FohSf/vb2t9X4V4fFLSFgfFQq7gWOwEX+W/lcXyuN6o0h7cYy+rc+Cnaqw70N4OX7TsWGVxNC52R2p2U3b27u9eY0aImS4hS0jy0Fx8j3VA8oQCU8c9wTmdN95uFZTqw9oFqPiN8011HQnrj2a+llbNxQQSFRJTKlNvAgEj5pPBA5IPEReMR+5CH8qbb/ANh6ufaHOZTa9psSBzBcAfrQragGueWOzAPoCR9blbjTTL5mcaX4vnkuC21Lv1ihXZyMwT0pcejocKEFXwBVwOah3Z/uL1e1/Tmx1V0Nn6e/Jy6Jh28yW3kCWg9fUj8KlJUtvpT1KT7p8wcAfHbtJcvtGC7X9PMkvjN0diRsPs/U3bbXJuEhRMNrgJYjNrcV+hPA+PArX9se8zTnddfcytunFivsSHhyojbsu6soYVJW+XR7jQUpSQnyT8/g9/mjiu6s0N2qqxlwJtu82fy6rjpPLtlpvfYnDffIy6lT9Soy1Z1/w/Sm72bDlwLpkuaZKVCy4vZGkOz5iU89Tp61IbZZTwep11aUDg9yQRUV5dvkg6NZzjeG7kNJb1p3Gy4qRab190otzg9aVJSpL6mFdTJSVo5PSoDqB545IwZ5yGt1MDidw3ngt3eUEnQT038laGlVi3S757BtOvFqiZ5pNltxtV96hbLxbHYa40haQkrQep1K0KHWOygOR3BPB4n7HchvV8xJrIpeJyLdOfjl9q2OzGHXFe7yhPmtqU3yrt3CiBz60b5mF4yFuu7mh8rg05m45b1n6VEGge4RWvMjKfYtNb9j0LFbvJsMidcJMVxmTOjr6Xm2Sy4srSkn5/zT6Ak88S/SLA6EAjkbjuE1I3GOozSlRTrPuKxDR2743ha7bcckzbMpCo2P4zavLMuYU/PcUpxSW2WU+qnFqAAB4B4PESZ54gNk0NzCBhe47RjMMHkXprzbTNhLj3mHMAUEqSlcdQX1hRSCgIJ7j4EEw0h0RqY5kZgbzwUkFue6em/lxVsaVCdx3daU2jSmZrFdrXnMKwwnn2HUP4fcUSklpKFLWpotctN8LTw64UN88jq7GoYm+KPpa7o67q3iumObX8ImSG1WuLFSp2HCaWECbOdQVtxGlqJSjqUoqUkgc8KIEgEg6QT1y+Pa+SBpIBGth0+u9s1dOlV7u+93SLHdC8J1wv0W8x0agpZbsGPtsIcuc2U4enyG0FQR2PqtSko4KTz7wB8Wo28G6aFmw3jX3RS8YjiuQTkW1N8i3aLc0W+QsEoTLaaIU2OEqJUguD3TxzVy0teWHMHD13TlNx3G8KocHNDxkRPTfGcWPY7irI15LvMl2+2Sp0G0yLpIYaU41CjraQ7IUB2QlTqkIBPpypQH0mv5mXuz26zPZFPukWPa48ZUx6a66lLLbCU9RcUsngJCe/P
pxUE43upyXU22v5hont/ynMsMYdcaZvq7hCtn3TDaiFrgx5Kw48gEEBS/KCiCB6VQ5luoz4c93VWGQdofXlv6LT9vu9vJtbtzuY7fbzpAcM+Rltkvy1SroiZKXJakMt9P4IeUlPDpPuqXz2IVWyay7jtYNPNy2nWjuJaE3HJMVyxLZumRNIeKIfU6pC+FISW0eSlIcX5h7pUAOPU1L2MZhbM/8S/W/MLRFnxYtztEx1MefGVHksKEqIlbTrau6FpUlSVD6QeCR3q1Wou/vSLT/WDGNDzjmYzslyi6xLYwp2yvQIjQefDPneZKDanUBRPCmkrSrggKq1IYmbKT7TgCR94kuEcBlluVahwu2kD2WkgH7ogGfjnvVmaVgc6zrEtNMSuWc5zfI9osdoZL8uW+T0oT6AADkqUSQEpSCVEgAEkCq+ajbzsy0zxY6pX/AGo6hN6dtBt2RelzbcmYywsgJeXbw8Xm0nkfthQRyOoJqmIDM/lfKTkFbCTlr68hqrQ0qN8Y3A6b5tpBG1vwqTdcixuWwXWkWi1yJs0rB6VNGM0lTgcSoEKBHA45J471BenXiWaQ6q2/IpODYJnF2udrlohWrH4lvS/d7wooKnHER21qDLKOB1OurSkdQ54JCTYgtc5hzGY9FDSHNDhkcvroreV/DrrTDannnEttoBUpajwEgepJPoKrBt835YprTqvc9Cst01yfTfPYDS5DVovyE9UltKQpQSQAQsIIX0lPBT3So8VBfin7h9YMKcxPSPHMMuFqxbJrox7Xe/bI3Xe/KW0tUJhCXCttvlaAtToR1H3R7vUVPept++QB1MT9X0zQXDz90EnoJ+u5sui1K0fTXO8xzKJcJGY6P5DgSoRQGW7tOt8pUtJBKlI9jfeA6eACFdJPUOOe9RPtW3w6fbsMozLFsPxa/Wh/EXEq8y4oR0y46lqQlwdJPlq5QeUK78Edz34kDE/AM4np9fPcVEwzGcpA6nJWQpSlQpSlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURa9qK0t/T7J2Wk9S3LNNSkfSSwsCub3gdqT8ndW2+feE60Hj6vLk11AdbbebWy6gKQtJSpJ9CD6iuX+neA6x+GpuIzC9wtKslzzRXNldRm43EVMlW1CXFLZLjSe6VNBxbaurpStKgoK5HTUUCKdd+Kwe2AdAWmYPPIKawNSg3Dmx0xvBEHtmuoVVk0hSXN9m4B5HdDdgxJtR+hRYfPH81fFPiAadX+N7NplpRq1ml8dTwza4GHymCHD6B558IaaQD85ZUQB371uO2TS3OMW+WOq2rjEONn2pl0bul0gQ3vOZtUVloNQ4CXB2cLTYPUsdipSuOQATZgOPEcgD1JtA32JPCL5hVeQWFgzJHQAh0nsB14FVD8UCJFm7p9sMSZGafYfvQadbcQFJcQbhDBSoHsQQT2NdDM+wyw6h4RfMFyaC1LtV8t70CUy4kFJbWgp5+ojnkH4EAj0rnV4qk24W3cntruNotC7rOi3RT0aCh1LapTqZ0QpaC1e6kqICeT2HPerOZtu+N6wy42DSDSbUq6amTY7kKFj87EZ0H7ny1gpS7MlPNpittIJCipLygQOx78jADxP0caYEkvq21MkfXDVbPlu3B8x5ad9BY/XHRc7Ns+p+ZaT7BtzMGzT5CE228QbZb30KI8lycr2aStB+B8tCTyPQkGuknh72W0WPZtpcxZm20tybMJrxRx777rq1uk/SetRH6Kj3TTYBDxvZFke27ILvHXlGatOXO73RAK2m7sShbPSeOVNtKaaTz6q4WRx1cVDG0bcDrVs6xte2ncPtz1MujVklvDHbrjFkVcm32nHCsspIKUOI61KUhaFEjr6VJHFdeMeJUpuMuIpwdDgbhIH8RJ4hc7myxlRghodUtqMRBBPQRwuvTlbbbfjT4wpCEpLmPBSyBx1H7lyByf0AD9FWQ3T47tcsmomnetWtKbhJz
XHpiY2H2u1uOuzbrK8wLbZbiN93ilwggnpSCodauDxVUJ1v3LZL4kdl3DWva/la7VEtbCDAeeZjrjxXYjrKfaJLihGRIAX5imUuKKQQknq5rKb14Gt+mG9vS7dQxpVkGZYjZ7bGjrg2qOuYYLv4ZMhg+WCEOfhutCyAlSgBz2PGNLys2drjHncJ1b5nmRxi05AnotKvnfWLb+RttHQ0CPnxA6rGeLFf8AVPINLdOb1l+F27ErUrLEqg29dw9ruoX7Os9UktjyGeB/xba3u/B6xxxVivE/UVbFcwUo8kqtBJ/+nWKrz4itz1z3LaGYtm+MbcsxsOMWDIG5ns14Y/8ADsjrZWkPKgMeYWGEn3Spa+olaT0hI5M+bj2M93ZbDMtaxjSXLMcvEyNElWyx3yO21cZqIzrLyylhC1FPV0OhCVcLV0pPSOoCsazT+q1BGVQE8oZf0PbJa0nD9aombFpHXEfzW77WL6/i+wrB8mit+Y9aMBE5tH75TUdawP501W7w18b1wz7b1LzHCdwsLHlXXKLnJu0V/EGLk8ucpSFLdcfW+lSipJQeCOwqTNhWrGVZjodh+hV50Cze1nHbW5Zciu19hfc+2ojpStKfIU5+EkOrBQktBCQnlZUoADqrTpod0Xhi6rZTiTGjOQ6k6T5FNMmG/aWHXB8Q08hxtCw0/wBHCHGnEjq6BweAFHurOb+vVnuNnjynT2p/qERvI4SOOk136nTY3NhGIa+zH9JmeB4q0cXYDlUvc9YN0+W7hF3PJrO/HW+zExVqC1NZbbLRbV0SD09TRKCrg9uOxq49VQwLUPX7dleLYLpo5f8AR/TK2ymp9zcvrqmrzkC2lBbURpnpSpiOVpSp1Z58xI6EnhSqtfWUFlMMNhJIHOL9d3XVaSHvLszAE8pt0/xparHiF665lpHpRZsP0wkqi5tqbe2cWs8pB4XEDvZ15B+CwFJSk+oLgUO6a2Odp/YNqe0y84np4wBcmLUqI3NV3kXO9zOlhEl1XqtxyQ6g8n0HAHYCvHva0EyzWHFMQzDTePHl5nphkkXKbRAkOhpu4+UoF2L1nshSwlJST25SASAeRrmqG5DAc1u2mGJXm25Zj0hWRt36/wBlueM3BM6M1b2VvobDSGVF/mX7IApnzEH1B4NZMa17H03e84A78ENy4CXm2uei1c5zXse3JrSR+OXWPEgNA5mNVse7HG8QwLYjnOIXVplVosWE/cyKHAOPOaaQ3GI/jeaGiPr4rZ9lt0yi9bUtLbnmTjzl2fxuKXXHiS442E8MrUT3JLQbPJ9eajTVDTvUXezeLTiuTY3eME0RtM5q43Fm6tmNecreaPLbPs3PXEig9yXel1R4IQngEWtgwYdrgx7bborUaJEaQwwy0kJQ02kBKUpA7AAAAD6q2DnHxKlTN5FuDcVzuJLss4F81iWtb4dNmTAb88NhvgNud5jQrW9Wf96vMv5P3H+rLqlHgtwITO2rJrg1EaRKk5fIbeeCAFuJRFj9CSfUgdSuB8Oo/TV5c8s8zIsGyLH7cEGXc7TLhsdaulPmOMqQnk/Ackd65/eHtmE7Z9p5kWjG4rTjUDFburIXrlFmtYpPucCY2tllvpafhNPJUoFo+nukEcEnsM9nIbWrTqxoHEh8kDotK4LqVKNHuJ4Atgeq1PxR8Wb053RaFa4YVHETIrtcUR5C46elUh6HJjlpSuPnKKHy2T8UpSPQVa3xE9vX7ITbVfLfaoXnZLiwN/shSnlxbrKT5rKfj+Ea60gfFXR9FapddJ8l3fblsJ1jy3DrvjWl2laVybDFvsRUS4X+5rWlfnmKvhxiOgttEeaErV0D3eFHpuOQCOCOQaoaROxiibOxPcOEkFvqJjdEq4qgbV4ouMLQeMAyOxid8qhehW8ydlfh6Tc0alqlahY1FRhSGerl6Rd3SiPAc49SV+a0sn4lLn0VbjQfS6Doto9iemEEhf3AtjUeQ6P+PkkdT7p+tbqlq/zqobpPtQcxLxJ
crxTHp6hptafY9RpFqbV+AbnrDqYTK0/AtvPSHED94hP1V0vrpNTxWfrBEGpBI3YZB64y/hGFc4p+E/wBdrMjvxQR2bhjWSUrm1C/4ayX/Jz/APJCa6S1zS1+g6g7fvExsu5F3SrMMtw2+WlENTuOWpc51CzDMZbYSnt5iVJQvpURylXbmsaZDdppk5ecdSwgeq1eC6hUA/dPZ7SfRTL4tP7izI/8rWr+spqY9mkGFA2oaTR4MNiO0vELY8pDTYQkrXHStaiB8VKUpRPqSST3NVz8QnI9QtYtpbuHWTQnUMZPk1zhzYVlh4/MuUiPAae6w7McjNLYjvEI7seapaepPPx4mralnrePbVsMi5VhWeWa5Ydj0C13W2TMNuqJqXmmw3wyx7OVyASjnlkLABHVxU0PJR2gO++09MEHsbHcbZqK/nqUMP3XDrit3FxvF8lV3wnum3at7mLdFR5cSJkDHlMp7JSEyZ4AA/MAK+Hh2t606y23VvUbENc4eKXO851JkXeNIxZm6OulTaVNK81x5BSgBS0JRxwOg8etfXwzIOe4FrZrE5qNo7qRi8fUO5NzrPKuuHXJiMoJkSllDrxZ6GT0vpPLhSnsRzzwDpjuK7lfDc3J5dmenWk941C0kzeUuS7GtbLrobbU4pbaVKaQsx32StaApaShaSfifdilDPC8S00g2dzhhJB5xF8oU1QXvrll4qYo3iCJHebZ34qxudeH1nGp+tuIa853uTMrJMOehrirhYezDDrceQXkoX0STzyVKHPHorjvV0ap9iusW4Td95GKWnQzKNH8BfWhWRZDkTio9xmxPVUO3s9CVJLo9xUjkhKCrjhXTVv220NNpabSEoQAlIHwA9K0hzKYZkJJA5xJ68ecQQTSQ9+MXMAE8sh09LDOQOe/jE5zcbfp9ptpc1Kcj2rMsk67spCuA4xG8vhtX1dTwXx9LY+ir/2q3wbTa4dqtjCGYcOO3HjttgBKGkJCUpAHwAAFVY8R/arkO6DRWMxgaG3MwxGabpao63A2JiFI6Xo4WrgJUoBKkkkDqQASOeRpW33fPqcMRtWl2qu1bWGRqZaY7dtWIOPkRLgtCQhL7r76m0xwrgFalcoHdQJB4GdA+SpT97FP4gWgCOUR1lXrjzU36YSORxE+oIvlZR74ZDbbO63dC00hKEIviwlKRwAPuhL7CvlsWXrFrRqJuBzzD9a4mIXWZmhFxYk4wzdXXGE+amMAtx1BQhCUrQEgEe7X8+HziO43ANyeruUZZoBeWbLm95eTJuypCIsSG4ia8pS2faOhyU1+EVwptJ56R279tczTBtyXh9brcq1o0j0vumf6YZ7IdlXCBbWXHfLS64XS055SVqYcacUvy3CgoKV8epICjFNmzipYeFhnc6WkTzjPJTWmo/aCy58QOjeIIMcRMxnZT9q14e+f63akYnqpqDua86/YctlVufg4YzEPS0+HkpV0Se/C+eD8OTUX+MP/AOVNA+f/AKJJf/aiVLuMa8bit3DLeGYhoJlWkOJz+E5BluSLUxLEM/tjFtZKEqU84OUh/kpbBKuOrprRfFk0fz3ItPNM8x00xG435nT+8LVKhW9hch5thaGuhzoSCooBYCVHg8dQJ7cmpk0XUi6zW1Gu7kSfhzucoJqB4xfhu4scP6ThH1lYcpj8SjNbzguzLP7jYn3GJVwZi2kutkhSGZMhtp3uPpbUtP8AnViNuumO4KToHp3Jwvc/abdYnsYtrkCGnAIzwjtKjoIbLhkArKeeCogEkEn1rI5A/cN+m3jL8Fe0qyrArTfLQ2mBPyyOmHIXdELS635cUFSzHQtCOXlFPVyQlJ7kVS207ld1uzayjbnrFtXzvMIdmecasU2yQ3nnA0VE+U2tLa2pLXUSUqSoFIJSQewTFJvhvq06ntOwkcQARHOSTyNs0ecbKb2ZDEDzOEzytB4i6tHta2JvbadX8s1bGr72RO5jHfRcLamxogsec5IS95qel5fHSQsBPHY
LPeq/aOWq2zfGS1PkS4LLzkCzvSYyloBLTpiw0FafoV0rWOfoUatPpJdta9TL6ddta8Ln6fY7YoT4xvCmlOzbitTieHZ01DKOpbvl8oajpQVJC18gqINVN0muOZWTxMM513u+iWrcTBMlhP22Fd3NP7x09XlR0ocW0I5dShSmFAEp5HUkkDvxehLNqptNg2nUAG6RYczoM9MwQIqw/Z6jhcuewk74Nz0GZ65EE9DNUokVrTLNnWozSFyLDPLykoALhEZYBUfj27d6pd4LjLKNsuSvpaQHHMxkhawkdSgIsbgE/Hjk/wA5q3OuOXR7No7kM1mwZNdn7rZZceDBtGPzp0t552Ovy0KZZaUtokkAlwJCSeFEVUvwmYGWaW6OZBpvqXprnuLXt3IZN2ZRdsSuUZl2MYzI6kvrYDXVy0sdBUFE8cA8iq7N5atefuNHZ8kdBfkp2jzUqMffce7YHc25rTLzb4KvGstZXEZUVWJMg9SAfwotDgC+/wCMOB39e1SD4ybDTu02A8tAK2cugKQf3pLMgH/QTUa3iVmS/FMt+vjGi+qzuAx4KbU5ek4DeOgKNvWyXA2Y3mFAcUE89PoCRyO9SR4rCMm1P0FtOm+mum2fZTfJd6hXctWrELnIbZipZe5U46ljoQvlaQWyrrBPdI4Nc1UE7HSA+9/8pP8AbflyW9MgbXUJ+6P+3Hxtz5qyGPvuz9o1ulS1F1yRpy046pXfqUq2Akn8/NVf8Fz9zHkn8s5X9Ui1NFk1OZt2z6BAe0/1I+7kfCW7IbKMEvBm+3ogBos+WI3PHX28z9r/AI1Qr4S0TL9LdKMg0v1N0wz/ABW9Schfu0X7rYjco0Z6OqK0CRIUyGkqBZUOlSgSSkJBJ4r0Xkfru1O0LRHHzk/C/K64Wg/qmzN1DjPDyR8bc7LVZMSJJ8a1gSYzTvl2API60BXStNoPChz6EfA+tZvxq2GToRgsstp85rL0pQ5x7yQqI8SAfr6R/MK1sv5gfFPGvatG9VRgKLebWL18gbwWyv7mlnr8v2bzOjzT089P1+netn8W235rqrp5h+nemGlmoGV3SJe03iWq04lcZEdhj2ZaUhTyWfLKyXR7gUVJ6T1BJ7Vwun9VoRo4f90u/tvyXdTIG1VCfu//ABgfG3NWy1aWqTtRy9x8+Yp3T+cpZV36ibevnmq7+Dt+5CV/Km4/9hmpV1M1MZn7R7sxbMB1FlXi94fKs8SzN4Pd1ThNVDDflOM+z9TQC3APMXwg8KKVK4NRL4UTOU6faGSdJ9RdN87xTIW79MuDaL1ilxhx3Y7jbRChIcZDIPKFjpKwrkDgHkV2CP1vaiMi0Rxh5Nul+V1wtkbJswOYJnh5AL9bc7KE7vk+5HwvtV8uuisIczrQ/NL89eTJa6gqI48rvy8kHyHwOlBDiShwIT0kHki9W1/WXQvXzH75qlotNUXr9OakZDDkAty4s1MdtlIea5ISfKZQApJKVdPIJ71HmgOv0hjTWbhO5PBc1s0+3zp8FEm84tcJUS929UhzyFpcSytKyWlJQW1HqPAIBBrU9hW3efpzq5rLq/acRuOG4JmVwRHxSxXCKuJIMRtxazIVGWAthBUohtCwlQST7oHHOWzeyKbrtDLO1AGGGnfNhvEX9krfaPaNRvtF0EbyZlw3ancZ4hRnsa/4SHc1/wApO/7xRUp+LqlJ2aXUkAlN+tZH1fhTUJ4VK1E2m+Ilqtk+R6J6gZVj+o6ZC7TKxizKm+aXn232yCSlvgELbXysFJ4J7d6lDxJ7lqBqftej6b2LRrOZ+a3e42+fJtNkx6fdWYDSCpwhc1lj2dSwOhKkoUeFEgcgdVcrwXfo/ZgMwGA8w+SOgz3aroYcO3VyciXEciyAepy3prnnt8088J2z3jHZDsebOwXH7SH2yQptqU3HadII9OW1LTz/ABqsTs1tlitG1TSmJjjbSYSsVt7/AODA4U64ylx1R4/GLilk/WTUYYTpxF3I7DYO3/JLBk+
K3hrD7fZpTWQY7Ntq4VxjtJ8paRIaQHkJdYSolsqHSRyQSKq3tz3Cbvdk1vO3PVba3mWc2u1yHUWGXZI7zqkoUsqKGXW2nG5DRUSpIBSpHUQfglPoVXj9a2hhzc4OadCBisO88iuCkx36tQcPdBBGoJw39IXQ7B9uejWnGpmUav4ZhbFtyvMufuxOQ+6oPcrC19LalFDfUsBSuhI6iOTUlVCOhsnXDUi8uau6x407gUIxFQ8ewpE4vusNOFKnJdwWkJSt9XSlKG+keUnq595R4m6sS0sa1htAy3cPnGk75WwcHkuGpz38flOsWtC5q+L1Gjy8926x5TDbzT2QTG3ELSFJWgvQQUkH1BHwrotkWM2PLMauGI362sS7TdYbkGVFcQC24wtBSpBH0cGucvjCvzIuY7e5Nvg+2Smb5OcYjeYEec4HYJSjqPZPJ4HJ7DmrO5FvIiPYtNtmC6PanT9SlMqjxcTlYjOYUxLI4SZEtTfsiWEq4JdS8UlPcc1jTh2xOZE+epbU+zlv+Wua0dI2hjpjytvu8z9dPnFslQnZPl2XaOaBbwbTjc2QWsNjqVbHkqP4GURLjl5P0KCW21c/xBVt/COatadnFqkQUo9qkX25uT1j5y3/ADQAVH4nyw3+jitp2lbNY2km3fI9OtUXGbnkWpntcrMHGFco65LZQWEL+IbSpXvfFalkduKhTaVbNSvD5yjLdFNY8TyS7aaXi4m6Y5mVjtEi5RGnCkIUiS3GQtxgrQhvkFPZaFccpV110sOCq5lQyTTY2dC5vtCcuW+LLGoA9gfTEAVHOjc1wgW9Tum6w3jNWMY9a9Jta8fWYWSWK+uQGJjPuugdIkNe8O/uOMqI+jrP01ufiuXSTe9jFlvM1nypE+82WU83xx0LWw4pQ/QSazWuenN8366p6f2OBjF8tGjuCz1Xq83m921+2rvkk9IRFhx5CUPKR0hSVPKQlPDiuCSB1eLxWGslz7QyNpBptpnnmU35V8hTlt2XErjLisR223OVGS2yWSeVpHSlZVyTyBwa46jS3ZSwi5qBwGoEtkx+8QTyE5FdTHB20Bw92m4E7ycUDoIHMxmCrT7dP3P2mf8AJCz/ANTaqjvhLpSnVjcmlIAAyKOAB8B7TPq2egeplus23PD3L/h+fWyZjmPWy23G2yMJu4mtyW4yUKQhgRit4BTavfbCkjlPJHIqovhkRs7031c1dOpWjupeLs6gXWPMs0m5YXc246v74kkodd8goZ4D6D1OFKQArlXavRqkO2+qRkWv9XNI7gEhcFMEfo9jTnNP0BB7a7lt+0LKRqR4h+5HJclc867Y8hFgs6HDyY1vYklpaWwfQEstqPHxWT8atvrXt40f3D2q1WbV7D2r9Fss0T4SVSHWFNO8cH3mlJUUqHZSSeDwOR2HFDdzejW43anuzl7xNt+Fy8wsGTBSshs8KO5IWlSwn2hp1psFzynFIDqXUg9CweoAAdU14Bub3L7qYreJYPtxyTSa3TgG7zmWRPqSIMc/tgt7S2W1SJBHKUL+ahRClDtxXLQ+02aixvtMABm0OBJmd03nOb7p6av2e0VHk+VxkciAI5gCI6b1JG+fbvG3D7a8hwa2QUKvdoZF2x4JT3TMjpJS0n6PMR1tf54PwqB9pm81crYJfsmvMhUjNNK4KsdVHc7vSZJAathKT3JWpTbZ+JU2ur626Cza7fFtkdby2ojKGG1POqccKUpCQVLUSpSuB3JJJPc1zZtu09dk8Ta4Y3jE9Ten10iRdS75amiPJEhp9fs7LifTj20F1A/eEj0FQxgdVds5Pkq5nUFoJLuZZi4SBmhdhpNrAeankNCCQMPLFh7lXi21aVHRbQ/E9PpKvMuUKCJF2fJ5L9xfJelOE/Hl5xff6OKk2lK1qP8AEcXREqjG4Ghszx38eZXNvdZcL1of4lWlO4PPmn2tOpcBuxpu60kxbe4tuQy4lxXo2UqfDp59UlRHPSeNi8U+y2nWzFtJ9OtNpUS
/55eMqbes0a3PJfdEBTCw9JJQT0MA+QouHhPbnntWR31a26k4Brfh2LXzQKdqbpS9anbk7Y7crzV3O4tuftklpLbhWzHHlqDa0BtS3EqJJSkDRcH8SfQrS6S65btj2UYC092nPWWwQ46ujnklQShnq49eCRWNAtdTY0+68uGhs+YO7zA3+6d+WtXE2q6o3MsDd49mJHQ3G8bs7r7i4y4+2HUmHJc85bWD3ZtxZ/HIguAn9NVt8Hy0WtraG5Kbt8cPXLJLh7Yvyxy/0pbQkL/fAJ7cGpV1d14wvVXaJf8AMNLrblOYR89xm52+yxrDjk24yFynGVs+U8iO0v2cpcV0qLhSPdPBNRV4Vb2SafaFDSHUXTTP8UyJm+TZjabziNyixnmHEoUlYkrZDKfmqHClpPIHAPIrWiHCvtGMXLGjqHyR2vyWNTCKFAMyDndsMA97c1K28PCNq8i04Zn+495yKzhV0DuOx4b7qXpkpRQREajsgrf6i037iRyOn1Snmq2+KVleqOabUGr3etPGMPxlzIreuPFuszzb28opd6FOss8sxRxySkuur78EIIIr0eJtiOs9n1w0Z3BYbgd2zTF8EkNvyrbb47kjyJTcpLxU4hAUUB1CUJDnBALYB+APk3y5jrXu12pSpmB7a83x+wWu5QrpI+UMcNXaZ09SCItvZLjimk+Z1qdWU8pHupPvFPI+X7Ni3PuNAA5vmPF0TyjM3XUzyVw3Qssd5Id5RyJjmSvd4hupF+xHw7NPbLapbzKs0i2G1z3UKIKowg+etBP8ZTSAfpHI+NXj0Vstox3R/CLHYWm27dBx63sxktgdPliOjg9vp9f01T7K9MMo3y7DrbgkbTXJMIyDFoFrXYjlDDcQ3C4RI3lO+WjqK0MLSVoS44EclYPTwkk43a3vH1W0twO0aCa8bX9YJWY4swi029+yY6ZTdxjtjpa6lrWhCSlISnzApTagnq6hXo1HDx9pYc3PDhxbBFupyzuuFgPg7O4ZNYWng6Rn0ETw4LXtpjbbXis7gkNIShJgTlcJHA5MmGSf0kk1++IelI31bW1ADk3WICf/AN5s/wDxrF7Zsc3M4/v81P1hu22++G1ZOZMCW6JjTEWAXHGHAUyH+hEroDfSvyer3uenkDv9t8zub55u+0UzrBNF9U7/AI/p5NiyLzcIeC3Ytp6Z6HXEt9UcF3pbb55QCDyACa59m8n/AKeHe7hnh7VzwuL5XW20ef8AXsPvTHH2fWxtmt88U/Kboi76BaasXpNqtmS5w1JnSnWA+0gx3GEtKcaJAcQkyFLKCQD0j0qbcz2+7jM+xG84Pk+6e2ybRf4D9umsjTyMkrYdQULAIk8g8KPBHoe9aN4gm3O9bvdA7TfdLIs1OVYvMVdrPEuMN62yZTZBQ8wW5KW3GXD0oWnrSnkoHoFA1F2iviBbnLZj0LTTUrZbqVkedWppEJcyDBfjNzVJHSl2R5jJDJPA6lgqQTyodIPApTa11N9F482ImN4IaLDhEHetHuc11Oq02wxO4hzj6zIVldnO1L9iJp5eNPY+okrLIVxuaroyt+3iJ7MpTSELSAHF8g+Wk+o781VfwcYkUXHXeYIzXnpyCI0HegdYR1Sj09XrxyAePSrd6Zo1Gw3F8h1l19amOZTkAaWvHschyrq3ZITfV7PBjtR0LW+4C4tTrqU8KWs/iITVS/Cntec6ZX/VCy6maTai4pIzC7RZ9qduuH3JiM4hPtBWFvlny2iPMT89SQeexNbMc51d+I38OOuJkC1iQAct2ZFzi4NbQGHLGD6Pkxuk+oyyHy1xSmH4xOkb8VIackWJnzVJ7FfLM5J5+n3QB+YV/XjEf+VNBP5SS/8AtRKx+s7uaXnxNcC1qtOjGqkzB8Wix7ZPvLOCXdTQX0SQtxCfZ+taEl9IKkpPPBKeRwTuniy6aag5/p7ppqZpxiV2v6MPvK5syFFguqlNsvIaUhxTHT5iQFMhKgU8pKhyB34yoODKOz1HZNq
kngC+QeUGVtVaX7RVa3M0gBzwERzmy6AD0H5qxFhw7EcWfuErGMVs9oeuz5lT3IEFqOqW8fVx0oSC4vufeVye9R/oXrhfNb7QvJUaO5dh1mRFbLbuUMphSpco91oZjcqUWUj/AI5ZSFEgJSQCRGGzjcnuF10y/UKyaz6GLwa341LS1apPskmP5hK1pMdwvEh5xKUpUXG+lPB+aOU86YT4hp6wT0n69N4nIOHh49JA6n66d1aelKVVWSlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlEXK7xLNT8Iue6nQqRZL6xdWsEuyZGQuW/mSi38To6lIcU2CAtIZWSj5w47gc103xLNcPz60Jv+EZRar9bVLLftVulokNBYAJQVIJAUARyk9xyORWapSl9nRFE3u508XET8LJU+0q+KNwH8uXxSlKURKUpREpSlESlKURKUpREqLbTbLhfNx9/yebb5LVvxbGIlktrzrSktvPzHlSJZbURwoBDMJJI9DyPhUpUoLODtRPqCD6EobtLd8ehB+ISlKURKUpREqmW57f5nW3XXGJpVB2z5Bl9smwmX4lyhSnG1zXnAeUR0JYcSvoPCSOrq557Dtzc2lVIJcDNteNvoqwIAIi+nBQXtS06zawY/kGqurkFqHqHqdcRe71EQrqFsjpQG4dvCviGWQAf461+vrU6UpWjiDAaIAAAG4AQB2671RoIuTJNzzNz9aJSlKqpSlKURKUpREpXkul3tVjhOXK9XOJb4jKSpyRKeS02gD1JUogAfnrBaeanYBqxZH8k03ymDkFqjTXrcubCUVsl9ogOJSvjhYBI95PKTz2JoLkgaIbCStopSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURctPFk1Gw68ap6MWmwXpm7zMMvEqXfWbfzINvSXohCHejkJX+Cc9w+8OnuO4rpZhWf4RqNZxf8Cyy05Bb+oNqkW6Wh9La+kK6F9JPQsBQJSrgjkcis/SlL7Oj4Rvcu6uifglTz1A8bgOxJ+ZSlKURKUpREpSlESlKURU93Y78c02yau2TT+DtyvmY2i7QESUXSJKcaL76lqSWWEpYcStSOkEpKgT1jsBwTIm1HBs8EfKNeNYrKLRnmp8tmY/aioqVZbWwjogwCT+MhBUtfYe+4eQCKn6lKfkaZu69+BMxHYTnHMpU85EWFrbyBn84ymDoEpSlEXNqPuD1G2jbsNWMt3PaUZffLDmcptnG8rs0L2tiFa2VuFiK3yUoS2UuIK0BQWFo5KVFXNWFxTffj2q/EHQ7Q7VHMbg97rbj1lFstjKj8ZE2QsIaT9PSFq+hKqtBSopjDTbTdfCI3Zb+O82nPNS84nuqNtJn/AFw3bhZR/odptI0vwdVmuSoBut0uc6+3RNvbKIjcyY+t5xthJAPloKwhJIBUE9RAJIqQKUqxM/DkBYDoFUDU5m/U3J6lKUpUKUpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURRfuT1on7fdIbxq1EwWVljFiLTk2DFlpjuIjKUEreCilXIRyCRx6cn4VT/ABvxacmzG2C9Yhst1EvlvUtTYl219ySyVp+cnrbilPI+I5roDfbHasmslwxy+wm5ltukV2HLjujlDzLiSlaCPoKSRXNTY9fLrs63f5zspzSa4MeyWUq5YnIfPCXHejqZIPpy8wOlXH/GMBPrVaIL6xpOOYlvMZjtcakyFaqQ2iKgGRh3I5HobHQCFv33z/U3/AJ1Z/opH6pT75/qb/gE6s/0Uj9Uq/lQ/uZzO/WjDYOneCSyxmmpE4Y3ZHEd1Q0uJKpU7j16Y8dLrnP74IHxo4kCGiSbDmbDlfXTNQ0AnzGALngBcnsvzaxr9ctyul6dUJem0/DIsie
/EhxZ0sPuyG2SELd7IR0jzAtHBHPLZqYawWDYZYdO8NsuCYvEEa1WGCzAiN/ENtpCQSfio8ck/Ekn41na1qYcZwZfV+uazp4sPnz+rdMkpSlUV0qq+4TeVqNtpxyRnGou2ySMZRcRbmJ8TLobq3VLKvLV5PQFjqSgnjvx8atRXOjxVJsnUzUXQja9a3VFzK8iTcJzaT3S2XEx21kfQAuSf82qEOdUp02G7nBvc37CVdpa1j3vyaCewt3MK42hWqmoWrWPsZXlmjj+D2q5wI1wtS5N7YmvSm3k9Q62mkgskJKTwo8+9xwCDXp121ijaPYpFlwrYb1lGQzmrLjFkQvpXcrk9yG0E/iNpAK3F+iUJUfXgGQYMONboUe3w2ktR4rSGWkJHAShIASB+YAVVPHLgdZ/EHyV+Srz7JoTjTFugNE8oTeLmOt54D98GElv6uK1fhqVhTZYGTxwi/c2bOhIMaLJhdTpGo+5EcsRIA6SZjMgFWO07smW2HE4cPO8qOQ5A51SLhNSwllnzlnqU2w2ke4yjnpQCSrpSCpRUSa2WlYTMM3w7T2yO5LnWU2rH7UyQlcy5S0R2go+iQpZAKj8EjuT6CqucMzZWa05C6xmc56cQvGH2KNaxPmZdfBaWked5fktJjvSHnz7p5CG2Fdu3JUkcjmtomxzMhvxEyXo5faU2HmVAON8gjqSSCAoc8jse9Vauu5LSG97hbRkV0vt1g4vhOMSXTdZ2O3KNBbnXF9CELcfdjpbbQGIr3S4tQQoOq6VHpVxaK23K3XiBHutonx50KW2l6PJjOpcaebUOUrQtJIUkjuCDwaBuKnJ1J6XiPTENYPJCYfA0A66z6gHiOaiTSTUzJImd3nQDVea3Jy+xxU3S1XZLSWUZFZlL6EyghPuofbX+DeQkBPV0rSAlYCZkqp+/uXI0ytmnG5yzhTc7TPLIqbgtHYvWacRHmMq49Qrlo9/QjmrWMPtSWG5LCwtp1AWhQ9FJI5B/mqWnHTxHMEtPMAEHqCJ/eBiBChwwPwjIiR3II6EdAQOK+lKUqFKUpSiJSlKIlQvuI1y1D0Nsk/MLPoorLsatUFMudOayJiE6hwr6PKQwtCluKPKOOn1KwAOamioW3H/APjJcNM9KUe8MrzGJKmt/voFtSqe7z/FK47CD/ygqpBcWgGJIHcxJ4CZ0yUggAkiYBPYTA4mIHNYnZ/u+xXd1iV7vtnxuXjd0x24ewXG0S5CXnWgpPU251BKfdVwsdwCC2oVKWpmS53imOi6ae6bLze5B4JXbUXZm3KDXSolaXHgUk8hI6e3PV69q56w/wD8ybxPXYZ/vLANdW+pH4rLUp9zt9QKJYI/iokV0B1tzZWnGkWYZwyOqRaLPJfio+LknoIZQPrU4UJH1mlR2PZm16YuRcC/mbZw7/GyljMO0OoOM3EE/dddp+t11E23ndHqluCRBv0LbHecbxKTKeiu3y6ZDFHSWlKQ4W44T5roC0lHIASTzwTwasfWn6PYSjTfSrEsDSeV2OzxYTy//SPJbSHFn61L6lH6zWD1K0MjalXxq+Paq6mY0pqOmP7LjWTvW+Mvgk9am0AgrPVwVfQB9Fa1QGOwNvBid/H63rKmcbcZtOn1rv5aKTKVA37EiD/hEa8/b2T/APa1sWA7fIuAZPHydrWHVe/qjocQIN/yx6dCc60FPK2VABRHPIPwIB+FVEaq5W65Zp7gObPQZ2Z4Pj9/k2guOW966WxmUuGtQHUplTiSWyelPJTxz0j6Kq34ViUp20XNKQABm17AA9APNRVwJb7MaM6/IeQ02hBUpa1BKUgDuST6VTnwp5sORttuzUeWy4tGa3pSkocBICnEFJIH0juKih+1qAfc/wDNiVf2bT++P7Hq5lVo386Wa1ai6KvXXQHUfLMYy3FlruTUSw3Z+Cq7shHDkdRZUlSlgDqbBPHUCnj3uRZelZ1WeI3CDB+YV6b/AA3Yolc2tidpxbdhpkuVedye4O3Z5jihFyK
2NamT0AKPPRJaQokhtfB7EnpUlSfoJsx+wos/+EtuM/8AebPqqe8rS3MNk2v1t3x6DW1Rxu6SwxmlmZ5Sx1PKHmFQHZLT5788e48Eq/GAHQvSbVLD9adPLJqbgdyTNs19jJkMq7dbavRbTg/FWhQKVD4EGtw4V6fitEEWcNx/I5j4CyyLTRf4ZMg3ad43HiMj87qErrsYizo6m4G7PctbXSPdcZ1Ikr4P1hxKuR/NVWdf9pG/PRBkanaNbstTNSbTYnUXCVZJ+RT0zfKaV1qHkl9TUtHCfeSAlRBICFV1BpWfmaQ6mYIy17haDCRheJB+s1q2lmoFp1V03xrUexqBhZHbI9xbTzyW/MQCpB+tKuUn60mtY3J60T9vukN41aiYLKyxixFpybBiy0x3ERlKCVvBRSrkI5BI49OT8K2/BsEx3TqzvY/i0dyNbnZ8u4ojFfUhhyQ8p5xDY/ER5i1kJHYdXA7cCslfbHasmslwxy+wm5ltukV2HLjujlDzLiSlaCPoKSRTaCXhzqIg5gbtY5aT1UUAGECrcCxOp489Y371z+xvxacmzG2C9Yhst1EvlvUtTYl219ySyVp+cnrbilPI+I5rKffP9Tf8AnVn+ikfqlaDsevl12dbv852U5pNcGPZLKVcsTkPnhLjvR1MkH05eYHSrj/jGAn1rp1V4a6myqzJwnrqDxBVLte6m7Np7jQjgQqB/fP9Tf8AAJ1Z/opH6pVm9rGv1y3K6Xp1Ql6bT8MiyJ78SHFnSw+7IbZIQt3shHSPMC0cEc8tmv3czmd+tGGwdO8ElljNNSJwxuyOI7qhpcSVSp3Hr0x46XXOf3wQPjUh4Nhlh07w2y4Ji8QRrVYYLMCI38Q22kJBJ+KjxyT8SSfjRhBa4kbgOeZ7COBk6hHggtAPE8sh3M8sPFZ2lKVVWSod1Y3PYPprlULTGy226ZvqLdEhcPE8fQh2WGz/AMdJWohqKyOxLjqh27gGtV327pGtq2h8vKrWGXsrvbptWOx3AFD2lSSVPqT8UNJ5UR6FXQk/OrH7D9urmj+l6M/ztT1z1Q1EQm95Rd5qi5KKnfwiIxUruAgKHUPivq+ATxFMeKXHJrYBOpJuAOlyemZtLz4Ybq50xyFiT1sBqeAvuxvO72Zbzd4+BaWW50p8xFllZDOfeI9Q2uU3FDaF8duUtrTz8SO9fDQndHj2sGTZBpfkOMzsI1KxI/8AhrFrk8h1aWzxxIjPo4TIYPUnhYAPvJJSApJM21za3l3xWmfiXbfczxlfs9yvrEO03QNdjIjPTHIxC+PXlDqh3/eJ+gVamcVenROTzh5GCQeVoIvbKNa1PLQfV1YJ5iQCPWx35yuktKUqFKUpSiJUJ5jqRkme6vjQXSq7KtxsTLF0zjIGm0uLtkdw8sQGOoFPtUgAkqIPltAqA6lJ4lDOcst+B4Xfs3ux4hY/bJNzkd+PwbLSnFf6E1Anh92O4r2/sarZP+FybVW6Tcxu76vnKMh0hhAPr0JZQ2Ej0HJ4pT8zydGgHqT5Rys4n8MGxR/lYN7jA5D2j6gfxSMlZelKjy/6/aV49f5mKu32fdLvbePuhDsNjn3l2CSOQJAgsu+QeO/DnSeO/pUSJhIUh0rDYjmWL57Ymclw6+RbrbX1LQl+OrkBaCUrQoHuhaVAhSFAKSQQQDWZqxBFioBBySlKxeTZRjeF2KZk+XX632W0W9vzZU6fIQwwyj6VLWQB9Hr6mqkhokqwBJgLKVj7/bJN5sk61QrzLtMiXHWy1PidHnRVkcBxHWlSSpJ4ICkkdu4IrQ7LuT0Tvbbj6M4btsdEJy5IlXuDJtMd+G3wVyGHpjbSH2kggqW2pSQCCSOakK13W2Xy2Rb1ZbjGn2+cyiTFlRXUusvtLAUhaFpJCkkEEEHgg1LmyCD9Z/keygOggj6+pHdRdojqtfMju2RaSalojR9QsHU0m4GOjy2LtBd5Ma5R0EnpQ6EkLRyfLcStPPHSTLdVM3eXFejuuGh
24mAssMuX75A5GpJ4D9tuAJb8z6Q062XBz8TVs6lpx0w85yQeYgz1aWm1gSQLBQ4YH4BkQCORkR0IIHCCTMqENr+e5bna9WBld4cuAx/Uq82S29aEp9nhMhnymR0gchPUrueT39am+q47Mf2zXH/2v5D/ANTFWOqG/sqR3sYe7GypP7SoP33/AN5SlKURKUpREpSlESlKURKUpREpSlESlKURQru03Gy9rWlw1U/uczcutzE5qJcG4s5MZUNtzkIeUVIVynr6UfDgrTVX7H4r2YZPamL7jWyPUq7W2UCWJkFx19h0AkEpcRFKVcEEdj6g1efUfAse1SwO/adZZEEi05DAet8pHHcIcSR1J+hSTwoH4EA1z88M3Psh0N1a1A2JanSimdYp8i444tw8JeSOC8hvn8VxstyED6C4arRBfVfTcbxib09odPanWYVqpDKTagGsO65Hvbhmtq++f6m/4BOrP9FI/VK/l3xRNSGGlvv7DdVm220la1rbfCUpA5JJMTsBV/qg3c7dbjlEfH9u2LzHWLvqbIci3F9hXDkDH2QFXGRyPmlSFJjoP7+QOPSpMmGtzNh/ngMydACVDYzdkLn/ABx3DU2W0bddXrhrzpFY9WJ2DysTbyBC5ES3ypQfdMbqIbdKglPAWB1AcfNIPxqSq8lptVusVqh2Szw2okC3x24sVhpPShppCQlCEj4AAAD81eutKhaXnAIGizphwaMeeqUpXhvl9smMWiXkGSXeFarZAaL8qbNfSyww2PVS1rISkD6SazJAElaAEmAvdSoBc387N2nFNq3C4mSkkEpkLUP0EJ4P6K/n9n5s2/whMV/pnP8A7SpUKwFK8FivtnyeywMjx65MXC13SM3Mhy46wtt9lxIUhaSPUFJBB+uvfUkFpgqAQ4SErX8/zvGNMcLvOf5nckQLLYoi5kx9XfpQkeiR6qUTwlKR3KiAO5rYKqbu2uB1J160N2wBXXa75d3swyRnn3XoFtSXGWVj4oceT3B+KBVCC5zabTBcYndqT/CAXcYhXBDQ57rhonnuHUwBxKmXQybqZlVjkak6lOO2xzKA3KtWMBtATZIHcsodUB1OSlpUFuknpSSEJACCVSbX56dhWJyvLsXwWwyspzK/wLLaIKQqRNnPpZabBPABUo8ckkAD1JIA5Jq73N0sPrXXnqqNDjncrL1F+udy1Iw2zsaoaeuPXVrF0uSL1jHloULxbuxe8lRHUiU2lJW1welfCkKB6gpPqse4PSq/3e32Ri83S3ybu55VtVecfuNqZnuEchEd6Ww22+ogEhLalEj05qRSAQQRyD6iquDoluem6eO8b1ZpbMG415fLgVhcJzLHNQ8RtGc4jcm59mvkNqdCkI9FtLTyOR8CPQg9wQQe4rN1U3ZzcnNPdYNcNrrqym34hf0ZHjjJJ4ZtlzT55YR9CG3Fdh9Lhq2VXOFwbUZk4AjqMuYNjxBVBLS5js2kjnuPUQRwKUpSqqyUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlUG8V3RC83HCcd3RadByNl+lktqQ/Ijj8KYHmhaXO3c+S90r+pK3D8KvzXgv1jtWT2O4Y3fYTcy23WK7CmR3Byh1lxJStBH0FJIrKqHwH0zDmmRzH55HgVpSc0HDUEtNiOB+pHFR7tn1ws24nRLF9V7SptC7tECbhHQefZZzfuPtH6OFg8c+qSk/GtU0h/8Ali1kyjX2T+FsNg8/DMK57oW004PulPR8D5shAZSoeqI30Kqgmhs3WTaprnqhsJxVmdIXnslPyQufHKba28QF3E/UmEVrVxz+FjJH011awXDLDp1htlwTGIojWqwwWYERv4httISCT8VHjkn4kk/GuollQjaqYhrhYbifa5YRLeIcdQucNdTnZnmSDc7wLtP8VjP7u4rO0rx3i723H7RNv15ltxIFujuS5T7h4S0y2kqWs/UEgn9FVVtGrWpGtmkmT7jZeTXTAtM4ECfNxq0WltpF1vM
eOlfTKlyXELUyl1aPcZYCFccEuK5Arnc+A4gThEn5dToMzB0BI3azEQN5gcT/AI1OQtqRM8aQ5fe85jZTf7i82u2Iye4W2ypQ2E9MSIoRlEkd1dUhmQoE/BQHoBW/VTS6zM32VYRpBdk5tdb9Yb5eLXjGUWC7Ft/y5U5KlOTITwQHULS/1qU2pS0KCjwEkc1J++XJNWsU215NedFrgbbkSHIjSrinjmBEW+hMiRzwegIbJKl8HoT1K7dPIvV+yZa+EhnUBonrIcTx5hUpfav3YhiHIkwOkERnbip8rnDix/u9+L1fb2f74tGj9jVFZJ7oS+hoNEfnD8t4/wDR/VVgsU1mTtj2n49kO4jV22ZrljdvWtqRAmJlPX2S4tao8aKU+9JVwpDfmAd+kqJ45NaT4bW33UDT2yZvrnrFbHLbmmrF0N0dgPpIfhxetxxIcB7oWtby1FB7gBHPB5AuxmDai6ZFMOv+8RhbHEAk8BfcqPfj2aBm8i37oMu6GAOsb1dKqbbD1qn6z7qL1IJMl3Up2Ion18tkOJQPzcE1cmqe7VYysD3j7mtNpg8tV4uNszKAD281iS2vzVJ+oOLSk/XVKX7YjexwHPEx3wBPIFXqfsQdz2k8oc34uA6q0+b5fZtP8Ovmc5E8WrZj9vkXKYsdyGmWytXH0nhJ4H01U/Zjj173Jur3na4sC4XC7zJDeB2R/wDCQ8btjbhbDjDZ932hxSFdT3HUQkcEBXAsprfp4rVrR7M9Mm5Yiu5PZJlsafV81tx1pSUKPHwCiCfq5qs23TXjG9C9qrmmWpj8bEtRdKbDNjS8dujqY781TAcUy/ECyPaWnR0ELa6hyoj6OaNf4ZqVDctaMI/mxEcbNHAOO8q7mGo1jBk5xn0wg8CSTzaFN+g7DGVS9S9RJjLchjK8rlwI4cSFIXAtyU29CeD2KFOMSF/QfNP01Bvh2396LmW4XSKzPqXhmCZ461jbPV1Nw2n3JHmxmj8G0KZBCR2HWT8aztt1kXheieO6IbcGmNR9UGrKxAWq1OJkW20zXGwX5txmJ5ZZSlxa3PLKvMWeAE9+RJm0rbdbtsmlTeHqun3ZyK6y3Lxkl4IIM+4u8eYoc9+hPASnnvwOT3Ua1aw03uvZrQyfvEFt+MBpk73QNYyLxUY2BcuxR90Q639QAG4TuWreI5DZnbKtUW3wCG7bHfTz++RLZUn/AEgVLehc+RddEtP7nLUVPy8WtT7pPqVqiNkn+c1B3iZXZ6PtKyDFYCS5c8zudqx23sp+c8+9LbV0AfElLa6sfhWPN4lh1ixVk8os1si29J+kMtJQP+zVaPsVToXNHUNJPo5qtV9qkODz0JaB6td2WaqH9Ttt7Op2ULyheuWsGLFbLbP3PxjLV2+EOkcdQZCCAo/E896mCof1O212zVDKF5TK1j1extxbLbJg4zmsu2Qh0jjqDDZ6Qo/Ej1qpEkWVgYBWpfsLY/8AhT7jf/eC7/s6fsLY/wDhT7jf/eC7/s6fsJ7H/hI7iv8A3mT/AP40/YT2P/CR3Ff+8yf/APGrKq3TSrb01pVkLuQo1q1Zy0uxlRvYsryldxiI5IPmJaUgALHTwFc+hP01LNRNpVt2tulGQu5FE1c1YydbsZUb2TKsxlXSIkKIPWll09IWOngK9QCfpqWas4yBdVAglKheN/44btZkj58TTnDm4qfoTPusjrX/AJyWILX6HvrrcdVNbdKNEbVHvWq2dWzG4kxS0RTLWeuQpA6lJabSCtwgEdkgnuPpqvmyzczozrBkefzbVmEdvMMyy2bPj2WalTE1VtjMNMRVJQoDqT5DAcPTz0lageODVaXnq+XNoJ9MMdnE9JVqnlp+b3iB64p5S3CecLE+KrobI1P26L1Bx5hfyk0zk/d2K60PwvsnYSkpI7jhIQ7/ANDX9YVrkxuq0D0JiJfQ9cs0yWExkrKT3SbOFTJnUPglbkVnt+9kJ+mri3O2wbzbZVoukVuTDnMLjSGXBylxpaS
lSSPiCCR+mud3h67bcs0X3SawYve3pa8awBxbOOoeB8pX3TLbgkI/jmNFZQoj48j6abN5arqZytUHBzIkcneW2pTaPNTbUGYlh5PmCOLTJnQLo1SlKIlKUoi1rNdMtNtSmokfUXT7GsqagLU5ERe7THnJjqUAFKbDyFdBIABI454FYOybeNAMavETIsc0N0+tV1t7geiToWMwmJEdY9FNuIbCkHv6gg1INKDymQh8wgpSlKIsRluJ49neMXTDcttTFys15iuQpsR5PKHWlp4Uk/R69iO4PBHcVzy2Y4tqxtO3l5htKtPm5XptcYqsialIfSo2VtaT5Dzw5/BuL6QytvsVkIcSOkd7ibjtdH9I7Fbsfwy0pyDUfMpBtmI2EK7yZJHvSHuPmRmUnrcWeAAAORzyPTt20LY0UxSWbxd1ZBm2TyTdstyJ5P4W53BQ78fvWWwehtscBKR6ck0o+Wo6sMoLT+8dB/D7U6GALklqtemKRzJBH7u89fZjW5NhBlelKURKV8Is6FODphTGJAYdUw75TgX5bifnIVx6KHxB7ivvRFQbxXdELzccJx3dFp0HI2X6WS2pD8iOPwpgeaFpc7dz5L3Sv6krcPwq1G2fXCzbidEsX1XtKm0Lu0QJuEdB59lnN+4+0fo4WDxz6pKT8akK/WO1ZPY7hjd9hNzLbdYrsKZHcHKHWXElK0EfQUkiuS+hs3WTaprnqhsJxVmdIXnslPyQufHKba28QF3E/UmEVrVxz+FjJH01WgSC/ZgJxeZn4veE6B2fME5BWrAODa5MYfK78Oh44TbkQBdX70h/+WLWTKNfZP4Ww2Dz8MwrnuhbTTg+6U9HwPmyEBlKh6ojfQqp8rBYLhlh06w2y4JjEURrVYYLMCI38Q22kJBJ+KjxyT8SSfjWUuVxhWe3Srtc5KI8OEyuRIeWeEttoSVKUfqABNWeW02wD5WjPLiTwkyTulUaHVHYou7T0A4wIHGJXppVWdAcz1C3fWu76ySs0vuF6evXCRb8PtNkU1HkymGFlCrhLfWhS1KUsKCWUkNpCT1BfrXo2K6r6sayY/qFk+ouVsX6023L5VgxuS1bmYheiREpQp8hsDqLiiFHnkBXUE8J4SJa0k4SIOHFHCQO/mFvmCAJAEi4xYesE9vKb/KCqv8AiLxH9TN+W3rRy6ErsjhhSXGVfMX7RcFJf7fW3GSK6egBICUgADsAKpV4g+hGdXXJtON1eklgevuS6T3FqVPs8dJU/cLah5LxDSR3UpBC/dHcpcURyU8GQMb3G47uMv4t+lOtUXEsetdijXi7TGEQlXYSH3XmxEU3NQ43G8nyFF3raUolxsJKR7yq0DGzeF7we8nkYIPKAeojNWria/i+6WsA5ixHOSDyMqy1cvlxX923ixIutnBlYlos2yiTLT7zXmwypQSD6cqmOKAHxS0o/CptnbotW9VMUvOie3eAcw1Gj3Sdj03N2WAzYLVEQ4Ut3FySgFlUhbJSoMs9X4QKISAEoMz7T9smEbW9O1YVjs0Xa+zHRMyO8ugCRcJihz1KHJKEAEhCCewJPcqUTaiIqt2g5NEt4uIs7k0HXMmNDFapmk6gM3WdwANxzJECMhfIidl3Aa347t+01m5/for09/zW4Fptcc/h7ncHj0sRW/X3lK9TweEhR4PHFQ7qTqZrjoDjmn+q2p2cRbgMiym3WTJsYjW6M3brcxO6kj2R0I9pU4woJ5W46tLgC/cRyOI93VLznVbfbo1pBhsa0y2sFtUjPno12edbhuSQ4ptlTpbStXuFtHTwn1WfTk1hN4kjW7UzVfQjbhmsHDWzkWXs5G+jHpkp9xEOB3dW6Hm0BKOlbhHHPJQfoqNn87qZOb6gEfutdDu8Pk6ACIvM1/K14HusJJ/eLSW9pbG8uMzaOg9Ki3XfX6y6CYpdMsveGZXe41ptzlyfVabd1sNtpJHC5DiktIPPHulXVweQk1IGNXtnJsctWRx2FstXWExNQ2sgqQl1sLC
Tx25AVxRvmBI0j1mPgeyHykA6z6RPxHdQpv3nybbs51YkxVKS4rH3WSU+vS4tCFf/AFqjW47ZITFv25aXQ4wAbaw+zhIH/NGzXy3SYRJ1H256kYTCaLsu641ObioA5Kn0tKW2B+daU1r+yHLWM22l6WXthwLLeORre937h2KPZ1g/X1NGlL/rf/j/APlSr/0v/wAn/wAal3KRfFYxd04wWxeDAkC3+Zx0+0+Wryuefh19NcytqXiE6d7acIa0D3H6e5niGYW+4TXrvd1W8PiXJfkLcXKkJUQ91+8ByEOBQQCDxwKtZvbzTX/EWdPWdGc2seE2i5X4s5Pk94YQ5Et8dKUqbQ8pba0tNuHzB1kJBUEJ609XfNbjNRtumTaX3TGMmfxfUO43eC9Gs2OW9TFyuFxlrbKWxFZbK1pV1FJ80cBv5xUAOayLnMa+q0TPlw6mL2i4mY4kDcCtMLXllJxj3p0Ey282tE8ATvWW2k6aWfT/AE1l3mzavXLUxvO7q9lS8jmkcyvaENpSEgE9KUoaQOOfUHsn5om2oW2a6O33QTbXhOl+UPhy8WuG49PSlzrSy++8t9bKVehCC50cjsenketR/t43yHW7cJnmgl60ruWJysVclC3zJMhThmojvBtwOILaQ0shSXEgKUCknv25PVVa3xzQpmYBj8LYHKwjmuam5xpeNUESb83EnLO5vwVqaoAxk7287f8AXPT668y9K9B0qlOWxXeNcr4hYbS4+n0X0OFzoB5HDB7e+rm/9UI2b27HNte4Xcbimrl8tuNXS/X5vIbRMu8pEVq5WhbklwOsuOEJWEF0BYBPSo8H0rGmQNoDnZNa4j8QgDqAS7+GdJW1QE0HBuZLQfwmZ7wG/wAUarKeLc9CuegmKadRoLcnIMxzKBbbOOPwjSylYWpPH0hSWz9TlXGwTFIGCYTj+E2tITDsFsi2xgAce4y0lsf6E1QvcDmsXVnept5yC/wnrNpFY5lwm2vJbqUxoF1uDLfndaFOEdLfUyyG1r6fM4WpHUnhRuRoll191CgZHncuat7HrvfHhiqC0lCfuUy22yl9J4ClJedbeeSpXPKHEEdiKtQB8FxyLnFx5NwsaOZ8xA3SdFWvHitH3WgDm4uceggAnfbVQP4rKAjaHcrmg8P2zILPLjq+KXBKSkEfXwo1bi2PLkW2JIc56nWG1q59eSkGqk+Jey5lel+A6OwOV3DULUGz2ppodyWkrU44vj6E9KCfz1bxppDLSGWxwltISkfUBUUf2T/xn+ymprXq040Zfq90fA91XTZj+2a4/wDtfyH/AKmKsdVcdmP7Zrj/AO1/If8AqYr3ZRrzleZ69yNt2iH3NZuOPQW7nmOS3BhUlizNOceTGZYSpHmynAQfeUEITySFkFIhl6dFgzNNnpTaSeg+phHWfVccg9/q8gdyrAUqs2Ja4awRd3p2v3pWO5FZ7di7mTT8gaiORpyELWlthlxpKy0lYWTyQOFoUkgIIPVKGpuY65Y5d4sXS/RO0Znb3Y/mPy5eXotK2Xuojyw2qM71DpCT1dQ9SOO3JmRha4ZOmOhIPqCkeYt1EeoBHoQpJpUC/wB1Pd7/AIJON/8AvPa/UK+kbVDdu7Jabk7UMbZZWtKXHBqY2roST3V0+wd+B34qQJMKCYEqdqVFusmV60xlwcM0Jw61zMjuTK5L16yLz27JaWEkDlxTQ633lE8IZb4PAKllKQOqq8jV7xB2cenZM3nGiT0ONmbWERiizT+Zs1cxqIpbXv8AHloeW4CTweGVkA9uatOJ2Ef7uG/Egc1ZwwtxH6sT8ATyV+qVFGh9s3O29y6ncPlGn12bWlr7mJxa3ymFNq97zC6p9RBB93gBP09/hUkX+92/GbFccjuzvlQbVEemyV/vWmkFaz/Mk1LyKYxE2zUMBe7CBfJe+tC1s1ZgaN4M7k71tdu10mSmLVY7QwoJeulzkK6I8ZBPp1KPKlfipSpXwrb7FdUX6yW++Nw5MRNxitS
0x5KQl5oOICghYSSAoc8EAkcg9zVbtXZiss3y6Jaeyvft+N2O+Zmtk90rldIisLI+lHW4R9BVRzD4gomxJM8gC53XCDHGEa8eGaouAJG4kkBvQkieEqedOrTmdpxaOjULI0XnIZSlSp7rDCWYzDi+5jx0Achlv5qSsqWrjqUSTWzUqq9j3NjVbVLP8Bs2smL4C9hF7dsEKySmGHbzeH2kjrkBMl1KfJU4ShCG0FR6eVLHUADngugDQmBuEDrmBqfUoGw2SdYniZPTI7h6BWPzC03u+Y1PteNZK9j91eb5h3Jpht4x3kkKSVNuApWgkcKSeOUlQBSeCNN0N1ZlamWe72nJ7azac1w64rsmTW1lRU21KSkKQ8yVe8WHm1JdbJ79KuDyUmsjohkmoeXaW2LIdVcU+TeUy0Pe323oCC10vuIbUUBbnQVtJbcKOtXSVlPJ4qGhLOF+IqbbCPlxNR9NRKmtp9HJtumFDbp+sMuqTz9HFS0RV8PfPdoLp5Q0jqDojv2ZfujsSGx3IM8CNVZ+ucnij6Z5Fpll2B74tLmCzfMMnxod8LY7OMhf97uOceqSSthfPql1A9BXRutb1HwHHtU8Cv8Ap1lkUSLTkUB63ykcdwhaSOpP0KSeFA/AgGsauNsVaXttMjmNOokdVpTwGWVPZcIPI/ln0Xi0q1TxfVzS7H9WMclo+49+tqLglS1j8Byn8I2s+gU2oKSr6Ck1G+3Rh3UrJMp3N3VpXl5YoWjEkODgx8cirUGnAD6GS8XJB+lKmfoqgm1K5ayYffM+8M64x54el34pVeWiUpttiJ6ri6k+qQ+wGvK49FySfWutNotNusNqh2Ozw2okC3x24sWO0npQ00hIShCR8AAAB+auk4HH9Yp+y4eXkbk77eyDr5wucY2jwKntNPm6ZcL+0RpDV66/CQkFRPAHcmsFnmbY7pthl7z7LZoiWbH4L1wmvcclLTaSo8D4qPHAHxJAqrV7y/W3VTb1etdchyq6YNb8gt4GGYjY/KakK9rUlmA7cJakKdW66t5pXlMlpCUqAPWe4wJcQ4MEkfOYHWDHJbACRiMAn4RPaRPMb1YXQ/Lr5n2nzGbXt5taL1PnyraENhATbTKdEP09SWEtqJPclVYjVnQi1a35LZo+pMlFzwOyo9rOMEKSzcrl1e47LIPDrLSB7jJ90rWSrnpSKii53nItouoGjOnEDMLlkmB51MTh4td1DTsm0ykMD2d+K+hCVlklPStt0r6QQUkelTrrdqladFNJcq1UvaSuLjdsdm+UPV50DhpsfWtwoTz/ABqtWdTptNYXa0nu0fGCHDiQcxaKQfUcKcQ50Ho4nsJBbG4EZZwFp/oht2zfcLqTJiaIae/JrAYFvxduOnGYXsyrksKlzHCjy+krQ2uI3zxyOFj4mv62iaIbfdQNFYmolx0Q0/nLye93u6MLfxmE4Woy7lIDDSOps9KENJbSlI7ADtWR2VYVllt2jC+5IHDl+o6bpl1xWscLXKuBWton6PwXk9vhWveFRmbmS7RrVjkxh5mdhl3uNilJcQU+8Hi+OOfXhL4B+tJrRrHMc+i72mNbPMk4+z4jgVm57XtbVb7LnGOQbDD1bPUK3kOHEt8RmBAisxo0ZtLTLLKAhttCRwlKUjsAAAAB2FaPqvl97x2Thdgxl5tq55Vk8S29S2wvpiNpckyzwe3ePHdSD8CsH14rTNeddsixTN8R0J0ntsG4ajZ0Hn4zlwClQrNbmR+HnyUoIU4E8EIbBT1qHHUOO+lr0gyPKtfotnvOuWeT7hhuLG5vXZh6JG9nuNweLTPkxkMeQhIZiyfcWhZKXE9RV6mjSXua7Se+G5HIxhOhNpkGLuAY1zdY7YvKD6gxnF4gibS1TZ9arl4rsZqUSU2jSNS4wPoCucQoj9CzUp7U9Zsp1PhZ5h2dvxZ2R6Z5XLxaZdIrAZauiGuC1J8se624pJ4WhPuhSTxwDwIz1OinBvEq0lzWQPLg55hl0xbzT80
yoylSUpJ+khSAKlg+3pEZEOI44qT47yO6h/7GqDmIn+Go3F2g9BKuFVIPEWzTO9LM30V1eGAXHMNOcLvMy5ZDb4aSpKJflJRFfd7EJ8vqdU2pQ6QscEgkVd+qmbZ9X9VzqFqqjc5q5htqbj3pcfH8TkOR4Uy1xG3HQHldaUKcZdb8ooc6nArgnqHcVS5qNwmC3zcLEW9ZG4idFewpuxCQfLx8wP5X7arR7RuJ0E8RW+4dg+Fau5hgk/Gbwxkk3F5UNEd2/txlJdS15iHFIUG1oC+ApR9VdHuhSb11Sq4ab6c6671NPdV9ErNbPuRpw1Pl5Xl1nZQiHdJbiPLjQW32x0SnUEuKcUkqCEq6VK5ITV1a1BBotIEEkkjsJ6hotwkWKzMioRMgACe5joTnxg3Cprjy1W7xW8qjRjw3dtKI0iSB8Vty2kJJ/QkVcqqe6NxFZv4jOt+oMYFyBhuNWjEEOj5pkOhEhxIP0pLagR9dXCqlP/j0uTj0L3uHcEHkVZ/7ep/COzGA9iCOaUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiLAvYJhkjM4+or+LWxzKIkFdtYu6oyTLbiqV1KZS5x1BBV345+J+k1nqUppH1v+KaysBqBh8HULBMiwK5vuMxMjtUq1Put/PQ2+0ptSk/WArmqjyUamaXaH6fbWs00su90kt3m22Bi6Y+9EfiXy1wXPa3ChDj6HWXVxoqkrQ6hKAVK4cIq69atecEZveoWN53KuK+nGYdwYjQvLHSp+V5KS+Vc9ihttxAHHo8rvUN8r5GpaT/DJHxI/ivwO8zYOkxwxAA/AHpbjD9w0gzzX7VHFtQtZ7M1jOHYFM+6uO4f7U3Kly7mBw3NuDjRUyjyh+1stLcHJJUs/NqxCkpUkpUAQRwQfQiv2lTk3CMvmczz+QAFgAmbsRzy6DT49STmStSsmkelGNXleRY5pjidquziytc+FZYzEhSj6kuIQFE/XzW20pTSE1lKrXuNxmbplqviG7zHIL8hnG4zmO5zGjNlbj2Ovq6vagkd1GK7w6QOSUdf72rKV/LjbbzamnUJWhYKVJUOQoH1BHxFRcOD25i4+BHIgkHgbXU2ILXZH6nmDBHEBfGBPg3WDHudslsyoctpD8d9lYW262oApWlQ7EEEEEfA1EO5iz2nMbdhOm1wtkSarLMut8dSX2UuFMSKVTpXHIPAU1EU2T/6zj41JOF4RjGntjRjOH237nWlp515iGl5a2o/mKKlIaSsny2wSeltPCE88JAHas0pppa0OLbQpbZJQogEpJHB4Pw7VYxiBGQIPYgwfgd4VROEg5kEdwRPzXygW+Ba4qINsgx4kZocIZYaS2hI+pKQAK9FK+MyK3OhvwnlupbkNqaWpl1TSwFDglK0kKSe/ZQIIPcGoJOYzUgBVpyO1jcpudsMdhBkafaHTF3CbJHdm45UpIDMdB9FiI2orWR6OOBB7g8WcrDYhh2M4DjsLE8Ps0e12m3oKGIzIPA5JKlEnlS1KUSpS1EqUokkkkmszUiGtDBz5k5n4AbmgCTEqDLnFx5cgMh8SeJJtklKUqFKUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJWja1ax4ToLpxd9Ts/uAjWy1NcpbSR5sp89m47ST85xauAB+cngAkbzWjar6H6Ua42yFZtWcKg5LBtz5kxWJalhDTpT0lYCVDk8cjv8ASapUDi2GGD9fQVmFodLslXnbjkWm71+uO5HXXWHAP7pWXRwzFt3ylhraxa0c9TVtZ5c7L7hTyxwVLJHw5Ngv7v2hP5asD+0kP/aVG33vjZj/AIPuN/zv/wC0p9742Y/4PuN/zv8A+0rUkWDRAGQ3D6uTmTJNyqAG5cZJzP12A0EAWC3y57mNuVlYVJuuvmncVtI5JcyeECfzDzOSfqFVV3KeLJotgdllY7oJM+X+ZS0FiG8wwtNsiOq7Ba3FAF4gkEIbBCvQqTU3fe+NmP8Ag+4
3/O//ALSvVadhm0Cx3WHerVoLjjE23yG5UZ0B5RbdQoKSrgrIPBAPccVTAHkB5IbrGcKwcWiWiTpOS3PbngV1030YxjHMjlLl5A5FNxv0pzuuRdJSi/LcUfiS84v9AAqSaUrSo7G4uiJ00HAcAqMaWNAJk79/HqlYF7BMMkZnH1Ffxa2OZREgrtrF3VGSZbcVSupTKXOOoIKu/HPxP0ms9Sqaz9bvgraQla7qLiSc+0/yXBlzFRE5DaJlqMhI5LXnsqb6wPq6uf0VsVKpUY2qwsdkRHdWY803B7cxdVQ0RxvXnS3bVadutk0pcgZfZLbKtLeQyrhF+4SSpbpTOStt1UhfUFhYa8kK6+y+ge/X7so0x1b0u0+wbTPJcVuGKwMNhXNeQKfkRXUXy6yZJU2WS04tSmUILiytXQSVtAA9KuLXUrbGS91R1y7PjE/Mk843Qs8ADGsFg3Lhl8hF9J1JKVpmQ6LaOZbdDfMr0mw29XInqMy4WGLJfJ+nzHEFXP6a3OlU4q3Bco1bntfNnG47V7RjDtvb2Vx8xyp29YxDYZeZQ35yUpbWyhltQdaKEtApSU9JQR1DuBf/AGy4JqBiWBP5DrDNYlahZnNVfskLAAZivLQhtqG1wSA2wy202OCe6VHk88mXaUp+SmGG5ADZ4CIHoJOsDJKgx1C4WBJMcTMn1MDjqVXbPtN8qxHddZdymNYfcMpt0vDpGIXiDbHI6ZkZYkpkR5CEvutpWhRCm1cK5T7p4I5I92j+i2UStX8i3M6xxGI+XXmImzWCytvpkIxyzIPUGS4nlC5DqvfdUjlIJKUqUOSZ7pRnkAA0mOGIknvJE7iRkj/OTOsTxwgAdoHUAqBt+H7jzVn+Tcj/AK01J+lH+9dh3+QLf/V0VjdUtENPNZ7e5Z9RIt8n21+OYsi3xckuUCJJaKuopeYiyG23e/xWlR7AelZPANNsX0ztirNijl99i6Wm0M3PILhdAyhtPShDXtjzpaSB26UdI7DkdhSn5Q+few/04v8A+vRH+bBGmL+rD/8AytoIBHBHINVl0EtP7G7V7J9vVwSpjE8vnycs0/kr7NAu+/PtYPoFtL/CoR6qbWo/inizdYDM8FxXUG1tWfLbSmaxGlNToyw4tp6LJaV1NvMutlLjTiT6KQoHuRzwSKN8r8W8QeRg+hAI5RkSjvMzD1HMT8iR1nMBZ1aEOoU24hK0LBSpKhyCD6gisbaMWxjH3HXrDjlrtrj/AO2rhw22S5/jFAHP6aylKIlRVaXHs/10fyWJGdTYcBtsuxMyloKUzLpKcZVJDfPzkMNx20FQ7FbriR3bVUq0oLODt0+oj4E9YOiG4jl+f1wka2V4LpYbHfAyL1ZoNwEZfms+1R0O+Wv98nqB4P1ivfSiLy3K1Wu8xFQLvbYs6MoglmSyl1skehKVAjtXpSlKEhCEhKUjgADgAV+1j7/Y7fk1knY9dg+YVyjriyAxJcjuFtY4UEuNqStBIJ7pIP11BkAwpESJVc8ZtY3F7oUaxKQXsC0hZlWTF3vVq6X173J8tv4KaYSkMJUOxcCyD7tWcrH2CwWTFbLCxvG7VFtlrtrCI0SHFaDbTDSRwlCUjsABWQq1mtDG5D1JMk9SeggaKt3OLzmfQCwH1mZMCVXHZj+2a4/+1/If+pitJ0Os0zbpr5r9ddRsbyaQ5n+RMXywXO22GZcWrjDKXCmMhcdtYQ60pZSW1lJ44UPd7i0OG6e4hp+b4cStAgHI7xIv1z4ecc8+c/0+a776j089CfdTwkcdgK2OqsGAMjMMaw9AyY6tEcNLqXeYvByLy4dS4iejr8dbKiWkytYbXuP1qzi+aZ3yyZ1n9ysNtxhubbnJUOJYmwA9IclNAxh5bQ5WgO8+alKB1FQ5tJqdgus2U3eLM0312GDwWo/lvw/kvFufnO9RPmeY8oFPYgdI7dufjUk0qYGBrPu2HaPzJ3uJO6EnG5/3vr0sBuAA3zAv9x7dV/hjD/3eW7/aV9I
2kW6RqQ07I3fh9pC0qW1/c+t6etIPdPIc5HI7c1O1KkGDKgiRCjzcBnmSaZaMZXmmH4zcsiyC329Qtdut8Rch5+Y4Q2z+DQCopC1pUrgdkpUarHrFpnqZpVs50eg4vi92yXJMGyrHckv9vt7C5EqW/wCep6YQhAKlkyHiSeDx6+g5q79Khvldi1xMd/IZjqc+Q3KTDm4TlDh/MI9BlzKx93vlvsNgm5LdlrjwrdDcnSVKT7zbTaCtZIHxAB7VVS+5JqluD0asuUXHKH8OsWq1yg2mxY7aosZyQu0SnQp16bIkNulTqoSH3ehkNpQOAS53NW1lxIs+K9Bmx2348htTTzTiQpDiFDhSVA9iCCQRVcEbf71YNRcMwLBNQsxs2EYnbrhd4CDFhym7Q+vpisRYr78dZIDT8vhLxdUhITxwOAIbepB4RugSXA8wIHXUhDIpyDcTJ4mA09HGTvtpK2bSnUfLW9c850ByS8fKZjFbTbb1CvpitMSENyi4n2SWllKWS6ny+tKkIRyhXdPI5On6xRFYfve0Q1HlDotuSWi9YS88fmtylIEqMgn6VltxI+kipx060txDTCHPZxqLIcmXiUZ12uk59UidcpJAHmyHld1ngAADhKUgJSlKQBXj1n0oteseCyMSnTnrbNZkM3Kz3WOkF+13JhYcjym+fUoWByPxklST2JqZwFj8y3PiCCHRxDXECYkgE5lAA4PZkHZcCIInhiAcQJgEgSAFlNTrhm1p06yW6abWiNdcqiWuS9ZoUlXDUiYlslpCu47FXA45HPpyPWqZajaV6Da96YJuOreiE/Idc7jZQzLdxrErvapCrt5XShSnXWm220JV0jzJKvL4T8RwKubp3Pzifi8caj2KNbMhiqVGmeyPpdiy1I7e0sEHqS0584IcCVp7pIPAUdmqr6QdIJsYuMxnkdJm9psNysyoWwQIImxyOVjviLXi53rQNAMKyjTjRPCcEzW9qu19sVkiwrhMLhc8x5CAFALPdQT80KPchINRAzDVm3iIyLtDBchababogzHB6In3GWXENk/T5DRVx9Yqw+W3K/2nHZ0/FsdVfbs23xDt4ktx0vOkgJ63FkBCBz1KPdQSD0pUrhJ1HRPShemNkucu+3Jq75fldwcveTXVDZQmVNWAAhsHulhpCUNNJPohAJ7k1qahqVzXdpiPMuBbHKHE8wBqsgwU6Hgt/dHRpDp7gDqTopFpSlUV1gYuCYZCzGbqFExa2M5NcYbcCXdkRkiU/GbPKGlOcdRSD8OfgPoFZ6lKaQmsrQdetKYuuOjeXaTS7iqAnJrY5CRKSnq8h08KbWR8QFpSSPiARVfbvlWptstujmiOb6KX9682W4R509NikwJMO7xrOxyh6KtyQ2UIMowVFMhLRTzx37E3ArVlYIy9qe3qXJuS3HYtiXZIkMtgJYDj4efdCueSV+WwnjgcBr1PPaGDC+dCQTzZiLT/ADGOt8kd5mRuBA5OgO9BPS2aiayaMZvqjrVZde9cYkS1M4ay81hmIRpIlfc914dLs6a+kdDklSQEpQ31NtgDhaz3qfJcOJcIzsKfFZkx3klDjLyAtC0n1CknsR+evtSrZNDBkPnc9/8AGQCZuxHP8sh0/wA5klfyhCW0hCEhKUjgADgAfRXyhwINuaUxb4bEZtS1OqQy2EJK1HlSiB8SSST8Sa+9KhFX3VPSfM7RuOxTc9gNgGTu27H5GKX2woksx5bkJx3zm5ERb6kNF1DnPUhxaApJ7KB7HU9PM81gzGTqPlulukc9m8ZdkTsKDe8mlRY1stkWC0mEnzEtOuPvqQ8zJX5bTZQpSyPMSDzVq3UrW0tLa+hZSQlXHPSfgePjWt6Z4NG01wKyYNFnLnC0RUsuS3EBC5TxJU68pIJ4UtxS1kcnuo1DRALTlBA/idiOV85vn5jBjKXGTiGcj0ED0jh5bjfq23fQiy7fsBVisK7SL1d7pOfvOQXuSkJeutzfPU9IUkchIJACUgnhKQO
SeScDuz0evmqWn9vvuBhCc80/urGV4qpRCQ7MjHkxlH4IeR1NnuByUk9hU3UqXkuggwREcMMYYGVoEDK0ZKGQ2ZuDMzrOc85MnO61HSjUqxauYFas7sAcaantlMmI+npfgykEofivIPdDrbgUhST8U1nLxjOOZEGk5Bj9tuYZPLYmRG3ug/SnrB4/RXiseCYrjeQ3vKLDaUwZ+SLaeuhZcWlqS82kpDymufLDpSQlTgSFKCUBRPSnjP1LiHGQI+XJQ0FoiZ+tV82GGYzKI8ZlDTTaQlCEJCUpSPQADsBWoav6n2bSDALnm93adlLjJSxb7ewOqRcZzh6I8RlI7qcccKUgD6SfQE1udYC8YJimQ5LZcuvdoRNueO+cq1uPOLU3FcdASt1DRPl+b0gpDnT1pSpQBAUoGjgX2mJ116cd3e+Ss0ht4nh9aKNdp+jl40i00ekZotD2c5rc5GU5Y+khQ+6Uo9SmUn4oaT0tjvx7pI9ammlK0cZNhAyA3AWA6Cyq0EZmSbk7ybk9TdKUpVVKUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKItC1f1XXo/jj2WytOMvyi1wo70qe7jzMR9cNpsdRUtp6Q04occn8GlfASeeO3MMaE+IPpvuSvcmx6P6T6n3pdv8AKM+Sq3W9iNCQ4SEqddcmJA+ao9I5UQk8JPFT/qYlKtN8rSoAg2ScCD8fwC65xeB2lPyc1bXwOr260Dn6vLk02fz1ajXZNaHd3RHJK/kpMc3NziOwBXTS93GVabTKuUKyTrw/GbLjcCCplL8gj8RBecbb6v8AGWkfXVcsU344nm2qN00Yx3QnV9/MLJ3uductEBn2JHKeFuuuTQ2lB6kkKKuFAjp55qzdVe0hiR29++v8hDKQ4vHcU6lAdzyy9z/2U/zClO9UB2UHuASP8o+1JzhmI9XBvzlWgSSUglJSSOeD6iv2oTz7ebts05zOBpzfNULZKyq43Nm0N2i2EzJDMlx0NhD/AJXKWOFK97zFJIHwqPM08T/Z/hGXLxKdndwuBZfMaRc7ZanpMBpxJ4UPOSPwgT8S0Fj6CagODojXL0/Md1JBbIOn16weytfX8uOJabW6oKKUJKiEpKjwPoA7k/UO9YrEsuxnPMbt+YYbfIl4st2YTJhTYjgW082fQgj9IIPcEEHgiozvm6jTaDlV1wrE7Xled3ewK8u8s4jY3rk3bHP/AEb7yeGg52P4ILLnY8pqXS04DY/l+WqhsOGIXC0uyb79Ock3J2rbHasGzWLkM8PrflXm1m2tMNojrfSoNPEPK60o7ctpHCgeT6VZeuWlq1ExLVHxgMRyzDLg5KguWJyO4l6M5HfjyG7dJS6y8y6lLjTqFAhSFpBH81dEtVdatN9F7dAn6gZCITt3lJg2qBHjuSp1ykqICWY0dpKnHVklI91PA5HJHNS39ixzszI5w4tEc4EKHftnMbkA08pEnt8lvNKhRvdvpjAzix6eZ5acrwO75Qry7H8p7OuJHuTvIHlNPpK2w5ypI6FqSrlSRxyQDru+XdLO2u6PT8kx7E7xd8gnNKj26Q1bHnbdb3FEIEiXICfKbCVKT0tqUFOKIAHHJGdR/hsx56Did3P4arSmw1H4B9Dfy+OisbSqxbIdf2dRtHsBxy827PJWSOY+3KuF4umNXBqDKdA5WtM91oMOlRV26Vnq+HPFSbqJuK0+08vjuI+yZJlWTMMJlP2LFLHJu86Myr5rj6GElLCVfi+apPV+LzW1ZngvLCd/WN3ZZUneK3EP9Tv7qUKVEGh+6zRfcDPulgwS/wApjIrGVJumP3eC5AucPhXSSth0AkBXAJTyEkgHgnipfqpBEE6qQQZG5KVUPxGN3N+206Uv27BsevbmT5E37HFvaba99zrSHAoeYqSU+UZHCV+W0FFXI6lAAAKk7a/rXadScCxiws2TOGLlAxm3vS518xm4QY0lYZaStTcqQ0lt8qUeoFKlFQ5UOR3qKX2oeW+6QPQ
k9ovz4Kan2RaHe8CfUAd5tyU3UqIJ+6LTleT3rDcItuTZ7d8aX5V6axW0rmtW53v+CdfJSz5vZX4NK1LHBBSOKzWjO4DS7Xu0XG66dX5yQ7ZZSoN3t8yM5Em22QOeWpDDoC0HlKhzxwelXBPB4A4hI3T038uOSO8pg8uu7nwUi0qEpW9LbK1qNbdKYGrNmuuR3Jx5Hk2132liKGmluuLfkI5ZaCUtq5BX1D6PXjw6Lb4NvG4DUm7aWaYZTLuV4tUdyV5jkBxqPLZbWlK1sOKHvAFSfUDkHkcjvRvn9m+Z6DPsjvJ7VsvXLup7pUOZNup02suS3rEMctOWZzdsZ4F9YxKxvXNNrUQSG33EcNhzgK/BJUXOx5TWu4p4gOz/AC+K09C1wscB9xwMmFdQ7BlNu88dCm3kpPPPbtyPro3z+zdHeT2rKwtK1LUbVTBdKLSxd83vXsaZr4iQIrEdyVMnyCOQzGjMpU6+4f3qEk8dzwO9RVZd8eiEvUGBpfl7GXYBkN4IFqj5ljsm0puBJ6U+Ut0dPc9h1FPJ7Dv2o3zuwtufnu58Ed5W4nZfV+SsFSqt7/8AdhdtsGkUuZiGMXmdkt5bMWBcUWx5dttalkpD78np8oLB56GirqUrjkdPPOf2ea5wNR9KsGxuRac6VfI+JQJM+6XnGrhGhy3UstJcWia+0Gn1KUrqBSs9Q5UOQCaU/tQ4t90gehJ7RfnzSp9kWh3vAn1A9Zty5KwtKVCmZbytuWD6i2rSW6ajRJWXXe5s2lm129pclbT7jgbAeWgeW0EqPCupQI4PbkcUF3BozOSGzS45DNTXSqt5j4lO03CMvbxO65pcZDS5BiLvUK0vP2pt0HhQElI4cCT6qaC0j6exqzdvuMC7W+NdrXNZlwpjKJEeQysLbdaWkKStKh2KSCCCPgaDzNxjLeh8rsBzXppUCZjvT0hxRi4XK3WfOctstnccbud8xjFpdwtUNTZIc65iUhlXRweotqUE8d+DW+6ea7aWas6dOaqab5OMix9ltxx1y3xXnpLam09S2jGSgveaBx+D6Os8jgHkcwCC0vGQueW/lxUkEODTmbdd3Nb9SudejW/+7ahbvc/+U+G6gwsSxyxqtNnx6BjE+fOYeTKb82TMix21rZdX08e+AEAJRzzyTZvWLevoToI7Y2tVZ2SWP5RwkTrcp3HJpS6ghJKCQ37rqepIW2rhSSQCBUi9NlT72Xcx1IEwoIio6mPd/IT2JiVPNKi/PtxWA6ZaaRtW80t2UwMceQHHXvk/LW7DQSAlchlKCtlJJHBWB6j0r+tDtxWnG4myu5Npeb5MszalITcZlmkQ4zy0q6VJbW6lIcIPr088d6kAklozGfDmokYQ7Q5ceSk6lRfYdxmneRaxXDQiCxkbeX2uMqbLjybFKZYbjDsl4vqQGy2s9kqCuFE8DvXo1G3BaaaZZBAwq73CfdcsurZeg45Y7e9cbm+0PV3yGUkttjg/hHChHY+92qs2B35ceXY9laDJG7Phz7hSRSoRxfeFo3fdQmNJshev2D5lNAVBs2W2h22OzknsCw4vlp3kggBKySQQAeDW36sa+6M6GW5Nz1Z1IsmNIcQXGWZcgGS+kdiWmE8uu9/3iTQkNaHHIoAXEtGa3+qwar7+9OdKtWsZ0Yn6fZ2/fcpuka3Q5Mm0qgQVJdkBgvJckFK3UpUfVDZB49RyDX7m3iP7VMBxfFcpvmZTltZhG9ut0SLbnHZaYnmKQJDzY7tIJQrgKIUoA8JPB4rX4i11tt+3TbTL9aJCZEK5XSNKjPpBAdZXPhKQoc9+CFA9/pq1NpdtNKmfZL8J9bcDYjvuVahDdnqPGYZiHpf1+G9dMqVGut24PTrb1Y4+TamuXeJaZL6IqZkS1vS20vLPCG1eUlRClcHgcd6z2D6lWbO7I7kUO0ZBaIDTSX/Mv1ofthU2UlXWEyEpVwAOSSBx2qoIILhkM+CkiCAczktspUIDd7phJsMzN7RYs2u+F29
TntGVW7G5D9s6GyUuOtqSPNeaQQrl1ptaPdJ6iBzUl4ZqJiGoGCW7UvFbuJWOXWH90Is1bS2QuPwT5hSsBSRwCfeAP1VJs0uOQz4c90wYTMhozP0e2q2SlQ/t+3YaKbnDkCNI8ikXBeNSEMzkSIbkdXSvq8t1AWPebV0L4Pr27gdqmCpIIidb91EgkjdZKUpUKUpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURa5qQlS9O8pQgEqVZZwAHxPkLrnD4HZHyb1bHPf2+0f6uTXTyXFYmxXoUlsLZkNqacSfRSVDgj+Y1yh26XuZ4Yu5HN9Pdc7VcoOmucOt/cbKmojj0T8Etwx1qKAf+LdUhxIBUlQB46e9Rs5DNoe11sbABza6Y5kZb1NcF9BpbfA6TyIiemq6x1WTSMc779flDuE49iaSfoPkv1l71v8AdototyZsTWiz32S+AIttsaXLhOkuH5raGGUqX1k9gFADv3Ir92r4dmcu96h7g9Rcak43etUrlFeh2OXx7TbbRDZ8mG3IA7JeUCtxafxSsA9wRVmA48WgB7m0c7k8gqvI8Mt1cR6EOnl5Y5kKpPil45Yr1ui222+52uO+xd7mIM5JTwZDCp8UFtRHBI4Wv4/jH6avpqxobp3qppBddHr1i9tRYpVvXFhR2oqEIgOBBDTrCQOG1Nq4KSnj049CRVD/ABVLs3YNye2u+uwpkxFuuipSo8NkvPuhudEUUNoHdazxwEjuSQKtTn2+XQC26dTL7p9qFaswyeaw5HseL2h32i7zJ6kkNMKhJHntHr46utCekA89+AecAP8A0aWAScdSwzzEdZmOOS2cS3bg4W8lO5yyM9Mp4Ln/ALQ9wecaL7CtwUaFcXkzMMuceFY3eSTEkXElhZb+joUgugfviT8av14deH2nEtn+nrluaSZF+gqvlxkerkmVIcUta1q9VKA6U8n4IAqDdLNgWVQtgWb6RZIGY+ouoqjkUptax0xpzakORYq1jt28oBR9Ap1fqBydQ2Mb6sD0C03G2rdU5c8ByXBXnosRy4WyQtL8ZTilpbUGkKUlaCpQBI6VI6Ckk9q7cf2lVjyMeGn1DWw4Df5rnhB3LmLRgY+mPJiqdMRlpO7y2HEnivpkVtgW/wAanH3IMRphU2x+0yChIHmOm1PpKz9JISkc/VVlN0eiWjdz1P063OauarysRj6ZSUrjR1PITHnO+aHW20ggrLhWngpbClLSAkDtzVMrtrCi/eKnjmsVl05zmfj6bMyhhuJYH3Zz8R2E8y3M9lCfNbaUpfUOsJV0cKIHIFbDvm1QuWmG/fRnVPVuyXZ7SyzwY8yAyuMpTbEhXmh9zy/QyGlqZWpPzuG0cfCsKXlZswPlIe4An3TiecraWgxGsQtavmfXIuMDbD3vK0RPO8r+/Fc1IvWZ6b6aX6Bp5f7BZ42Woet12vSUQ5MpfkLILcQkvtI4AVy8Gl8ge5x3qxPigrU7sXzFxZ5UtVpUT9ZmsVV/xPtdLDr9odheY6XYzktwwiz5Qh17KZdqdgwpDq2HEobYS+EPODsvqc6AgEJSFEngWB3eZpZtz3h6Ztk+j0a7Xa3tMQpLTj1rkRVSW4z7Dr62UOoSpxCEhYKwOkltfBPHNY1xGy1LezUBO8CKdzuy3C11rSM7VRM5sI4TidYd951UobTL+jFNiuAZQ435iLPgyJ6kfvg0wpZH/wBbVc/Dxve6DMNHL5qjg7Gl9ykZplVxuV4n5FNuCbg7KBSnoX5LSkBtKQkISD2B9BW87C9xGlGrO3HCNAbP92LrkMXHnLJkMONbXfKtbKW3EF9+QsJZShY6QkJWpaisAJ7K6av7btwOU+GPqRlu3vcRiN7ewu43Fc+03SD
H6/e7IElkKKUvMutpb6glXUhSOOCeoV3VyP1+sXGzwcJ0Pnk9wBzI4FcdEH9SpBou0jENR5YHYkzwPEKxls2W7kX96Vm3bXTI9N7GpLzKb3bLHJnq9sYDPkPceYyAVrb47KPHUkHmr41VPBt1Nz3cXy3WDbpjuT2vDY0tqTkucXSF7G0GG1hRgwQokuvukBCl8ANoUo91FPFrKyALKTWRAEwNYPynIcyBBBOhIfUL5kwJ6fPf01VEvGT/AHJkH+V0D/UyKtloYgOaHafoJICsUtQ5B4P+5G6rJ4uuK3zJdoUqXZYD0tNiv0G5zQ0gqLcYBxtThA/FSXU8n4Dk+grYNum6HGNZdvFtxLQ37r3XMrLgYjyVItrrcW03NiCG2mnn3QltS1vJAQlsrJAKiAkE1hScG7PtNp8wMakCnf8AJa1W4q2z3gYSJ0kvC8u33DNHtmdzy7SPTrKMq1QzXI7p92pljgMtSJcFBT0t+0ucoYijg93JDrZXyOB6Cob2F3O9P+ILuUYu1rTaHpgdlS7ciSH0MvCYngFaQErI8xfcDjueOfWtE8NLdrpjoxjGZaX6tQMnRqddsnenKYYskq4XG8vKQhHkENoUsPIcS5yHeke+TzyVV/e2fWezaTeItrTJ1Zx6/wCNXTOetFps33PcmzFvPSGn2WVJjBxIUWj1FXV5aeFcr4HNdNFsbVTkzNNwnT2Ww0b4y1JIBtMLGs7Fs9S0faNJGvtGXHdv3CYvCzW47CMWyTxcNL8fu1mju2652aK7OjBAS3KKG5hCXQPnpIbQlSTyFJHSeR2roReNMMQtM7INRsKwWwxM9m2Z6C1eGYDTct7hH4JtToHUU9SW+xPolI9AK5s6365aQueK9ptncbUSwSMaslvjW24Xhic25CjPqblp6VvpJQAkvNhR54TyeSODx1NhZFaLxYBkuNzo97gOsLfjPW59D7ckJB7NrSSlXJBA4PHNc4aD+jxeL1LjMST8st4W1Qxt2U+WnY5SB+eaoN4OOUx3NNdSMCv7i2sztmXPXG7x5XaUoPNNo63Ar3iQ6y6k8+h9fWvHtw02hQ/FE15cxu0RZeIxbaHbiSwlyOzcpJiveWORwHPM9pPHqOF1E+NbwNm1+1FybUvcxpvnOKaoybg9DcRY0yrX7BCbUUtMqchSGH3nSnu8t4FRV7o4QlKRbna5vD2MZDcI+kmhl4i43cLnIW8xbplsehuXGSrupRfcBD7yv47hcV8Oa6KZL6jK4FwzDAuLtA9AJi99YusaoDGVKRNi+ZPBxPxkTaxmJUVSc31c1L8TXM7Tp61iMyVpjiaYNoi5RIktxowf9nVKfZEdCj5ylPdBJA9ztz2ArObutom6vd5j1gs2UStHbDKxycuZDuFvmXNbwC0dK2vfY7JJCFdvigVFO8WHqjsz3rw96WIYtKvmFZLGaiX9DAPloPlIZeYdUAfKKkttOtrV2Kxx8CDO2PeJzp1q+0zi+3bTTN8xzy5NhMS0v25MaLCcV282bJC1IaZQe6lJ6vTgevNc9FoqbNTaLubMjXFiLpHAzIOXRbVHGntD3ZNdEHTDhDYPERB/Na54l9qyexeHpCsmaXKPcb/b5Fhi3OZHUpTciU3wlxxJUAohSgT3APerPbUv3MelH8jLP/VG6rz4iGneoMvYBc7PdLnIynIrK9Au16mNNftxTI6pC20Ae60guHpH4raBz6E1tOw7dBpnqjo7pzprhirvc7/YMbiwL8hq2vJj2hcZgN8vyFhLf4RSAG0tqWtXVz0hKVlO9El/6yDdxe023YDJHCVhUaGfq5HshjhffiEA8Y0Vsq5k+JrYLNf94m2i03i3tSol2uDECa0sdn46rkwlTaiO5SQtY4/jGum1cr/Ee1S07O9Xb8pvMrQ6jDLtGeyBbUtDibYBcmVKD5ST5akpaWSlXBAHJHes6cfrmzg/fHwK1qT+q14zwH4hWe8S3FccTsZzeAzZITMaxtW522stMJQiGpEtlCfKSBw
jhClJ7ce6oj0qC8w1hyjTvwe8Vvlnub7F2vVmiY2zLQshxll2QttfSr1B8htSAR6cjipc8S7VnTNeyzKokTPLBLfy1mGixtxri08q5BM1lS1MBCj5iUhCipSeQOO5qKdPcAs+6/wq7XpBp3kNruWX2C2IkJtzUtsvR58eW64hh5HPLRdQFpSVcA9YPPHesXhzqG0jPzsJ5AHF6WPZa0y1tWgTaz464cPSb91O+j9i3ZWLRzEMXxLHdC/k4xj8NiG07OuvLkZTCeCsBjpKlA8q+BKj9NavsL2Zav7UMyzmflmXYtMxrLUIeZtdnekr9llIdUUEB1pA6Q24tHPPJ4TUGbUPE8xTRnTqFoZujxjKrJkuENC0syW7cXFPR2h0ttPNKKXG3UJAR80hQSDyDVzNDdTNRNf8mVql8l75hWmsOIuNYbfeGQxcL++4U9U59oElphCU9LSOeVlalnsEgehUIdtD61K4dN9INxPHdqDfKSuJgLaDaNSxEW4jdw38Oyqlsa/4SHc1/wApO/7xRU6eJtob/do2tX2XbYfnX3CFfKS3FKeVlLKT7Q2Pj3ZKzx8VITVWdJdWMP2qeJRrirW6XLsEHMPaV2uV7A/ID5ektyGAlDKFrUFoKgCEkdSen1rp9j91YzTFYt2lWG4W+Pd43Wq33WOGpCWlg8Jea5PQSk8lBPI54UAeQODAav6OoBmYYOjgSRPI6Ls8QUtvrF2ReerSADHAi0qqGmO47DNUvDkl6l6jgXJmLi0rHsgiBQLsqchv2YND1/CPqU0U/W8mov8ACYznIcEjajbU9TY6rTfsGnG8sRJKx1NR3QkSE8+nSlflr5Hb8NzWk7YdteoONbuM621XBLv9yLCsni6gqZcQSiW4Er+5bXPoUkrQtafQmH9VbJvw0S1LtO7vAs+0Vect8rWaA/gV7kNIJDZW15bryuPQ+ynrB+Bi8/CunxDUq+OwftxEHQxLZ3faS0jdG9c/hhlM0HH9iZneMjG/7OHTqTwlWd2r9OYL1E3S3KOerUO6LTZSpPvIx+3BTEPj6PNKXnvr81NV18KXKHtY8/1512yxftmVXm8xI/nunqXGhK85aGEE/NbHShIA7cNJ+iugGK4rZcNxS04XYoiWLVZoDNtisgdksNNhCU//AIIFc2dELNcPDf3bZhYNS2X4Gjupiv8AwNlKmlGBEeS4pcZuQ4B0tKSHHWldXH4q/mckRTwM2ksBtgLWk7wRn+88DqZ3qz8T9nLyL4g5w4X9GSOgG5TD4t+mlryja49qMhhLV9wG6Q58Ca37rzbbzyGHUBQ7gEuNr/O2k1puuWcTdaPCIGpGXsty71LsVtMiS6gKcXJYuTbCnuT3ClFClEj9+fprdd8Oodr3J6ZI2x7cb1bM7yfNp8IT37LLRMhWa3NPJeXJmSGiptlPU2gBKj1K5PSD2Bwm+WLpjt68POTt4GY2tq7MWa1221W92ShEy4qblsqdfQxz1lJKHVqUBwO45rjqgt2SsD7z24eMATHWOoO4rppkO2midwM8AXCJ9Tyjet32LaJaTZVsgwG0ZbgFkv0W/wAMXO5N3SGiV7TJS6tKFqLgPdCUpQj96kADgVAnigyG8Z3MbY5Vssj0pu1XAOR7db20hx0NTohSyyklKeo9ISkcgckelWM8NnU3T7Kdqen2I2DMbRMv1jtTjFytTcxszIqkPrBLjPPWlPvJIURwQocHvVV/Ej1c0xuO7fb89bM8sc5rDrw27f3Ik5t9NtAuEdRD5QSG1ANLJSeCAO4r0dpn/wBTpYTANSZ4Q4A8hZcND/6fUxCSKcdZBI5lWD2e6o4ZvQya8axZ9IU7lWFXNyNacIlo4j4s1yQ3KS2f90SnAFBUlQBQQpCEoAJVZrXHCbvqVo3m2n1guIg3LI7BOtkSQpRSlt11lSElRHcJ5IB+omueG82FbNrur+M7+NtOZY5JhZJMVDySzw7oyqPeeSPOcaCFEOoXwA50clDgQ56kkWmm70NOtU9
ruZ6s6G5rCXkdlxiVdvuQ442bjbn22+el6Ork8BfCevgoVz2J5rlqBlbZHA2DQQ4bjFzxBF5znPRdVMvp7U0tM4iC0772HCDaOo1VYNp+/SLt0ttv2i7wMFnYZLxZs2qLeHI5XHVHKj0CS2AT0cHgPt9aFp4JAHKj0K0hxHCsR0tsOKYJcGLri0eGRbH0OoeaeiOKUtASpHurR0r4BHqAKrtqzmmybdpt6dyTVHI8TbSi1LfQZkxqPerHMLfJaQgkPIdC+B5fBDnA91YI5+3ho4ZqHpXs/tsXUuBc47y5k+7W23yG1e0x7cvpU235Z7pKlJccCPX8KPTmt3uLhV/WLOaASdHXPrmdbSdVgxob4Zoey4kAatsD2yHAwNynbSbb/o1oUm7p0k0/tuNfd6QJNxMTrJfWOekcrUopSnqV0oTwlPUeAOakKqzbMd7Nt3gnMkwdNbpiisSlss8yZQkoktu+Z0EqDaPLdHlnqb97jkcKNWZqrg4Ri3COUW9OysCCXAZgmeeqUpSqqUpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURK88+3wLpEcgXOFHlxXh0uMvtpcbWPoKVAgj89eilM0WCseBYNjEhUzGsMsVpfWCFOwbcywsg+oJQkGs7SlEXMbxQcksEDdhttEu8RGjaLq3MnhTyQYrJnxSFud/cSQhZ5PHZJ+iulEKz48iYu/W6125MqYkKXNYYQHHkn0JcA5UPT41qdx0B0IvE+RdbtopgU2bMdU/IkycbhuOvOKPKlrWpslSiSSSTyTWy4vh+I4PbDZcLxa0WC3lxTxiWuC1EZLhABX0NpCeogDk8c9hSj9nQFI5hznfzEW6RnruSr56xqDc0fyzf1WYrwTrDY7nKYnXKzQZcmKeWHn4yHFtH191RBKf0V76URfNDDDbrj7bLaXHePMWEgKXwOByfjwK+FytNqvMb2O8WyJOj9QX5UllLqOoeh6VAjkV66UReaXbLbcIKrZPt8aTDWkIVHeaStopHoCkjggcD4V9mmWWGUR2WkNtNpCEISkBKUgcAADsBxX90oi8lttFpszS2LRa4kFpxZcWiMwlpKln1UQkDk/XX5c7Pab0wIt5tcSeylQWG5LCXUhQ9DwoEc/XXspRF/DTTTDaWWW0ttoASlKRwEgfAAelf3SlEX8uNtvNqadbStCwUqSochQPqCPiK89utdts8VMG026NCjJJKWYzKW0An14SkAV6qUReNqzWdm5O3hm1Q257yQh2UlhIeWkegUsDqI7ehNf0bVazcReDbYpnhvyRK8lPnBv9718dXH1c8V6qURKh3d7l+qmCbcs3yrRe0vT8ugwAYKGGfOdZSpxKXX0N8HrU22VrA4PdPoeODMVKzqsNRhYDEq9N4pvDiJhUB22bydh1x0VxvHtQcistvyKFCT93o+WWtb8mRclDmVIW+ttaXi46Vr6uoq4IBCeOBt1yh7aN0Uqy2PbppZZ7g9bL5b7nJzy34v8Ac2HZW4slt5wMzFNNqffWlstpbZ6x7/UspCe9nbroro3frurIL5pLhlxui1dapsuwxXpClfSXFNlRP181t0WLGhR24kOO0wwykIbaaQEoQkegAHYD6q6C8Of4pEGQbZSDI6cPVYhhazwwZERfO/z49wv6daafbWy82lxtxJStChyFA+oIPqK81ss9psrBi2e1xIDJUVFuMwlpJP08JAHNeylZq6/FJStJQtIUlQ4II5BFeW22m1WaP7HZ7bFgsdRX5UZlLSOo+p4SAOa9dKIlKUoiUpSiLwyLFZJk9q6y7PBfmxxw1JcjoU62P4qyOR+g17qUoi8ciz2mXOYucq1xHpkUEMSHGEqdaB9elRHKf0GvZSlEXzTHYQ8
uShhtLzqUpW4EgKUE89IJ9SByePzmjseO+tpx5htxbC/MaUpIJbVwU8pJ9DwojkfAkfGvpSiJXykxo01hyLMjtPsOp6VtuoCkqH0EHsRX1pRF5LZaLVZY3sdmtkSBH5KvKjMpaRz9PSkAc166UoiUpSiJX8qQlaShaQpJ9QRyDX9Uoi8MixWSXcGrtKs8F6dHHDUlyOhTrf8AirI5H6DXupSiLzQrbbraHU263xoofdU+6GGko8xxXzlq4HdR+JPc16aUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiVHOq2uWLaWT7Ji7kKfkGX5Q6pmw41akoXNnFI5W576koZZQO63XFJQkA9yex3HKsms2F4zdsvyKWmLa7JCeuEx9Xo2y0grWr9ASapv4erd612yTUDexnzClXPMLi5YcXZc7i22SMr9ra+jqXwFEeqmlH8Y1DB4lQsGTRJ5TAA4k9gCdIMv8jMZzJgc8z0AvzgayLF6d66xct1BvOkGWYrLxLOLLb2Luu1yJTUpuXb3VFKZEd5s8LSlYKFghJSrjsQQahbe5uP1b021E0g0U0DegDMM+vQVK9qiJkJRb0LSkhSVfNQoqWVLHCgllXBHrWnZxlLjni66fWS0uHljT2TDuYSfVChLfSlX5iGlfzVtejGJ/wB2ne7qduNurfn2XTpKdPsUKhyn2ptPM95H+KtxxsEevmqHwqaR8R1Jxy8znDgx7mj+YhgPM6WUVPsxVaM4aG83tB9AXEchqrgJ54HURzx34r9pSiJSlR5pZrFA1Wv+e2uzWV9m34PkCscFxU8FN3CS2yhcjy0ge6G1r8s9zyUn09KC5wjOJ6SB8SO6Gwk746wT8AeykOlKURKUpREpStbm6l6cW2U7AuOoGNxZLCih1l+6sIWhQ9QpJVyD9Roi2SlauxqlplKebjRtRcYeedUENtt3eOpS1E8AABfJJ+ioZ38bjZO23b9c8hx58JyzIHU2PHUgdSky3QeXgPj5aAtY+HUEA+tUqPNNuICTkBvJsB1JAV6bPEdhmPkNT0C2667h2blm95060hwe4agXnGABkDsOWzEgWx0jlMZcl49K5JH/ABSArp/HKK2jRrV/EtccEi57h5ktxnXnocqHLQESYMtlZQ9HeSCQlaFAg8EgjggkEGtT2k6KxtAtBcbwp5JXeXY/3VyCU4epyVdJADkhxaj3UQo9AJ79KE1XfwscneyePrzMiuFVokakzZ8Hv7o8/lSuPzpS2a2wYajqJMlrZJ4hzW24HEYm9hxWOPHTbWbYFwAHAtcRPHyjhc8Cr2UpSqK6UpSiJSoM2+ZtleU6pa72XIL3InQsbzNiBaWXSOmJHNujLLaOB2T1qUr86ifjU50F2td94Nd/MAfmhs5zdxI7Ej5JSlfhIA5J4Apkii/WTV2dhc+w6eYHb4t41CzJxbVmgSFKDEZhscv3CWU+8mOyCCQOFLUUoSQVciSLa1NYt8Vi5zES5jbKESJCGvKS64EgKWEcnpBPJ6eTxzxyaq1s8uB1o1O1c3P3E+e1cL4vC8WUruI9lt5HUW/oDzylOK+tNWbyXHLRl1gn4xfmXnrdcmFR5LbMl2OtTavUBxpSVoP1pUD9dQJFJrtXAO6ES0diJ4k5gBSY8Qt0aSOoMOPcEDgJtJWv53rLpdpndbDYc5zW3Wq6ZRcGbXaILiyuTMkOrCEJQ0gFfT1EArICE8jqIrc65k70dGtMNIdzm1dnTrDoVmXc8yC50hBW7JlqRLghBefcUp1zpBVx1KPHJ49TXTarU4fR8X95w/lj8z6Kr5bVwfug9y78glKUqFKUpSiJSlKIlK+MyZDt8V2dPlMxozKSt155YQhCR6lSj2A+s1r
X91nSz8peKf8A1Zjf/b0RbXUUSNVb5g2sMXTrUhqGmy5m4v5HXuO2ptCpKEdTlslAkgP9IUtpYIDqQpPAUj3t8sucYXkkpULHcvsl0kIQXFMwrg0+sJB4KilCieO47/XUcbudOZGpm3/LLXaXFsX+0RTf8fltdnYt0hfh47javxVdSOnkfBZHxqlR4ojxXeyM+WscRmOIjIlXYw1j4bczlz0nhv4XFwFMVee4XCBaYEm6XSYxEhw2lvyJD7gQ202kEqWpR7JSACST6AVHe2rVprXTQjCtVUhCX79amnZiEeiJaOW5CR9QdQsD6uKrPvuzq66sau6bbFMOuT0cZ1LbueZvR1cONWZpRWWeR6daWnVkfxED0UQdKtN7KooCMROEbuJ5AAk8As6T2Pp+M72QMR3xu5k25qXJu8a0t4ZM1jtemOSz9Kbc8EP5aFss+ax5gbXMjw1qD7sZCj3cISSApSErA5M033NsesWCz9RX7gy5Y4FqcvKpTagULioaLvWk/EFA5H56hjer8nsA2TalW2FDYhWqDiblphRm0hLbSVpSwy2kfAAqQAKrtkOSZhknhp6O6S2F5ass1cjWjC4JJJUmMpRLzqvj0JjMnqPwCqzqS9lRlHMFjWk734gJ0sQD1OavTgOpuq5HEXcAzCTGuRPYRCmnw89adcNwWkt61U1iMEQ7rf5KcaRHhpYUiCjhJSenstKXOpCVHlR6Fck9qtNWs6Z6f2DSrT7HtOMXjhm145b2bfGHHBUltIBWr+Mo8qJ+JUTWzV0VsGOKeQgDjAieZzPFY0sZbifmZPKTMdMkpSlZLRKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpRFT/wAVjPJeFbOcihQHlNv5TPhWIKSeD5bjnmOD9KGVJP1KNejbttq19080PwrFMe3Nv4/DjWeO99zEYbbnxFdeT5zqPMc99fDji/eV3NeHxV8JuGW7WvurCiuSGcTyS3XychCSoiIkrZdVwPgkPdR+gAn4VJG6LW9/T3R+PaNMUfdnUDP46bRhNshqC3ZD7zYHtQHwZZQrzVOHhI4SCRzWbCWUqrgJcXgAcmiB1LjHI8Vd4x1KTSYaGuJP8Vz0AE8CFU3YVY8n1c3zay685Tk7mUsYihzGId7XDbiiY71hlC0ttDoTwxGUeE/+kBPrXSS2We0WRhyNZrXDgMvPOSXG4rCWkrecUVOOEJABUpRJKvUkkmoi2hbdbdti0Qs2m7TyJd3WVXG+zU//ADVcXQC6oH1KU8JQnn8VAPqTXz1XGgmt2SWPSHJ9Tlx73Zr6zd49stV5MR2VMiArMZxSeznSFBa2QesABXu8c10OAZ4eztM4QGyNby4jhJJG+yxBx46xEYiSAdLQ0cLAA7rqbaUpWauqYbtfECxHTDJXdA8AuD8bPp6vZZV5nWmW5Bx9tSeVSS220p2WtKe6ENIUgnjqUACD69F92OynRHTe0ac47qZfZLNuQpyTNkYdfC/PluKK35Lp9j7rccUpR+jnj0Aqd9ddA8N11xtEG8oXbchtavaseySD+DuNlmJ7tvMOjhQAUB1I56VDsR6EYjazqvkup2n023ahNMs51g93k4tlCGU9Lbk2MRxIQPgh5tTbo+HvkDsKijOFzT7WZ4tytuAJuOIMn3ZqxLXD2cuTom++QDB0giBPm1b74VtJ/KNdvsbfP1On3wraT+Ua7fY2+fqdWNpUqFEule6vQvWrI3MT02y2fc7o1GXLWy9j9yhJDSSkFXmSY7aPVQ7dXPf0qWqUqTFoUCdVGeuGD55qdabfp9jGTSMZsN3dcGTXmBI8u4ogpT/uaIeD0OPKPSp38RCV8AqUOKg64bENn2KZto/pdiekiEX
jN8oCZr7t6uDrrlphMLkTSrqfIBX0toKgAR5h44PFdCqpxYMg/ut+Jre2mF+datGcG9gQQeUpuU9xCnD+fylFB/5M1Wm0GuxoFyS48mAuj8JIDSMvMdSZtUcRRc46CBzccM8xMzn5eFtwtfht7JrPcot2gaFQUyYbyH2VOXe4upC0kFJKFyClQ5A7EEH4iq4b9bTfNdt9ehO3izXk21m3xF39cn2dMhMdwuLcU4Wl+6vhEJPCVdj1cfGuk1Uu1AxJzH/FI0yz+6NdFuybB7habfIX2R7fHDyltA/vvKcBA+PJqWEmvRBNg4nqGOI7kD0UOAFGqRmWgdC5oPYE9JWU3JQtw+iuhOa6m3TdvKktWS0urbjHC7Yz7Q8vhtprrA5T1LWlPI7jnkV5/Cm0tmacbSrTd7rHU1Pza4SMiWFjhXkr6WmOfqLbSVj6l1493Fvue7nU7HtoeDyXFYzZZ7F+1Nu8dX4KDHR3j2/r9DIcPKuj1TwhRHAVxZ3NblN0108RH0+xL7ozIqYlnslsZbV5Da3FoYZLhQCW2G+oKWr8VCFfHilFxax9TV8NHEA59XWH4ScjJVRicyn92XHgSIA6NknmNy+EvUPKGNZoOmUfSy+P2KRZXLpJy4LQm3xngspTE4PdTh454BBAIPBHJG91XHQDWnUXMNftVtHMjultyW0afs20G/Q7d7EEXF9sqfh9AWsKSjg9J56k9JCio8GrHVIHka7ORM7wSY9LDeIN5kwT53N3WjdYf7O4yNIWnap6u4BotjaMt1HvEi22pySiIl5i2ypqvNUFFI8uM24sDhKu5Tx29e4qI/vhW0n8o12+xt8/U6sbSqibyrGNFQHb9vS24Ypqhrpe77m9yjw8nzFi4WtxGMXZ0vMC3x2ypSW4ylNnrQodKwlXbnjgg1O8TxANqM6UzCi6h3Zb0hxLTafkfexypR4A5MPgdz8a8+1z/fm3J/y+j/8AdcWrH1ZkeHSnLBT7YG+sa9Y0UPnxKkZ4398R+vmla3qXdX7FpxlV7jKKXrfZJ0psj4KbYWof6RWyVicusqckxS9Y6o8C6W+TCJ/5RtSP/wCaubawXbO8Nzg/BbbOWtrMLspHxVcfDJtzUDZTp64gDrnJuEx1XxUtc5/kn+YfzVaSqo+GFc1vbQ8dxuWkon4pdLtY5zR+c081McX0kfA9LiatU++zGZXIkvIaabSVLWtQSlIHqST2Ar0NqcHVXObkbjkbj0hceztc1mF3tAkHnJn1lUA8Rn91HtK/lgr+uQK6B1zg8RbOcLd3P7WnWsusziLTlPtNwUic0pMRkzIXDjpCvcT7i+6uB7p+g10Rtd9sl7Qpyy3mDPSgAqVFkIdAB9Cekn14Nc+z/wDEH46n/itq3/I/gZ8XL3VHGru4bSTQlVtTqhkcy1G7hww/Iss6eHPL6ern2Vlzo46k/O4557c96kelQZ0UiNVXL74VtJ/KNdvsbfP1On3wraT+Ua7fY2+fqdWNpUqFGWke5HR3XWZcIGl+TTbq/amkPS0v2SfBCELJCSDKYbCuSD2SSak2lKkxooE6qAdV9tCNyWbSGdc5kuRpvZC0my4rb7k7HZucgoCnZs9bRStRSo+W00FAJCFKJJXwK7YFsV2d5zuW1LxCHo3FOK4BarRbVRk3i4FLl3k+bIec6/aOrlDPkI6ergEntzV+L9erfjdjuORXZ9LEG1xXZsl1R7IabQVrUfzAGqq+Go9ccu0dyzW69NqTP1Rze7ZASr1DAcDLaPzJ8pQH1VWi0YyAPZaSeJJDRO+QXEbsIiAArVXHACfecAOAALjHYA78Rm5UoaQ7NdtGg2ULzTSfS2JYr2uMuIZgny5KwysgqSkPurCeekckAGplkNIkMOMOJCkOIKFA+hBHBr6V4b7dYtislwvc50NxrfFdlPLJ4CUNoKlH9ABqld48ImpkAeyU2EvAYLk+qqP4Vsp0barnYFLJZx/Nb3bY4J+a2HUucfzuKqA9BsR1C3EeIFr
1q7iOpLmIP4ZLVYIdwFoYuPLfWYyW0tv+6nluIolQ7+8fpNWL8LqwzLbtPt+RTmi25l1+u1/Skjg9DsgtpP6Q0D+YisDsRxpGmGse6HFchUiHPZzVN6Kn1BHVbZCXnmHuT+J0qUer0B5rcgs2hpqe02lf8UU2u6iXeukqrnB9J+DJ1X0xPcOhhvcawoa8TSBrrjGl+LaU3nX6XnEnUm/sW5ixjGYMAvBpSVhXmMjrPDymB0+h6u/pXQTTPS+wYFp7g2GuWyFKewm0xoEGS4wla2HERwy442ojlBUOoEjgkKIPrVXdOsTk7ut3H7J+6RnFaX6YNuWfAS6nhF5nhREi4oB9WUrJCF+iihsg+6oVbvMM4xHALWm85jf4lqiOOpYaW+r3nnSCQ22gcqcWQCelIJ4BPHANVpnw6HmsXnEeUQzqRf8AiA0hKgx1obcMGHqbu6Cw6FZ2lRdt+wbTLFMWumR6VZfPye0Zxd5OSLucm7m4JeeeICg0v0ShPR0hPqOCFEkVKNCCIBztPA6jobJIMxleOWh6hKUpUKUpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlEXxmQ4dxiP2+4RWZUWS2pl9h5sLbdbUOFJUk9lJIJBB7EGtOwbRLSPTO4P3bAtO7FY5shoR1SYkRKXQwDyGUq9UNA9w2nhI+it3pQWMhDcQV8321PMONIeWypaCkOI46kEj5w5BHI9e4qij9kxq6bwMO02gXVTGBbYMafyjIbpLdAL15mIJDklwAAuFJMhau3Ki7zV76hB3adgy8/zDMmMivzFv1AmQ7hk9gQtgw7m/FT0thay0X0tK9VtJcCV+h90qSYZLarX7r9YIaekl1veDeYl0Gm5u+3SfN1IGHkTyM1x5DMuO1KjOBxl5CXG1j0UkjkH+avpX4AAAAOAPQV+1YxNlUTF0rC2XDcWx28Xu/2KxRINxySQ3Ku0llvpXMebbDaFuH4kISlPP0Cs1Soyv9fVlKUpSiJSlKIla3jmm+BYhkOQZbjGJWy2XnKn0Sb1OjMBD09xAISp1Q7q4BP6ST6k1slKCxkIbiErXs20+wnUe0osed4vbr5CaeTJZamMhfkvJ+a62r5zaxyeFJII5PfvWw0qCAc1IJGSweHYNh2nlkRjmDYxbbFbG1qdEWBHSyguKPKlqCR7y1HuVHkk+pNZC8PXCNaJ0i0xEypzUZ1cZhSukOuhJKEE/DlXA5+uvZSoqA1GkTnqjCGEGFSnYnatQ8Yw+12iTi94hZdlGSXXK9ULherNIipjuLU6hqG0t1KQ68pQZUPLK0pQlxRI60dV1qUrRzgYAER8hAA4AAAfnJVQIJJOc+pJJPEkn/QCUpSqqVGmlWkD2m+a6m5a7fUTk6g5C3e22EsFsw0piMseWVdR6zy0TzwOxHapLpSmgboAAOQAA9AhuS7eSepMn1KUpSiKrumVp/Y57pcwwCX+Aw7WmSvK8aeI4aZviEcXCDz6BbiAl9A7cpQoDkpNWWvFntGQ2qXY7/a4dyts9lUeXDmMJeYkNKHCkONrBStJBIIIIIrCai6c4xqjjLmL5VGeUx5zcqLJjOlmVBlNK6mpMd0d23UKAKVD8x5BIOet0V6Db4sKTcH5zsdlDTkp8IDr6kpALiwhKUhSiOT0pA5PYAdqgAeGGO923Madh5eQBuZUk+cvGt+uvc35zpCjsbYNtKRwnbxpkB9AxK3/AOyrZML0r0w03XMc0704xfFl3AIExVls8eCZARz0eYWUJ6+nqVxzzx1Hj1raaVaSFUgHNKUpUKUpSlESlKUReO8We15DaJthvkBidbrlHciS4r6Att9lxJStCkn1SUkgj6DXjxDD8XwDGoGHYVYYdls
lra8mHBhtBtplHJPCUj6SSSfUkknuazFKC0xr8v8AZ7pmlV93lZHe7hp9H0GwJ4nNNWnVY9BCASYdvUB90JzgHo00wVAn984gDuasFWn2DS/HbHnd81KdemXPI74hEUzZziVmFCQeUQ4yUpAaZCuVkAdS1HqWpRA4qWNqENf7OvEbuuR4TrCs1xZ5me1pwO/pnxMDKVkNPsIsmmuDWDT7G2fKteO26PbYqT6ltpAQCfrPHJP0k1iM40R0i1KubF5z3TqxX2cwz7MmRMhpWtbHV1eSs/8AGNdXfy18p578Vu9K0e41HY3ZzPVZsaKbQxuWXRfGJEiW+KzBgRWo0aO2lplllAQhtCRwlKUjsAAAABUAbxMotGjGnd+3DPy5D+QWCwSbDjEFZSplFxnuIbS62jjqLpIbBPPZtCwAOpXNhajvXPQ/FNfMPjYjlcudDTbrrEvcCXCUjzY02MvqaX0uJUhae5BSpJBBPoeCMarPFEHWx3wbOjjhJjitaThTMxlkNJFwDwkCeC1HZ1gFu0X0NxXReRdGnsnsloYut+i+Z1OsSJzjryiofAFwPJT9IbNTlWp4Dp5AwRu5yjdp96vN9lCZdrvcC37RMdCAhAIbQhtDaEJCUNoSEpA+KipR2yuiq/xHl+/tyHAZBYUmYGhp/wAnieJzSlKVmtEpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURYiVldhhZTbsLkTum8XWHJnxYwbWrrjx1NJdWVAdKQFPtD3iOertzwayUqVHhRnpkt1LTDDanXFqPAShI5JJ+gAVE+Gq+VO43P8lPvxsRtVtxOKr4JkOhU6Xx+dL0IH/ErW992e3TBdsuVR8aQ47kWWpZxOyMNftj0yesMBKP4wQpxQ/xazqOcKILBLzkN5JIZHMYe60pta6rhcYaMzwgFx6X7LEbJd1OYbrrZneWXXCYVlxqzZAq2Y7MjuOFU5gAqPmBZI60pLRKk8JJc44HSebMVGO2rRa17fNEMU0otiWyuzQU+3PIH+6Jq/fkO/Xy4pXH1cD4VJ1dFVrWOwMMgQJ3wIJ6m656bnPbjcIkkxuBMgdBZKUrTs31UxXAciw7FL2Zblzzm6LtNpYjM+YVOIZW844vuOltKEHqV345T271nqBvt3WmhO6T0FytxpSlESlKURKUpREpSlESsRe8rsOOT7Ja7vO8mVkU4262thtay/IDLjxT7oPSA2y4oqPAHT68kVl6iK7q+VW5+wWoe/FwPFpV5eHwTMuDwjRz+cMxpv6F0F3Bu+fQEn0EDjCGzS7d8yAPUieCl2lKURKUqHtV9UMpxDXLRjAbM5GTas4nXli7JcZ6nFIjQC810K/E98An6R2oLkDehsCdwJ7CVMNKUoiV4r1ebTjlom3+/XGPAttujuSpcqQsIaYZQkqWtSj2AABJNe2qsbq7q5qlrLpZtIiuq+5mTSHMqzJCCR5llgHrRGVx+I++lKT9SOPjUGXOaxubjHLUnkACTwCkQAXuyAn8hzJgDiVNukOoV31TxxzN3sWdsliuTvmY+JSz7XNgcDolPNcDyQ53Uhvkq6Ckq4Kikbw442y2p55xKG0JKlKUeAkD1JPwFG222W0tNIShCAEpSkcBIHoAPgKqJvl0SjL2/wCrOoF01O1EuKmbLMnxLM5kK2LXFc491Ijx0t+YhPPZLxcH081TaKgpMc9osAT2+Z6CdwVqFM1XNYTckDv8h1Mb1a6xX+xZRaY9+xm9QLvbJaSqPNgyUSGHgCQShxBKVAEEdj6g176rj4dX7ivSv/JT39aeqx1dNen4NV1PcSOywo1PFptfvEpSlKyWiUpSiJSlKIlKUoij/V3U6ZpHboOZXHHlTsQ
YeLeRzo7hMi0MK4CJnldJ81hB583ghSEnrAUEqFb1DmRLhEYnwJLUmNJbS8y80sLQ4hQ5SpKh2IIIIIr8nQYdzhSLbcYzcmLLaWw+y4kKQ42oEKSoH1BBIIqsuzPJJ+JX/UvanfZTjzuk13QLA48olbmPTEl6Egk91eUCW+foCB8KM8zizWJHIQCOkgjhinII/wArQ/SYPWYPyPHDGZVoaUr8JAHJPAFEWJteWWG9X29Y3bZ3nXDHlsN3FoNrAYW82HW09RHSolBCuATwFDnjkVqO4fV+FoLopl2rc2O3J+TtuXIjx3FlKZElRCGWiR3AU4pCTx34JrD7aib1ieQakud153k9yvLSz6qhoc9lhn8xjRWFD/GqGN/1ouGtV80m2mWZ5xCc8yA3jIFtHuxZbekLeUr6OpS09PPqtAFUqNe8NpsMOdhHIkDF/LcngJV2FrHOe8S1snmBMfzWA5hTZtX1UzXWzQXE9U9QMWiY/eMiirlqhRFLLXklxQacSFkqSFthKwCT2UO9SxXktVrt9jtcOy2mI3FgwGG4sZhscIaaQkJQhI+AAAA/NXrreq5rnksECbclhSDmsAeb6pSlKzWiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiLWNTdRsV0jwC+6lZtO9ksuPQ1zJbgHKilPohA/GWpRSlI+KlAVXbRex53usxpvXPcDcbjZ8MvSFS8ZwC3znYkRu3dy3KuTjRSuW44kBYbWryUpIPQSe3m8U7F8nyjZ3kjeNMPvptk+BcrkyyCVLgtO8ungeoSSlZ+gIJ+FSPqTldrjbTfbNN5jK2Mhx+BYsbdjqBSV3ANRIpRx+9L6T9XSfoqjcTmVHgS4ENaDvImeZMNbyMXyu6A6mwmA6STyIEdAZO+RpnFumG2XT/ACXb7I1Bwy0IwHMMjcueT2O/48PufKgNvPOOwEKLXSHWUsBgKacCkEFXbk81te0bUK07w9AcE1R1Ox6FOyXFbu6pSwkpabu8UKaEttCSE8qbc6gCCEqWeAOBx6N0mqUbQ/ReLo7pjCcu2oWTWtOMYZj8EdclRLXk+0lI+YyyjlZWrhIKQOfXja9oGgg216AYzpZIktybpEbXMuz7Z5Q5OfUVu9J+KUkhAPxCAfjWzC3FVLTLBhDfxNJuOTYmNcOoti+S2nIh7sRd+FwyPAumOGKLFTNSlKorrS9V9ZdMND8XdzDVXNLbjtsR1Btcp3hyQsDny2Wxyt1fH4qAT9VVT0e3BaM6x6vL3Napay6e4vbrTBes+AYzc8st7c6HGdI9puMtsvfgpD4SlAbPdDY4Pc81bfUfS3TvV7GX8P1Mw615HaJAPMecwF9CiOOttXzm1j4LQQofAiqr6SaQ6eaEa0N7Zs+0+xXJcbyOFIu2nt+uljiPTghggyrVJdLfLy2kqDjbh94t8gk8ACKU+KZzg4exxdcMxpE+9hU1I8K2UjF3t0mJ1y93Ep//AGVO2D/CP0u+2Fu/21P2VO2D/CP0u+2Fu/21Zf8AuCaF/kXwT7OQ/wDZ0/uCaF/kXwT7OQ/9nUqFiU7qNsKlBKdx2lxJPAAy+38k/wBNUnNOtPtIfYcQ424kLQtBBSpJHIII9Qa0dOg2hqSFJ0YwUEHkEY7D7f8A8Ot5QhDaEttoSlCQEpSkcAAegAqbRxUXngvLeJ7trtM25sW6TPciR3H0RIwBekKSkkNoBIBUrjgckDkiqh5ftEyTWO2ztW90OvuoVikpjOXEY5iN6Rb7Vj0RKCvyO7a/PdQke+8eOpQPA4Aq5FVe8SDUeTgW1jILPaHim9Z1Ij4lbUpPvLXLX0ugf9Cl0fpFYVg7CSwS4wG8yYHckX00iTO1KC4BxhubuQu
ewk8dZgKD9u/h8YPqvovi2pWVaza1wpuTRVXNEeNliUIbjOuLVGHCmFHq8ktFR54KiSAB2q42k+l+HbZ9L5OPRcwyGfZLUZN1l3PJroZkhtHT1OKU4QkJQlKOelKQB3PqSTt2B4xGwnB8ew2GkJYsVri21sAfistJbH/ZqNd5mM5TmO1jU7G8MaeevE3HpKY7LPJceCQFLbSB3JWhK0gfHq4rbbavgNquo5CY5C4WWyM/WHUxVsTE8Jz+KinR7JM63yyrlqVd71e8S0SjTXoGO2O1S3IM7JvKUUuS5slopeQx1ApSw0pIUQQsqCfe9mku2LRHOVak5PFwWHZEzcolWqw3WyFUC4QGLe2iIp5iU0Q6lRlNSVk9RCjx1BQ7VmdrOd4hhuwbD85sjzLluxnCDKlJbI7S4zKlSWyPgvzkuAj15Ne2ZqPZdpG1nG3co652UG1NMwLMwC5Mvd+kJ8xbLLaeVLUuQ6oqIB4BJNW2hjdmfUY2SGgN4uJNiIuXHCYjLEALQq0XOrtY82LiTwAAu3gBiEk5wSdVjti2t+YaoYxnGn+ot2+7OTaV5RKxeVd+gJVc2G1KDEhwJ7eYQhQVx69IPqTVmXXWmGlvvuobbbSVrWtQCUpA5JJPoBVadg233KtDNI59z1J4+XeoF2eybIW+QTGee7oYJHYqSCSr6FLUO4ANWXWhDiFNuISpCgUqSocgg+oIqaoc2ASMQa2d2LCMX9U5dEplpJLfZkx+GTHpCjBW6fbChRSrcdpeCDwQcvt/IP8ATVAOuG4bQG67k9vV8teuOn8y22a5ZAu5TI+TQnGISXLYpDZecS4UthSvdSVEcnsO9WVVoNoapRUrRjBSSeSTjsPkn+jqvmumj2klv3M7drXA0txCNCuVyyJM2MzZIyGpKUWxSkBxARwsJV3HUDwe4qjfbbzVnew/8LvgVNf7KnbB/hH6XfbC3f7at5xPNMOz2zoyHBcss2R2pxamkTrTPamR1LT85IcaUpJI+I57Vrv9wTQv8i+CfZyH/s62mwY3juKW1FmxawW6zQG1KWmLb4rcdlKj6kIQAkE/HtUiNVBnRZKqc6bPKyXxPtW58s9fyTwG02iJz38tL6mn1cfRyoq/nq41U507ZVi/igaqQJY6BmGn9ru8Qn/jBHW1HVx9PBSr+aopf8lk5Q/vgd8pU1P2D/4P+4xXGqCt9P7j/Vr+TEv/AKhU61BO+pSU7P8AVoqIA+TMod/zCubbf+NU/CfgunY/+RT/ABD4rD+HV+4r0r/yU9/WnqsdVcPDpUlWyvSwpIP/AILeHb6fanqsfXpbb/yan4j8V52yfsGch8F4L5frHjFolZBkt5g2m1wWy7KmzpCGI7CB6qW4shKR9ZIFR5+yp2wf4R+l32wt3+2qSLpa7Ze7e/abzbos+DKQW340plLrTqD6pUhQIUPqIrTv7gmhf5F8E+zkP/Z1y3ngum0cViP2VO2D/CP0u+2Fu/21eq17ldud8uUWzWXX7Te4T5ryI8aLFyqA68+6o8JQhCXSpSiSAAASSa9v9wTQv8i+CfZyH/s6+8DRTRq1TmLna9JMMhzIriXmJEewxW3WlpPKVJUlsFJB7gg8irCJvkqmYtmt0qO9a3tVpWPRMU0eS3AvmQyfYnMhkNJdj2CL0lTstTRI81wAdDTfoXFpKuEpVUiUqrmhwg/X+1YEtMj6+uy5y6/7IMXwoYU0vcPrZfMyz/MLbYPaJuVJ6HEurLkx8Npa5HRHbdKR1EJJRzyOxlWF4YOkkKYxMGt+ubpYcS4ELzFISrg88EpYCgPzEH6CK++b5B/dN8SDT/TqKvzYGk+J3HJ5yR3SmdNSI7YV9aW1tqH+Oat9VqRw021BqXEcADhju0nqq1L1CzcADzPm/tc0cwv5bQGm0tJKiEJCQVEknj6SfWqcpfVjHirLjRvcazHSlLklI7BbjEshKj9JCWePzVciqcMsKyrxVJEyMOpnC9KkMSVD0Q7IllSEn6yl0n9
FRTvtFP8Aj/7T0cAKDwP3P+4xTDuq3I2fbPpsnKXbUu95FeZbdoxqyNK4cuVwd7IR27hA9VEfDgDuoVFGdaP36DoNkuqG5K9OZ7qDKtS/ZLO86oWC0z5PDUWJFgA+SvpddaQXnUuOLI56gDxWv76LM5F3ObXc+yY9OFWrKnoc19z9oiz3SyqKpwnskKU32J7e4an7XI/KPMNLdNEHqRd8nTfJyP8A9TtbSpXJ+r2oQx/nVWmxtWlif7zy0/utGGSOJBLpziANZ0c80qgDfdbi/EfNA5eWIymSZtEF7jNK4G0bb/H1q0BcGJ5DpzHtvt8aEtTVuyKKFtMPtTYoPlOLX1FYe6fNCu4V3qy+BwMH1AGNbhIeOtN32/4rERGmuKUp5q3yQiUGPXpA6lAkgcnj147VWnfPeLxuJm23ZNo877deL7Oizs3uLHvx8dtDTgcHtCx2S64tKSlsnqIRxx7wNW+xXHLbh+MWfEbM2UW+yQI9uiJJ5KWWW0toB/zUitGPdUa+q/V3l7Q6NwJMbpxRmVk5jaeCkMw3zdwWzxtO+C2dFlKUpVVZKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlEX8PsMyWXI0llDrTqShxtaQpK0kcEEHsQR8Kqnn227T2Hqhp1prp9MybFbTcLlPyq5WmzX2SzAYbhNjpdjxiotxVmXJjEFlKB2UQOe9WvrDHEbEcxGeKirVeU202hDxdUUojF3zVJSjnpBKwkk8cnpSPgKAAPDv92kj+qP8AakmWFu/52PoStfwHRPTTTS4zL7i+O83y5JCZ17uMp64XOWkfiuS5K1vKTz3COrpHwAreaUqSSVWISlKVClK1PM9MMRzy+4nkuQxH13HCrobvaH2X1NqafU0tpQVx85CkLIKT2PA+itspTUHdfqLhNCN9uhsUpSlESlKURKj7VbQjTfWqdiU/UK0v3BeFXhF8tTaJTjTYlI+aXEJIDiQQDwfo+gkGQaUyIO4g9RcHoU0I3yOhsfRKUpRFVjcLt406QbXZcIbvGKz9T8tgwLvFsV1fiQp7QUZM152GlXs5cMeM9y4GwokgqJqY8P0C0xw3I/lrHs8u8ZQGyym/X+4SLpcG2z6oaekrWWEH4oa6En6K2u64jYr1kVkym4xVu3DHTJVbl+aoJaU+35biukHhSujlIJB4ClceprM0Z5GwN/YQBA4WnqUf5zJ3X4mSZ53HZKUpREqI9UtKskzPW3R3UO0vQk2zA5t4kXRLzikuqTKglhvykhJCj1kc8kcDv3qXKUFiDuQ3BG8Ed7JSlKIlVa3YWSRprqrpju7t7KlQcKku4/mHQOSLDOPQZBHxTHdUHD9SifQGrS15bnbLderbKs93gsTYM5lceTGfQFtvNLSUqQpJ7FJBIIPwNQZa4Pbm0z8iDwIJB4FSIILHZEQfzHEGCOIC+zD7MlluTHdQ606kLbWhQUlaSOQQR6gj41EGoW0XQXVadcp+oWM3u9fdZ0PS4z2WXdMRaxxxxGRKDKB2HZKAOe/FbXpLpxL0px93C4+TyLtj0J7iwMS2yZFshce7EU91EvobPIbUUhSUdKSVdPUd4qXNaTI/z9b9J3qGlwEH64/lqoWwDZ1t90tmW6Zp9it6sYtUlMuLGjZbePZUuhXVyqOZRZcBPqlaCD8QammlKkuJsVAAGSUpSoUpSlKIlKUoij7FtCNN8O1Yy3WuyWl9OW5qzHj3WY7KccSWmUpSlDaFHhsHoQSB6lIqQaUoLNDRkLDgENyXHM5ry3W6W6x2uXervNZhwYDDkmTIeUEoZaQkqWtRPoAAST9VVs2YYvcskn6h7p8jhORpusF3RKs7Lyel1jH4qS1ACgfmlxH
LhH0KQal/VrSsavQ7bi19v7sfERI9ovtpYZ4XekIKS1Gce6vcj9QJcQE8uAJT1JT1BW9MMMRWG40ZlDTLSAhttCQlKEgcAADsAB8KM8pLznEDkSCT1gAcJnOx/mAZpMnpMD1k8cMZLFZfh2K5/jk3EM2x+De7Lcm/KlQZrIdadTzyOUn4ggEEdwQCCCKrZjm3jFL3r3kdng5hqCzjeC49CtEaK3mFw6mX5y1PyIzcjzfaENBhmF7iXB84fACrWVhrBiNixmdfLlaIqm5OR3D7qXFxbqll2R5LTIPvE9IDbLaQkcAdP11DQA/FwPfK/QnkpJJbh5dLzbqBzC8WA6aYDpbZlWDT7FLfY4TjhfeTFa4XIdPznXnDyt1w/Fa1KUfia2alKsSTcqoAGSUpSoUpSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSo5e3I7eo+SfI9/XTAG755nk/c9WRwxIDnPHllHmchfP4vr9VbnkGT41iVsVesqyG2Wa3pUlBl3CW3HZClfNBWshPJ+A570m2LRNcOqydK1XH9V9LcsnKtmLalYreZiWlPKj2+8x5DobT85ZShZPSORyeOBWMx7X3QzLcmVheLayYTeL+lRSbZBv0V+UVD5wDaFlRI4PIA7fGkSY1TSVvtYXMMyxjAMelZXmN4ZtdphdJkS3goob6lBI56QT6kD0rNVoS9fNDG8vTp+5rHhKcmU77OLOb9F9s83njy/J6+vr5/F45+qouThbmpsLnJati+8ra3muR2/EcU1wxi53m6yExYUKPJJdfeUeEoSOO5NTNXBjAWGY3ipMR47SGmm9W5aUIQOAke3OdgK7z1akRV2WntGWLTo0/NRUmntD6H3fzI+S8N7vdoxqzzchyC5RrdbLbHclTJclwNtMMoSVLWtR7BIAJJrQNGdy2hm4RNzOjuokDJF2ZaUTmmmnmHWeokJUW3kIUUEggLAKTx2Nf3uJs2nGYaR5Fp1qhnFvxW0ZdBetPt0uezFKVrSSC2XVBKlJ46un4gHntVZvD32g6VbbsmyvJsa3C2DUi93KCmGpFpcYQ1ChBwL6nEIfdJUpSU+8SAOCBzzzUUvNUcH2aBIPG9vhuzmbQlTy02lntE35Wv8AHtGqtdqtrTpVofj6cn1Yzq141bnFltlyY4et9YHJS02kFbigO5CEkgVq+nW7Lb5qpkcbD8N1Eacvk6MJkK3XG3TLZImMEEhyOiY00X08AnlsKHAJ9Aa5GeKZqnD1K3bx4FqyiDesbx622+HCXCmIkRQXPwr5CkEp6ipfCjzz7gB9KvLl+zi1biN1uH7ncG3DWiZYsRNq9otdrcRLciLh8LTHadacKG0OEEqChyOtZ4Vz2bN9q1tR9mlxB4Ae9xkxbom0fZOLGe0GgjiTHl4WJ7K9dKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlc//ABftw+U6VaTY/phhd1fts7Pn5KbhLjrKHU29hKPMaSod0+Yp1AJHqlKh6KNdAK5l+NjpffbzheAasWyG6/b8dlS7Xc1IST5CZPlqZWrj0SVNKTyfipI+Ncu1XDQfZJE/LuYB4FdGze0SMwDHOPkJI4wtDsGoGz+J4YsrSOTmuIfLyVYJF0XBWkGcbz5qnW/e6efMHCEA8/N
HT6dqljw2dTZGvWzjOtMNUGmskbwlL0BgXJAkdUB2MpbCFdfPPlqQ6Ek9wkJA+aK2Twutx+k1+2y2bTjIsssVqyXCVSIkmJcZTTDjsVTy3Wn0eYR1o6XOkkc8FHfjkc2ds2sGBar4dn0nTx1yfZrCxJgLvjLaBbpz4YWXUxXgfwwaPCVrA6Oo8JUrhXHV+kvI3anC+Nk8BGTo5HDwyXP+j/M7Z2m2F/Uk2I9MXHNcXPDS0xs+r+5M4FktzucaxT7BON0j2+SY67hGSpsqirdT76Wlnp6+gpUUgp5AUay3iTaQYRtl3P2mJonbXMXiOWWDfI7MSQ4fZJYedR1tKUoqT3ZSr17Hnisz4Pykp3hNBSgCrGrkByfU/gj/APYrN+M+tKt0liSlQJThsMEA+n99Sqna3GkdlcyxM/8AyfkFGygPG0B1wI/8Pz7WVxvEA3PZhpjsrxa747c3IGValxIEIzY6uhyO07ED0pxsj5qiPcBHceZyOCBUSeFjtC0T1M0Dvepuq+A2zKblf7xJt8ZdxbLhiRmUoHLJ55bcU4pZLieFdk8EcV7/ABKtOL1luxTSHOrLGcksYfEtb08Ngq8uLIgtt+aePgHA0Cf49b74PuomMyNqN0sEq8w40nEb7NeuKXnko9njOpS6h5ZJ91B/CDqPb3FfRWoawbRts6GBwEty4XPcrLE79X2SNRfiYd62HoqAaP2BnFfEzs2Mx502a1adUXoTcia8XpDqW5i0hTjh7rWQByo9ye9d+K4C6X5Xjt+8TK15jartHest01UdlxJvV0tusuzlltYJ47KCk8fnFd+qy2fEf0dRLs7/ANrFrXgbdVDcrfFygvfDjthyLahqgi/WaFcBBxqfNi+0sJcLEhtlSkOoJHurSQCFDg1zw8EdCHdVNSm3EBSF47GSpJHIIMnuDXR/eUpKNqOrSlqAHyRuY5J/9QqucPgiKSNWdSEFQ6jjsYgc9yBJH/xFV2IA7ZUH7nyerbZ/xKZ/f+bFBPiM4PhuCb0L3imFYrabDZW2rSpFvt0NuPHSXI7alkNoAT7xJJ7dyTXcnTXS3TXS6zrg6bYDj+LR5/lvymrPbmoiH3AkALWG0gKVx25NcT/FGdbG+3IVFaeG2LN1Hn5v96tHv+iu69uUldvjLSQQplBBHxHSKnZCf1Bp/ed8lXav+YPwj4BeilKUUpSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiVjsix2w5dY52M5RZ4d1tNyZVHmQpjKXWX2lDgpWhQIIrI0qCA4QclIJBkKoC/Ci2WLyQZAMAuqWfN802tN8k+xk889PHV5nT9QXx8PSrD5Donpnk2midHpuOLhYchhEVNqtE+Ta2wwn0a6ojja+g/jI6ulX4wNbxSpN2eGctygWdjGe9Vvwzw7doGneT2/M8G0tm2S92t4PxJsPK7y240ofX7X3BHYpPIUCQQQSK9GoPh+bStVcuuGd6iaYzb7fbo6XZUyVlN4KlEnnpSkSglCBzwlCAEpHASABxViKUN4nTLhOaC0xqtLw3R3TvA9Pf7lNgsTzuJ+QuL9y7rcJN0b9nWnpUxzLcdV5XT2DfPQBzwBzUBQPC62b27LlZXG09n+Wt0OqtC7xJVblEHqCVNFXKkcgHoUoo+HHHarY0qZOPxPe36qIGDBpuVcvveOz17JrpmF00Zt9zul3muzn1zJchbKVrUVdLbAcDKEDngJSgcDgVYCzWa1Y7aIVhscBmFbrcwiLFjMp6W2WkJCUISPgAAAK9tKgGG4RkhAJxHNaNq9orpz
rvjAwvVG0TrrZPNDy4Ue8TYDbygOAHfZXWy6kc89KyU88HjkA1HWmGxDazovljGcaXadTsevcZCm0yYuUXc9SFDhSFoVKKHEn16VpI5APHIBqfqVDQGkltiVLvMIdcKsuQeG1szyy8y8jynSifeLrOc82VOnZfe3331+nUtxcwqUeAPU/Cpu020uxDSWwDF8JReWrYhQLTFyv0+6eQAkJCGlTHnVNIAA4QghPx45JrbKVZpLW4W2CO8xxOuUpSlQiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpXz9oY872bzm/O6ery+odXH08evFEX0pSlESlfND7Lji2m3kKW3x1pCgSnn05Hwr6UReK7XuzWCKJ19u8K3RioID0uQhlHUfQdSiBz2Pb6q8FtzrCLzMbt1ozGxzpbvPlsRrgy64vgcnhKVEngAn9FMsxXDM7tb+MZnj1mv8ABWPMcgXKK1Kb9CAotuAjnueDx8TXDPw64keB4hGLQYjQbYjXC9tNIHolCYkoAfoAFKP2tfwTqCZ5JW+zoOrDSLc5/Jd6KUqHN4epF70j2yai6gY3IMe7WuyuCC+n1ZfdUlpDg+tJcCh9YrOtUFGm6odBK0pU/Fe1g1MLbMl1y0Twy8/J3MNYcIsV15CfYblkESLI5PoPLccCu/5q3GJLiz4rU2DJakx30Bxp5pYWhxBHIUlQ7EEehFcXPDr0lvevGj+5G2WJVqezq92qDb7fdry2HygyDIU+C4pKlILoTwVjkg9KvhXQ3w9dAtW9uOgytPtX73EmTzdpEyDDiylSG7dFWlADIWQB3WlxfCfdHX68k10eGWy1+Ya08DOg5C/yynnNQOMsyxEcRABnqbfNWcpXzbfYeK0svIcLaulYSoHpP0H6DX0rNaJSlfwl1taloQ4lSmzwoA8lJ454P0URf3SlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiol4rG7LL9AdO7Fp/ppd3bTk2bqfLtyjq6X4UBnpCy0r1Q4tS0pCx3SEr44PBEH5Ho9oPZfDjh6oWm7WRvWRizw8r+UjN1SL8ZzryHFpLwX5x4bWUdHPA4546hzXn8b3DLwMg0z1CQw4u1KhTLM46B7rUgLS6kH6CpKl8fT5Z+ip48PfS7afrftcxK9T9FNNr1kdoYVar+7Lx2E/LEtpagFvKU2VErb6FhR9er89ZbOw1tnrOBh+IX1ABMRw9k8yN60rPFKvSBEtjLQkxM8faHIFYvbzqoN5nh95crWBK7tkOFRLlFenB1bLzkmPDLsWZ1NkEOdKwCfRSkqJB5IqlPhn4/n+s2tt/wAAa1ZyPHLbPxeQbvLgSVGcuGJMfrZjOrJDC1kpBd6VEI6wByeR1al27QWx6L6x49oTjuL2hi0Wq6Q743j1qbiRhOTAWShS2kJbdcSlSQrpKignpVwe1cz/AAXf3UWQ/wAjJf8AW4ldNEtrbdUMWNMHgThffdBidxFslhWDqOxsE3DyOQxNt0noeKj3dJiFx2F7xg3ofmN9jiEzBvUR+XL8x9aXe7jD6khIeQpSFghQ7pVwefWuiHiV7uMn0M0Cx6Lp7PXasr1
GHlx5jSuHYENLSFvuNn4OcuNoSr1HUSOCAaor4v37sZf8m7Z/1u1MfjFYleH9PdD85ZZcXa4kB+1yFge6086zHcbB+jqS05x/iVx4nP2FocbeIW9JdbrAHXiuotDNtlufhzzMNv0klbL4fOxzENR9Dou4vL8ty6JqLkkuXKs9/t94eZkWxLTqmkucA8PqUttalB3rSpJCeB3Jqv4dDUpjxBsVZnSRIktz70l54ICQ4sQ5QUrpHYcnk8fXXTvwysltNz2P4M61MZSmzN3GHNKlgBlaJbyz1n4e4pKu/wACDXMfw8JEeR4huMSWHkONPXK+LbWlQKVpMSUQQfiCK9EDB+k/Db7Ia6OUWPUXO/ovPJL/ANGvqO9om/8AVI4xkN3Vd5qp74o2lmF5dtcyjP77AlPXvDoQXaHm577LbRekx0udbSFht3kAceYlXHw4q4VVq8SD9xRqh/k+N/XGK8zbLUXO1C9HZP27RvI+KoD4RegOmGsszPL3n9onzJmLS7RJti413lw0trJfUSpLDiEuDltHZYUOx+k87Lv33X59q5uetm0XT3KZ9hxJi+wMfvLlufUy9c5b7raHUrWkhXlN9fQG+eCpKirn3eM/4HJHsuro57+ZZu36JVVy1sw256T+KEyvJmlsxp2pFtyGM+4OEuxJM1t5K0n4gdSkn6Cgj4V6T2h+37PQcPIYMaE+X4iT0nRcDXFmy7RWZ7YkA6gQfnA68Va3xHdC8K2u6S4drTtptQ05yTHL5Htbk6wLVFclxnWnCPaCk/h1BbSeS51FQUoHkHipv2/atWDfbsvuFz1KtLT91hMyrbfGo7imALhHa625LRbILZUlbbgAPAUVD0Fat4x8+PF2kxojq0hybldvbaBPclLT6zx+hJrW/Ci0+vuJ7OMyyq8x3Y7OXTp823ocHHXGaihkOj6lLQ4B9ISDXnlxfsu2Yz7NxwMNy3Zk876LtDRT2jZcOtjxEuz7Aemqot4fCNYNStwC9OsO1OumPLyCxzolwvPmrfkQII6FurjJUrgPq6EtpWfm+YVDuBV0Nnnh/bi9uG7idnVxzWLKwJluYh2emeS/fW3UKDSHo/chaVqStRWeApB6Srmqs+EEQN40cEgc45cwPr/a67mV6LvsvCqtzLSO5cPTTiSuIfaGpSdliHoGn65CISlKVzLdKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpREpSlESlKURKUpRFpurmkOn2ueCXDTjUywNXayXEArbUShxpxPzHWlj3kOJPoofWO4JBphjfg76WYrkztxsmuepMGyyD0v2+HKZjPPtc/tTkhtI6k/A+56fQe9dAaVDWhrsYzUlxc3CclE9923YTL0TGgOE3i+af4r7OqG58mHI7Ul2OtKg62p2Qy9z5nUStYAcUfxu55hjQzwydH9uuoULUvTDVXVKFdYg8p1t64W1yPMjlSSuO8j2EFTaukc8EKHAKVJIBFv6VYOLahqj2jmfr4cTvVS0FgpnIafWvFU21j8LfRfXnUG56m6k6raq3C9XNQ6lIuNtbaYaT2bZaQIPutoHYDufiSSSTOd024YPlmiH9wPUy53rOrAI6YwmXxxj29KUftKg7HaaSFt8AJWEdXb3irk8ytSqBoFM0vd3fWvHNWLiXir7w1+tOCpJpp4WuC6cSJ9jj68anTMDub/nzsQRcxEhXHjt0SyyE+akpACgEoKgACeO1fbBPCd26YLlaM5hZTn4vbE1c2K9AvKbciGpSiQlj2ZtDjaQD0j8ITx8aurSrhxBDtR9fJVIBBboV8ozCYsZqMhxxaWUJ
bCnVla1ADjlSj3UfpJ7moq3G7dLNuYw7+59lmoWZWDHnjzPg48/DZTcOFpWgPLfjOr4SpAIShSQSfeCuBxLVKq9oqe1dWYSz2bKqWgXh3afbZshkZDpFrTqra1TyyLjDemWqRFnttqKktutrgHt3UOpBSsBSulSeeakPcntF0c3S2qFH1EtsyLd7SSbXfbU8GLhC788JWUqSpHI56VpUOe44PepqpVnEvADtMuHIqG+Qkt1z481VfLtiMbWKVjjO43XLMNR7Hii/Nt9ldjQ7bHec4CSuUqM2lx9RSOOrqSfXv3VzOWbaV23K9NXdLLBkV5we0uRE29DuMezR32IgT0eQ0XmXUNoKfd5SgKA+aRW7UqHAPYWOyNzxPHf15I3yOD25iw4ct3RUq0x8KfRjRnMoeoGmWsmr1hv8AAC0sTGbla1kJWkpUlSF29SFpIPBSoEVdGM0tiO0y7IckLbQlKnnAkLcIHBUrpATyfU8AD6AK+lKsXEjCclGEA4tUpSlVUpSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlKUoiUpSiJSlKIlQrn2R7voOXXCLprphppdcabUj2CXdsmlxZboLaSvzGkRlpTwsrA4UeQAfjxU1Uoirr8rd/H5GNHftlO/U6fK3fx+RjR37ZTv1OrFUoirr8rd/H5GNHftlO/U6fK3fx+RjR37ZTv1OrFUoirr8rd/H5GNHftlO/U6fK3fx+RjR37ZTv1OrFUoirr8rd/H5GNHftlO/U6fK3fx+RjR37ZTv1OrFUoirr8rd/H5GNHftlO/U6fK3fx+RjR37ZTv1OrFUoirr8rd/H5GNHftlO/U6fK3fx+RjR37ZTv1OrFUoirr8rd/H5GNHftlO/U6fK3fx+RjR37ZTv1OrFUoirr8rd/H5GNHftlO/U6fK3fx+RjR37ZTv1OrFUoirr8rd/H5GNHftlO/U6fK3fx+RjR37ZTv1OrFUoirr8rd/H5GNHftlO/U6fK3fx+RjR37ZTv1OrFUoirr8rd/H5GNHftlO/U6fK3fx+RjR37ZTv1OrFUoirr8rd/H5GNHftlO/U6fK3fx+RjR37ZTv1OrFUoirr8rd/H5GNHftlO/U6fK3fx+RjR37ZTv1OrFUoirr8rd/H5GNHftlO/U6fK3fx+RjR37ZTv1OrFUoirr8rd/H5GNHftlO/U6fK3fx+RjR37ZTv1OrFUoirr8rd/H5GNHftlO/U6fK3fx+RjR37ZTv1OrFUoirr8rd/H5GNHftlO/U6fK3fx+RjR37ZTv1OrFUoirr8rd/H5GNHftlO/U6fK3fx+RjR37ZTv1OrFUoirr8rd/H5GNHftlO/U6fK3fx+RjR37ZTv1OrFUoirr8rd/H5GNHftlO/U6fK3fx+RjR37ZTv1OrFUoirr8rd/H5GNHftlO/U6fK3fx+RjR37ZTv1OrFUoirr8rd/H5GNHftlO/U6fK3fx+RjR37ZTv1OrFUoirr8rd/H5GNHftlO/U6fK3fx+RjR37ZTv1OrFUoirr8rd/H5GNHftlO/U6fK3fx+RjR37ZTv1OrFUoirr8rd/H5GNHftlO/U6fK3fx+RjR37ZTv1OrFUoirr8rd/H5GNHftlO/U6fK3fx+RjR37ZTv1OrFUoirr8rd/H5GNHftlO/U6fK3fx+RjR37ZTv1OrFUoirr8rd/H5GNHftlO/U6fK3fx+RjR37ZTv1OrFUoirr8rd/H5GNHftlO/U6fK3fx+RjR37ZTv1OrFUoirr8rd/H5GNHftlO/U6fK3fx+RjR37ZTv1OrFUoirr8rd/H5GNHftlO/U6fK3f
x+RjR37ZTv1OrFUoirr8rd/H5GNHftlO/U6fK3fx+RjR37ZTv1OrFUoirr8rd/H5GNHftlO/U6fK3fx+RjR37ZTv1OrFUoirr8rd/H5GNHftlO/U6fK3fx+RjR37ZTv1OrFUoirr8rd/H5GNHftlO/U6fK3fx+RjR37ZTv1OrFUoirr8rd/H5GNHftlO/U6fK3fx+RjR37ZTv1OrFUoirr8rd/H5GNHftlO/U6fK3fx+RjR37ZTv1OrFUoirr8rd/H5GNHftlO/U6fK3fx+RjR37ZTv1OrFUoirr8rd/H5GNHftlO/U6fK3fx+RjR37ZTv1OrFUoirr8rd/H5GNHftlO/U6fK3fx+RjR37ZTv1OrFUoirr8rd/H5GNHftlO/U6fK3fx+RjR37ZTv1OrFUoirr8rd/H5GNHftlO/U6fK3fx+RjR37ZTv1OrFUoirr8rd/H5GNHftlO/U6fK3fx+RjR37ZTv1OrFUoirr8rd/H5GNHftlO/U6fK3fx+RjR37ZTv1OrFUoirr8rd/H5GNHftlO/U6fK3fx+RjR37ZTv1OrFUoirr8rd/H5GNHftlO/U6fK3fx+RjR37ZTv1OrFUoirr8rd/H5GNHftlO/U6fK3fx+RjR37ZTv1OrFUoirr8rd/H5GNHftlO/U6fK3fx+RjR37ZTv1OrFUoirr8rd/H5GNHftlO/U6fK3fx+RjR37ZTv1OrFUoirr8rd/H5GNHftlO/U6fK3fx+RjR37ZTv1OrFUoirr8rd/H5GNHftlO/U6fK3fx+RjR37ZTv1OrFUoirr8rd/H5GNHftlO/U6fK3fx+RjR37ZTv1OrFUoirr8rd/H5GNHftlO/U6fK3fx+RjR37ZTv1OrFUoirr8rd/H5GNHftlO/U6fK3fx+RjR37ZTv1OrFUoirr8rd/H5GNHftlO/U6fK3fx+RjR37ZTv1OrFUoirr8rd/H5GNHftlO/U6fK3fx+RjR37ZTv1OrFUoirr8rd/H5GNHftlO/U6fK3fx+RjR37ZTv1OrFUoirr8rd/H5GNHftlO/U6fK3fx+RjR37ZTv1OrFUoirr8rd/H5GNHftlO/U6fK3fx+RjR37ZTv1OrFUoi17T+Xnc7ELfK1Ms1ntWSrDnt0O0TFyojRDigjy3XEIUrlsIJ5SOFEjuBydhpSiL/2Q==", + "text/plain": [ + "" + ] + }, + "metadata": { + "image/jpeg": { + "width": 400 + } + }, + "output_type": "display_data" + } + ], "source": [ "display(Image(\"../../docs/source/img/TD3_algorithm.jpeg\", width=400))" ] @@ -668,7 +706,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 32, "id": "c715f90e", "metadata": {}, "outputs": [], @@ -737,7 +775,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 33, "id": "753dbab1", "metadata": {}, "outputs": [], @@ -964,7 +1002,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 34, "id": "6bb09b5b", "metadata": { "id": "moZ_UD7FfkOh" @@ -979,22 +1017,33 @@ " \"algorithm\": \"matd3\",\n", " \"learning_rate\": 0.001,\n", " \"training_episodes\": 100,\n", - " 
\"episodes_collecting_initial_experience\": 5,\n", " \"train_freq\": \"24h\",\n", - " \"gradient_steps\": 24,\n", " \"batch_size\": 256,\n", " \"gamma\": 0.99,\n", " \"device\": \"cpu\",\n", - " \"noise_sigma\": 0.1,\n", - " \"noise_scale\": 1,\n", - " \"noise_dt\": 1,\n", " \"validation_episodes_interval\": 5,\n", + " \"off_policy\": {\n", + " \"episodes_collecting_initial_experience\": 5,\n", + " \"gradient_steps\": 24,\n", + " \"noise_sigma\": 0.1,\n", + " \"noise_scale\": 1,\n", + " \"noise_dt\": 1,\n", + " \"replay_buffer_size\": 50000,\n", + " \"action_noise_schedule\": None,\n", + " },\n", + " \"on_policy\": {\n", + " \"clip_ratio\": 0.1,\n", + " \"entropy_coef\": 0.01,\n", + " \"vf_coef\": 0.5,\n", + " \"gae_lambda\": 0.95,\n", + " \"n_epochs\": 25,\n", + " },\n", "}" ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 35, "id": "5cff2f6a", "metadata": { "id": "iPz8v4N5hpfr" @@ -1025,7 +1074,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 36, "id": "1fea4585", "metadata": { "colab": { @@ -1036,7 +1085,396 @@ "lines_to_next_cell": 0, "outputId": "e30f4279-7a4e-4efc-9cfb-61416e4fe2f1" }, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "INFO:assume.world:Connected to the database\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "WARNING:assume.scenario.loader_csv:Resolution of demand_df (<15 * Minutes>) is higher than the simulation (). Resampling using mean(). 
Make sure this is what you want.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Training Episode 1 2019-03-31 00:00:00: : 2592001.0it [00:02, 991249.70it/s] \n", + "Training Episode 2 2019-03-31 00:00:00: : 2592001.0it [00:02, 1168968.18it/s] \n", + "Training Episode 3 2019-03-31 00:00:00: : 2592001.0it [00:02, 1205543.57it/s] \n", + "Training Episode 4 2019-03-31 00:00:00: : 2592001.0it [00:03, 821862.33it/s] \n", + "Training Episode 5 2019-03-31 00:00:00: : 2592001.0it [00:04, 577856.96it/s] \n", + "Training Episode 6 2019-03-31 00:00:00: : 2592001.0it [00:11, 235069.21it/s] \n", + "Training Episode 7 2019-03-31 00:00:00: : 2592001.0it [00:08, 318400.46it/s] \n", + "Training Episode 8 2019-03-31 00:00:00: : 2592001.0it [00:05, 477884.59it/s] \n", + "Training Episode 9 2019-03-31 00:00:00: : 2592001.0it [00:04, 545623.43it/s] \n", + "Training Episode 10 2019-03-31 00:00:00: : 2592001.0it [00:06, 393400.86it/s] \n", + "Evaluation Episode 1 2019-03-31 00:00:00: : 2592001.0it [00:02, 1262570.10it/s] \n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "INFO:assume.reinforcement_learning.learning_role:New best policy saved, episode: 1, metric='avg_reward', value=61.50\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Training Episode 11 2019-03-31 00:00:00: : 2592001.0it [00:07, 346508.87it/s] \n", + "Training Episode 12 2019-03-31 00:00:00: : 2592001.0it [00:07, 324367.06it/s] \n", + "Training Episode 13 2019-03-31 00:00:00: : 2592001.0it [00:10, 242621.67it/s]\n", + "Training Episode 14 2019-03-31 00:00:00: : 2592001.0it [00:06, 412858.92it/s] \n", + "Training Episode 15 2019-03-31 00:00:00: : 2592001.0it [00:04, 523350.99it/s] \n", + "Evaluation Episode 2 2019-03-31 00:00:00: : 2592001.0it [00:02, 1070269.39it/s] \n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "INFO:assume.reinforcement_learning.learning_role:Current policy not better than best policy, 
episode: 2, metric='avg_reward', value=57.01\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Training Episode 16 2019-03-31 00:00:00: : 2592001.0it [00:04, 552326.05it/s] \n", + "Training Episode 17 2019-03-31 00:00:00: : 2592001.0it [00:04, 543949.28it/s] \n", + "Training Episode 18 2019-03-31 00:00:00: : 2592001.0it [00:04, 561718.46it/s] \n", + "Training Episode 19 2019-03-31 00:00:00: : 2592001.0it [00:04, 543953.28it/s] \n", + "Training Episode 20 2019-03-31 00:00:00: : 2592001.0it [00:04, 559934.73it/s] \n", + "Evaluation Episode 3 2019-03-31 00:00:00: : 2592001.0it [00:01, 1345558.64it/s] \n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "INFO:assume.reinforcement_learning.learning_role:New best policy saved, episode: 3, metric='avg_reward', value=66.05\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Training Episode 21 2019-03-31 00:00:00: : 2592001.0it [00:05, 506177.33it/s] \n", + "Training Episode 22 2019-03-31 00:00:00: : 2592001.0it [00:04, 530512.33it/s] \n", + "Training Episode 23 2019-03-31 00:00:00: : 2592001.0it [00:04, 544089.37it/s] \n", + "Training Episode 24 2019-03-31 00:00:00: : 2592001.0it [00:04, 538804.19it/s] \n", + "Training Episode 25 2019-03-31 00:00:00: : 2592001.0it [00:04, 538528.11it/s] \n", + "Evaluation Episode 4 2019-03-31 00:00:00: : 2592001.0it [00:01, 1364078.16it/s] \n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "INFO:assume.reinforcement_learning.learning_role:Current policy not better than best policy, episode: 4, metric='avg_reward', value=64.28\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Training Episode 26 2019-03-31 00:00:00: : 2592001.0it [00:04, 547367.66it/s] \n", + "Training Episode 27 2019-03-31 00:00:00: : 2592001.0it [00:04, 564322.76it/s] \n", + "Training Episode 28 2019-03-31 00:00:00: : 2592001.0it [00:04, 537950.05it/s] \n", + "Training Episode 29 
2019-03-31 00:00:00: : 2592001.0it [00:04, 566923.26it/s] \n", + "Training Episode 30 2019-03-31 00:00:00: : 2592001.0it [00:04, 549385.66it/s] \n", + "Evaluation Episode 5 2019-03-31 00:00:00: : 2592001.0it [00:02, 1221583.52it/s] \n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "INFO:assume.reinforcement_learning.learning_role:Current policy not better than best policy, episode: 5, metric='avg_reward', value=65.46\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Training Episode 31 2019-03-31 00:00:00: : 2592001.0it [00:04, 523688.09it/s] \n", + "Training Episode 32 2019-03-31 00:00:00: : 2592001.0it [00:04, 529345.27it/s] \n", + "Training Episode 33 2019-03-31 00:00:00: : 2592001.0it [00:04, 540787.27it/s] \n", + "Training Episode 34 2019-03-31 00:00:00: : 2592001.0it [00:04, 567992.69it/s] \n", + "Training Episode 35 2019-03-31 00:00:00: : 2592001.0it [00:04, 531993.25it/s] \n", + "Evaluation Episode 6 2019-03-31 00:00:00: : 2592001.0it [00:01, 1316386.22it/s] \n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "INFO:assume.reinforcement_learning.learning_role:New best policy saved, episode: 6, metric='avg_reward', value=69.56\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Training Episode 36 2019-03-31 00:00:00: : 2592001.0it [00:05, 475786.75it/s] \n", + "Training Episode 37 2019-03-31 00:00:00: : 2592001.0it [00:05, 460533.94it/s] \n", + "Training Episode 38 2019-03-31 00:00:00: : 2592001.0it [00:09, 260005.26it/s] \n", + "Training Episode 39 2019-03-31 00:00:00: : 2592001.0it [00:05, 515289.20it/s] \n", + "Training Episode 40 2019-03-31 00:00:00: : 2592001.0it [00:05, 447086.59it/s] \n", + "Evaluation Episode 7 2019-03-31 00:00:00: : 2592001.0it [00:02, 966271.13it/s] \n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "INFO:assume.reinforcement_learning.learning_role:Current policy not better than best policy, 
episode: 7, metric='avg_reward', value=67.00\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Training Episode 41 2019-03-31 00:00:00: : 2592001.0it [00:05, 490473.18it/s] \n", + "Training Episode 42 2019-03-31 00:00:00: : 2592001.0it [00:05, 439153.32it/s] \n", + "Training Episode 43 2019-03-31 00:00:00: : 2592001.0it [00:09, 277823.54it/s] \n", + "Training Episode 44 2019-03-31 00:00:00: : 2592001.0it [00:05, 475217.48it/s] \n", + "Training Episode 45 2019-03-31 00:00:00: : 2592001.0it [00:04, 520578.15it/s] \n", + "Evaluation Episode 8 2019-03-31 00:00:00: : 2592001.0it [00:03, 789397.34it/s] \n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "INFO:assume.reinforcement_learning.learning_role:Current policy not better than best policy, episode: 8, metric='avg_reward', value=69.18\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Training Episode 46 2019-03-31 00:00:00: : 2592001.0it [00:06, 416578.49it/s] \n", + "Training Episode 47 2019-03-31 00:00:00: : 2592001.0it [00:04, 537399.97it/s] \n", + "Training Episode 48 2019-03-31 00:00:00: : 2592001.0it [00:07, 337344.51it/s] \n", + "Training Episode 49 2019-03-31 00:00:00: : 2592001.0it [00:05, 473846.16it/s] \n", + "Training Episode 50 2019-03-31 00:00:00: : 2592001.0it [00:05, 518293.14it/s] \n", + "Evaluation Episode 9 2019-03-31 00:00:00: : 2592001.0it [00:02, 1245685.89it/s] \n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "INFO:assume.reinforcement_learning.learning_role:Current policy not better than best policy, episode: 9, metric='avg_reward', value=63.51\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Training Episode 51 2019-03-31 00:00:00: : 2592001.0it [00:05, 453088.33it/s] \n", + "Training Episode 52 2019-03-31 00:00:00: : 2592001.0it [00:07, 330962.71it/s] \n", + "Training Episode 53 2019-03-31 00:00:00: : 2592001.0it [00:04, 542997.91it/s] \n", + "Training 
Episode 54 2019-03-31 00:00:00: : 2592001.0it [00:05, 502525.80it/s] \n", + "Training Episode 55 2019-03-31 00:00:00: : 2592001.0it [00:04, 551052.63it/s] \n", + "Evaluation Episode 10 2019-03-31 00:00:00: : 2592001.0it [00:01, 1341055.28it/s] \n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "INFO:assume.reinforcement_learning.learning_role:Current policy not better than best policy, episode: 10, metric='avg_reward', value=67.42\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Training Episode 56 2019-03-31 00:00:00: : 2592001.0it [00:05, 511823.05it/s] \n", + "Training Episode 57 2019-03-31 00:00:00: : 2592001.0it [00:06, 402593.08it/s] \n", + "Training Episode 58 2019-03-31 00:00:00: : 2592001.0it [00:05, 485062.62it/s] \n", + "Training Episode 59 2019-03-31 00:00:00: : 2592001.0it [00:04, 538266.65it/s] \n", + "Training Episode 60 2019-03-31 00:00:00: : 2592001.0it [00:04, 525532.39it/s] \n", + "Evaluation Episode 11 2019-03-31 00:00:00: : 2592001.0it [00:02, 1195235.67it/s] \n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "INFO:assume.reinforcement_learning.learning_role:Current policy not better than best policy, episode: 11, metric='avg_reward', value=68.11\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Training Episode 61 2019-03-31 00:00:00: : 2592001.0it [00:04, 544008.62it/s] \n", + "Training Episode 62 2019-03-31 00:00:00: : 2592001.0it [00:05, 497786.84it/s] \n", + "Training Episode 63 2019-03-31 00:00:00: : 2592001.0it [00:07, 328211.03it/s] \n", + "Training Episode 64 2019-03-31 00:00:00: : 2592001.0it [00:06, 388692.51it/s] \n", + "Training Episode 65 2019-03-31 00:00:00: : 2592001.0it [00:06, 397518.62it/s] \n", + "Evaluation Episode 12 2019-03-31 00:00:00: : 2592001.0it [00:02, 1159921.48it/s] \n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "INFO:assume.reinforcement_learning.learning_role:Current 
policy not better than best policy, episode: 12, metric='avg_reward', value=68.65\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Training Episode 66 2019-03-31 00:00:00: : 2592001.0it [00:05, 506934.76it/s] \n", + "Training Episode 67 2019-03-31 00:00:00: : 2592001.0it [00:04, 547509.54it/s] \n", + "Training Episode 68 2019-03-31 00:00:00: : 2592001.0it [00:04, 552427.20it/s] \n", + "Training Episode 69 2019-03-31 00:00:00: : 2592001.0it [00:04, 526214.28it/s] \n", + "Training Episode 70 2019-03-31 00:00:00: : 2592001.0it [00:05, 494668.29it/s] \n", + "Evaluation Episode 13 2019-03-31 00:00:00: : 2592001.0it [00:01, 1333859.46it/s] \n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "INFO:assume.reinforcement_learning.learning_role:New best policy saved, episode: 13, metric='avg_reward', value=70.40\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Training Episode 71 2019-03-31 00:00:00: : 2592001.0it [00:05, 508292.60it/s] \n", + "Training Episode 72 2019-03-31 00:00:00: : 2592001.0it [00:04, 554271.66it/s] \n", + "Training Episode 73 2019-03-31 00:00:00: : 2592001.0it [00:04, 525895.46it/s] \n", + "Training Episode 74 2019-03-31 00:00:00: : 2592001.0it [00:04, 539194.21it/s] \n", + "Training Episode 75 2019-03-31 00:00:00: : 2592001.0it [00:04, 551343.99it/s] \n", + "Evaluation Episode 14 2019-03-31 00:00:00: : 2592001.0it [00:02, 1000922.99it/s] \n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "INFO:assume.reinforcement_learning.learning_role:Current policy not better than best policy, episode: 14, metric='avg_reward', value=69.46\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Training Episode 76 2019-03-31 00:00:00: : 2592001.0it [00:04, 545890.83it/s] \n", + "Training Episode 77 2019-03-31 00:00:00: : 2592001.0it [00:05, 477560.91it/s] \n", + "Training Episode 78 2019-03-31 00:00:00: : 2592001.0it [00:04, 
536458.46it/s] \n", + "Training Episode 79 2019-03-31 00:00:00: : 2592001.0it [00:04, 547842.25it/s] \n", + "Training Episode 80 2019-03-31 00:00:00: : 2592001.0it [00:04, 536775.64it/s] \n", + "Evaluation Episode 15 2019-03-31 00:00:00: : 2592001.0it [00:01, 1343056.59it/s] \n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "INFO:assume.reinforcement_learning.learning_role:Current policy not better than best policy, episode: 15, metric='avg_reward', value=67.95\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Training Episode 81 2019-03-31 00:00:00: : 2592001.0it [00:05, 486530.95it/s] \n", + "Training Episode 82 2019-03-31 00:00:00: : 2592001.0it [00:04, 539689.90it/s] \n", + "Training Episode 83 2019-03-31 00:00:00: : 2592001.0it [00:04, 544936.53it/s] \n", + "Training Episode 84 2019-03-31 00:00:00: : 2592001.0it [00:05, 496245.07it/s] \n", + "Training Episode 85 2019-03-31 00:00:00: : 2592001.0it [00:04, 522360.74it/s] \n", + "Evaluation Episode 16 2019-03-31 00:00:00: : 2592001.0it [00:02, 1270856.69it/s] \n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "INFO:assume.reinforcement_learning.learning_role:Current policy not better than best policy, episode: 16, metric='avg_reward', value=68.02\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Training Episode 86 2019-03-31 00:00:00: : 2592001.0it [00:04, 528713.40it/s] \n", + "Training Episode 87 2019-03-31 00:00:00: : 2592001.0it [00:04, 551481.60it/s] \n", + "Training Episode 88 2019-03-31 00:00:00: : 2592001.0it [00:04, 532663.42it/s] \n", + "Training Episode 89 2019-03-31 00:00:00: : 2592001.0it [00:05, 457437.49it/s] \n", + "Training Episode 90 2019-03-31 00:00:00: : 2592001.0it [00:04, 548926.22it/s] \n", + "Evaluation Episode 17 2019-03-31 00:00:00: : 2592001.0it [00:03, 703000.16it/s] \n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + 
"INFO:assume.reinforcement_learning.learning_role:Current policy not better than best policy, episode: 17, metric='avg_reward', value=64.67\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Training Episode 91 2019-03-31 00:00:00: : 2592001.0it [00:04, 535718.12it/s] \n", + "Training Episode 92 2019-03-31 00:00:00: : 2592001.0it [00:04, 558564.58it/s] \n", + "Training Episode 93 2019-03-31 00:00:00: : 2592001.0it [00:05, 504703.96it/s] \n", + "Training Episode 94 2019-03-31 00:00:00: : 2592001.0it [00:04, 530632.01it/s] \n", + "Training Episode 95 2019-03-31 00:00:00: : 2592001.0it [00:04, 538231.77it/s] \n", + "Evaluation Episode 18 2019-03-31 00:00:00: : 2592001.0it [00:01, 1310741.68it/s] \n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "INFO:assume.reinforcement_learning.learning_role:Current policy not better than best policy, episode: 18, metric='avg_reward', value=66.13\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Training Episode 96 2019-03-31 00:00:00: : 2592001.0it [00:04, 530328.46it/s] \n", + "Training Episode 97 2019-03-31 00:00:00: : 2592001.0it [00:05, 493579.96it/s] \n", + "Training Episode 98 2019-03-31 00:00:00: : 2592001.0it [00:04, 527213.53it/s] \n", + "Training Episode 99 2019-03-31 00:00:00: : 2592001.0it [00:04, 555877.80it/s] \n", + "Training Episode 100 2019-03-31 00:00:00: : 2592001.0it [00:04, 539233.36it/s] \n", + "Evaluation Episode 19 2019-03-31 00:00:00: : 2592001.0it [00:01, 1315933.69it/s] \n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "INFO:assume.reinforcement_learning.learning_role:Current policy not better than best policy, episode: 19, metric='avg_reward', value=68.80\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Training Episodes: 100%|██████████| 100/100 [11:04<00:00, 6.65s/it]\n", + "example_02a_base 2019-03-30 23:00:00: 100%|█████████▉| 2588401/2592000 [00:02<00:00, 
991155.61it/s] \n" + ] + } + ], "source": [ "import os\n", "\n", @@ -1110,7 +1548,7 @@ ], "metadata": { "kernelspec": { - "display_name": "assume-framework", + "display_name": "Python 3", "language": "python", "name": "python3" }, @@ -1124,7 +1562,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.11.9" + "version": "3.10.16" } }, "nbformat": 4, From 2ba98f7f93df9b80ce98165ecebe8449949fd749 Mon Sep 17 00:00:00 2001 From: Harshul-18 Date: Sat, 28 Feb 2026 13:14:23 +0100 Subject: [PATCH 22/44] updated the 04a notebook, documentations of RL_algorithms folder code files --- .../algorithms/maddpg.py | 2 - .../algorithms/mappo.py | 111 ++++++---- .../algorithms/matd3.py | 50 +++-- assume/reinforcement_learning/buffer.py | 174 ++++++++------- .../reinforcement_learning/learning_role.py | 175 ++++++++------- .../reinforcement_learning/learning_utils.py | 68 +++--- .../neural_network_architecture.py | 85 +++---- examples/inputs/example_02a/config.yaml | 207 ++++++++++-------- examples/inputs/example_02d/config.yaml | 114 ---------- 9 files changed, 455 insertions(+), 531 deletions(-) diff --git a/assume/reinforcement_learning/algorithms/maddpg.py b/assume/reinforcement_learning/algorithms/maddpg.py index 87ac2220d..01743327c 100644 --- a/assume/reinforcement_learning/algorithms/maddpg.py +++ b/assume/reinforcement_learning/algorithms/maddpg.py @@ -28,8 +28,6 @@ class DDPG(A2CAlgorithm): - Target networks updated via Polyak averaging for stability - Replay buffer for sample efficiency and decorrelation - Unlike TD3, DDPG updates the actor at every training step without delay. - Attributes: n_updates: Counter for gradient updates performed. grad_clip_norm: Maximum gradient norm for clipping. 
diff --git a/assume/reinforcement_learning/algorithms/mappo.py b/assume/reinforcement_learning/algorithms/mappo.py index 49cd8d3b5..2c78fd3d8 100644 --- a/assume/reinforcement_learning/algorithms/mappo.py +++ b/assume/reinforcement_learning/algorithms/mappo.py @@ -24,21 +24,25 @@ class PPO(A2CAlgorithm): """ Proximal Policy Optimization (PPO) Algorithm. - - A policy gradient method that alternates between - sampling data through interaction with the environment, - and optimizing a surrogate objective function using - stochastic gradient ascent. It is an on-policy algorithm. - - Args: - learning_role (LearningRole): The central learning role. - clip_range (float): Clipping parameter epsilon. - clip_range_vf (float, optional): Clipping parameter for the value function. - If None, value function is not clipped. - n_epochs (int): Number of epochs to optimize the surrogate loss per update. - entropy_coef (float): Entropy coefficient for the loss calculation. - vf_coef (float): Value function coefficient for the loss calculation. - max_grad_norm (float): The maximum value for the gradient clipping. + + A policy gradient method that alternates between sampling data through + interaction with the environment, and optimizing a surrogate objective + function using stochastic gradient ascent. It is an on-policy algorithm. + + Attributes: + clip_range: The epsilon parameter for PPO clipping. + clip_range_vf: The epsilon parameter for value function clipping. + n_epochs: Number of optimization epochs per rollout. + entropy_coef: Coefficient for entropy term in loss calculation. + vf_coef: Coefficient for value function term in loss calculation. + max_grad_norm: Maximum gradient norm for clipping. + n_updates: Counter for gradient updates performed. + actor_architecture_class: Actor network architecture class. + critic_architecture_class: Critic network architecture class. 
+ + Example: + >>> ppo = PPO(learning_role) + >>> ppo.update_policy() """ def __init__( @@ -51,17 +55,16 @@ def __init__( vf_coef=1.0, max_grad_norm=0.5, ): - """ - Initialize PPO algorithm with specific hyperparameters. - + """Initialize PPO algorithm with specific hyperparameters. + Args: - learning_role (LearningRole): The learning role object. - clip_range (float, optional): The epsilon parameter for PPO clipping. - clip_range_vf (float, optional): The epsilon parameter for value function clipping. - n_epochs (int, optional): Number of optimization epochs per rollout. - entropy_coef (float, optional): Coefficient for entropy term in loss. - vf_coef (float, optional): Coefficient for value function term in loss. - max_grad_norm (float, optional): Maximum gradient norm for clipping. + learning_role: The primary learning role object. + clip_range: The epsilon parameter for PPO policy clipping. + clip_range_vf: The epsilon parameter for value function clipping. + n_epochs: Number of optimization epochs per rollout. + entropy_coef: Coefficient for entropy term in loss. + vf_coef: Coefficient for value function term in loss. + max_grad_norm: Maximum gradient norm for clipping. """ super().__init__(learning_role) @@ -95,9 +98,14 @@ def __init__( def create_actors(self) -> None: - """ - Creates stochastic actor networks for all agents. - Initializes the ActorPPO network and its optimizer for each agent strategy. + """Create stochastic actor networks for all agents. + + Initializes the ActorPPO or LSTMActorPPO network based on the configuration, + as well as its optimizer for each agent strategy. + + Example: + >>> ppo.create_actors() + >>> # Creates actor network and optimizer for each strategy """ config = self.learning_config ppo_config = getattr(config, "ppo", None) @@ -129,9 +137,14 @@ def create_actors(self) -> None: strategy.actor.loaded = False def create_critics(self) -> None: - """ - Creates value networks for all agents. 
- Initializes the CriticPPO network (Centralized Critic) and its optimizer. + """Create value networks for all agents. + + Initializes the CriticPPO network (Centralized Critic) and its optimizer + for each registered agent strategy. + + Example: + >>> ppo.create_critics() + >>> # Creates critic networks and optimizers for each strategy """ n_agents = len(self.learning_role.rl_strats) @@ -151,11 +164,20 @@ def create_critics(self) -> None: ) def extract_policy(self) -> dict: - """ - Extract all actor and critic networks into a dictionary. - + """Extract all actor and critic networks into a dictionary. + + Collects actor and critic networks from all learning strategies into + a structured dictionary. + Returns: - dict: Dictionary with keys 'actors', 'critics', and dimension information. + Dictionary containing all network components organized by type: + - 'actors': Primary actor networks + - 'critics': Primary critic networks + - Dimension information for reconstruction + + Example: + >>> policy_dict = ppo.extract_policy() + >>> # Contains all networks ready for saving or transfer """ actors = {} critics = {} @@ -177,15 +199,16 @@ def extract_policy(self) -> dict: # ========================================================================= def update_policy(self) -> None: - """ - Update actor and critic networks using proximal policy optimization (PPO). - Checks if enough data is collected (batch_size). - Computes Generalized Advantage Estimation (GAE) and Returns using the last value estimate. - Updates the Actor and Critic networks over multiple epochs (n_epochs) using mini-batches. - Calculates the surrogate objective with clipping (clip_range). - Calculates value function loss (MSE) and entropy bonus. - Logs metrics and gradients. - Clears the on-policy buffer after the update. + """Update actor and critic networks using Proximal Policy Optimization (PPO). + + Performs one complete training iteration consisting of: + 1. 
Checking if enough data is collected in the rollout buffer. + 2. Computing Generalized Advantage Estimation (GAE) and Returns using the last value estimate. + 3. Updating the Actor and Critic networks over multiple epochs using mini-batches. + 4. Calculating the surrogate objective with clipping. + 5. Calculating value function loss (MSE) and entropy bonus. + 6. Logging metrics and gradients. + 7. Clearing the on-policy buffer after the update. """ logger.debug("Updating Policy") diff --git a/assume/reinforcement_learning/algorithms/matd3.py b/assume/reinforcement_learning/algorithms/matd3.py index 2cc2ee455..47950dbad 100644 --- a/assume/reinforcement_learning/algorithms/matd3.py +++ b/assume/reinforcement_learning/algorithms/matd3.py @@ -24,11 +24,28 @@ class TD3(A2CAlgorithm): clipped double Q-Learning, delayed policy update and target policy smoothing. Open AI Spinning guide: https://spinningup.openai.com/en/latest/algorithms/td3.html - Original paper: https://arxiv.org/pdf/1802.09477.pdf + + Attributes: + n_updates: Counter for gradient updates performed. + grad_clip_norm: Maximum gradient norm for clipping. + critic_architecture_class: Critic network architecture class (CriticTD3). + + Example: + >>> td3 = TD3(learning_role) + >>> td3.update_policy() """ def __init__(self, learning_role): + """Initialize the TD3 algorithm. + + Sets up the algorithm with gradient counters, clipping parameters, + and critic architecture. + + Args: + learning_role: Learning role object managing agents and replay buffer. + Must have off-policy configuration. + """ super().__init__(learning_role) self.n_updates = 0 @@ -38,24 +55,21 @@ def __init__(self, learning_role): self.critic_architecture_class = CriticTD3 def update_policy(self): + """Update the policy using the Twin Delayed Deep Deterministic Policy Gradients (TD3). + + This method performs the policy update step, which involves updating the actor + (policy) and critic (Q-function) networks using the TD3 algorithm. 
It iterates + over the specified number of gradient steps and performs the following for each + learning strategy: + + 1. Sample a batch of transitions from the replay buffer. + 2. Calculate the next actions with added noise using the actor target network. + 3. Compute the target Q-values based on the next states, rewards, and the target critic network. + 4. Compute the critic loss as the mean squared error between current Q-values and target Q-values. + 5. Optimize the critic network by performing a gradient descent step. + 6. Update the actor network if the specified policy delay is reached. + 7. Apply Polyak averaging to update target networks. """ - Update the policy of the reinforcement learning agent using the Twin Delayed Deep Deterministic Policy Gradients (TD3) algorithm. - - Note: - This function performs the policy update step, which involves updating the actor (policy) and critic (Q-function) networks - using TD3 algorithm. It iterates over the specified number of gradient steps and performs the following steps for each - learning strategy: - - 1. Sample a batch of transitions from the replay buffer. - 2. Calculate the next actions with added noise using the actor target network. - 3. Compute the target Q-values based on the next states, rewards, and the target critic network. - 4. Compute the critic loss as the mean squared error between current Q-values and target Q-values. - 5. Optimize the critic network by performing a gradient descent step. - 6. Update the actor network if the specified policy delay is reached. - 7. Apply Polyak averaging to update target networks. 
- - """ - logger.debug("Updating Policy (TD3)") # Stack strategies for easier access diff --git a/assume/reinforcement_learning/buffer.py b/assume/reinforcement_learning/buffer.py index b084020da..3728789fe 100644 --- a/assume/reinforcement_learning/buffer.py +++ b/assume/reinforcement_learning/buffer.py @@ -32,20 +32,18 @@ def __init__( device: str, float_type, ): - """ + """Initialize the replay buffer. + A class that represents a replay buffer for storing observations, actions, and rewards. The replay buffer is implemented as a circular buffer, where the oldest experiences are discarded when the buffer is full. - + Args: - buffer_size (int): The maximum size of the buffer. - obs_dim (int): The dimension of the observation space. - act_dim (int): The dimension of the action space. - n_rl_units (int): The number of reinforcement learning units. - device (str): The device to use for storing the data (e.g., 'cpu' or 'cuda'). - float_type (torch.dtype): The data type to use for the stored data. - observations (numpy.ndarray): The stored observations. - actions (numpy.ndarray): The stored actions. - rewards (numpy.ndarray): The stored rewards. + buffer_size: The maximum size of the buffer. + obs_dim: The dimension of the observation space. + act_dim: The dimension of the action space. + n_rl_units: The number of reinforcement learning units. + device: The device to use for storing the data (e.g., 'cpu' or 'cuda'). + float_type: The data type to use for the stored data. """ self.buffer_size = buffer_size @@ -91,28 +89,26 @@ def __init__( ) def size(self): - # write docstring for this function - """ - Return the current size of the buffer (i.e. number of transitions - stored in the buffer). - + """Return the current size of the buffer. + Returns: - buffer_size(int): The current size of the buffer - + The current size of the buffer (i.e. number of transitions stored). 
""" return self.buffer_size if self.full else self.pos def to_torch(self, array: np.array, copy=True): - """ - Converts a numpy array to a PyTorch tensor. Note: It copies the data by default. - + """Convert a numpy array to a PyTorch tensor. + + Note: + It copies the data by default. + Args: - array (numpy.ndarray): The numpy array to convert. - copy (bool, optional): Whether to copy the data or not - (may be useful to avoid changing things by reference). Defaults to True. - + array: The numpy array to convert. + copy: Whether to copy the data or not (may be useful to avoid changing + things by reference). Defaults to True. + Returns: - torch.Tensor: The converted PyTorch tensor. + The converted PyTorch tensor. """ if copy: @@ -126,13 +122,12 @@ def add( actions: np.ndarray, reward: np.ndarray, ): - """ - Adds an observation, action, and reward of all agents to the replay buffer. - + """Add an observation, action, and reward of all agents to the replay buffer. + Args: - obs (numpy.ndarray): The observation to add. - actions (numpy.ndarray): The actions to add. - reward (numpy.ndarray): The reward to add. + obs: The observation to add. + actions: The actions to add. + reward: The reward to add. """ # copying all to avoid modification len_obs = obs.shape[0] @@ -146,15 +141,14 @@ def add( self.pos = 0 def sample(self, batch_size: int) -> ReplayBufferSamples: - """ - Samples a random batch of experiences from the replay buffer. - + """Sample a random batch of experiences from the replay buffer. + Args: - batch_size (int): The number of experiences to sample. - + batch_size: The number of experiences to sample. + Returns: - ReplayBufferSamples: A named tuple containing the sampled observations, actions, and rewards. - + A named tuple containing the sampled observations, actions, and rewards. + Raises: Exception: If there are less than two entries in the buffer. 
""" @@ -174,9 +168,17 @@ def sample(self, batch_size: int) -> ReplayBufferSamples: return ReplayBufferSamples(*tuple(map(self.to_torch, data))) class RolloutBufferSamples(NamedTuple): - """ - Container for roll buffer samples. It holds one batch of training samples - from PPO's rollout buffer. + """Container for rollout buffer samples. + + It holds one batch of training samples from PPO's rollout buffer. + + Attributes: + observations: States/observations the agent saw. + actions: Actions the agent took. + old_values: Critic's value estimates. + old_log_probs: Log_probability of taking each action. + advantages: Generalized advantage estimates. + returns: Expected returns. """ observations: th.Tensor # states/observations the agent saw actions: th.Tensor # actions the agent took @@ -186,24 +188,12 @@ class RolloutBufferSamples(NamedTuple): returns: th.Tensor # expected returns class RolloutBuffer: - """ - Rollout buffer is used in on-policy algorithms like PPO. - + """Rollout buffer used in on-policy algorithms like PPO. + It corresponds to the transitions collected using the current policy. This experience is discarded after the policy is updated. In order to use PPO, the current observations are needed to be stored. - the observations include actions, rewards, values, log probabilities and done for each action. - - Args: - buffer_size (int): Max number of elements allowed in the buffer - obs_dim (int): Dimension of the observation space - act_dim (int): Dimension of the action space - n_rl_units (int): Number of RL agents - device (str | th.device): PyTorch device config - float_type (th.dtype): Data type for floating point numbers - gamma (float): Discount factor - gae_lambda (float): bias-variance trade-off factor for Generalized Advantage Estimator - + The observations include actions, rewards, values, log probabilities and done for each action. 
""" def __init__( @@ -217,7 +207,18 @@ def __init__( gamma: float = 0.99, gae_lambda: float = 0.98 ): - """Initialize the rollout buffer.""" + """Initialize the rollout buffer. + + Args: + buffer_size: Max number of elements allowed in the buffer. + obs_dim: Dimension of the observation space. + act_dim: Dimension of the action space. + n_rl_units: Number of RL agents. + device: PyTorch device config. + float_type: Data type for floating point numbers. + gamma: Discount factor. + gae_lambda: bias-variance trade-off factor for Generalized Advantage Estimator. + """ self.buffer_size = buffer_size self.obs_dim = obs_dim self.act_dim = act_dim @@ -236,8 +237,8 @@ def __init__( self.reset() def reset(self) -> None: - """ - Reset the rollout buffer. + """Reset the rollout buffer. + Clearing the buffer and allocating new storage. """ self.observations = np.zeros( @@ -314,16 +315,15 @@ def add( value: np.ndarray, log_prob: np.ndarray ) -> None: - """ - Add a transition to the buffer. + """Add a transition to the buffer. Args: - obs (np.ndarray): Observation of the agents - action (np.ndarray): Action taken by the agents - reward (np.ndarray): Reward obtained - done (np.ndarray): Whether the episode ended - value (np.ndarray): Value estimate from the critic - log_prob (np.ndarray): Log probability of the action + obs: Observation of the agents. + action: Action taken by the agents. + reward: Reward obtained. + done: Whether the episode ended. + value: Value estimate from the critic. + log_prob: Log probability of the action. """ if self.pos >= self.buffer_size: self.full = True @@ -344,13 +344,13 @@ def compute_returns_and_advantages( last_values: np.ndarray, dones: np.ndarray ) -> None: - """ - Uses Generalized Advantage Estimation to compute the advantage. - To obtain the lambda-return, the advantage is added to the value estiamte. - + """Use Generalized Advantage Estimation to compute the advantage. 
+ + To obtain the lambda-return, the advantage is added to the value estimate. + Args: - last_values (np.ndarray): value estimation for the last step - dones (np.ndarray): whether the last step was terminal + last_values: Value estimation for the last step. + dones: Whether the last step was terminal. """ # taking the final value estimates and episode-end flags, # and making them flat arrays providing one number per agent. @@ -395,14 +395,13 @@ def get( self, batch_size: int | None = None ) -> Generator[RolloutBufferSamples, None, None]: - """ - Generator for generating batches of transition samples for training. + """Generate batches of transition samples for training. Args: - batch_size (int | None): Number of samples to be accessed per batch. - + batch_size: Number of samples to be accessed per batch. + Yields: - Generator[RolloutBufferSamples]: A generator yielding RolloutBufferSamples + A generator yielding RolloutBufferSamples. """ if not self.generator_ready: raise ValueError( @@ -422,15 +421,15 @@ def get( start_idx += batch_size def _get_samples(self, indices: np.ndarray) -> RolloutBufferSamples: - """ - Helper function to sample data from the buffer. + """Sample data from the buffer for given indices. + Converts numpy arrays to torch tensors for given indices. Args: - indices (np.ndarray): Indices of the samples to retrieve. - + indices: Indices of the samples to retrieve. + Returns: - RolloutBufferSamples: The batch of samples converted to PyTorch tensors. + The batch of samples converted to PyTorch tensors. """ return RolloutBufferSamples( observations = th.as_tensor( @@ -466,10 +465,9 @@ def _get_samples(self, indices: np.ndarray) -> RolloutBufferSamples: ) def size(self) -> int: - """ - Return the current number of stored transitions. - + """Return the current number of stored transitions. + Returns: - int: The size of the buffer. + The size of the buffer. 
""" return self.buffer_size if self.full else self.pos diff --git a/assume/reinforcement_learning/learning_role.py b/assume/reinforcement_learning/learning_role.py index dbfb7273d..3411dc381 100644 --- a/assume/reinforcement_learning/learning_role.py +++ b/assume/reinforcement_learning/learning_role.py @@ -41,10 +41,11 @@ class Learning(Role): - """ - This class manages the learning process of reinforcement learning agents, including initializing key components such as - neural networks, replay buffer, and learning hyperparameters. It handles both training and evaluation modes based on - the provided learning configuration. + """Manages the learning process of reinforcement learning agents. + + This class handles the initialization of key components such as neural networks, + replay buffer, and learning hyperparameters. It handles both training and evaluation + modes based on the provided learning configuration. Args: learning_config (LearningConfig): The configuration for the learning process. @@ -140,14 +141,16 @@ def __init__( self.all_dones = defaultdict(lambda: defaultdict(list)) def on_ready(self): - """ - Set up the learning role for reinforcement learning training. + """Set up the learning role for reinforcement learning training. - Notes: - This method prepares the learning role for the reinforcement learning training process. It subscribes to relevant messages - for handling the training process and schedules recurrent tasks for policy updates based on the specified training frequency. - This cannot happen in the init since the context (compare mango agents) is not yet available there.To avoid inconsistent replay buffer states (e.g. observation and action has been stored but not the reward), this - slightly shifts the timing of the buffer updates. + Note: + This method prepares the learning role for the reinforcement learning training process. 
+ It subscribes to relevant messages for handling the training process and schedules + recurrent tasks for policy updates based on the specified training frequency. + This cannot happen in the init since the context (compare mango agents) is not + yet available there. To avoid inconsistent replay buffer states (e.g. observation + and action has been stored but not the reward), this slightly shifts the timing + of the buffer updates. """ super().on_ready() @@ -168,8 +171,8 @@ def on_ready(self): ) def sync_train_freq_with_simulation_horizon(self) -> str | None: - """ - Ensure self.train_freq evenly divides the simulation length. + """Ensure self.train_freq evenly divides the simulation length. + If not, adjust self.train_freq (in-place) and return the new string, otherwise return None. Uses self.start_datetime/self.end_datetime when available, otherwise falls back to timestamp fields. """ @@ -209,8 +212,7 @@ def sync_train_freq_with_simulation_horizon(self) -> str | None: return self.learning_config.train_freq def determine_validation_interval(self) -> int: - """ - Compute and validate validation_interval. + """Compute and validate validation_interval. Returns: validation_interval (int) @@ -244,8 +246,7 @@ def determine_validation_interval(self) -> int: return validation_interval def register_strategy(self, strategy: LearningStrategy) -> None: - """ - Register a learning strategy with this learning role. + """Register a learning strategy with this learning role. Args: strategy (LearningStrategy): The learning strategy to register. @@ -309,10 +310,10 @@ async def store_to_buffer_and_update(self) -> None: logger.warning("No experience retrieved to store in buffer at update step!") async def _store_to_buffer_and_update_sync(self, cache, device) -> None: - """ - This function takes all the information that the strategies wrote into the learning_role cache dicts and post_processes them to fit into the buffer. 
- Further triggers the next policy update - + """Process strategy data into the buffer and trigger policy update. + + This function takes all the information that the strategies wrote into the + learning_role cache dicts and post-processes them to fit into the buffer. """ first_start = next(iter(cache["obs"])) for name, buffer in [ @@ -402,22 +403,22 @@ async def _store_to_buffer_and_update_sync(self, cache, device) -> None: self.rl_algorithm.update_policy() def add_observation_to_cache(self, unit_id, start, observation) -> None: - """ - Add the observation to the cache dict, per unit_id. + """Add the observation to the cache dict, per unit_id. Args: unit_id (str): The id of the unit. + start: The start time. observation (torch.Tensor): The observation to be added. """ self.all_obs[start][unit_id].append(observation) def add_actions_to_cache(self, unit_id, start, action, noise) -> None: - """ - Add the action and noise to the cache dict, per unit_id. + """Add the action and noise to the cache dict, per unit_id. Args: unit_id (str): The id of the unit. + start: The start time. action (torch.Tensor): The action to be added. noise (torch.Tensor): The noise to be added. @@ -434,13 +435,14 @@ def add_actions_to_cache(self, unit_id, start, action, noise) -> None: self.all_noises[start][unit_id].append(noise) def add_reward_to_cache(self, unit_id, start, reward, regret, profit) -> None: - """ - Add the reward to the cache dict, per unit_id. + """Add the reward to the cache dict, per unit_id. Args: - unit_id (str): The id of the unit. - reward (float): The reward to be added. - + unit_id: The id of the unit. + start: The start time. + reward: The reward to be added. + regret: The regret to be added. + profit: The profit to be added. """ self.all_rewards[start][unit_id].append(reward) self.all_regrets[start][unit_id].append(regret) @@ -454,26 +456,24 @@ def add_ppo_data_to_cache( log_prob, done=False ) -> None: - """ - Add PPO specific data to the cache dict, per unit_id. 
+ """Add PPO specific data to the cache dict, per unit_id. Args: - unit_id (str): The id of the unit. - value (float): The value estimate V(s) from the critic. - log_prob (float): The log probability of the action. - done (bool): Whether a terminal state or not. + unit_id: The id of the unit. + start: The start time. + value: The value estimate V(s) from the critic. + log_prob: The log probability of the action. + done: Whether a terminal state or not. """ self.all_values[start][unit_id].append(value) self.all_log_probs[start][unit_id].append(log_prob) self.all_dones[start][unit_id].append(float(done)) def load_inter_episodic_data(self, inter_episodic_data): - """ - Load the inter-episodic data from the dict stored across simulation runs. + """Load the inter-episodic data from the dict stored across simulation runs. Args: - inter_episodic_data (dict): The inter-episodic data to be loaded. - + inter_episodic_data: The inter-episodic data to be loaded. """ self.episodes_done = inter_episodic_data["episodes_done"] self.eval_episodes_done = inter_episodic_data["eval_episodes_done"] @@ -499,11 +499,10 @@ def load_inter_episodic_data(self, inter_episodic_data): self.turn_off_initial_exploration(loaded_only=True) def get_inter_episodic_data(self): - """ - Dump the inter-episodic data to a dict for storing across simulation runs. + """Dump the inter-episodic data to a dict for storing across simulation runs. Returns: - dict: The inter-episodic data to be stored. + The inter-episodic data to be stored. """ return { @@ -517,14 +516,13 @@ def get_inter_episodic_data(self): } def turn_off_initial_exploration(self, loaded_only=False) -> None: - """ - Disable initial exploration mode. + """Disable initial exploration mode. If `loaded_only=True`, only turn off exploration for strategies that were loaded (used in continue_learning mode). If `loaded_only=False`, turn it off for all strategies. Args: - loaded_only (bool): Whether to disable exploration only for loaded strategies. 
+ loaded_only: Whether to disable exploration only for loaded strategies. """ for strategy in self.rl_strats.values(): if loaded_only: @@ -534,9 +532,10 @@ def turn_off_initial_exploration(self, loaded_only=False) -> None: strategy.collect_initial_experience_mode = False def get_progress_remaining(self) -> float: - """ - Get the remaining learning progress from the simulation run. - + """Get the remaining learning progress from the simulation run. + + Returns: + The remaining progress as a float between 0 and 1. """ total_duration = self.end - self.start elapsed_duration = self.context.current_timestamp - self.start @@ -574,14 +573,14 @@ def get_progress_remaining(self) -> float: return progress_remaining def create_learning_algorithm(self, algorithm: RLAlgorithm): - """ - Create and initialize the reinforcement learning algorithm. + """Create and initialize the reinforcement learning algorithm. - This method creates and initializes the reinforcement learning algorithm based on the specified algorithm name. The algorithm - is associated with the learning role and configured with relevant hyperparameters. + This method creates and initializes the reinforcement learning algorithm based on + the specified algorithm name. The algorithm is associated with the learning role + and configured with relevant hyperparameters. Args: - algorithm (RLAlgorithm): The name of the reinforcement learning algorithm. + algorithm: The name of the reinforcement learning algorithm. """ if algorithm == "matd3": self.rl_algorithm = TD3(learning_role=self) @@ -596,9 +595,13 @@ def initialize_policy(self, actors_and_critics: dict = None) -> None: """ Initialize the policy of the reinforcement learning agent considering the respective algorithm. - This method initializes the policy (actor) of the reinforcement learning agent. It tests if we want to continue the learning process with - stored policies from a former training process. If so, it loads the policies from the specified directory. 
Otherwise, it initializes the - respective new policies. + This method initializes the policy (actor) of the reinforcement learning agent. It + tests if we want to continue the learning process with stored policies from a former + training process. If so, it loads the policies from the specified directory. + Otherwise, it initializes the respective new policies. + + Args: + actors_and_critics: The pre-initialized actor and critic policies. """ self.rl_algorithm.initialize_policy(actors_and_critics) @@ -617,20 +620,23 @@ def initialize_policy(self, actors_and_critics: dict = None) -> None: ) def compare_and_save_policies(self, metrics: dict) -> bool: - """ - Compare evaluation metrics and save policies based on the best achieved performance according to the metrics calculated. + """Compare evaluation metrics and save best performing policies. - This method compares the evaluation metrics, such as reward, profit, and regret, and saves the policies if they achieve the - best performance in their respective categories. It iterates through the specified modes, compares the current evaluation - value with the previous best, and updates the best value if necessary. If an improvement is detected, it saves the policy - and associated parameters. + This method compares the evaluation metrics, such as reward, profit, and regret, + and saves the policies if they achieve the best performance in their respective + categories. It iterates through the specified modes, compares the current evaluation + value with the previous best, and updates the best value if necessary. If an improvement + is detected, it saves the policy and associated parameters. - metrics contain a metric key like "reward" and the current value. - This function stores the policies with the highest metric. - So if minimize is required one should add for example "minus_regret" which is then maximized. + Metrics contain a metric key like "reward" and the current value. 
This function + stores the policies with the highest metric. If minimize is required, one should + add for example "minus_regret" which is then maximized. + Args: + metrics: Dictionary of metrics evaluated. + Returns: - bool: True if early stopping criteria is triggered. + True if early stopping criteria is triggered, False otherwise. Note: This method is typically used during the evaluation phase to save policies that achieve superior performance. @@ -717,19 +723,18 @@ def init_logging( output_agent_addr: str, train_start: str, ): - """ - Initialize the logging for the reinforcement learning agent. + """Initialize the logging for the reinforcement learning agent. This method initializes the tensor board logger for the reinforcement learning agent. It also initializes the parameters required for sending data to the output role. Args: - simulation_id (str): The unique identifier for the simulation. - episode (int): The current training episode number. - eval_episode (int): The current evaluation episode number. - db_uri (str): URI for connecting to the database. - output_agent_addr (str): The address of the output agent. - train_start (str): The start time of simulation. + simulation_id: The unique identifier for the simulation. + episode: The current training episode number. + eval_episode: The current evaluation episode number. + db_uri: URI for connecting to the database. + output_agent_addr: The address of the output agent. + train_start: The start time of simulation. """ self.tensor_board_logger = TensorBoardLogger( @@ -754,12 +759,10 @@ def init_logging( self.update_steps = 0 def write_rl_params_to_output(self, cache): - """ - Sends the current rl_strategy update to the output agent. + """Sends the current rl_strategy update to the output agent. Args: - products_index (pandas.DatetimeIndex): The index of all products. - marketconfig (MarketConfig): The market configuration. + cache: The data cache from the strategies. 
""" output_agent_list = [] @@ -801,20 +804,16 @@ def write_rl_params_to_output(self, cache): def write_rl_grad_params_to_output( self, learning_rate: float, unit_params_list: list[dict] ) -> None: - """ - Writes learning parameters and critic losses to output at specified time intervals. + """Writes learning parameters and critic losses to output at specified intervals. This function processes training metrics for each critic over multiple time steps and sends them to a database for storage. It tracks the learning rate and critic losses across training iterations, associating each record with a timestamp. - Parameters - ---------- - learning_rate : float - The current learning rate used in training. - unit_params_list : list[dict] - A list of dictionaries containing critic losses for each time step. - Each dictionary maps critic names to their corresponding loss values. + Args: + learning_rate: The current learning rate used in training. + unit_params_list: A list of dictionaries containing critic losses for each + time step (mapping critic names to their losses in dict). """ # gradient steps performed in previous training episodes if is_off_policy(self.learning_config.algorithm): diff --git a/assume/reinforcement_learning/learning_utils.py b/assume/reinforcement_learning/learning_utils.py index 8d72f4aeb..20aa2acd5 100644 --- a/assume/reinforcement_learning/learning_utils.py +++ b/assume/reinforcement_learning/learning_utils.py @@ -45,9 +45,7 @@ class ActivationLimits(TypedDict): # Ornstein-Uhlenbeck Noise # from https://github.com/songrotek/DDPG/blob/master/ou_noise.py class OUNoise: - """ - A class that implements Ornstein-Uhlenbeck noise. - """ + """A class that implements Ornstein-Uhlenbeck noise.""" def __init__(self, action_dimension, mu=0, sigma=0.5, theta=0.15, dt=1e-2): self.action_dimension = action_dimension @@ -76,9 +74,7 @@ def noise(self): class NormalActionNoise: - """ - A Gaussian action noise that supports direct tensor creation on a given device. 
- """ + """A Gaussian action noise that supports direct tensor creation on a given device.""" def __init__(self, action_dimension, mu=0.0, sigma=0.1, scale=1.0, dt=0.9998): self.act_dimension = action_dimension @@ -88,15 +84,14 @@ def __init__(self, action_dimension, mu=0.0, sigma=0.1, scale=1.0, dt=0.9998): self.dt = dt def noise(self, device=None, dtype=th.float): - """ - Generates noise using torch.normal(), ensuring efficient execution on GPU if needed. + """Generate noise using torch.normal() ensuring efficient execution on GPU if needed. Args: - - device (torch.device, optional): Target device (e.g., 'cuda' or 'cpu'). - - dtype (torch.dtype, optional): Data type of the tensor (default: torch.float32). + device: Target device (e.g., 'cuda' or 'cpu'). + dtype: Data type of the tensor (default: torch.float32). Returns: - - torch.Tensor: Noise tensor on the specified device. + Noise tensor on the specified device. """ return ( self.dt @@ -115,9 +110,9 @@ def update_noise_decay(self, updated_decay: float): def polyak_update(params, target_params, tau: float): - """ - Perform a Polyak average update on ``target_params`` using ``params``: - target parameters are slowly updated towards the main parameters. + """Perform a Polyak average update on ``target_params`` using ``params``. + + Target parameters are slowly updated towards the main parameters. ``tau``, the soft update coefficient controls the interpolation: ``tau=1`` corresponds to copying the parameters to the target ones whereas nothing happens when ``tau=0``. 
The Polyak update is done in place, with ``no_grad``, and therefore does not create intermediate tensors, @@ -127,9 +122,9 @@ def polyak_update(params, target_params, tau: float): See https://github.com/DLR-RM/stable-baselines3/issues/93 Args: - params: parameters to use to update the target params - target_params: parameters to update - tau: the soft update coefficient ("Polyak update", between 0 and 1) + params: Parameters to use to update the target params. + target_params: Parameters to update. + tau: The soft update coefficient ("Polyak update", between 0 and 1). """ with th.no_grad(): for param, target_param in zip(params, target_params): @@ -139,9 +134,10 @@ def polyak_update(params, target_params, tau: float): def linear_schedule_func( start: float, end: float = 0, end_fraction: float = 1 ) -> Schedule: - """ - Create a function that interpolates linearly between start and end - between ``progress_remaining`` = 1 and ``progress_remaining`` = 1 - ``end_fraction``. + """Create a function that interpolates linearly between start and end. + + Interpolates linearly between start and end between ``progress_remaining`` = 1 + and ``progress_remaining`` = 1 - ``end_fraction``. Args: start: value to start with if ``progress_remaining`` = 1 @@ -151,11 +147,10 @@ def linear_schedule_func( of the complete training process. Returns: - Linear schedule function. + The linear schedule function. Note: Adapted from SB3: https://github.com/DLR-RM/stable-baselines3/blob/512eea923afad6f6da4bb53d72b6ea4c6d856e59/stable_baselines3/common/utils.py#L100 - """ def func(progress_remaining: float) -> float: @@ -168,17 +163,18 @@ def func(progress_remaining: float) -> float: def constant_schedule(val: float) -> Schedule: - """ - Create a function that returns a constant. It is useful for learning rate schedule (to avoid code duplication) + """Create a function that returns a constant. + + It is useful for learning rate schedule (to avoid code duplication). 
Args: - val: constant value + val: Constant value. + Returns: Constant schedule function. Note: From SB3: https://github.com/DLR-RM/stable-baselines3/blob/512eea923afad6f6da4bb53d72b6ea4c6d856e59/stable_baselines3/common/utils.py#L124 - """ def func(_): @@ -205,16 +201,18 @@ def copy_layer_data(dst, src): def transform_buffer_data(nested_dict: dict, device: th.device) -> np.ndarray: - """ - Transform nested dict {datetime -> {unit_id -> [values]}} into - torch tensor of shape (timesteps, powerplants, values). Compatible with buffer storage. + """Transform nested dict into torch tensor. + + Transforms nested dict {datetime -> {unit_id -> [values]}} into torch tensor + of shape (timesteps, powerplants, values). Compatible with buffer storage. Get tensors from GPU to CPU. Args: - nested_dict: Dict with structure {datetime -> {unit_id -> list[tensor]}} + nested_dict: Dict with structure {datetime -> {unit_id -> list[tensor]}}. + device: PyTorch device config. Returns: - th.Tensor: Shape (n_timesteps, n_powerplants, feature_dim) + Shape (n_timesteps, n_powerplants, feature_dim). """ # Get sorted lists of units and timestamps (for consistent ordering) all_times = sorted(nested_dict.keys()) @@ -262,9 +260,11 @@ def transfer_weights( act_dim: int, unique_obs: int, ) -> dict | None: - """ - Transfer weights from loaded model to new model. Copy only those obs- and action-slices for matching IDs. - New IDs keep their original (random) weights. Function only works if the neural network architecture remained stable besides the input layer, namely with the same hidden layers. + """Transfer weights from loaded model to new model. + + Copy only those obs- and action-slices for matching IDs. New IDs keep their + original (random) weights. Function only works if the neural network architecture + remained stable besides the input layer, namely with the same hidden layers. Args: model (th.nn.Module): The model to transfer weights to. 
diff --git a/assume/reinforcement_learning/neural_network_architecture.py b/assume/reinforcement_learning/neural_network_architecture.py index 74c8d69eb..08da7d719 100644 --- a/assume/reinforcement_learning/neural_network_architecture.py +++ b/assume/reinforcement_learning/neural_network_architecture.py @@ -13,8 +13,7 @@ class Critic(nn.Module): - """ - Base Critic class handling architecture generation and initialization. + """Base Critic class handling architecture generation and initialization. Args: n_agents (int): Number of agents @@ -55,9 +54,7 @@ def _get_architecture( return hidden_sizes def _build_q_network(self) -> nn.ModuleList: - """ - Dynamically create a Q-network given the chosen hidden layer sizes. - """ + """Dynamically create a Q-network given the chosen hidden layer sizes.""" layers = nn.ModuleList() input_dim = ( self.obs_dim + self.act_dim @@ -86,9 +83,11 @@ class CriticTD3(Critic): """Initialize parameters and build model. Args: - n_agents (int): Number of agents - obs_dim (int): Dimension of each state - act_dim (int): Dimension of each action + n_agents: Number of agents. + obs_dim: Dimension of each state. + act_dim: Dimension of each action. + float_type: Data type for parameters. + unique_obs_dim: Dimension of agent-specific observations. """ def __init__( self, @@ -117,9 +116,7 @@ def forward( obs: th.Tensor, actions: th.Tensor ) -> Tuple[th.Tensor, th.Tensor]: - """ - Forward pass through both Q-networks. - """ + """Forward pass through both Q-networks.""" xu = th.cat([obs, actions], dim=1) # Concatenate obs & actions # Compute Q1 @@ -135,9 +132,7 @@ def q1_forward( obs: th.Tensor, actions: th.Tensor ) -> th.Tensor: - """ - Compute only Q1 (used during actor updates). - """ + """Compute only Q1 (used during actor updates).""" x = th.cat([obs, actions], dim=1) x = nn.Sequential(*self.q1_layers)(x) @@ -195,10 +190,10 @@ class CriticPPO(Critic): """Initialize parameters and build PPO value network. 
Args: - n_agents (int): Number of agents - obs_dim (int): Dimension of observation per agent - float_type: Data type for parameters - unique_obs_dim: Dimension of agent-specific observations + n_agents: Number of agents. + obs_dim: Dimension of observation per agent. + float_type: Data type for parameters. + unique_obs_dim: Dimension of agent-specific observations. """ def __init__( @@ -223,9 +218,7 @@ def __init__( self._init_weights() def _init_weights(self) -> None: - """ - Apply Orthogonal initialization with appropriate gains. - """ + """Apply Orthogonal initialization with appropriate gains.""" def init_layer(m): if isinstance(m, nn.Linear): if m.out_features == 1: # Output layer @@ -245,9 +238,7 @@ def forward(self, obs: th.Tensor) -> th.Tensor: class Actor(nn.Module): - """ - Parent class for actor networks. - """ + """Parent class for actor networks.""" def __init__(self): super().__init__() @@ -265,9 +256,7 @@ def __init__(self): class MLPActor(Actor): - """ - The neural network for the MLP actor. - """ + """The neural network for the MLP actor.""" def __init__(self, obs_dim: int, act_dim: int, float_type, *args, **kwargs): super().__init__() @@ -299,16 +288,16 @@ def forward(self, obs): class LSTMActor(Actor): - """ - The LSTM recurrent neural network for the actor. + """The LSTM recurrent neural network for the actor. Based on "Multi-Period and Multi-Spatial Equilibrium Analysis in Imperfect Electricity Markets" by Ye at al. (2019) - Note: the original source code was not available, therefore this implementation was derived from the published paper. - Adjustments to resemble final layers from MLPActor: - - dense layer 2 was omitted - - single output layer with softsign activation function to output actions directly instead of two output layers for mean and stddev + Note: + The original source code was not available, therefore this implementation was derived from the published paper. 
+ Adjustments to resemble final layers from MLPActor: + - dense layer 2 was omitted + - single output layer with softsign activation function to output actions directly instead of two output layers for mean and stddev """ def __init__( @@ -384,9 +373,7 @@ def forward(self, obs): class ActorPPO(nn.Module): - """ - PPO Actor network with stochastic policy (Gaussian). - """ + """PPO Actor network with stochastic policy (Gaussian).""" def __init__( self, @@ -463,9 +450,7 @@ def forward(self, obs: th.Tensor, deterministic: bool = False) -> th.Tensor: return th.clamp(action, -1.0, 1.0) def get_distribution(self, obs: th.Tensor) -> tuple[th.Tensor, th.Tensor]: - """ - Get the policy distribution parameters. - """ + """Get the policy distribution parameters.""" x = F.relu(self.FC1(obs)) x = F.relu(self.FC2(x)) mean = th.tanh(self.mean_layer(x)) # Bounded to [-1, 1] @@ -478,15 +463,14 @@ def get_action_and_log_prob( obs: th.Tensor, deterministic: bool = False, ) -> tuple[th.Tensor, th.Tensor]: - """ - Sample action and compute log probability. + """Sample action and compute log probability. Args: - obs: Observations - deterministic: If True, return mean action + obs: Observations. + deterministic: If True, return mean action. Returns: - Tuple of (action, log_prob) + Tuple of (action, log_prob). """ mean, log_std = self.get_distribution(obs) std = log_std.exp() @@ -511,17 +495,16 @@ def evaluate_actions( obs: th.Tensor, actions: th.Tensor, ) -> tuple[th.Tensor, th.Tensor, th.Tensor]: - """ - Evaluate log probability and entropy for given actions. + """Evaluate log probability and entropy for given actions. Used during PPO update to compute importance ratio. Args: - obs: Observations - actions: Actions to evaluate + obs: Observations. + actions: Actions to evaluate. Returns: - Tuple of (log_prob, entropy, values) + Tuple of (log_prob, entropy, values). 
""" mean, log_std = self.get_distribution(obs) std = log_std.exp() @@ -546,9 +529,7 @@ def _compute_log_prob( class LSTMActorPPO(ActorPPO): - """ - PPO Actor network with LSTM architecture and stochastic policy (Gaussian). - """ + """PPO Actor network with LSTM architecture and stochastic policy (Gaussian).""" def __init__( self, diff --git a/examples/inputs/example_02a/config.yaml b/examples/inputs/example_02a/config.yaml index 7a78a8321..64034431d 100644 --- a/examples/inputs/example_02a/config.yaml +++ b/examples/inputs/example_02a/config.yaml @@ -1,154 +1,179 @@ +# SPDX-FileCopyrightText: ASSUME Developers +# +# SPDX-License-Identifier: AGPL-3.0-or-later + base: + start_date: 2019-03-01 00:00 end_date: 2019-03-31 00:00 + time_step: 1h + save_frequency_hours: null + seed: null + learning_config: - algorithm: matd3 - batch_size: 256 - continue_learning: false - device: cpu - gamma: 0.99 learning_mode: true - learning_rate: 0.001 + continue_learning: false + trained_policies_save_path: null + trained_policies_load_path: null max_bid_price: 100 + algorithm: matd3 + learning_rate: 0.001 + training_episodes: 100 + actor_architecture: mlp + train_freq: 100h + batch_size: 128 + gamma: 0.99 + device: cpu + validation_episodes_interval: 5 + + # Off-policy parameters (required for TD3/DDPG algorithms) off_policy: - action_noise_schedule: null - episodes_collecting_initial_experience: 5 - gradient_steps: 24 - noise_dt: 1 - noise_scale: 1 + episodes_collecting_initial_experience: 3 + gradient_steps: 10 noise_sigma: 0.1 - replay_buffer_size: 50000 + noise_scale: 1 + action_noise_schedule: linear + noise_dt: 1 + replay_buffer_size: 10000 + + # On-policy parameters (required for PPO/MAPPO algorithms) on_policy: - clip_ratio: 0.1 + clip_ratio: 0.2 entropy_coef: 0.01 + vf_coef: 0.5 gae_lambda: 0.95 n_epochs: 25 - vf_coef: 0.5 - train_freq: 24h - trained_policies_save_path: null - training_episodes: 100 - validation_episodes_interval: 5 + markets_config: EOM: - market_mechanism: 
pay_as_clear - maximum_bid_price: 3000 - maximum_bid_volume: 100000 - minimum_bid_price: -500 - opening_duration: 1h - opening_frequency: 1h operator: EOM_operator - price_unit: EUR/MWh product_type: energy - products: - - count: 1 - duration: 1h - first_delivery: 1h start_date: 2019-03-01 00:00 + products: + - duration: 1h + count: 1 + first_delivery: 1h + opening_frequency: 1h + opening_duration: 1h volume_unit: MWh - save_frequency_hours: null - seed: null - start_date: 2019-03-01 00:00 - time_step: 1h + maximum_bid_volume: 100000 + maximum_bid_price: 3000 + minimum_bid_price: -500 + price_unit: EUR/MWh + market_mechanism: pay_as_clear + base_lstm: + start_date: 2019-03-01 00:00 end_date: 2019-03-31 00:00 + time_step: 1h + save_frequency_hours: null + seed: null + learning_config: - actor_architecture: lstm + learning_mode: true + continue_learning: false + trained_policies_save_path: null + max_bid_price: 100 algorithm: matd3 + learning_rate: 0.001 + training_episodes: 50 + train_freq: 24h batch_size: 256 - continue_learning: false + gamma: 0.99 device: cpu + validation_episodes_interval: 5 early_stopping_steps: 10 early_stopping_threshold: 0.05 - gamma: 0.99 - learning_mode: true - learning_rate: 0.001 - max_bid_price: 100 + actor_architecture: lstm + + # Off-policy parameters (required for TD3/DDPG algorithms) off_policy: - action_noise_schedule: linear episodes_collecting_initial_experience: 5 gradient_steps: 24 - noise_dt: 1 - noise_scale: 1 noise_sigma: 0.1 + noise_scale: 1 + action_noise_schedule: linear + noise_dt: 1 replay_buffer_size: 10000 + + # On-policy parameters (required for PPO/MAPPO algorithms) on_policy: clip_ratio: 0.2 entropy_coef: 0.01 + vf_coef: 0.5 gae_lambda: 0.95 n_epochs: 25 - vf_coef: 0.5 - train_freq: 24h - trained_policies_save_path: null - training_episodes: 50 - validation_episodes_interval: 5 + markets_config: EOM: - market_mechanism: pay_as_clear - maximum_bid_price: 3000 - maximum_bid_volume: 100000 - minimum_bid_price: -500 - 
opening_duration: 1h - opening_frequency: 1h operator: EOM_operator - price_unit: EUR/MWh product_type: energy - products: - - count: 1 - duration: 1h - first_delivery: 1h start_date: 2019-03-01 00:00 + products: + - duration: 1h + count: 1 + first_delivery: 1h + opening_frequency: 1h + opening_duration: 1h volume_unit: MWh - save_frequency_hours: null - seed: null - start_date: 2019-03-01 00:00 - time_step: 1h + maximum_bid_volume: 100000 + maximum_bid_price: 3000 + minimum_bid_price: -500 + price_unit: EUR/MWh + market_mechanism: pay_as_clear + tiny: + start_date: 2019-01-01 00:00 end_date: 2019-01-05 00:00 + time_step: 1h + save_frequency_hours: null + seed: null + learning_config: - actor_architecture: mlp + learning_mode: true + continue_learning: false + trained_policies_save_path: null + max_bid_price: 100 algorithm: matd3 + learning_rate: 0.001 + training_episodes: 10 + train_freq: 24h batch_size: 64 - continue_learning: false - device: cpu gamma: 0.99 - learning_mode: true - learning_rate: 0.001 - max_bid_price: 100 + device: cpu + validation_episodes_interval: 5 + actor_architecture: mlp + + # Off-policy parameters (required for TD3/DDPG algorithms) off_policy: - action_noise_schedule: linear episodes_collecting_initial_experience: 3 gradient_steps: 24 - noise_dt: 1 - noise_scale: 1 noise_sigma: 0.1 + noise_scale: 1 + action_noise_schedule: linear + noise_dt: 1 replay_buffer_size: 10000 + + # On-policy parameters (required for PPO/MAPPO algorithms) on_policy: clip_ratio: 0.2 entropy_coef: 0.01 + vf_coef: 0.5 gae_lambda: 0.95 n_epochs: 25 - vf_coef: 0.5 - train_freq: 24h - trained_policies_save_path: null - training_episodes: 10 - validation_episodes_interval: 5 + markets_config: EOM: - market_mechanism: pay_as_clear - maximum_bid_price: 3000 - maximum_bid_volume: 100000 - minimum_bid_price: -500 - opening_duration: 1h - opening_frequency: 1h operator: EOM_operator - price_unit: EUR/MWh product_type: energy - products: - - count: 1 - duration: 1h - 
first_delivery: 1h start_date: 2019-01-01 00:00 + products: + - duration: 1h + count: 1 + first_delivery: 1h + opening_frequency: 1h + opening_duration: 1h volume_unit: MWh - save_frequency_hours: null - seed: null - start_date: 2019-01-01 00:00 - time_step: 1h + maximum_bid_volume: 100000 + maximum_bid_price: 3000 + minimum_bid_price: -500 + price_unit: EUR/MWh \ No newline at end of file diff --git a/examples/inputs/example_02d/config.yaml b/examples/inputs/example_02d/config.yaml index 8c48781c4..de46a341c 100644 --- a/examples/inputs/example_02d/config.yaml +++ b/examples/inputs/example_02d/config.yaml @@ -58,117 +58,3 @@ base: minimum_bid_price: -500 price_unit: EUR/MWh market_mechanism: pay_as_clear - -# --------------------------------------------------------------------------- -# MADDPG configuration (off-policy, no policy delay / twin critics) -# Switch algorithm: maddpg — all off_policy params remain active. -# On-policy block is kept for reference but not used by this algorithm. 
-# --------------------------------------------------------------------------- -# base: -# start_date: 2019-03-01 00:00 -# end_date: 2019-04-01 00:00 -# time_step: 1h -# save_frequency_hours: null -# seed: null -# -# learning_config: -# learning_mode: true -# continue_learning: false -# trained_policies_save_path: null -# trained_policies_load_path: null -# max_bid_price: 100 -# algorithm: maddpg -# learning_rate: 0.001 -# training_episodes: 100 -# train_freq: 100h -# batch_size: 128 -# gamma: 0.99 -# device: cpu -# validation_episodes_interval: 5 -# -# # Off-policy parameters (active for MADDPG) -# off_policy: -# episodes_collecting_initial_experience: 5 -# gradient_steps: 10 -# noise_sigma: 0.1 -# noise_scale: 1 -# action_noise_schedule: linear -# noise_dt: 1 -# replay_buffer_size: 10000 -# -# # On-policy parameters (not used by MADDPG, kept for reference) -# on_policy: -# clip_ratio: 0.2 -# entropy_coef: 0.01 -# vf_coef: 0.5 -# gae_lambda: 0.95 -# n_epochs: 25 -# -# markets_config: -# EOM: -# operator: EOM_operator -# product_type: energy -# products: -# - duration: 1h -# count: 1 -# first_delivery: 1h -# opening_frequency: 1h -# opening_duration: 1h -# volume_unit: MWh -# maximum_bid_volume: 100000 -# maximum_bid_price: 3000 -# minimum_bid_price: -500 -# price_unit: EUR/MWh -# market_mechanism: pay_as_clear - -# --------------------------------------------------------------------------- -# MAPPO configuration (on-policy) -# Switch algorithm: mappo — only on_policy params are used. -# Off-policy block is not applicable for MAPPO; it is omitted. 
-# --------------------------------------------------------------------------- -# base: -# start_date: 2019-03-01 00:00 -# end_date: 2019-04-01 00:00 -# time_step: 1h -# save_frequency_hours: null -# seed: null -# -# learning_config: -# learning_mode: true -# continue_learning: false -# trained_policies_save_path: null -# trained_policies_load_path: null -# max_bid_price: 100 -# algorithm: mappo -# learning_rate: 0.001 -# training_episodes: 100 -# train_freq: 100h -# batch_size: 128 -# gamma: 0.99 -# device: cpu -# validation_episodes_interval: 5 -# -# # On-policy parameters (active for MAPPO) -# on_policy: -# clip_ratio: 0.2 -# entropy_coef: 0.01 -# vf_coef: 0.5 -# gae_lambda: 0.95 -# n_epochs: 25 -# -# markets_config: -# EOM: -# operator: EOM_operator -# product_type: energy -# products: -# - duration: 1h -# count: 1 -# first_delivery: 1h -# opening_frequency: 1h -# opening_duration: 1h -# volume_unit: MWh -# maximum_bid_volume: 100000 -# maximum_bid_price: 3000 -# minimum_bid_price: -500 -# price_unit: EUR/MWh -# market_mechanism: pay_as_clear From 171b5e96618d8dfcad5df3f9bc6046f097607b07 Mon Sep 17 00:00:00 2001 From: Harshul-18 Date: Wed, 1 Apr 2026 11:37:00 +0200 Subject: [PATCH 23/44] fixed the PPO implementation, the policy update logic, corrected the ordering for the agent --- .../algorithms/mappo.py | 50 ++++++++++------- .../reinforcement_learning/learning_role.py | 55 +++++++++++++++---- assume/strategies/learning_strategies.py | 26 +++------ 3 files changed, 82 insertions(+), 49 deletions(-) diff --git a/assume/reinforcement_learning/algorithms/mappo.py b/assume/reinforcement_learning/algorithms/mappo.py index 2c78fd3d8..3b2092776 100644 --- a/assume/reinforcement_learning/algorithms/mappo.py +++ b/assume/reinforcement_learning/algorithms/mappo.py @@ -48,12 +48,12 @@ class PPO(A2CAlgorithm): def __init__( self, learning_role, - clip_range=0.2, - clip_range_vf=0.1, - n_epochs=50, - entropy_coef=0.05, - vf_coef=1.0, - max_grad_norm=0.5, + 
clip_range=None, + clip_range_vf=None, + n_epochs=None, + entropy_coef=None, + vf_coef=None, + max_grad_norm=None, ): """Initialize PPO algorithm with specific hyperparameters. @@ -73,15 +73,25 @@ def __init__( self.critic_architecture_class = CriticPPO config = self.learning_config - ppo_config = getattr(config, "ppo", None) - - # Use PPO-specific config if available, otherwise use defaults - self.clip_range = clip_range if clip_range is not None else getattr(ppo_config, "clip_ratio", 0.2) - self.clip_range_vf = clip_range_vf if clip_range_vf is not None else getattr(ppo_config, "clip_range_vf", None) - self.n_epochs = n_epochs if n_epochs is not None else getattr(ppo_config, "n_epochs", 10) - self.entropy_coef = entropy_coef if entropy_coef is not None else getattr(ppo_config, "entropy_coef", 0.01) - self.vf_coef = vf_coef if vf_coef is not None else getattr(ppo_config, "vf_coef", 0.5) - self.max_grad_norm = max_grad_norm if max_grad_norm is not None else getattr(ppo_config, "max_grad_norm", 0.5) + on_policy_config = config.on_policy + + # Using on-policy config unless explicitly overridden via constructor args. 
+ self.clip_range = ( + clip_range if clip_range is not None else on_policy_config.clip_ratio + ) + self.clip_range_vf = clip_range_vf + self.n_epochs = n_epochs if n_epochs is not None else on_policy_config.n_epochs + self.entropy_coef = ( + entropy_coef + if entropy_coef is not None + else on_policy_config.entropy_coef + ) + self.vf_coef = vf_coef if vf_coef is not None else on_policy_config.vf_coef + self.max_grad_norm = ( + max_grad_norm + if max_grad_norm is not None + else on_policy_config.max_grad_norm + ) # Update counter self.n_updates = 0 @@ -107,9 +117,7 @@ def create_actors(self) -> None: >>> ppo.create_actors() >>> # Creates actor network and optimizer for each strategy """ - config = self.learning_config - ppo_config = getattr(config, "ppo", None) - actor_architecture = getattr(ppo_config, "actor_architecture", "mlp") + actor_architecture = self.learning_config.on_policy.actor_architecture for strategy in self.learning_role.rl_strats.values(): # Create PPO Actor @@ -212,10 +220,12 @@ def update_policy(self) -> None: """ logger.debug("Updating Policy") - strategies = list(self.learning_role.rl_strats.values()) + # Keeping strategy order aligned with rollout-buffer column order. + sorted_unit_ids = sorted(self.learning_role.rl_strats.keys()) + strategies = [self.learning_role.rl_strats[u_id] for u_id in sorted_unit_ids] n_rl_agents = len(strategies) - # Get buffer (will be RolloutBuffer for on-policy algorithms) + # Getting the buffer, this will be a RolloutBuffer for on-policy algorithms. 
rollout_buffer = self.learning_role.buffer # Check if rollout buffer has data diff --git a/assume/reinforcement_learning/learning_role.py b/assume/reinforcement_learning/learning_role.py index 3411dc381..ed637533b 100644 --- a/assume/reinforcement_learning/learning_role.py +++ b/assume/reinforcement_learning/learning_role.py @@ -331,8 +331,11 @@ async def _store_to_buffer_and_update_sync(self, cache, device) -> None: # Add data to buffer - type depends on algorithm category if is_on_policy(self.learning_config.algorithm): - # For on-policy algorithms (PPO/MAPPO), use RolloutBuffer + # Using RolloutBuffer for on-policy algorithms (PPO/MAPPO). for timestamp in sorted(cache["obs"].keys()): + sorted_unit_ids = sorted(cache["obs"][timestamp].keys()) + n_rl_agents = len(sorted_unit_ids) + obs_data = transform_buffer_data( { timestamp: cache["obs"][timestamp] @@ -351,13 +354,43 @@ async def _store_to_buffer_and_update_sync(self, cache, device) -> None: }, device ) - - values_data = transform_buffer_data( - { - timestamp: cache["values"][timestamp] - }, - device - ) + + # Computing MAPPO value targets with the centralized critic + #using the joint observation available at this timestamp. 
+ if self.learning_config.algorithm == "mappo": + values_data = np.zeros((1, n_rl_agents, 1), dtype=np.float32) + obs_step = obs_data[0] + unique_obs_all = obs_step[ + :, self.rl_algorithm.obs_dim - self.rl_algorithm.unique_obs_dim : + ] + + with th.no_grad(): + for i, unit_id in enumerate(sorted_unit_ids): + strategy = self.rl_strats[unit_id] + obs_i = obs_step[i : i + 1] + other_unique = np.concatenate( + (unique_obs_all[:i], unique_obs_all[i + 1 :]), + axis=0, + ) + centralized_obs = np.concatenate( + (obs_i, other_unique.reshape(1, -1)), + axis=1, + ) + obs_tensor = th.as_tensor( + centralized_obs, + device=self.device, + dtype=self.float_type, + ) + values_data[0, i, 0] = ( + strategy.critics(obs_tensor).cpu().numpy().reshape(-1)[0] + ) + else: + values_data = transform_buffer_data( + { + timestamp: cache["values"][timestamp] + }, + device + ) log_probs_data = transform_buffer_data( { @@ -373,7 +406,7 @@ async def _store_to_buffer_and_update_sync(self, cache, device) -> None: device ) - # Add to rollout buffer + # Adding data to the rollout buffer. self.buffer.add( obs = obs_data, action = actions_data, @@ -383,8 +416,8 @@ async def _store_to_buffer_and_update_sync(self, cache, device) -> None: log_prob = log_probs_data ) else: - # For off-policy algorithms (TD3/DDPG), use ReplayBuffer - # rewrite dict so that obs.shape == (n_rl_units, obs_dim) and sorted by keys and store in buffer + # Using ReplayBuffer for off-policy algorithms (TD3/DDPG). + # Rewriting the dict so obs.shape == (n_rl_units, obs_dim), sorting by keys, and storing it in the buffer. 
self.buffer.add( obs = transform_buffer_data(cache["obs"], device), actions = transform_buffer_data(cache["actions"], device), diff --git a/assume/strategies/learning_strategies.py b/assume/strategies/learning_strategies.py index 7337fafd8..15c07d539 100644 --- a/assume/strategies/learning_strategies.py +++ b/assume/strategies/learning_strategies.py @@ -73,8 +73,8 @@ def __init__(self, *args, **kwargs): self.exploration_noise_std = self.learning_config.exploration_noise_std if self.learning_mode or self.evaluation_mode: - # learning role overwrites this if loaded from file or after initial experience episodes - self.collect_initial_experience_mode = True + # Keeping initial random exploration only for off-policy methods. + self.collect_initial_experience_mode = is_off_policy(self.algorithm) if is_off_policy(self.algorithm): @@ -290,30 +290,20 @@ def get_actions(self, next_observation): # ============================================================================= # 2.1 Get Actions and handle exploration # ============================================================================= - # only use noise as the action to enforce exploration + # Using only noise as the action to enforce exploration. curr_action = noise - # For PPO, store dummy log_prob and value during initial exploration - if self.algorithm == "mappo": - self._last_log_prob = th.tensor(0.0, device=self.device) - self._last_value = th.tensor(0.0, device=self.device) - else: - # For PPO/MAPPO, always use the policy (no initial random exploration) + # Using the policy forMAPPO (no initial random exploration). if self.algorithm == "mappo": - # PPO: use get_action_and_log_prob for proper stochastic sampling + # Using get_action_and_log_prob for proper PPO stochastic sampling. 
curr_action, log_prob = self.actor.get_action_and_log_prob(next_observation.unsqueeze(0)) curr_action = curr_action.squeeze(0).detach() self._last_log_prob = log_prob.squeeze(0).detach() + # Computing the value later from centralized observations in learning_role. + self._last_value = th.tensor(0.0, device=self.device) - # Get value estimate from critic (if available) - if hasattr(self.learning_role, 'critics') and self.unit_id in self.learning_role.critics: - critic = self.learning_role.critics[self.unit_id] - self._last_value = critic(next_observation.unsqueeze(0)).squeeze().detach() - else: - self._last_value = th.tensor(0.0, device=self.device) - - # PPO uses stochastic policy, no external noise needed + # Using stochastic PPO policy with no external noise. noise = th.zeros_like(curr_action, dtype=self.float_type) else: # TD3/DDPG: if we are not in the initial exploration phase we chose the action with the actor neural net From 47349dab1212a10769374a185b56886b43390cf8 Mon Sep 17 00:00:00 2001 From: Harshul-18 Date: Thu, 9 Apr 2026 12:46:56 +0200 Subject: [PATCH 24/44] added 'get_distribution' method and splitted the forward method into _extract_features and forward to avoid redundancy in LSTMActorPPO --- .../neural_network_architecture.py | 43 +++++++++++++------ examples/inputs/example_02a/config.yaml | 2 +- examples/inputs/example_02b/config.yaml | 4 +- 3 files changed, 33 insertions(+), 16 deletions(-) diff --git a/assume/reinforcement_learning/neural_network_architecture.py b/assume/reinforcement_learning/neural_network_architecture.py index 08da7d719..eb328296b 100644 --- a/assume/reinforcement_learning/neural_network_architecture.py +++ b/assume/reinforcement_learning/neural_network_architecture.py @@ -355,7 +355,7 @@ def forward(self, obs): outputs = [] for time_step in x1.split(1, dim=2): - time_step = time_step.reshape(-1, self.num_timeseris_obs_dim) + time_step = time_step.reshape(-1, self.num_timeseries_obs_dim) h_t, c_t = self.LSTM1(time_step, (h_t, 
c_t)) h_t2, c_t2 = self.LSTM2(h_t, (h_t2, c_t2)) outputs += [h_t2] @@ -551,6 +551,13 @@ def __init__( self.num_timeseries_obs_dim = num_timeseries_obs_dim self.activation = "softsign" + if self.activation not in activation_function_limit: + raise ValueError( + f"Activation '{self.activation}' not supported! Supported: {list(activation_function_limit.keys())}" + ) + + self.min_output = activation_function_limit[self.activation]["min"] + self.max_output = activation_function_limit[self.activation]["max"] self.activation_function = activation_function_limit[self.activation]["func"] # Compute timeseries length for LSTM @@ -596,8 +603,8 @@ def init_layer(m): nn.init.orthogonal_(self.mean_layer.weight, gain=0.01) nn.init.zeros_(self.mean_layer.bias) - def forward(self, obs: th.Tensor, deterministic: bool = False) -> th.Tensor: - """Forward pass""" + def _extract_features(self, obs: th.Tensor) -> tuple[th.Tensor, bool]: + """Build latent features from timeseries and stationary observations.""" if obs.dim() not in (1, 2): raise ValueError( f"LSTMCell: Expected input to be 1D or 2D, got {obs.dim()}D instead" @@ -632,24 +639,21 @@ def forward(self, obs: th.Tensor, deterministic: bool = False) -> th.Tensor: h_t2, c_t2 = self.LSTM2(h_t, (h_t2, c_t2)) outputs.append(h_t2) - # Concatenate LSTM outputs + # Concatenate LSTM outputs with stationary observations outputs = th.cat(outputs, dim=1) - - # Concatenate with stationary observations x = th.cat((outputs, x2), dim=1) - - # FC Layers x = F.relu(self.FC1(x)) - mean = th.tanh(self.mean_layer(x)) # Bounded to [-1, 1] - if not is_batched: - mean = mean.squeeze(0) + return x, is_batched + + def forward(self, obs: th.Tensor, deterministic: bool = False) -> th.Tensor: + """Forward pass""" + mean, log_std = self.get_distribution(obs) if deterministic: return mean # Sample from Gaussian during training - log_std = self.log_std.expand_as(mean) std = log_std.exp() # Ensure positive # Add small epsilon for numerical stability std = std + 
1e-6 @@ -657,4 +661,17 @@ def forward(self, obs: th.Tensor, deterministic: bool = False) -> th.Tensor: action = mean + std * noise # Clamp to valid range - return th.clamp(action, -1.0, 1.0) \ No newline at end of file + return th.clamp(action, -1.0, 1.0) + + def get_distribution(self, obs: th.Tensor) -> tuple[th.Tensor, th.Tensor]: + """Get Gaussian policy parameters from LSTM features.""" + x, is_batched = self._extract_features(obs) + + mean = th.tanh(self.mean_layer(x)) + log_std = self.log_std.expand_as(mean) + + if not is_batched: + mean = mean.squeeze(0) + log_std = log_std.squeeze(0) + + return mean, log_std \ No newline at end of file diff --git a/examples/inputs/example_02a/config.yaml b/examples/inputs/example_02a/config.yaml index 64034431d..b3dbfb9c8 100644 --- a/examples/inputs/example_02a/config.yaml +++ b/examples/inputs/example_02a/config.yaml @@ -4,7 +4,7 @@ base: start_date: 2019-03-01 00:00 - end_date: 2019-03-31 00:00 + end_date: 2019-03-07 00:00 time_step: 1h save_frequency_hours: null seed: null diff --git a/examples/inputs/example_02b/config.yaml b/examples/inputs/example_02b/config.yaml index a10036ae3..3b5008609 100644 --- a/examples/inputs/example_02b/config.yaml +++ b/examples/inputs/example_02b/config.yaml @@ -4,7 +4,7 @@ base: start_date: 2019-03-01 00:00 - end_date: 2019-04-01 00:00 + end_date: 2019-03-06 00:00 time_step: 1h save_frequency_hours: null seed: null @@ -17,7 +17,7 @@ base: max_bid_price: 100 algorithm: matd3 learning_rate: 0.001 - training_episodes: 100 + training_episodes: 400 train_freq: 100h batch_size: 128 gamma: 0.99 From cdfc3d6c82a76233feea19e31c6ba7b91b3ae4b0 Mon Sep 17 00:00:00 2001 From: Harshul-18 Date: Sat, 25 Apr 2026 10:34:04 +0200 Subject: [PATCH 25/44] =?UTF-8?q?Fix=20Path=20import=20bug=20in=20learning?= =?UTF-8?q?=5Frole.py=20=E2=80=94=20fixing=20the=20runtime=20error=20for?= =?UTF-8?q?=20continue=5Flearning?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- 
assume/reinforcement_learning/learning_role.py | 1 + 1 file changed, 1 insertion(+) diff --git a/assume/reinforcement_learning/learning_role.py b/assume/reinforcement_learning/learning_role.py index e4bd95cef..b6272e7dc 100644 --- a/assume/reinforcement_learning/learning_role.py +++ b/assume/reinforcement_learning/learning_role.py @@ -5,6 +5,7 @@ import logging from collections import defaultdict from datetime import datetime +from pathlib import Path import numpy as np import pandas as pd From 90ca11273d4f3cee70dd42f848b9f21310897f2e Mon Sep 17 00:00:00 2001 From: Harshul-18 Date: Sun, 26 Apr 2026 09:32:40 +0200 Subject: [PATCH 26/44] Fixed assume import logic API --- assume/__init__.py | 46 +++ assume/reinforcement_learning/__init__.py | 38 ++- .../algorithms/__init__.py | 24 +- tests/test_public_api.py | 290 ++++++++++++++++++ 4 files changed, 396 insertions(+), 2 deletions(-) create mode 100644 tests/test_public_api.py diff --git a/assume/__init__.py b/assume/__init__.py index 359e409ba..01f8839ce 100644 --- a/assume/__init__.py +++ b/assume/__init__.py @@ -5,6 +5,21 @@ from importlib.metadata import version from assume.common import MarketConfig, MarketProduct +from assume.reinforcement_learning import ( + A2CAlgorithm, + DDPG, + LSTMActor, + Learning, + MLPActor, + PPO, + RLAlgorithm, + ReplayBuffer, + ReplayBufferSamples, + RolloutBuffer, + RolloutBufferSamples, + TD3, + actor_architecture_aliases, +) from assume.scenario.loader_csv import ( load_custom_units, load_scenario_folder, @@ -16,3 +31,34 @@ __author__ = "ASSUME Developers: Nick Harder, Kim Miskiw, Florian Maurer, Manish Khanra" __copyright__ = "AGPL-3.0 License" + +__all__ = [ + # Framework version + "__version__", + # World & scenario + "World", + "load_scenario_folder", + "load_custom_units", + "run_learning", + # Market primitives + "MarketConfig", + "MarketProduct", + # RL orchestration + "Learning", + # RL algorithm base classes + "RLAlgorithm", + "A2CAlgorithm", + # RL concrete algorithms + 
"TD3", + "DDPG", + "PPO", + # RL actor architectures + "MLPActor", + "LSTMActor", + "actor_architecture_aliases", + # RL buffers + "ReplayBuffer", + "ReplayBufferSamples", + "RolloutBuffer", + "RolloutBufferSamples", +] diff --git a/assume/reinforcement_learning/__init__.py b/assume/reinforcement_learning/__init__.py index a10131609..099d8b56e 100644 --- a/assume/reinforcement_learning/__init__.py +++ b/assume/reinforcement_learning/__init__.py @@ -2,5 +2,41 @@ # # SPDX-License-Identifier: AGPL-3.0-or-later -from assume.reinforcement_learning.buffer import ReplayBuffer +from assume.reinforcement_learning.algorithms import ( + A2CAlgorithm, + DDPG, + LSTMActor, + MLPActor, + PPO, + RLAlgorithm, + TD3, + actor_architecture_aliases, +) +from assume.reinforcement_learning.buffer import ( + ReplayBuffer, + ReplayBufferSamples, + RolloutBuffer, + RolloutBufferSamples, +) from assume.reinforcement_learning.learning_role import Learning + +__all__ = [ + # Learning orchestration + "Learning", + # Algorithms base classes + "RLAlgorithm", + "A2CAlgorithm", + # Algorithms concrete implementations + "TD3", + "DDPG", + "PPO", + # Actor architectures + "MLPActor", + "LSTMActor", + "actor_architecture_aliases", + # Buffers + "ReplayBuffer", + "ReplayBufferSamples", + "RolloutBuffer", + "RolloutBufferSamples", +] diff --git a/assume/reinforcement_learning/algorithms/__init__.py b/assume/reinforcement_learning/algorithms/__init__.py index 645e5c991..8fa7dae2e 100644 --- a/assume/reinforcement_learning/algorithms/__init__.py +++ b/assume/reinforcement_learning/algorithms/__init__.py @@ -5,11 +5,33 @@ from torch import nn from assume.reinforcement_learning.neural_network_architecture import ( - MLPActor, LSTMActor, + MLPActor, ) actor_architecture_aliases: dict[str, type[nn.Module]] = { "mlp": MLPActor, "lstm": LSTMActor, } + +from assume.reinforcement_learning.algorithms.base_algorithm import ( + A2CAlgorithm, + RLAlgorithm, +) +from assume.reinforcement_learning.algorithms.maddpg 
import DDPG +from assume.reinforcement_learning.algorithms.mappo import PPO +from assume.reinforcement_learning.algorithms.matd3 import TD3 + +__all__ = [ + # Base classes + "RLAlgorithm", + "A2CAlgorithm", + # Concrete algorithms + "TD3", + "DDPG", + "PPO", + # Actor architectures + "actor_architecture_aliases", + "MLPActor", + "LSTMActor", +] diff --git a/tests/test_public_api.py b/tests/test_public_api.py new file mode 100644 index 000000000..76c844006 --- /dev/null +++ b/tests/test_public_api.py @@ -0,0 +1,290 @@ +# SPDX-FileCopyrightText: ASSUME Developers +# +# SPDX-License-Identifier: AGPL-3.0-or-later + +"""Tests for verification of the public API symbols are importable consistently. + +Import layers which are covered are as follows: +- assume.reinforcement_learning.algorithms for algorithm-level package +- assume.reinforcement_learning for RL sub-package +- assume for top-level package +""" + +import pytest + +try: + import torch as th + + TORCH_AVAILABLE = True +except ImportError: + TORCH_AVAILABLE = False + + +# --------------------------------------------------------------------------- +# Layer 1 – assume.reinforcement_learning.algorithms +# --------------------------------------------------------------------------- + + +@pytest.mark.require_learning +class TestAlgorithmsPackageExports: + """All algorithm classes and helpers re-exported from the algorithms package.""" + + def test_import_rl_algorithm_base(self): + from assume.reinforcement_learning.algorithms import RLAlgorithm + + assert RLAlgorithm is not None + + def test_import_a2c_algorithm_base(self): + from assume.reinforcement_learning.algorithms import A2CAlgorithm + + assert A2CAlgorithm is not None + + def test_import_td3(self): + from assume.reinforcement_learning.algorithms import TD3 + + assert TD3 is not None + + def test_import_ddpg(self): + from assume.reinforcement_learning.algorithms import DDPG + + assert DDPG is not None + + def test_import_ppo(self): + from 
assume.reinforcement_learning.algorithms import PPO + + assert PPO is not None + + def test_import_mlp_actor(self): + from assume.reinforcement_learning.algorithms import MLPActor + + assert MLPActor is not None + + def test_import_lstm_actor(self): + from assume.reinforcement_learning.algorithms import LSTMActor + + assert LSTMActor is not None + + def test_import_actor_architecture_aliases(self): + from assume.reinforcement_learning.algorithms import actor_architecture_aliases + + assert "mlp" in actor_architecture_aliases + assert "lstm" in actor_architecture_aliases + + def test_algorithm_hierarchy(self): + """TD3, DDPG, PPO must all be subclasses of A2CAlgorithm → RLAlgorithm.""" + from assume.reinforcement_learning.algorithms import ( + A2CAlgorithm, + DDPG, + PPO, + RLAlgorithm, + TD3, + ) + + for cls in (TD3, DDPG, PPO): + assert issubclass(cls, A2CAlgorithm), f"{cls.__name__} not subclass of A2CAlgorithm" + assert issubclass(cls, RLAlgorithm), f"{cls.__name__} not subclass of RLAlgorithm" + + def test_actor_aliases_map_to_nn_modules(self): + from torch import nn + + from assume.reinforcement_learning.algorithms import actor_architecture_aliases + + for name, cls in actor_architecture_aliases.items(): + assert issubclass(cls, nn.Module), f"alias '{name}' does not map to an nn.Module" + + +# --------------------------------------------------------------------------- +# Layer 2 – assume.reinforcement_learning +# --------------------------------------------------------------------------- + + +@pytest.mark.require_learning +class TestRLPackageExports: + """All public symbols re-exported from the reinforcement_learning sub-package.""" + + def test_import_learning(self): + from assume.reinforcement_learning import Learning + + assert Learning is not None + + def test_import_rl_algorithm(self): + from assume.reinforcement_learning import RLAlgorithm + + assert RLAlgorithm is not None + + def test_import_a2c_algorithm(self): + from assume.reinforcement_learning 
import A2CAlgorithm + + assert A2CAlgorithm is not None + + def test_import_td3(self): + from assume.reinforcement_learning import TD3 + + assert TD3 is not None + + def test_import_ddpg(self): + from assume.reinforcement_learning import DDPG + + assert DDPG is not None + + def test_import_ppo(self): + from assume.reinforcement_learning import PPO + + assert PPO is not None + + def test_import_mlp_actor(self): + from assume.reinforcement_learning import MLPActor + + assert MLPActor is not None + + def test_import_lstm_actor(self): + from assume.reinforcement_learning import LSTMActor + + assert LSTMActor is not None + + def test_import_actor_architecture_aliases(self): + from assume.reinforcement_learning import actor_architecture_aliases + + assert isinstance(actor_architecture_aliases, dict) + + def test_import_replay_buffer(self): + from assume.reinforcement_learning import ReplayBuffer + + assert ReplayBuffer is not None + + def test_import_replay_buffer_samples(self): + from assume.reinforcement_learning import ReplayBufferSamples + + assert ReplayBufferSamples is not None + + def test_import_rollout_buffer(self): + from assume.reinforcement_learning import RolloutBuffer + + assert RolloutBuffer is not None + + def test_import_rollout_buffer_samples(self): + from assume.reinforcement_learning import RolloutBufferSamples + + assert RolloutBufferSamples is not None + + def test_all_declared(self): + """Every symbol listed in __all__ must actually be importable.""" + import assume.reinforcement_learning as rl_pkg + + for name in rl_pkg.__all__: + assert hasattr(rl_pkg, name), f"__all__ entry '{name}' missing from module" + + def test_replay_buffer_and_rollout_buffer_are_distinct(self): + from assume.reinforcement_learning import ReplayBuffer, RolloutBuffer + + assert ReplayBuffer is not RolloutBuffer + + def test_buffer_samples_are_distinct(self): + from assume.reinforcement_learning import ReplayBufferSamples, RolloutBufferSamples + + assert ReplayBufferSamples 
is not RolloutBufferSamples + + +# --------------------------------------------------------------------------- +# Layer 3 – assume (top-level package) +# --------------------------------------------------------------------------- + + +@pytest.mark.require_learning +class TestTopLevelPackageRLExports: + """RL symbols must be reachable directly from `import assume`.""" + + def test_import_learning(self): + import assume + + assert hasattr(assume, "Learning") + + def test_import_rl_algorithm(self): + import assume + + assert hasattr(assume, "RLAlgorithm") + + def test_import_a2c_algorithm(self): + import assume + + assert hasattr(assume, "A2CAlgorithm") + + def test_import_td3(self): + import assume + + assert hasattr(assume, "TD3") + + def test_import_ddpg(self): + import assume + + assert hasattr(assume, "DDPG") + + def test_import_ppo(self): + import assume + + assert hasattr(assume, "PPO") + + def test_import_mlp_actor(self): + import assume + + assert hasattr(assume, "MLPActor") + + def test_import_lstm_actor(self): + import assume + + assert hasattr(assume, "LSTMActor") + + def test_import_actor_architecture_aliases(self): + import assume + + assert hasattr(assume, "actor_architecture_aliases") + + def test_import_replay_buffer(self): + import assume + + assert hasattr(assume, "ReplayBuffer") + + def test_import_replay_buffer_samples(self): + import assume + + assert hasattr(assume, "ReplayBufferSamples") + + def test_import_rollout_buffer(self): + import assume + + assert hasattr(assume, "RolloutBuffer") + + def test_import_rollout_buffer_samples(self): + import assume + + assert hasattr(assume, "RolloutBufferSamples") + + def test_all_declared(self): + """Every symbol in top-level __all__ must exist on the module.""" + import assume + + for name in assume.__all__: + assert hasattr(assume, name), f"__all__ entry '{name}' missing from assume" + + def test_rl_symbols_consistent_across_layers(self): + """The same class object must be reachable from all three 
import paths.""" + import assume + import assume.reinforcement_learning as rl + from assume.reinforcement_learning.algorithms import DDPG, PPO, TD3 + + for name, algo_cls in [("TD3", TD3), ("DDPG", DDPG), ("PPO", PPO)]: + assert getattr(rl, name) is algo_cls, f"rl.{name} is not the same object as algorithms.{name}" + assert getattr(assume, name) is algo_cls, f"assume.{name} is not the same object as algorithms.{name}" + + def test_version_still_present(self): + import assume + + assert hasattr(assume, "__version__") + assert isinstance(assume.__version__, str) + + def test_non_rl_symbols_unchanged(self): + """Core non-RL exports (World, MarketConfig, etc.) must still be present.""" + import assume + + for name in ("World", "MarketConfig", "MarketProduct", "load_scenario_folder", "run_learning"): + assert hasattr(assume, name), f"Pre-existing export '{name}' missing after __init__ update" From c8a20590e977c826eba62255cac1fe9d20a8d20d Mon Sep 17 00:00:00 2001 From: Harshul-18 Date: Sun, 26 Apr 2026 09:37:50 +0200 Subject: [PATCH 27/44] Fixed test_matd3.py to use the nested off_policy config structure --- tests/test_matd3.py | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/tests/test_matd3.py b/tests/test_matd3.py index 0471f84eb..1d6fdbf35 100644 --- a/tests/test_matd3.py +++ b/tests/test_matd3.py @@ -9,7 +9,7 @@ import pytest -from assume.common.base import LearningConfig +from assume.common.base import LearningConfig, OffPolicyConfig try: import torch as th @@ -46,19 +46,21 @@ def base_learning_config() -> dict: learning_mode=True, evaluation_mode=False, training_episodes=1, - episodes_collecting_initial_experience=0, continue_learning=False, trained_policies_save_path=None, early_stopping_steps=10, early_stopping_threshold=0.05, learning_rate=1e-4, batch_size=100, - tau=0.005, gamma=0.99, - gradient_steps=1, - policy_delay=2, - target_policy_noise=0.2, - target_noise_clip=0.5, + off_policy=OffPolicyConfig( + 
episodes_collecting_initial_experience=0, + gradient_steps=1, + tau=0.005, + policy_delay=2, + target_policy_noise=0.2, + target_noise_clip=0.5, + ), ), } From 89eb6659995d076485a23dc2a8c99735165fc714 Mon Sep 17 00:00:00 2001 From: Harshul-18 Date: Sun, 26 Apr 2026 13:14:52 +0200 Subject: [PATCH 28/44] Added Rollout Buffer test file (test_rl_rolloutbuffer.py) --- ...t_rl_buffer.py => test_rl_replaybuffer.py} | 0 tests/test_rl_rolloutbuffer.py | 466 ++++++++++++++++++ 2 files changed, 466 insertions(+) rename tests/{test_rl_buffer.py => test_rl_replaybuffer.py} (100%) create mode 100644 tests/test_rl_rolloutbuffer.py diff --git a/tests/test_rl_buffer.py b/tests/test_rl_replaybuffer.py similarity index 100% rename from tests/test_rl_buffer.py rename to tests/test_rl_replaybuffer.py diff --git a/tests/test_rl_rolloutbuffer.py b/tests/test_rl_rolloutbuffer.py new file mode 100644 index 000000000..e903e2e11 --- /dev/null +++ b/tests/test_rl_rolloutbuffer.py @@ -0,0 +1,466 @@ +# SPDX-FileCopyrightText: ASSUME Developers +# +# SPDX-License-Identifier: AGPL-3.0-or-later + +import numpy as np +import pytest + +try: + import torch as th + + from assume.reinforcement_learning.buffer import ( + RolloutBuffer, + RolloutBufferSamples, + ) +except ImportError: + pass + +def make_rollout_buffer( + buffer_size=8, + obs_dim=3, + act_dim=2, + n_rl_units=2, + gamma=0.99, + gae_lambda=0.95, +): + return RolloutBuffer( + buffer_size=buffer_size, + obs_dim=obs_dim, + act_dim=act_dim, + n_rl_units=n_rl_units, + device=th.device("cpu"), + float_type=th.float32, + gamma=gamma, + gae_lambda=gae_lambda, + ) + + +def fill_buffer(buf, n_steps=None, seed=0): + rng = np.random.default_rng(seed) + n = n_steps if n_steps is not None else buf.buffer_size + for _ in range(n): + obs = rng.random((buf.n_rl_units, buf.obs_dim)).astype(np.float32) + act = rng.random((buf.n_rl_units, buf.act_dim)).astype(np.float32) + rew = rng.random(buf.n_rl_units).astype(np.float32) + done = np.zeros(buf.n_rl_units, 
dtype=np.float32) + val = rng.random(buf.n_rl_units).astype(np.float32) + lp = rng.random(buf.n_rl_units).astype(np.float32) - 1.0 + buf.add(obs, act, rew, done, val, lp) + + +@pytest.mark.require_learning +def test_rollout_buffer_init_shapes(): + buf = make_rollout_buffer(buffer_size=10, obs_dim=3, act_dim=2, n_rl_units=4) + assert buf.observations.shape == (10, 4, 3) + assert buf.actions.shape == (10, 4, 2) + assert buf.rewards.shape == (10, 4) + assert buf.values.shape == (10, 4) + assert buf.log_probs.shape == (10, 4) + assert buf.dones.shape == (10, 4) + assert buf.advantages.shape == (10, 4) + assert buf.returns.shape == (10, 4) + + +@pytest.mark.require_learning +def test_rollout_buffer_init_state(): + buf = make_rollout_buffer() + assert buf.pos == 0 + assert buf.full is False + assert buf.generator_ready is False + assert buf.size() == 0 + + +@pytest.mark.require_learning +def test_rollout_buffer_reset_clears_data(): + buf = make_rollout_buffer(buffer_size=4) + fill_buffer(buf, n_steps=4) + assert buf.pos == 4 + + buf.reset() + assert buf.pos == 0 + assert buf.full is False + assert buf.generator_ready is False + assert np.all(buf.observations == 0) + assert np.all(buf.rewards == 0) + assert np.all(buf.advantages == 0) + assert np.all(buf.returns == 0) + + +@pytest.mark.require_learning +def test_rollout_buffer_add_increments_pos(): + buf = make_rollout_buffer(buffer_size=5) + obs = np.ones((buf.n_rl_units, buf.obs_dim), dtype=np.float32) + act = np.ones((buf.n_rl_units, buf.act_dim), dtype=np.float32) + rew = np.ones(buf.n_rl_units, dtype=np.float32) + done = np.zeros(buf.n_rl_units, dtype=np.float32) + val = np.ones(buf.n_rl_units, dtype=np.float32) + lp = np.zeros(buf.n_rl_units, dtype=np.float32) + + for i in range(1, 6): + buf.add(obs, act, rew, done, val, lp) + assert buf.pos == i + assert buf.size() == i + + +@pytest.mark.require_learning +def test_rollout_buffer_add_stores_correct_values(): + buf = make_rollout_buffer(buffer_size=4, obs_dim=2, 
act_dim=2, n_rl_units=1) + obs = np.array([[1.0, 2.0]], dtype=np.float32) + act = np.array([[0.5, -0.5]], dtype=np.float32) + rew = np.array([3.0], dtype=np.float32) + done = np.array([0.0], dtype=np.float32) + val = np.array([0.7], dtype=np.float32) + lp = np.array([-1.2], dtype=np.float32) + + buf.add(obs, act, rew, done, val, lp) + + np.testing.assert_array_almost_equal(buf.observations[0, 0], [1.0, 2.0]) + np.testing.assert_array_almost_equal(buf.actions[0, 0], [0.5, -0.5]) + assert buf.rewards[0, 0] == pytest.approx(3.0) + assert buf.dones[0, 0] == pytest.approx(0.0) + assert buf.values[0, 0] == pytest.approx(0.7) + assert buf.log_probs[0, 0] == pytest.approx(-1.2) + + +@pytest.mark.require_learning +def test_rollout_buffer_add_beyond_capacity_sets_full(): + buf = make_rollout_buffer(buffer_size=3) + obs = np.zeros((buf.n_rl_units, buf.obs_dim), dtype=np.float32) + act = np.zeros((buf.n_rl_units, buf.act_dim), dtype=np.float32) + rew = np.zeros(buf.n_rl_units, dtype=np.float32) + done = np.zeros(buf.n_rl_units, dtype=np.float32) + val = np.zeros(buf.n_rl_units, dtype=np.float32) + lp = np.zeros(buf.n_rl_units, dtype=np.float32) + + for _ in range(3): + buf.add(obs, act, rew, done, val, lp) + + assert buf.pos == 3 + assert buf.size() == 3 + + buf.add(obs, act, rew, done, val, lp) + assert buf.full is True + assert buf.size() == 3 + + +@pytest.mark.require_learning +def test_gae_single_step_non_terminal(): + """For 1 step, 1 agent, non-terminal: advantage = TD error.""" + gamma, gae_lambda = 0.99, 0.95 + buf = make_rollout_buffer( + buffer_size=1, obs_dim=1, act_dim=1, n_rl_units=1, + gamma=gamma, gae_lambda=gae_lambda, + ) + r, v, v_next = 1.0, 0.5, 0.8 + buf.add( + obs=np.array([[0.0]]), + action=np.array([[0.0]]), + reward=np.array([r]), + done=np.array([0.0]), + value=np.array([v]), + log_prob=np.array([0.0]), + ) + + buf.compute_returns_and_advantages( + last_values=np.array([v_next]), + dones=np.array([0.0]), + ) + + expected_advantage = r + gamma * v_next 
- v + expected_return = expected_advantage + v + + assert buf.advantages[0, 0] == pytest.approx(expected_advantage, abs=1e-5) + assert buf.returns[0, 0] == pytest.approx(expected_return, abs=1e-5) + + +@pytest.mark.require_learning +def test_gae_single_step_terminal(): + """For a terminal episode end, bootstrap value must not propagate.""" + gamma, gae_lambda = 0.99, 0.95 + buf = make_rollout_buffer( + buffer_size=1, obs_dim=1, act_dim=1, n_rl_units=1, + gamma=gamma, gae_lambda=gae_lambda, + ) + r, v = 2.0, 1.0 + buf.add( + obs=np.array([[0.0]]), + action=np.array([[0.0]]), + reward=np.array([r]), + done=np.array([0.0]), + value=np.array([v]), + log_prob=np.array([0.0]), + ) + + # done=1 — so no bootstrapping from last_values + buf.compute_returns_and_advantages( + last_values=np.array([999.0]), + dones=np.array([1.0]), + ) + + expected_advantage = r - v + expected_return = expected_advantage + v # = r + + assert buf.advantages[0, 0] == pytest.approx(expected_advantage, abs=1e-5) + assert buf.returns[0, 0] == pytest.approx(expected_return, abs=1e-5) + + +@pytest.mark.require_learning +def test_gae_multi_step_manual(): + """Manually verify 2-step GAE for a single agent.""" + gamma, gae_lambda = 0.99, 0.95 + buf = make_rollout_buffer( + buffer_size=2, obs_dim=1, act_dim=1, n_rl_units=1, + gamma=gamma, gae_lambda=gae_lambda, + ) + r0, v0 = 1.0, 0.4 + r1, v1 = 0.5, 0.6 + v_next = 0.8 + + for r, v in [(r0, v0), (r1, v1)]: + buf.add( + obs=np.array([[0.0]]), + action=np.array([[0.0]]), + reward=np.array([r]), + done=np.array([0.0]), + value=np.array([v]), + log_prob=np.array([0.0]), + ) + + buf.compute_returns_and_advantages( + last_values=np.array([v_next]), + dones=np.array([0.0]), + ) + + delta_1 = r1 + gamma * v_next - v1 + gae_1 = delta_1 + + delta_0 = r0 + gamma * v1 - v0 + gae_0 = delta_0 + gamma * gae_lambda * gae_1 + + assert buf.advantages[0, 0] == pytest.approx(gae_0, abs=1e-5) + assert buf.advantages[1, 0] == pytest.approx(gae_1, abs=1e-5) + assert 
buf.returns[0, 0] == pytest.approx(gae_0 + v0, abs=1e-5) + assert buf.returns[1, 0] == pytest.approx(gae_1 + v1, abs=1e-5) + + +@pytest.mark.require_learning +def test_gae_lambda_zero_equals_td_error(): + """gae_lambda=0 reduces GAE to a 1-step TD advantage per step.""" + gamma, gae_lambda = 0.99, 0.0 + buf = make_rollout_buffer( + buffer_size=3, obs_dim=1, act_dim=1, n_rl_units=1, + gamma=gamma, gae_lambda=gae_lambda, + ) + rewards = [1.0, 0.5, 2.0] + values = [0.4, 0.6, 0.3] + v_next = 0.8 + + for r, v in zip(rewards, values): + buf.add( + obs=np.array([[0.0]]), + action=np.array([[0.0]]), + reward=np.array([r]), + done=np.array([0.0]), + value=np.array([v]), + log_prob=np.array([0.0]), + ) + + buf.compute_returns_and_advantages( + last_values=np.array([v_next]), + dones=np.array([0.0]), + ) + + next_vals = [values[1], values[2], v_next] + for step, (r, v, nv) in enumerate(zip(rewards, values, next_vals)): + expected = r + gamma * nv - v + assert buf.advantages[step, 0] == pytest.approx(expected, abs=1e-5) + + +@pytest.mark.require_learning +def test_gae_lambda_one_gamma_one_monte_carlo(): + """with gamma=1, gae_lambda=1, terminal, should return equal undiscounted reward sums.""" + gamma, gae_lambda = 1.0, 1.0 + T = 4 + buf = make_rollout_buffer( + buffer_size=T, obs_dim=1, act_dim=1, n_rl_units=1, + gamma=gamma, gae_lambda=gae_lambda, + ) + rewards = [1.0, 1.0, 1.0, 1.0] + values = [0.1] * T + + for r, v in zip(rewards, values): + buf.add( + obs=np.array([[0.0]]), + action=np.array([[0.0]]), + reward=np.array([r]), + done=np.array([0.0]), + value=np.array([v]), + log_prob=np.array([0.0]), + ) + + buf.compute_returns_and_advantages( + last_values=np.array([0.0]), + dones=np.array([1.0]), + ) + + for t in range(T): + assert buf.returns[t, 0] == pytest.approx(float(T - t), abs=1e-5) + + +@pytest.mark.require_learning +def test_gae_multi_agent_independence(): + """One agent's rewards must not cause issue with another agent's advantages.""" + gamma, gae_lambda = 
0.99, 0.95 + buf = make_rollout_buffer( + buffer_size=3, obs_dim=1, act_dim=1, n_rl_units=2, + gamma=gamma, gae_lambda=gae_lambda, + ) + + for _ in range(3): + buf.add( + obs=np.zeros((2, 1), dtype=np.float32), + action=np.zeros((2, 1), dtype=np.float32), + reward=np.array([1.0, 0.0]), + done=np.zeros(2, dtype=np.float32), + value=np.array([0.5, 0.5]), + log_prob=np.zeros(2, dtype=np.float32), + ) + + buf.compute_returns_and_advantages( + last_values=np.array([0.5, 0.5]), + dones=np.zeros(2), + ) + + for t in range(3): + assert abs(buf.advantages[t, 1]) < abs(buf.advantages[t, 0]), ( + f"step {t}: agent-1 advantage {buf.advantages[t, 1]:.4f} should be " + f"smaller than agent-0 advantage {buf.advantages[t, 0]:.4f}" + ) + + +@pytest.mark.require_learning +def test_gae_returns_equal_advantages_plus_values(): + """returns == advantages + values for every step and agent.""" + buf = make_rollout_buffer(buffer_size=6, n_rl_units=3) + fill_buffer(buf, n_steps=6) + + last_values = np.random.rand(3).astype(np.float32) + buf.compute_returns_and_advantages(last_values, dones=np.zeros(3, dtype=np.float32)) + + np.testing.assert_array_almost_equal( + buf.returns[: buf.pos], + buf.advantages[: buf.pos] + buf.values[: buf.pos], + decimal=5, + ) + + +@pytest.mark.require_learning +def test_rollout_buffer_get_raises_before_compute(): + """Calling get() before compute_returns_and_advantages must raise ValueError.""" + buf = make_rollout_buffer(buffer_size=4) + fill_buffer(buf, n_steps=4) + + with pytest.raises(ValueError, match="compute_returns_and_advantages"): + next(buf.get(batch_size=2)) + + +@pytest.mark.require_learning +def test_rollout_buffer_get_full_batch(): + """get(batch_size=None) yields one batch with all steps and correct shapes.""" + buf = make_rollout_buffer(buffer_size=5, obs_dim=3, act_dim=2, n_rl_units=2) + fill_buffer(buf, n_steps=5) + buf.compute_returns_and_advantages( + last_values=np.zeros(2, dtype=np.float32), + dones=np.zeros(2, dtype=np.float32), + ) + + 
batches = list(buf.get(batch_size=None)) + assert len(batches) == 1 + + batch = batches[0] + assert isinstance(batch, RolloutBufferSamples) + assert batch.observations.shape == (5, 2, 3) + assert batch.actions.shape == (5, 2, 2) + assert batch.old_values.shape == (5, 2) + assert batch.old_log_probs.shape == (5, 2) + assert batch.advantages.shape == (5, 2) + assert batch.returns.shape == (5, 2) + + +@pytest.mark.require_learning +def test_rollout_buffer_get_mini_batches_cover_all_steps(): + """Mini-batch iteration must cover every step exactly once.""" + T = 8 + buf = make_rollout_buffer(buffer_size=T, obs_dim=2, act_dim=1, n_rl_units=1) + fill_buffer(buf, n_steps=T) + buf.compute_returns_and_advantages( + last_values=np.zeros(1, dtype=np.float32), + dones=np.zeros(1, dtype=np.float32), + ) + + total_samples = 0 + for batch in buf.get(batch_size=2): + assert isinstance(batch, RolloutBufferSamples) + total_samples += batch.observations.shape[0] + + assert total_samples == T + + +@pytest.mark.require_learning +def test_rollout_buffer_get_partial_fill(): + """A partially-filled buffer must only yield the filled steps.""" + buf = make_rollout_buffer(buffer_size=10, obs_dim=2, act_dim=1, n_rl_units=1) + fill_buffer(buf, n_steps=4) + buf.compute_returns_and_advantages( + last_values=np.zeros(1, dtype=np.float32), + dones=np.zeros(1, dtype=np.float32), + ) + + batches = list(buf.get(batch_size=None)) + assert batches[0].observations.shape[0] == 4 + + +@pytest.mark.require_learning +def test_full_episode_rollout(): + """fill -> GAE -> mini-batch epochs -> reset""" + T, obs_dim, act_dim, n_agents = 16, 5, 3, 2 + buf = make_rollout_buffer( + buffer_size=T, obs_dim=obs_dim, act_dim=act_dim, n_rl_units=n_agents, + gamma=0.99, gae_lambda=0.95, + ) + + rng = np.random.default_rng(42) + for _ in range(T): + buf.add( + obs=rng.random((n_agents, obs_dim)).astype(np.float32), + action=rng.random((n_agents, act_dim)).astype(np.float32), + 
reward=rng.random(n_agents).astype(np.float32), + done=np.zeros(n_agents, dtype=np.float32), + value=rng.random(n_agents).astype(np.float32), + log_prob=-rng.random(n_agents).astype(np.float32), + ) + + assert buf.size() == T + + last_values = rng.random(n_agents).astype(np.float32) + buf.compute_returns_and_advantages(last_values, dones=np.zeros(n_agents)) + + # returns == advantages + values + np.testing.assert_array_almost_equal( + buf.returns, buf.advantages + buf.values, decimal=5 + ) + + # Two PPO epochs over mini-batches of size 4 + for _epoch in range(2): + samples_seen = 0 + for batch in buf.get(batch_size=4): + assert batch.observations.shape == (4, n_agents, obs_dim) + assert batch.actions.shape == (4, n_agents, act_dim) + samples_seen += batch.observations.shape[0] + assert samples_seen == T + + # Reset for next rollout + buf.reset() + assert buf.pos == 0 + assert buf.generator_ready is False + assert buf.size() == 0 From 24364efb59f470479a43b03a871bef27b24f1c6a Mon Sep 17 00:00:00 2001 From: Harshul-18 Date: Sun, 26 Apr 2026 14:36:35 +0200 Subject: [PATCH 29/44] Added MADDPG test cases (test_maddpg.py file) --- .../reinforcement_learning/learning_utils.py | 24 +- tests/test_maddpg.py | 508 ++++++++++++++++++ 2 files changed, 527 insertions(+), 5 deletions(-) create mode 100644 tests/test_maddpg.py diff --git a/assume/reinforcement_learning/learning_utils.py b/assume/reinforcement_learning/learning_utils.py index 9ee069e9a..be1f37ee8 100644 --- a/assume/reinforcement_learning/learning_utils.py +++ b/assume/reinforcement_learning/learning_utils.py @@ -194,6 +194,11 @@ def get_hidden_sizes(state_dict: dict, prefix: str) -> list[int]: return sizes[:-1] # exclude the final output layer if needed +def _get_q_prefixes(state_dict: dict) -> list[str]: + known = ("q_layers", "q1_layers", "q2_layers") + return [p for p in known if f"{p}.0.weight" in state_dict] + + def copy_layer_data(dst, src): for k in dst: if k in src and dst[k].shape == src[k].shape: @@ 
-289,8 +294,18 @@ def transfer_weights( # 1) Architecture check new_state = model.state_dict() - loaded_hidden = get_hidden_sizes(loaded_state, prefix="q1_layers") - new_hidden = get_hidden_sizes(new_state, prefix="q1_layers") + prefixes = _get_q_prefixes(loaded_state) + if not prefixes: + logger.warning( + "Cannot transfer weights: no recognised Q-network prefix " + "(q_layers / q1_layers / q2_layers) found in loaded state dict." + ) + return None + + # Using the first detected prefix for architecture check. + check_prefix = prefixes[0] + loaded_hidden = get_hidden_sizes(loaded_state, prefix=check_prefix) + new_hidden = get_hidden_sizes(new_state, prefix=check_prefix) if loaded_hidden != new_hidden: logger.warning( f"Cannot transfer weights: neural network architecture mismatch.\n" @@ -307,8 +322,7 @@ def transfer_weights( # 3) Clone new state new_state_copy = {k: v.clone() for k, v in new_state.items()} - # 4) Transfer per-prefix - for prefix in ("q1_layers", "q2_layers"): + for prefix in prefixes: w_loaded = loaded_state[f"{prefix}.0.weight"] b_loaded = loaded_state[f"{prefix}.0.bias"] w_new = new_state_copy[f"{prefix}.0.weight"] @@ -348,7 +362,7 @@ def transfer_weights( # actions untouched # d) bias and deeper layers - # copy all other wigths and biases (besides input layer) from loaded to new model + # copy all other weights and biases (besides input layer) from loaded to new model b_new.copy_(b_loaded) for i in range(1, len(new_hidden) + 1): new_state_copy[f"{prefix}.{i}.weight"].copy_( diff --git a/tests/test_maddpg.py b/tests/test_maddpg.py new file mode 100644 index 000000000..e95893faa --- /dev/null +++ b/tests/test_maddpg.py @@ -0,0 +1,508 @@ +# SPDX-FileCopyrightText: ASSUME Developers +# +# SPDX-License-Identifier: AGPL-3.0-or-later + +import json +import os +from copy import copy, deepcopy +from datetime import datetime + +import pytest + +from assume.common.base import LearningConfig, OffPolicyConfig + +try: + import torch as th + + from 
assume.common.base import LearningStrategy + from assume.reinforcement_learning.algorithms.maddpg import DDPG + from assume.reinforcement_learning.learning_role import Learning + from assume.reinforcement_learning.neural_network_architecture import CriticDDPG + +except ImportError: + pass + + +start = datetime(2023, 7, 1) +end = datetime(2023, 7, 2) + +@pytest.fixture +def base_learning_config() -> dict: + foresight = 2 + unique_obs_dim = 2 + num_timeseries_obs_dim = 4 + return { + "foresight": foresight, + "act_dim": 3, + "unique_obs_dim": unique_obs_dim, + "num_timeseries_obs_dim": num_timeseries_obs_dim, + "obs_dim": foresight * num_timeseries_obs_dim + unique_obs_dim, + "learning_config": LearningConfig( + train_freq="1h", + algorithm="maddpg", + actor_architecture="mlp", + learning_mode=True, + evaluation_mode=False, + training_episodes=1, + continue_learning=False, + trained_policies_save_path=None, + early_stopping_steps=10, + early_stopping_threshold=0.05, + learning_rate=1e-4, + batch_size=100, + gamma=0.99, + off_policy=OffPolicyConfig( + episodes_collecting_initial_experience=0, + gradient_steps=1, + tau=0.005, + policy_delay=2, + target_policy_noise=0.2, + target_noise_clip=0.5, + ), + ), + } + + +@pytest.fixture(scope="function") +def learning_role_n(base_learning_config): + config = copy(base_learning_config) + learn = Learning(config["learning_config"], start, end) + learn.rl_strats["agent_0"] = LearningStrategy(**config, learning_role=learn) + learn.rl_strats["agent_1"] = LearningStrategy(**config, learning_role=learn) + return learn + + +@pytest.fixture(scope="function") +def learning_role_n_plus_m(base_learning_config): + config = copy(base_learning_config) + learn = Learning(config["learning_config"], start, end) + learn.rl_strats["agent_0"] = LearningStrategy(**config, learning_role=learn) + learn.rl_strats["agent_1"] = LearningStrategy(**config, learning_role=learn) + learn.rl_strats["agent_2"] = LearningStrategy(**config, learning_role=learn) 
+ return learn + + +@pytest.fixture(scope="function") +def saved_n_agent_model(learning_role_n, tmp_path) -> tuple[str, dict]: + learning_role_n.initialize_policy() + save_dir = tmp_path / "saved_model_n" + save_dir.mkdir(parents=True, exist_ok=True) + learning_role_n.rl_algorithm.save_params(directory=str(save_dir)) + agent = learning_role_n.rl_strats["agent_0"] + return str(save_dir), { + "critic": agent.critics.state_dict(), + "actor": agent.actor.state_dict(), + "target_critic": agent.target_critics.state_dict(), + "target_actor": agent.actor_target.state_dict(), + "optimizer_critic": agent.critics.optimizer.state_dict(), + "optimizer_actor": agent.actor.optimizer.state_dict(), + } + + +@pytest.fixture(scope="function") +def saved_n_plus_m_agent_model(learning_role_n_plus_m, tmp_path) -> tuple[str, dict]: + learning_role_n_plus_m.initialize_policy() + save_dir = tmp_path / "saved_model_n_plus_m" + save_dir.mkdir(parents=True, exist_ok=True) + learning_role_n_plus_m.rl_algorithm.save_params(directory=str(save_dir)) + agent = learning_role_n_plus_m.rl_strats["agent_0"] + return str(save_dir), { + "critic": agent.critics.state_dict(), + "actor": agent.actor.state_dict(), + } + + +def compare_state_dicts(dict1, dict2) -> bool: + if dict1.keys() != dict2.keys(): + return False + for k in dict1: + v1, v2 = dict1[k], dict2[k] + if isinstance(v1, th.Tensor): + if not th.equal(v1, v2): + return False + elif isinstance(v1, dict): + if not compare_state_dicts(v1, v2): + return False + else: + if v1 != v2: + return False + return True + + +@pytest.mark.require_learning +def test_maddpg_algorithm_class(learning_role_n): + learning_role_n.initialize_policy() + assert isinstance(learning_role_n.rl_algorithm, DDPG) + + +@pytest.mark.require_learning +def test_maddpg_save_params_creates_files(learning_role_n, tmp_path): + learning_role_n.initialize_policy() + save_dir = tmp_path / "model_save_test" + + learning_role_n.rl_algorithm.save_params(directory=str(save_dir)) + + assert 
os.path.exists(save_dir / "critics" / "critic_agent_0.pt") + assert os.path.exists(save_dir / "critics" / "critic_agent_1.pt") + assert os.path.exists(save_dir / "actors" / "actor_agent_0.pt") + assert os.path.exists(save_dir / "actors" / "actor_agent_1.pt") + + +@pytest.mark.require_learning +def test_maddpg_save_params_u_id_order(learning_role_n, tmp_path): + learning_role_n.initialize_policy() + save_dir = tmp_path / "u_id_order_test" + + learning_role_n.rl_algorithm.save_params(directory=str(save_dir)) + + order_file = save_dir / "critics" / "u_id_order.json" + assert order_file.exists(), "u_id_order.json must be written alongside critic files" + with open(order_file) as f: + mapping = json.load(f) + assert mapping.get("u_id_order") == ["agent_0", "agent_1"] + + +@pytest.mark.require_learning +def test_maddpg_load_matching_n(base_learning_config, saved_n_agent_model): + save_dir, original_states = saved_n_agent_model + + config_new = copy(base_learning_config) + learn_new = Learning(config_new["learning_config"], start, end) + learn_new.rl_strats["agent_0"] = LearningStrategy( + **config_new, learning_role=learn_new + ) + learn_new.rl_strats["agent_1"] = LearningStrategy( + **config_new, learning_role=learn_new + ) + learn_new.initialize_policy() + learn_new.rl_algorithm.load_params(directory=save_dir) + + agent = learn_new.rl_strats["agent_0"] + + assert compare_state_dicts(original_states["critic"], agent.critics.state_dict()) + assert compare_state_dicts(original_states["actor"], agent.actor.state_dict()) + assert compare_state_dicts( + original_states["target_critic"], agent.target_critics.state_dict() + ) + assert compare_state_dicts( + original_states["target_actor"], agent.actor_target.state_dict() + ) + assert compare_state_dicts( + deepcopy(original_states["optimizer_critic"]), + deepcopy(agent.critics.optimizer.state_dict()), + ) + assert compare_state_dicts( + deepcopy(original_states["optimizer_actor"]), + 
deepcopy(agent.actor.optimizer.state_dict()), + ) + + +def make_state_dicts( + obs_base: int, + act_dim: int, + unique_obs: int, + old_id_order: list[str], + new_id_order: list[str], + hidden_dims: list[int], +): + import torch as th + + class FakeModel: + def __init__(self, sd): + self._sd = sd + + def state_dict(self): + return self._sd + + old_n = len(old_id_order) + new_n = len(new_id_order) + old_input_dim = obs_base + unique_obs * max(0, old_n - 1) + act_dim * old_n + new_input_dim = obs_base + unique_obs * max(0, new_n - 1) + act_dim * new_n + + # Build baseline for new model + baseline_new = {} + prefix = "q_layers" + baseline_new[f"{prefix}.0.weight"] = th.randn(hidden_dims[0], new_input_dim) + baseline_new[f"{prefix}.0.bias"] = th.randn(hidden_dims[0]) + for i in range(1, len(hidden_dims)): + baseline_new[f"{prefix}.{i}.weight"] = th.randn(hidden_dims[i], hidden_dims[i - 1]) + baseline_new[f"{prefix}.{i}.bias"] = th.randn(hidden_dims[i]) + + # Build old_state with matching dims + old_state = {} + old_state[f"{prefix}.0.weight"] = th.randn(hidden_dims[0], old_input_dim) + 10.0 + old_state[f"{prefix}.0.bias"] = th.randn(hidden_dims[0]) + 20.0 + for i in range(1, len(hidden_dims)): + old_state[f"{prefix}.{i}.weight"] = baseline_new[f"{prefix}.{i}.weight"].clone() + old_state[f"{prefix}.{i}.bias"] = baseline_new[f"{prefix}.{i}.bias"].clone() + + model = FakeModel(baseline_new) + return model, old_state, baseline_new + + +@pytest.mark.require_learning +def test_ddpg_load_transfer_n_plus_m( + learning_role_n_plus_m, saved_n_agent_model, base_learning_config +): + """Saving a 2-agent DDPG model and loading it into a 3-agent setup must + transfer matching obs and action weight slices while leaving new-agent + slices at their random initialisation. 
+ """ + save_dir, original_states = saved_n_agent_model + n_agents_old = 2 + n_agents_new = 3 + + learning_role_n_plus_m.initialize_policy() + + pre_state = deepcopy( + learning_role_n_plus_m.rl_strats["agent_0"].critics.state_dict() + ) + pre_opt_state = deepcopy( + learning_role_n_plus_m.rl_strats["agent_0"].critics.optimizer.state_dict() + ) + + learning_role_n_plus_m.rl_algorithm.load_params(directory=save_dir) + + post_state = learning_role_n_plus_m.rl_strats["agent_0"].critics.state_dict() + post_target = learning_role_n_plus_m.rl_strats["agent_0"].target_critics.state_dict() + post_opt_state = ( + learning_role_n_plus_m.rl_strats["agent_0"].critics.optimizer.state_dict() + ) + + assert not compare_state_dicts(pre_state, post_state) + + obs_base = base_learning_config["obs_dim"] + act_dim = base_learning_config["act_dim"] + unique_obs = base_learning_config["unique_obs_dim"] + + old_total_obs = obs_base + unique_obs * (n_agents_old - 1) + new_total_obs = obs_base + unique_obs * (n_agents_new - 1) + copy_agent_count = min(n_agents_old, n_agents_new) + copy_unique_obs_count = unique_obs * (copy_agent_count - 1) + copy_obs_end_idx = obs_base + copy_unique_obs_count + copy_action_count = act_dim * copy_agent_count + + for prefix in ["q_layers"]: + w_key = f"{prefix}.0.weight" + b_key = f"{prefix}.0.bias" + assert th.equal( + post_state[w_key][:, :obs_base], + original_states["critic"][w_key][:, :obs_base], + ) + # Matched unique-obs slices + if copy_obs_end_idx > obs_base: + assert th.equal( + post_state[w_key][:, obs_base:copy_obs_end_idx], + original_states["critic"][w_key][:, obs_base:copy_obs_end_idx], + ) + # Matched action slices + assert th.equal( + post_state[w_key][:, new_total_obs : new_total_obs + copy_action_count], + original_states["critic"][w_key][ + :, old_total_obs : old_total_obs + copy_action_count + ], + ) + + # Target critic must copy critic after transfer + assert compare_state_dicts(post_state, post_target) + # Optimizer state is preserved 
+ assert compare_state_dicts(post_opt_state, pre_opt_state) + + +@pytest.mark.require_learning +def test_ddpg_load_transfer_n_minus_m( + learning_role_n, saved_n_plus_m_agent_model, base_learning_config +): + """Saving a 3-agent DDPG model and loading it into a 2-agent setup must + transfer only the overlapping obs and action weight slices. + """ + save_dir, original_states = saved_n_plus_m_agent_model + n_agents_old = 3 + n_agents_new = 2 + + learning_role_n.initialize_policy() + + pre_state = deepcopy(learning_role_n.rl_strats["agent_0"].critics.state_dict()) + learning_role_n.rl_algorithm.load_params(directory=save_dir) + + post_state = learning_role_n.rl_strats["agent_0"].critics.state_dict() + post_target = learning_role_n.rl_strats["agent_0"].target_critics.state_dict() + + assert not compare_state_dicts(pre_state, post_state) + + obs_base = base_learning_config["obs_dim"] + act_dim = base_learning_config["act_dim"] + unique_obs = base_learning_config["unique_obs_dim"] + + old_total_obs = obs_base + unique_obs * (n_agents_old - 1) + new_total_obs = obs_base + unique_obs * (n_agents_new - 1) + copy_agent_count = min(n_agents_old, n_agents_new) + copy_unique_obs_count = unique_obs * (copy_agent_count - 1) + copy_obs_end_idx = obs_base + copy_unique_obs_count + copy_action_count = act_dim * copy_agent_count + + for prefix in ["q_layers"]: + w_key = f"{prefix}.0.weight" + + assert th.equal( + post_state[w_key][:, :obs_base], + original_states["critic"][w_key][:, :obs_base], + ) + if copy_obs_end_idx > obs_base: + assert th.equal( + post_state[w_key][:, obs_base:copy_obs_end_idx], + original_states["critic"][w_key][:, obs_base:copy_obs_end_idx], + ) + assert th.equal( + post_state[w_key][:, new_total_obs : new_total_obs + copy_action_count], + original_states["critic"][w_key][ + :, old_total_obs : old_total_obs + copy_action_count + ], + ) + + assert compare_state_dicts(post_state, post_target) + + +@pytest.mark.parametrize( + "new_id_order", + [ + ["pp_5", 
"pp_6", "pp_3", "pp_4", "st_1"], + ["pp_3", "pp_4", "st_1", "pp_5", "pp_6"], + ["pp_3", "pp_5", "pp_4", "pp_6", "st_1"], + ["pp_3", "st_1"], + ], +) +@pytest.mark.require_learning +def test_ddpg_transfer_weights_various_orders(new_id_order): + import torch as th + + from assume.reinforcement_learning.learning_utils import transfer_weights + + obs_base = 10 + act_dim = 3 + unique_obs = 2 + hidden_dims = [5, 4] + old_id_order = ["pp_3", "pp_4", "st_1"] + + model, old_state, baseline = make_state_dicts( + obs_base, act_dim, unique_obs, old_id_order, new_id_order, hidden_dims + ) + new_state = transfer_weights( + model, old_state, old_id_order, new_id_order, obs_base, act_dim, unique_obs + ) + assert isinstance(new_state, dict), "transfer_weights must return a dict for DDPG" + + old_n = len(old_id_order) + new_n = len(new_id_order) + old_obs_tot = obs_base + unique_obs * max(0, old_n - 1) + new_obs_tot = obs_base + unique_obs * max(0, new_n - 1) + + prefix = "q_layers" + w_old = old_state[f"{prefix}.0.weight"] + w_base = baseline[f"{prefix}.0.weight"] + w_new = new_state[f"{prefix}.0.weight"] + + # Shared obs_base copied from old + assert th.equal(w_new[:, :obs_base], w_old[:, :obs_base]) + + # unique_obs slices per agent + for new_idx, u in enumerate(new_id_order): + if new_idx == 0: + continue + s = obs_base + unique_obs * (new_idx - 1) + e = s + unique_obs + if u in old_id_order: + old_idx = old_id_order.index(u) + if old_idx > 0: + os_ = obs_base + unique_obs * (old_idx - 1) + oe = os_ + unique_obs + assert th.equal(w_new[:, s:e], w_old[:, os_:oe]) + else: + assert th.equal(w_new[:, s:e], w_base[:, s:e]) + + # action slices per agent + for new_idx, u in enumerate(new_id_order): + s = new_obs_tot + act_dim * new_idx + e = s + act_dim + if u in old_id_order: + old_idx = old_id_order.index(u) + os_ = old_obs_tot + act_dim * old_idx + oe = os_ + act_dim + assert th.equal(w_new[:, s:e], w_old[:, os_:oe]) + else: + assert th.equal(w_new[:, s:e], w_base[:, s:e]) + + 
+@pytest.mark.require_learning +def test_maddpg_load_corrupted_critic(tmp_path, base_learning_config): + config = copy(base_learning_config) + learning = Learning(config["learning_config"], start, end) + learning.rl_strats["agent_0"] = LearningStrategy(**config, learning_role=learning) + learning.initialize_policy() + + original_state = deepcopy(learning.rl_strats["agent_0"].critics.state_dict()) + + corrupted_dir = tmp_path / "critics" + corrupted_dir.mkdir(parents=True, exist_ok=True) + + corrupted_obj = { + "critic": original_state, + "critic_target": { + k: v[:1] if isinstance(v, th.Tensor) and v.ndim > 0 else v + for k, v in original_state.items() + }, + } + th.save(corrupted_obj, corrupted_dir / "critic_agent_0.pt") + learning.rl_algorithm.load_critic_params(directory=str(tmp_path)) + + loaded_state = learning.rl_strats["agent_0"].critics.state_dict() + assert compare_state_dicts(loaded_state, original_state) + + +@pytest.mark.parametrize( + "mod_field, mod_value, expected_error", + [ + ("foresight", 99, "All foresight values must be the same"), + ("act_dim", 99, "All action dimensions must be the same"), + ("unique_obs_dim", 99, "All unique_obs_dim values must be the same"), + ( + "num_timeseries_obs_dim", + 99, + "All num_timeseries_obs_dim values must be the same", + ), + ], +) +@pytest.mark.require_learning +def test_initialize_policy_dimension_mismatch( + base_learning_config, mod_field, mod_value, expected_error +): + config = copy(base_learning_config) + config["num_timeseries_obs_dim"] = 1 + + learn = Learning(config["learning_config"], start, end) + strat_0 = LearningStrategy(**config, learning_role=learn) + + config_mismatch = copy(config) + config_mismatch[mod_field] = mod_value + strat_1 = LearningStrategy(**config_mismatch, learning_role=learn) + + learn.rl_strats["agent_0"] = strat_0 + learn.rl_strats["agent_1"] = strat_1 + + with pytest.raises(ValueError, match=expected_error): + learn.rl_algorithm.initialize_policy() + + 
+@pytest.mark.require_learning +def test_initialize_policy_all_dimensions_match(base_learning_config): + config = copy(base_learning_config) + config["num_timeseries_obs_dim"] = 1 + + learn = Learning(config["learning_config"], start, end) + for agent_id in ("agent_0", "agent_1", "agent_2"): + learn.rl_strats[agent_id] = LearningStrategy(**config, learning_role=learn) + + try: + learn.rl_algorithm.initialize_policy() + except Exception as e: + pytest.fail(f"initialize_policy raised an unexpected error: {e}") \ No newline at end of file From a1442a880691f2534b741b36830bfc71dad58f26 Mon Sep 17 00:00:00 2001 From: Harshul-18 Date: Sun, 26 Apr 2026 15:07:02 +0200 Subject: [PATCH 30/44] Updated the docs and completed the MADDPG implementation --- docs/source/assume.reinforcement_learning.rst | 16 +++ docs/source/learning.rst | 23 +++-- docs/source/learning_algorithm.rst | 98 ++++++++++++------- 3 files changed, 94 insertions(+), 43 deletions(-) diff --git a/docs/source/assume.reinforcement_learning.rst b/docs/source/assume.reinforcement_learning.rst index fe89cfce1..866070f00 100644 --- a/docs/source/assume.reinforcement_learning.rst +++ b/docs/source/assume.reinforcement_learning.rst @@ -48,6 +48,22 @@ assume.reinforcement\_learning.algorithms.matd3 module :undoc-members: :show-inheritance: +assume.reinforcement\_learning.algorithms.maddpg module +------------------------------------------------------- + +.. automodule:: assume.reinforcement_learning.algorithms.maddpg + :members: + :undoc-members: + :show-inheritance: + +assume.reinforcement\_learning.algorithms.mappo module +------------------------------------------------------ + +.. 
automodule:: assume.reinforcement_learning.algorithms.mappo + :members: + :undoc-members: + :show-inheritance: + Module contents --------------- diff --git a/docs/source/learning.rst b/docs/source/learning.rst index 21cfbdd47..a54600d15 100644 --- a/docs/source/learning.rst +++ b/docs/source/learning.rst @@ -36,8 +36,8 @@ After taking action :math:`a_i \in A_i` in state :math:`s_i \in S` according to Each agent receives a reward :math:`r_i` according to the individual reward function :math:`R_i` and a private observation correlated with the state :math:`o_i: S \rightarrow O_i`. Like in a Markov Decision Process, each agent :math:`i` learns an optimal policy :math:`\pi_i^*(s)` that maximizes its expected reward. -To enable multi-agent learning some adjustments are needed within the learning algorithm to get from the TD3 to an MATD3 algorithm. -Other authors used similar tweaks to improve the MADDPG algorithm and derive the MA-TD3 algorithm. +To enable multi-agent learning, ASSUME supports three RL algorithms out of the box: **MATD3** (Multi-Agent Twin Delayed DDPG, off-policy), **MADDPG** (Multi-Agent DDPG, off-policy), and **MAPPO** (Multi-Agent PPO, on-policy). +The algorithm to use is selected via the ``algorithm`` config item. We'll start explaining the learning by focusing on a single agent and then extend it to multi-agent learning. Single-Agent Learning @@ -96,18 +96,23 @@ Multi-Agent Learning In a single-agent setup, the state transition and respective reward depend only on the actions of a single agent. However, in a multi-agent setup, the state transitions and rewards depend on the actions of all learning agents. This makes the environment non-stationary for a single agent, violating the Markov property. The convergence guarantees of single-agent RL algorithms are no longer -valid. To address this, we utilize the framework of centralized training and decentralized execution and expand upon the MADDPG algorithm. +valid. 
To address this, we utilize the framework of centralized training and decentralized execution, which is supported in ASSUME (MATD3, MADDPG, MAPPO). The main idea is to use a centralized critic during the training phase, which has access to the entire state :math:`S`, and all actions :math:`a_1, \ldots, a_N`, thus resolving the issue of non-stationarity. Changes in state transitions and rewards can be explained by the actions of other agents. Meanwhile, during both training and execution, the actor has access only to its local observations :math:`o_i` derived from the entire state :math:`S`. -For each agent :math:`i`, we train not one but two centralized critics :math:`Q_{i,\theta_1,2}(S, a_1, \ldots, a_N)` together with two target critic networks. -Similar to TD3, the smaller value of the two critics and target action noise :math:`a_i,k \sim` is used to calculate the target :math:`y_i,k`. This is done to to address the issue of overestimation bias. +For each agent :math:`i`, MATD3 and MADDPG train centralized critics together with target critic networks. +In MATD3 and MADDPG, the critics are used to calculate the target :math:`y_i,k`. In MATD3, two critics per agent are maintained and the minimum value is used (twin-critic trick) to address overestimation bias. +In MADDPG, a single critic per agent is used. In MAPPO, a single centralized value network is used across all agents, updated via GAE-based advantage estimates rather than Bellman targets. + +For MATD3, the target uses the twin-critic minimum: .. math:: y_i,k = r_i,k + γ * min_j=1,2 Q_i,θ′_j(S′_k, a_1,k, ..., a_N,k, π′(o_i,k)) +For MADDPG, the same formulation is used with a single critic. + where :math:`r_i,k` is the reward obtained by agent :math:`i` at time step :math:`k`, :math:`\gamma` is the discount factor, :math:`S'_k` is the next state of the environment, and :math:`\pi'(o_i,k)` is the target policy of agent :math:`i`. 
@@ -123,8 +128,12 @@ The actor policy of each agent is updated using the deterministic policy gradien ∇_a Q_i,θ_j(S_k, a_1,k, ..., a_N,k, π(o_i,k))|a_i,k=π(o_i,k) * ∇_θ π(o_i,k) -The actor is updated similarly using only one critic network :math:`Q_{θ1}`. These changes to the original DDPG algorithm allow increased stability and convergence of the TD3 algorithm. This is especially relevant when approaching a multi-agent RL setup, as discussed in the foregoing section. -Please note that the actor and critics are updated by sampling experience from the buffer where all interactions of the agents are stored, namely the observations, actions and rewards. There are more complex buffers possible, like those that use importance sampling, but the default buffer is a simple replay buffer. You can find a documentation of the latter in :ref:`replay-buffer`. +In MATD3, the actor update is delayed relative to the critic (every ``policy_delay`` gradient steps) to improve stability. +In MADDPG, the actor is updated at every gradient step. +In MAPPO, the actor is updated using the PPO clipped surrogate objective rather than the deterministic policy gradient. +Please note that for off-policy algorithms (MATD3, MADDPG), the actor and critics are updated by sampling experience from the replay buffer where all interactions of the agents are stored. +For the on-policy algorithm (MAPPO), a rollout buffer is used instead, and experiences are discarded after each policy update. +You can find documentation of both buffer types in :ref:`replay-buffer` and :ref:`rollout-buffer`. .. 
_learning_implementation: diff --git a/docs/source/learning_algorithm.rst b/docs/source/learning_algorithm.rst index c09c1d4c0..1b14fa365 100644 --- a/docs/source/learning_algorithm.rst +++ b/docs/source/learning_algorithm.rst @@ -26,41 +26,63 @@ The following table shows the options that can be adjusted and gives a short exp - ======================================== ========================================================================================================== - learning config item description - ======================================== ========================================================================================================== - learning_mode Should we use learning mode at all? If False, the learning bidding strategy is loaded from trained_policies_load_path and no training occurs. Default is False. - evaluation_mode This setting is modified internally. Whether to run in evaluation mode. If True, the agent uses the learned policy without exploration noise and no training updates occur. Default is False. - continue_learning Whether to use pre-learned strategies and then continue learning. If True, loads existing policies from trained_policies_load_path and continues training. Note: Set True when you have a pretrained model and want incremental learning under new data or scenarios. Leave False for clean experiments. Default is False. - trained_policies_save_path The directory path - relative to the scenario's inputs_path - where newly trained RL policies (actor and critic networks) will be saved. Only needed when learning_mode is True. Value is set in setup_world(). Defaults otherwise to None. - trained_policies_load_path The directory path - relative to the scenario's inputs_path - from which pre-trained policies should be loaded. Needed when continue_learning is True or using pre-trained strategies. Default is None. - min_bid_price The minimum bid price which limits the action of the actor to this price. 
Used to constrain the actor's output to a price range. Note: Best practice is to set this parameter as unconstraining as possible. When agent bid convergence is guaranteed to occur above zero, increasing the minimum bid value can reduce training times. Default is -100.0. - max_bid_price The maximum bid price which limits the action of the actor to this price. Used to constrain the actor's output to a price range. Note: Align this with realistic market constraints. Too low = limited strategy space. Too high = noisy learning. Default is 100.0. - device The device to use for PyTorch computations. Options include "cpu", "cuda", or specific CUDA devices like "cuda:0". Default is "cpu". - episodes_collecting_initial_experience The number of episodes at the start during which random actions are chosen instead of using the actor network. This helps populate the replay buffer with diverse experiences. Note: Increase (5–20) for larger environments. Too low causes early high variance and instability; too high wastes time. Default is 5. - exploration_noise_std The standard deviation of Gaussian noise added to actions during exploration in the environment. Higher values encourage more exploration. Default is 0.2. - training_episodes The number of training episodes, where one episode is the entire simulation horizon specified in the general config. Default is 100. - validation_episodes_interval The interval (in episodes) at which validation episodes are run to evaluate the current policy's performance without training updates. Note: With long simulation horizons, choosing this higher will reduce training time. Default is 5. - train_freq Defines the frequency in time steps at which the actor and critic networks are updated. Accepts time strings like "24h" for 24 hours or "1d" for 1 day. Note: Shorter intervals = frequent updates, faster but less stable learning. Longer intervals = slower but more reliable. 
Use intervals > "72h" for units that require time coupling such as storages. Default is "24h". - batch_size The batch size of experiences sampled from the replay buffer for each training update. Larger batches provide more stable gradients but require more memory. In environments with many learning agents we advise small batch sizes. Default is 128. - gradient_steps The number of gradient descent steps performed during each training update. More steps can lead to better learning but increase computation time. Note: For environments with many agents one should use not many gradient steps, as policies of other agents are updated as well outdating the current best strategy. Default is 100. - learning_rate The learning rate (step size) for the optimizer, which controls how much the policy and value networks are updated during training. Note: Start around 1e-3. Decrease (e.g. 3e-4, 1e-4) if training oscillates or diverges. Default is 0.001. - learning_rate_schedule Which learning rate decay schedule to use. Currently only "linear" decay is available, which linearly decreases the learning rate over time. Default is None (constant learning rate). - early_stopping_steps The number of validation steps over which the moving average reward is calculated for early stopping. If the reward doesn't change by early_stopping_threshold over this many steps, training stops. Note: It prevents wasting compute on runs that have plateaued. Higher values are safer for noisy environments to avoid premature stopping; lower values react faster in stable settings. If None, defaults to training_episodes / validation_episodes_interval + 1. - early_stopping_threshold The minimum improvement in moving average reward required to avoid early stopping. If the reward improvement is less than this threshold over early_stopping_steps, training is terminated early. Note: If training stops too early, reduce the threshold. In noisy environments, combine a lower threshold with higher early_stopping_steps. 
Default is 0.05. - algorithm Specifies which reinforcement learning algorithm to use. Currently, only "matd3" (Multi-Agent Twin Delayed Deep Deterministic Policy Gradient) is implemented. Default is "matd3". - replay_buffer_size The maximum number of transitions stored in the replay buffer for experience replay. Larger buffers allow for more diverse training samples. Default is 500000. - gamma The discount factor for future rewards, ranging from 0 to 1. Higher values give more weight to long-term rewards in decision-making, which should be chosen for units with time coupling like storages. Default is 0.99. - actor_architecture The architecture of the neural networks used for the actors. Options include "mlp" (Multi-Layer Perceptron) and "lstm" (Long Short-Term Memory). Default is "mlp". - policy_delay The frequency (in gradient steps) at which the actor policy is updated. TD3 updates the critic more frequently than the actor to stabilize training. Default is 2. - noise_sigma The standard deviation of the Ornstein-Uhlenbeck or Gaussian noise distribution used to generate exploration noise added to actions. Note: In multi-agent ennvironments high noises are necessary to encourage sufficient exploration. Default is 0.1. - noise_scale The scale factor multiplied by the noise drawn from the distribution. Larger values increase exploration. Default is 1. - noise_dt The time step parameter for the Ornstein-Uhlenbeck process, which determines how quickly the noise decays over time. Used for noise scheduling. Default is 1. - action_noise_schedule Which action noise decay schedule to use. Currently only "linear" decay is available, which linearly decreases exploration noise over training. Default is "linear". - tau The soft update coefficient for updating target networks. Controls how slowly target networks track the main networks. Smaller values mean slower updates. Default is 0.005. 
- target_policy_noise The standard deviation of noise added to target policy actions during critic updates. This smoothing helps prevent overfitting to narrow policy peaks. Default is 0.2. - target_noise_clip The maximum absolute value for clipping the target policy noise. Prevents the noise from being too large. Default is 0.5. - ======================================== ========================================================================================================== +**Common parameters** (apply to all algorithms — set directly under ``learning:`` in the config) + + ================================= ========================================================================================================== + learning config item description + ================================= ========================================================================================================== + learning_mode Should we use learning mode at all? If False, the learning bidding strategy is loaded from trained_policies_load_path and no training occurs. Default is False. + evaluation_mode This setting is modified internally. Whether to run in evaluation mode. If True, the agent uses the learned policy without exploration noise and no training updates occur. Default is False. + continue_learning Whether to use pre-learned strategies and then continue learning. If True, loads existing policies from trained_policies_load_path and continues training. Note: Set True when you have a pretrained model and want incremental learning under new data or scenarios. Leave False for clean experiments. Default is False. + trained_policies_save_path The directory path - relative to the scenario's inputs_path - where newly trained RL policies (actor and critic networks) will be saved. Only needed when learning_mode is True. Value is set in setup_world(). Defaults otherwise to None. 
+ trained_policies_load_path The directory path - relative to the scenario's inputs_path - from which pre-trained policies should be loaded. Needed when continue_learning is True or using pre-trained strategies. Default is None. + min_bid_price The minimum bid price which limits the action of the actor to this price. Used to constrain the actor's output to a price range. Note: Best practice is to set this parameter as unconstraining as possible. Default is -100.0. + max_bid_price The maximum bid price which limits the action of the actor to this price. Used to constrain the actor's output to a price range. Note: Align this with realistic market constraints. Default is 100.0. + device The device to use for PyTorch computations. Options include "cpu", "cuda", or specific CUDA devices like "cuda:0". Default is "cpu". + exploration_noise_std The standard deviation of Gaussian noise added to actions during exploration in the environment. Higher values encourage more exploration. Default is 0.2. + training_episodes The number of training episodes, where one episode is the entire simulation horizon specified in the general config. Default is 100. + validation_episodes_interval The interval (in episodes) at which validation episodes are run to evaluate the current policy's performance without training updates. Default is 5. + train_freq Defines the frequency in time steps at which the actor and critic networks are updated. Accepts time strings like "24h" or "1d". Default is "24h". + batch_size The batch size of experiences sampled from the buffer for each training update. Default is 128. + learning_rate The learning rate for the optimizer. Note: Start around 1e-3. Decrease (e.g. 3e-4, 1e-4) if training oscillates or diverges. Default is 0.001. + learning_rate_schedule Which learning rate decay schedule to use. Currently only "linear" decay is available. Default is None (constant learning rate). 
+ early_stopping_steps The number of validation steps over which the moving average reward is checked for early stopping. If None, defaults to training_episodes / validation_episodes_interval + 1. + early_stopping_threshold The minimum improvement in moving average reward required to avoid early stopping. Default is 0.05. + algorithm Specifies which reinforcement learning algorithm to use. Options: ``"matd3"`` (Multi-Agent Twin Delayed DDPG, off-policy), ``"maddpg"`` (Multi-Agent DDPG, off-policy), ``"mappo"`` (Multi-Agent PPO, on-policy). Default is ``"matd3"``. + gamma The discount factor for future rewards (0–1). Higher values weight long-term rewards more. Default is 0.99. + actor_architecture The neural network architecture for the actors. Options: ``"mlp"`` (Multi-Layer Perceptron) or ``"lstm"`` (Long Short-Term Memory). Default is ``"mlp"``. + ================================= ========================================================================================================== + +**Off-policy parameters** (apply to ``"matd3"`` and ``"maddpg"`` — set under ``off_policy:`` in the config) + + ========================================= ========================================================================================================== + off_policy config item description + ========================================= ========================================================================================================== + episodes_collecting_initial_experience The number of episodes at the start during which random actions are chosen instead of using the actor network. Helps populate the replay buffer with diverse experiences. Note: Increase (5–20) for larger environments. Default is 5. + gradient_steps The number of gradient descent steps performed during each training update. Note: For environments with many agents, use fewer gradient steps as other agents' policies are updated simultaneously. Default is 100. 
+ replay_buffer_size The maximum number of transitions stored in the replay buffer. Larger buffers allow for more diverse training samples. Default is 50000. + tau The soft update coefficient for updating target networks. Smaller values mean slower target network updates. Default is 0.005. + policy_delay (MATD3 only) The frequency (in gradient steps) at which the actor policy is updated. TD3 updates the critic more frequently than the actor to stabilize training. Default is 2. + noise_sigma The standard deviation of the exploration noise distribution added to actions. Note: In multi-agent environments, higher noise encourages sufficient exploration. Default is 0.1. + noise_scale The scale factor multiplied by the drawn noise. Larger values increase exploration. Default is 1. + noise_dt The time step parameter for the Ornstein-Uhlenbeck process, determining how quickly noise decays. Default is 1. + action_noise_schedule Which action noise decay schedule to use. Currently only ``"linear"`` decay is available. Default is None. + target_policy_noise (MATD3 only) The standard deviation of noise added to target policy actions during critic updates. Helps prevent overfitting to narrow policy peaks. Default is 0.2. + target_noise_clip (MATD3 only) The maximum absolute value for clipping target policy noise. Default is 0.5. + ========================================= ========================================================================================================== + +**On-policy parameters** (apply to ``"mappo"`` — set under ``on_policy:`` in the config) + + ====================== ========================================================================================================== + on_policy config item description + ====================== ========================================================================================================== + clip_ratio The clipping ratio for the PPO surrogate objective. 
Controls how far the new policy can deviate from the old one in a single update. Default is 0.1. + entropy_coef Coefficient for the entropy bonus term in the loss. Higher values encourage more exploration. Default is 0.01. + gae_lambda Lambda parameter for Generalized Advantage Estimation (GAE). Controls the bias-variance trade-off. Default is 0.95. + max_grad_norm Maximum gradient norm for gradient clipping. Default is 0.5. + vf_coef Coefficient for the value function loss term. Default is 0.5. + n_epochs Number of optimization epochs performed over each rollout batch. Default is 10. + ====================== ========================================================================================================== Note: We advise to not use the setting of a seed in the general config (``seed=null``) when using learning, as it will decrease performance, see https://docs.pytorch.org/docs/stable/notes/randomness.html. Completely reproducible results are not guaranteed across different PyTorch versions, hardware, or CUDA configurations. @@ -208,6 +230,8 @@ How are buffers implemented in ASSUME? In principal ASSUME allows for different buffers to be implemented. They just need to adhere to the structure presented in the base buffer. Here we will present the different buffers already implemented, which is only one, yet. +.. _replay-buffer: + The simple replay buffer ------------------------ @@ -226,6 +250,8 @@ This reduces the reliance on new experiences and makes better use of the availab as the agent is exposed to a diverse set of experiences. +.. _rollout-buffer: + The rollout buffer ------------------ @@ -260,7 +286,7 @@ After a complete rollout is collected (determined by the ``train_freq`` paramete The learning role collects experiences after each environment step by calling the buffer's add function. 
Once the buffer accumulates enough data (specified by ``batch_size``), the PPO algorithm's update function -is triggered, which retrieves mini-batches from the buffer for multiple training epochs (specified by ``ppo_n_epochs``). +is triggered, which retrieves mini-batches from the buffer for multiple training epochs (specified by ``on_policy.n_epochs``). After training is complete, the buffer is reset, and the cycle begins again with the updated policy. This ensures that PPO always learns from fresh, on-policy experiences, which is critical for the algorithm's stability and performance. From 0ba82b75cc380b93a5018b2626c6e02cbeae7a81 Mon Sep 17 00:00:00 2001 From: Harshul-18 Date: Tue, 28 Apr 2026 05:47:54 +0200 Subject: [PATCH 31/44] Added MAPPO test file (test_mappo.py) --- assume/strategies/learning_strategies.py | 4 +- tests/test_mappo.py | 376 +++++++++++++++++++++++ 2 files changed, 377 insertions(+), 3 deletions(-) create mode 100644 tests/test_mappo.py diff --git a/assume/strategies/learning_strategies.py b/assume/strategies/learning_strategies.py index 732a11862..83d50a975 100644 --- a/assume/strategies/learning_strategies.py +++ b/assume/strategies/learning_strategies.py @@ -303,9 +303,7 @@ def get_actions(self, next_observation): curr_action, log_prob = self.actor.get_action_and_log_prob(next_observation.unsqueeze(0)) curr_action = curr_action.squeeze(0).detach() self._last_log_prob = log_prob.squeeze(0).detach() - # Computing the value later from centralized observations in learning_role. - self._last_value = th.tensor(0.0, device=self.device) - + # Using stochastic PPO policy with no external noise. 
noise = th.zeros_like(curr_action, dtype=self.float_type) else: diff --git a/tests/test_mappo.py b/tests/test_mappo.py new file mode 100644 index 000000000..73ea73b3a --- /dev/null +++ b/tests/test_mappo.py @@ -0,0 +1,376 @@ +# SPDX-FileCopyrightText: ASSUME Developers +# +# SPDX-License-Identifier: AGPL-3.0-or-later + +import json +import os +from copy import copy, deepcopy +from datetime import datetime + +import numpy as np +import pytest + +from assume.common.base import LearningConfig, OnPolicyConfig + +try: + import torch as th + + from assume.common.base import LearningStrategy + from assume.reinforcement_learning.algorithms.mappo import PPO + from assume.reinforcement_learning.buffer import RolloutBuffer + from assume.reinforcement_learning.learning_role import Learning + from assume.reinforcement_learning.neural_network_architecture import ( + ActorPPO, + CriticPPO, + ) + +except ImportError: + pass + + +start = datetime(2023, 7, 1) +end = datetime(2023, 7, 2) + + +@pytest.fixture +def base_learning_config() -> dict: + foresight = 2 + unique_obs_dim = 2 + num_timeseries_obs_dim = 4 + return { + "foresight": foresight, + "act_dim": 3, + "unique_obs_dim": unique_obs_dim, + "num_timeseries_obs_dim": num_timeseries_obs_dim, + "obs_dim": foresight * num_timeseries_obs_dim + unique_obs_dim, + "learning_config": LearningConfig( + train_freq="1h", + algorithm="mappo", + actor_architecture="mlp", + learning_mode=True, + evaluation_mode=False, + training_episodes=10, + continue_learning=False, + trained_policies_save_path=None, + early_stopping_steps=10, + early_stopping_threshold=0.05, + learning_rate=1e-4, + batch_size=10, + gamma=0.99, + on_policy=OnPolicyConfig( + clip_ratio=0.2, + entropy_coef=0.01, + gae_lambda=0.95, + max_grad_norm=0.5, + vf_coef=0.5, + n_epochs=2, + ), + ), + } + + +@pytest.fixture(scope="function") +def learning_role_n(base_learning_config): + config = copy(base_learning_config) + learn = Learning(config["learning_config"], start, end) + 
for agent_id in ("agent_0", "agent_1"): + strat = LearningStrategy(**config, learning_role=learn) + strat.unit_id = agent_id + learn.rl_strats[agent_id] = strat + return learn + + +@pytest.fixture(scope="function") +def saved_n_agent_model(learning_role_n, tmp_path) -> tuple[str, dict]: + """Save a 2-agent PPO model; return (save_dir, state_dict_snapshot). + """ + learning_role_n.initialize_policy() + save_dir = tmp_path / "saved_model_n" + save_dir.mkdir(parents=True, exist_ok=True) + learning_role_n.rl_algorithm.save_params(directory=str(save_dir)) + agent = learning_role_n.rl_strats["agent_0"] + return str(save_dir), { + "critic": agent.critics.state_dict(), + "actor": agent.actor.state_dict(), + "optimizer_critic": agent.critics.optimizer.state_dict(), + "optimizer_actor": agent.actor.optimizer.state_dict(), + } + + +def compare_state_dicts(dict1, dict2) -> bool: + if dict1.keys() != dict2.keys(): + return False + for k in dict1: + v1, v2 = dict1[k], dict2[k] + if isinstance(v1, th.Tensor): + if not th.equal(v1, v2): + return False + elif isinstance(v1, dict): + if not compare_state_dicts(v1, v2): + return False + else: + if v1 != v2: + return False + return True + + +def _make_rollout_buffer( + obs_dim: int, + act_dim: int, + n_agents: int, + n_steps: int, + device: str = "cpu", +) -> "RolloutBuffer": + """Building and filling a RolloutBuffer with random data for update_policy tests.""" + buf = RolloutBuffer( + buffer_size=n_steps + 10, + obs_dim=obs_dim, + act_dim=act_dim, + n_rl_units=n_agents, + device=device, + float_type=th.float32, + gamma=0.99, + gae_lambda=0.95, + ) + rng = np.random.default_rng(42) + for _ in range(n_steps): + buf.add( + obs=rng.random((n_agents, obs_dim)).astype(np.float32), + action=rng.random((n_agents, act_dim)).astype(np.float32), + reward=rng.random(n_agents).astype(np.float32), + done=np.zeros(n_agents, dtype=np.float32), + value=rng.random(n_agents).astype(np.float32), + log_prob=(rng.random(n_agents).astype(np.float32) - 
1.0), + ) + return buf + + +def _setup_for_update(learning_role) -> None: + """Setting minimal attributes needed. + """ + learning_role.update_steps = 0 + learning_role.db_addr = None # disables the context.schedule_instant_message path + + +@pytest.mark.require_learning +def test_mappo_algorithm_class(learning_role_n): + """initialize_policy creates a PPO instance as the rl_algorithm.""" + learning_role_n.initialize_policy() + assert isinstance(learning_role_n.rl_algorithm, PPO) + + +@pytest.mark.require_learning +def test_mappo_save_params_creates_files(learning_role_n, tmp_path): + learning_role_n.initialize_policy() + save_dir = tmp_path / "model_save_test" + + learning_role_n.rl_algorithm.save_params(directory=str(save_dir)) + + assert os.path.exists(save_dir / "critics" / "critic_agent_0.pt") + assert os.path.exists(save_dir / "critics" / "critic_agent_1.pt") + assert os.path.exists(save_dir / "actors" / "actor_agent_0.pt") + assert os.path.exists(save_dir / "actors" / "actor_agent_1.pt") + + +@pytest.mark.require_learning +def test_mappo_save_params_u_id_order(learning_role_n, tmp_path): + learning_role_n.initialize_policy() + save_dir = tmp_path / "uid_order_test" + learning_role_n.rl_algorithm.save_params(directory=str(save_dir)) + + order_file = save_dir / "critics" / "u_id_order.json" + assert order_file.exists(), "u_id_order.json must be written alongside critic files" + with open(order_file) as f: + mapping = json.load(f) + assert mapping.get("u_id_order") == ["agent_0", "agent_1"] + + +@pytest.mark.require_learning +def test_mappo_load_matching_n(base_learning_config, saved_n_agent_model): + save_dir, original_states = saved_n_agent_model + + config_new = copy(base_learning_config) + learn_new = Learning(config_new["learning_config"], start, end) + learn_new.rl_strats["agent_0"] = LearningStrategy( + **config_new, learning_role=learn_new + ) + learn_new.rl_strats["agent_1"] = LearningStrategy( + **config_new, learning_role=learn_new + ) + 
learn_new.initialize_policy() + learn_new.rl_algorithm.load_params(directory=save_dir) + + agent = learn_new.rl_strats["agent_0"] + assert compare_state_dicts(original_states["critic"], agent.critics.state_dict()) + assert compare_state_dicts(original_states["actor"], agent.actor.state_dict()) + assert compare_state_dicts( + deepcopy(original_states["optimizer_critic"]), + deepcopy(agent.critics.optimizer.state_dict()), + ) + assert compare_state_dicts( + deepcopy(original_states["optimizer_actor"]), + deepcopy(agent.actor.optimizer.state_dict()), + ) + + +# @pytest.mark.require_learning +# def test_mappo_update_policy_skips_none_buffer(learning_role_n, monkeypatch): +# learning_role_n.initialize_policy() +# _setup_for_update(learning_role_n) +# monkeypatch.setattr(learning_role_n, "get_progress_remaining", lambda: 1.0) + +# learning_role_n.buffer = None +# learning_role_n.rl_algorithm.update_policy() +# assert learning_role_n.rl_algorithm.n_updates == 0 + + +# @pytest.mark.require_learning +# def test_mappo_update_policy_skips_empty_buffer(learning_role_n, monkeypatch): +# learning_role_n.initialize_policy() +# _setup_for_update(learning_role_n) +# monkeypatch.setattr(learning_role_n, "get_progress_remaining", lambda: 1.0) + +# s = learning_role_n.rl_strats["agent_0"] +# learning_role_n.buffer = RolloutBuffer( +# buffer_size=50, +# obs_dim=s.obs_dim, +# act_dim=s.act_dim, +# n_rl_units=2, +# device="cpu", +# float_type=th.float32, +# ) +# learning_role_n.rl_algorithm.update_policy() +# assert learning_role_n.rl_algorithm.n_updates == 0 + + +# @pytest.mark.require_learning +# def test_mappo_update_policy_skips_insufficient_data(learning_role_n, monkeypatch): +# learning_role_n.initialize_policy() +# _setup_for_update(learning_role_n) +# monkeypatch.setattr(learning_role_n, "get_progress_remaining", lambda: 1.0) + +# s = learning_role_n.rl_strats["agent_0"] +# learning_role_n.buffer = _make_rollout_buffer( +# obs_dim=s.obs_dim, act_dim=s.act_dim, n_agents=2, 
n_steps=1 +# ) +# learning_role_n.rl_algorithm.update_policy() +# assert learning_role_n.rl_algorithm.n_updates == 0 + + +# @pytest.mark.require_learning +# def test_mappo_update_policy_increments_n_updates(learning_role_n, monkeypatch): +# learning_role_n.initialize_policy() +# _setup_for_update(learning_role_n) +# monkeypatch.setattr(learning_role_n, "get_progress_remaining", lambda: 1.0) + +# s = learning_role_n.rl_strats["agent_0"] +# learning_role_n.buffer = _make_rollout_buffer( +# obs_dim=s.obs_dim, act_dim=s.act_dim, n_agents=2, n_steps=20 +# ) +# learning_role_n.rl_algorithm.update_policy() +# assert learning_role_n.rl_algorithm.n_updates == 1 + + +# @pytest.mark.require_learning +# def test_mappo_update_policy_resets_buffer(learning_role_n, monkeypatch): +# learning_role_n.initialize_policy() +# _setup_for_update(learning_role_n) +# monkeypatch.setattr(learning_role_n, "get_progress_remaining", lambda: 1.0) + +# s = learning_role_n.rl_strats["agent_0"] +# learning_role_n.buffer = _make_rollout_buffer( +# obs_dim=s.obs_dim, act_dim=s.act_dim, n_agents=2, n_steps=20 +# ) +# assert learning_role_n.buffer.pos > 0 + +# learning_role_n.rl_algorithm.update_policy() +# assert learning_role_n.buffer.pos == 0, ( +# "RolloutBuffer.reset() must be called at the end of every PPO update" +# ) + + +# @pytest.mark.require_learning +# def test_mappo_update_policy_multiple_epochs(base_learning_config, monkeypatch): +# config = copy(base_learning_config) +# config["learning_config"].on_policy.n_epochs = 3 + +# learn = Learning(config["learning_config"], start, end) +# for agent_id in ("agent_0", "agent_1"): +# strat = LearningStrategy(**config, learning_role=learn) +# strat.unit_id = agent_id +# learn.rl_strats[agent_id] = strat +# learn.initialize_policy() +# _setup_for_update(learn) +# monkeypatch.setattr(learn, "get_progress_remaining", lambda: 1.0) + +# s = learn.rl_strats["agent_0"] +# learn.buffer = _make_rollout_buffer( +# obs_dim=s.obs_dim, act_dim=s.act_dim, 
n_agents=2, n_steps=30 +# ) + +# algo = learn.rl_algorithm +# assert algo.n_epochs == 3 +# algo.update_policy() +# assert algo.n_updates == 1 + + +# @pytest.mark.require_learning +# def test_mappo_update_policy_actor_weights_change(learning_role_n, monkeypatch): +# learning_role_n.initialize_policy() +# _setup_for_update(learning_role_n) +# monkeypatch.setattr(learning_role_n, "get_progress_remaining", lambda: 1.0) + +# s = learning_role_n.rl_strats["agent_0"] +# pre_actor = deepcopy(s.actor.state_dict()) +# pre_critic = deepcopy(s.critics.state_dict()) + +# learning_role_n.buffer = _make_rollout_buffer( +# obs_dim=s.obs_dim, act_dim=s.act_dim, n_agents=2, n_steps=20 +# ) +# learning_role_n.rl_algorithm.update_policy() + +# post_actor = s.actor.state_dict() +# post_critic = s.critics.state_dict() + +# actor_changed = any( +# not th.equal(pre_actor[k], post_actor[k]) for k in pre_actor +# ) +# critic_changed = any( +# not th.equal(pre_critic[k], post_critic[k]) for k in pre_critic +# ) +# assert actor_changed, "Actor weights must change after a PPO update" +# assert critic_changed, "Critic weights must change after a PPO update" + + +@pytest.mark.require_learning +def test_mappo_initialize_policy_dimension_mismatch(base_learning_config): + config = copy(base_learning_config) + config["num_timeseries_obs_dim"] = 1 + + learn = Learning(config["learning_config"], start, end) + strat_0 = LearningStrategy(**config, learning_role=learn) + + config_bad = copy(config) + config_bad["act_dim"] = 99 + strat_1 = LearningStrategy(**config_bad, learning_role=learn) + + learn.rl_strats["agent_0"] = strat_0 + learn.rl_strats["agent_1"] = strat_1 + + with pytest.raises(ValueError, match="All action dimensions must be the same"): + learn.rl_algorithm.initialize_policy() + + +@pytest.mark.require_learning +def test_mappo_initialize_policy_all_dimensions_match(base_learning_config): + config = copy(base_learning_config) + config["num_timeseries_obs_dim"] = 1 + + learn = 
Learning(config["learning_config"], start, end) + for agent_id in ("agent_0", "agent_1", "agent_2"): + learn.rl_strats[agent_id] = LearningStrategy(**config, learning_role=learn) + + try: + learn.rl_algorithm.initialize_policy() + except Exception as e: + pytest.fail(f"initialize_policy raised an unexpected error: {e}") From eb6522fbc5c4838c12c64ffff71dc49e0c55456e Mon Sep 17 00:00:00 2001 From: Harshul-18 Date: Tue, 28 Apr 2026 08:44:31 +0200 Subject: [PATCH 32/44] Moved get_action from learning_strategies to RLAlgorithm --- .../algorithms/base_algorithm.py | 62 ++++++++++++++ .../algorithms/mappo.py | 26 +++++- .../reinforcement_learning/learning_role.py | 31 ++++--- .../reinforcement_learning/learning_utils.py | 32 +++++++ .../neural_network_architecture.py | 84 ++++++------------- assume/strategies/learning_strategies.py | 70 ++-------------- examples/inputs/example_02a/config.yaml | 2 +- examples/inputs/example_02b/config.yaml | 4 +- 8 files changed, 172 insertions(+), 139 deletions(-) diff --git a/assume/reinforcement_learning/algorithms/base_algorithm.py b/assume/reinforcement_learning/algorithms/base_algorithm.py index d1ac21ddb..c6900d564 100644 --- a/assume/reinforcement_learning/algorithms/base_algorithm.py +++ b/assume/reinforcement_learning/algorithms/base_algorithm.py @@ -13,6 +13,8 @@ transfer_weights, ) +from assume.common.base import LearningStrategy + logger = logging.getLogger(__name__) @@ -95,6 +97,25 @@ def update_learning_rate( for param_group in optimizer.param_groups: param_group["lr"] = learning_rate + def get_action( + self, strategy: "LearningStrategy", obs: th.Tensor + ) -> tuple[th.Tensor, th.Tensor]: + """Sample an action for strategy given observation *obs*. + + Each concrete algorithm overrides this method with its own sampling + logic. + + Args: + strategy: The TorchLearningStrategy instance requesting an action. + obs: Flat observation tensor for a single time-step. 
+ + Returns: + A (action, noise) tuple, both tensors on the same device as strategy. + """ + raise NotImplementedError( + f"{type(self).__name__} must implement get_action()" + ) + def update_policy(self) -> None: """Update the policy parameters. @@ -185,6 +206,47 @@ def __init__(self, learning_role): """ super().__init__(learning_role) + def get_action( + self, strategy: "LearningStrategy", obs: th.Tensor + ) -> tuple[th.Tensor, th.Tensor]: + """Sample an action using the off-policy strategy. + + During learning mode the agent either performs pure-noise initial + exploration (first N episodes) or uses its deterministic actor plus + Gaussian action noise. During evaluation mode the actor is used + without any noise. + + This default implementation is shared by TD3 and DDPG. PPO overrides + it with its own stochastic Gaussian sampling. + """ + if strategy.learning_mode and not strategy.evaluation_mode: + if strategy.collect_initial_experience_mode: + # Pure Gaussian noise for initial random exploration + noise = th.normal( + mean=0.0, + std=strategy.exploration_noise_std, + size=(strategy.act_dim,), + dtype=strategy.float_type, + device=strategy.device, + ) + return noise, noise + + action = strategy.actor(obs).detach() + noise = strategy.action_noise.noise( + device=strategy.device, dtype=strategy.float_type + ) + action = th.clamp( + action + noise, + strategy.actor.min_output, + strategy.actor.max_output, + ) + return action, noise + + # Evaluation + action = strategy.actor(obs).detach() + noise = th.zeros(strategy.act_dim, dtype=strategy.float_type, device=strategy.device) + return action, noise + def save_params(self, directory: str) -> None: """Save actor and critic network parameters. 
diff --git a/assume/reinforcement_learning/algorithms/mappo.py b/assume/reinforcement_learning/algorithms/mappo.py index b827b93c4..affda23c2 100644 --- a/assume/reinforcement_learning/algorithms/mappo.py +++ b/assume/reinforcement_learning/algorithms/mappo.py @@ -103,7 +103,31 @@ def __init__( # Note: save_params, save_critic_params, save_actor_params, load_params, # load_critic_params, load_actor_params, initialize_policy are inherited from A2CAlgorithm - + def get_action( + self, strategy, obs: th.Tensor + ) -> tuple[th.Tensor, th.Tensor]: + """Sample a stochastic action. + + In learning mode the actor's Gaussian policy is sampled and the + log-probability is cached on the strategy for later use in + _store_to_buffer_and_update_sync. In evaluation mode the + deterministic mean action is returned instead. + + PPO does *not* have an initial-exploration phase — the stochastic + policy provides sufficient exploration from the very first episode. + """ + if strategy.learning_mode and not strategy.evaluation_mode: + action, log_prob = strategy.actor.get_action_and_log_prob(obs.unsqueeze(0)) + action = action.squeeze(0).detach() + # Cache log-prob for rollout buffer; value is recomputed centrally + strategy._last_log_prob = log_prob.squeeze(0).detach() + noise = th.zeros_like(action, dtype=strategy.float_type) + return action, noise + + # Evaluation + action = strategy.actor(obs, deterministic=True).detach() + noise = th.zeros_like(action, dtype=strategy.float_type) + return action, noise def create_actors(self) -> None: """Create stochastic actor networks for all agents. 
diff --git a/assume/reinforcement_learning/learning_role.py b/assume/reinforcement_learning/learning_role.py index b6272e7dc..6904349ba 100644 --- a/assume/reinforcement_learning/learning_role.py +++ b/assume/reinforcement_learning/learning_role.py @@ -136,10 +136,12 @@ def __init__( self.all_rewards = defaultdict(lambda: defaultdict(list)) self.all_regrets = defaultdict(lambda: defaultdict(list)) self.all_profits = defaultdict(lambda: defaultdict(list)) - # PPO algorithm specific caches for on-policy learning - self.all_values = defaultdict(lambda: defaultdict(list)) - self.all_log_probs = defaultdict(lambda: defaultdict(list)) - self.all_dones = defaultdict(lambda: defaultdict(list)) + # On-policy (PPO/MAPPO) only: value estimates, log-probs, and done + # flags collected per time-step for GAE computation. + if is_on_policy(self.learning_config.algorithm): + self.all_values = defaultdict(lambda: defaultdict(list)) + self.all_log_probs = defaultdict(lambda: defaultdict(list)) + self.all_dones = defaultdict(lambda: defaultdict(list)) def on_ready(self): """Set up the learning role for reinforcement learning training. 
@@ -264,10 +266,15 @@ async def store_to_buffer_and_update(self) -> None: current_noises = self.all_noises current_regrets = self.all_regrets current_profits = self.all_profits - # PPO specific caches - current_values = self.all_values - current_log_probs = self.all_log_probs - current_dones = self.all_dones + # On-policy (PPO/MAPPO) only caches + if is_on_policy(self.learning_config.algorithm): + current_values = self.all_values + current_log_probs = self.all_log_probs + current_dones = self.all_dones + else: + current_values = defaultdict(lambda: defaultdict(list)) + current_log_probs = defaultdict(lambda: defaultdict(list)) + current_dones = defaultdict(lambda: defaultdict(list)) # Reset cache dicts immediately with new defaultdicts self.all_obs = defaultdict(lambda: defaultdict(list)) @@ -276,10 +283,10 @@ async def store_to_buffer_and_update(self) -> None: self.all_noises = defaultdict(lambda: defaultdict(list)) self.all_regrets = defaultdict(lambda: defaultdict(list)) self.all_profits = defaultdict(lambda: defaultdict(list)) - # PPO specific resets - self.all_values = defaultdict(lambda: defaultdict(list)) - self.all_log_probs = defaultdict(lambda: defaultdict(list)) - self.all_dones = defaultdict(lambda: defaultdict(list)) + if is_on_policy(self.learning_config.algorithm): + self.all_values = defaultdict(lambda: defaultdict(list)) + self.all_log_probs = defaultdict(lambda: defaultdict(list)) + self.all_dones = defaultdict(lambda: defaultdict(list)) # Get timestamps from cache we took all_timestamps = sorted(current_obs.keys()) diff --git a/assume/reinforcement_learning/learning_utils.py b/assume/reinforcement_learning/learning_utils.py index be1f37ee8..822f451d9 100644 --- a/assume/reinforcement_learning/learning_utils.py +++ b/assume/reinforcement_learning/learning_utils.py @@ -375,6 +375,38 @@ def transfer_weights( return new_state_copy +def xavier_init_weights(module: th.nn.Module) -> None: + """Apply Xavier uniform initialisation to all Linear layers in 
*module*. + + Xavier initialisation keeps activation variance roughly constant across + layers, which works well for tanh / softsign activations (TD3/DDPG actors + and all Q-network critics). + + Args: + module: Any ``nn.Module`` whose ``Linear`` sub-layers should be initialised. + """ + if isinstance(module, th.nn.Linear): + th.nn.init.xavier_uniform_(module.weight) + th.nn.init.zeros_(module.bias) + + +def orthogonal_init_weights(module: th.nn.Module, gain: float = 1.0) -> None: + """Apply orthogonal initialisation to a single Linear layer. + + Orthogonal initialisation is the standard choice for PPO because it + preserves gradient norms better than Xavier when combined with ReLU + activations and a Gaussian policy head. + + Args: + module: An ``nn.Linear`` layer to initialise. + gain: Scaling factor for the weight matrix. Common choices: + ``sqrt(2)`` for hidden layers, ``0.01`` for the output / policy head. + """ + if isinstance(module, th.nn.Linear): + th.nn.init.orthogonal_(module.weight, gain=gain) + th.nn.init.zeros_(module.bias) + + def encode_time_features(start: datetime) -> list: """ Encode time features for a given datetime object. 
diff --git a/assume/reinforcement_learning/neural_network_architecture.py b/assume/reinforcement_learning/neural_network_architecture.py index f33dba168..0ecd5c2d0 100644 --- a/assume/reinforcement_learning/neural_network_architecture.py +++ b/assume/reinforcement_learning/neural_network_architecture.py @@ -9,7 +9,11 @@ from typing import List, Tuple, Type, Optional, Union -from assume.reinforcement_learning.learning_utils import activation_function_limit +from assume.reinforcement_learning.learning_utils import ( + activation_function_limit, + xavier_init_weights, + orthogonal_init_weights, +) class Critic(nn.Module): @@ -69,14 +73,8 @@ def _build_q_network(self) -> nn.ModuleList: return layers def _init_weights(self): - """Apply Xavier initialization to all layers.""" - - def init_layer(m): - if isinstance(m, nn.Linear): - nn.init.xavier_uniform_(m.weight) - nn.init.zeros_(m.bias) - - self.apply(init_layer) + """Apply Xavier uniform initialisation to all Linear layers.""" + self.apply(xavier_init_weights) class CriticTD3(Critic): @@ -218,16 +216,11 @@ def __init__( self._init_weights() def _init_weights(self) -> None: - """Apply Orthogonal initialization with appropriate gains.""" - def init_layer(m): - if isinstance(m, nn.Linear): - if m.out_features == 1: # Output layer - nn.init.orthogonal_(m.weight, gain=0.01) - else: # Hidden layers - nn.init.orthogonal_(m.weight, gain=np.sqrt(2)) - nn.init.zeros_(m.bias) - - self.apply(init_layer) + """Apply orthogonal initialisation: sqrt(2) gain for hidden layers, 1.0 for the value head.""" + for layer in self.v_layers: + if isinstance(layer, nn.Linear): + gain = 0.01 if layer.out_features == 1 else np.sqrt(2) + orthogonal_init_weights(layer, gain=gain) def forward(self, obs: th.Tensor) -> th.Tensor: """Returns V value.""" @@ -269,14 +262,8 @@ def __init__(self, obs_dim: int, act_dim: int, float_type, *args, **kwargs): self._init_weights() def _init_weights(self): - """Apply Xavier initialization to all layers.""" - - def 
init_layer(m): - if isinstance(m, nn.Linear): - nn.init.xavier_uniform_(m.weight) - nn.init.zeros_(m.bias) - - self.apply(init_layer) + """Apply Xavier uniform initialisation to all Linear layers.""" + self.apply(xavier_init_weights) def forward(self, obs): """Forward pass for action prediction.""" @@ -389,7 +376,7 @@ def __init__( self.act_dim = act_dim self.float_type = float_type - self.activation = "softsign" # or "tanh", "sigmoid", "relu" + self.activation = "tanh" # or "softsign", "sigmoid", "relu" if self.activation not in activation_function_limit: raise ValueError( @@ -412,24 +399,10 @@ def __init__( self._init_weights() def _init_weights(self) -> None: - """Apply orthogonal initialization with appropriate gains.""" - def init_layer(m): - if isinstance(m, nn.Linear): - if m.out_features == self.act_dim: # Output layer (mean) - nn.init.orthogonal_(m.weight, gain=0.01) - else: # Hidden layers - nn.init.orthogonal_(m.weight, gain=np.sqrt(2)) - nn.init.zeros_(m.bias) - - # Initialize hidden layers with larger gain - nn.init.orthogonal_(self.FC1.weight, gain=np.sqrt(2)) - nn.init.orthogonal_(self.FC2.weight, gain=np.sqrt(2)) - nn.init.zeros_(self.FC1.bias) - nn.init.zeros_(self.FC2.bias) - - # Initialize output layer with small gain - nn.init.orthogonal_(self.mean_layer.weight, gain=0.01) - nn.init.zeros_(self.mean_layer.bias) + """Apply orthogonal initialisation with appropriate gains.""" + orthogonal_init_weights(self.FC1, gain=np.sqrt(2)) + orthogonal_init_weights(self.FC2, gain=np.sqrt(2)) + orthogonal_init_weights(self.mean_layer, gain=0.01) def forward(self, obs: th.Tensor, deterministic: bool = False) -> th.Tensor: """Forward pass""" @@ -550,7 +523,7 @@ def __init__( self.unique_obs_dim = unique_obs_dim self.num_timeseries_obs_dim = num_timeseries_obs_dim - self.activation = "softsign" + self.activation = "tanh" self.min_output = activation_function_limit[self.activation]["min"] self.max_output = activation_function_limit[self.activation]["max"] 
self.activation_function = activation_function_limit[self.activation]["func"] @@ -581,22 +554,15 @@ def __init__( self._init_weights() def _init_weights(self) -> None: - """Apply orthogonal initialization.""" - def init_layer(m): - if isinstance(m, nn.Linear): - nn.init.orthogonal_(m.weight, gain=1.0) - nn.init.zeros_(m.bias) - elif isinstance(m, nn.LSTMCell): + """Apply orthogonal initialisation.""" + for m in self.modules(): + if isinstance(m, nn.LSTMCell): nn.init.orthogonal_(m.weight_ih, gain=1.0) nn.init.orthogonal_(m.weight_hh, gain=1.0) nn.init.zeros_(m.bias_ih) nn.init.zeros_(m.bias_hh) - - self.apply(init_layer) - - # Initialize output layer with small gain - nn.init.orthogonal_(self.mean_layer.weight, gain=0.01) - nn.init.zeros_(self.mean_layer.bias) + orthogonal_init_weights(self.FC1, gain=np.sqrt(2)) + orthogonal_init_weights(self.mean_layer, gain=0.01) def _compute_mean(self, obs: th.Tensor) -> th.Tensor: """Compute policy mean action from LSTM features.""" diff --git a/assume/strategies/learning_strategies.py b/assume/strategies/learning_strategies.py index 83d50a975..036ff8097 100644 --- a/assume/strategies/learning_strategies.py +++ b/assume/strategies/learning_strategies.py @@ -22,10 +22,7 @@ from assume.common.utils import min_max_scale from assume.reinforcement_learning.algorithms import actor_architecture_aliases from assume.reinforcement_learning.learning_utils import NormalActionNoise -from assume.common.base import ( - is_off_policy, - is_on_policy, -) +from assume.common.base import is_off_policy logger = logging.getLogger(__name__) @@ -251,8 +248,10 @@ def get_individual_observations( return np.array([]) def get_actions(self, next_observation): - """ - Determines actions based on the current observation, applying noise for exploration if in learning mode. + """Determine action and exploration noise for the current observation. + + All algorithm-specific sampling logic lives in the + algorithm class via get_action. 
Args ---- @@ -272,64 +271,7 @@ def get_actions(self, next_observation): solely on noise to cover the action space broadly. For PPO, we also store log_prob and value estimates for later use. """ - - current_algorithm = self.learning_config.algorithm - - # distinction whether we are in learning mode or not to handle exploration realised with noise - if self.learning_mode and not self.evaluation_mode: - # if we are in learning mode the first x episodes we want to explore the entire action space - # to get a good initial experience, in the area around the costs of the agent - # Only use initial experience collection for off-policy algorithms (not PPO) - if self.collect_initial_experience_mode and is_off_policy(current_algorithm): - # define current action as solely noise - noise = th.normal( - mean=0.0, - std=self.exploration_noise_std, - size=(self.act_dim,), - dtype=self.float_type, - device=self.device, - ) - - # ============================================================================= - # 2.1 Get Actions and handle exploration - # ============================================================================= - # Using only noise as the action to enforce exploration. - curr_action = noise - - else: - # Using the policy forMAPPO (no initial random exploration). - if current_algorithm == "mappo": - # Using get_action_and_log_prob for proper PPO stochastic sampling. - curr_action, log_prob = self.actor.get_action_and_log_prob(next_observation.unsqueeze(0)) - curr_action = curr_action.squeeze(0).detach() - self._last_log_prob = log_prob.squeeze(0).detach() - - # Using stochastic PPO policy with no external noise. 
- noise = th.zeros_like(curr_action, dtype=self.float_type) - else: - # TD3/DDPG: if we are not in the initial exploration phase we chose the action with the actor neural net - # and add noise to the action - curr_action = self.actor(next_observation).detach() - noise = self.action_noise.noise( - device=self.device, dtype=self.float_type - ) - curr_action += noise - - # make sure that noise adding does not exceed the actual output of the NN as it pushes results in a direction that actor can't even reach - curr_action = th.clamp( - curr_action, self.actor.min_output, self.actor.max_output - ) - else: - # if we are not in learning mode we just use the actor neural net to get the action without adding noise - if current_algorithm == "mappo": - # For PPO evaluation, use deterministic action (mean) - curr_action = self.actor(next_observation, deterministic=True).detach() - else: - curr_action = self.actor(next_observation).detach() - # noise is an tensor with zeros, because we are not in learning mode - noise = th.zeros_like(curr_action, dtype=self.float_type) - - return curr_action, noise + return self.learning_role.rl_algorithm.get_action(self, next_observation) class EnergyLearningStrategy(TorchLearningStrategy, MinMaxStrategy): diff --git a/examples/inputs/example_02a/config.yaml b/examples/inputs/example_02a/config.yaml index b3dbfb9c8..0a34556fb 100644 --- a/examples/inputs/example_02a/config.yaml +++ b/examples/inputs/example_02a/config.yaml @@ -4,7 +4,7 @@ base: start_date: 2019-03-01 00:00 - end_date: 2019-03-07 00:00 + end_date: 2019-03-30 00:00 time_step: 1h save_frequency_hours: null seed: null diff --git a/examples/inputs/example_02b/config.yaml b/examples/inputs/example_02b/config.yaml index f01903b10..d69f20e29 100644 --- a/examples/inputs/example_02b/config.yaml +++ b/examples/inputs/example_02b/config.yaml @@ -4,7 +4,7 @@ base: start_date: 2019-03-01 00:00 - end_date: 2019-03-07 00:00 + end_date: 2019-03-29 00:00 time_step: 1h save_frequency_hours: null 
seed: null @@ -17,7 +17,7 @@ base: max_bid_price: 100 algorithm: matd3 learning_rate: 0.001 - training_episodes: 30 + training_episodes: 150 train_freq: 100h batch_size: 128 gamma: 0.99 From d2f8f9dc04852fa23eaf549f3696bac46060c3a5 Mon Sep 17 00:00:00 2001 From: Harshul-18 Date: Fri, 8 May 2026 01:55:55 +0200 Subject: [PATCH 33/44] Fixed the test_learning_role.py by updating the OffPolicyConfig parameters passing structure --- tests/test_learning_role.py | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/tests/test_learning_role.py b/tests/test_learning_role.py index f72317184..aecea8a40 100644 --- a/tests/test_learning_role.py +++ b/tests/test_learning_role.py @@ -8,7 +8,7 @@ import pytest try: - from assume.common.base import LearningConfig + from assume.common.base import LearningConfig, OffPolicyConfig from assume.reinforcement_learning.learning_role import ( Learning, LearningStrategy, @@ -37,11 +37,13 @@ def test_learning_init(): learning_mode=True, evaluation_mode=False, training_episodes=3, - episodes_collecting_initial_experience=1, continue_learning=False, trained_policies_save_path=None, early_stopping_steps=10, early_stopping_threshold=0.05, + off_policy=OffPolicyConfig( + episodes_collecting_initial_experience=1, + ), ), } @@ -89,11 +91,13 @@ async def learning_role(): learning_mode=True, evaluation_mode=True, # evaluation mode to skip buffer/policy update training_episodes=3, - episodes_collecting_initial_experience=1, continue_learning=False, trained_policies_save_path=None, early_stopping_steps=10, early_stopping_threshold=0.05, + off_policy=OffPolicyConfig( + episodes_collecting_initial_experience=1, + ), ), } From 798f7a0b0f7b4bd9f743553eadb0f77974c37616 Mon Sep 17 00:00:00 2001 From: Harshul-18 Date: Fri, 8 May 2026 02:16:22 +0200 Subject: [PATCH 34/44] Fixed the test_integration_cli.py by adding market_mechanism which got erased due to updates before in example_02a/config.yaml tiny configuration --- 
examples/inputs/example_02a/config.yaml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/examples/inputs/example_02a/config.yaml b/examples/inputs/example_02a/config.yaml index 0a34556fb..d402d90d1 100644 --- a/examples/inputs/example_02a/config.yaml +++ b/examples/inputs/example_02a/config.yaml @@ -176,4 +176,5 @@ tiny: maximum_bid_volume: 100000 maximum_bid_price: 3000 minimum_bid_price: -500 - price_unit: EUR/MWh \ No newline at end of file + price_unit: EUR/MWh + market_mechanism: pay_as_clear \ No newline at end of file From 3a1366f7a19eb1c9c163ea0744176f06ffd0f55c Mon Sep 17 00:00:00 2001 From: kim-mskw Date: Fri, 8 May 2026 09:42:57 +0200 Subject: [PATCH 35/44] - config cleaning and dokstringc chnages of AlgorithmConfig data class - do gradient steps and episode_collecting_initial_expereince tests actually in OffPolicy algotihm config --- assume/common/base.py | 58 +++++++++++++++++++++++++++++++------------ 1 file changed, 42 insertions(+), 16 deletions(-) diff --git a/assume/common/base.py b/assume/common/base.py index f84c04724..68ff1c8bd 100644 --- a/assume/common/base.py +++ b/assume/common/base.py @@ -761,15 +761,12 @@ class AlgorithmConfig: Base configuration for algorithm-specific parameters. Parameters: - actor_architecture (str): The architecture of the neural networks used for the actors. Options include - "mlp" (Multi-Layer Perceptron) and "lstm" (Long Short-Term Memory). Default is "mlp". batch_size (int): The batch size of experiences sampled from the replay buffer for each training update. Larger batches provide more stable gradients but require more memory. Default is 128. gamma (float): The discount factor for future rewards, ranging from 0 to 1. Default is 0.99. train_freq (str): Defines the frequency at which networks are updated. Default is "24h". 
""" - # actor_architecture: str = "mlp" batch_size: int = 128 gamma: float = 0.99 train_freq: str = "24h" @@ -778,14 +775,16 @@ class AlgorithmConfig: # Algorithm category mapping ALGORITHM_CATEGORIES = { "mappo": "on-policy", - "matd3": "off-policy", - "maddpg": "off-policy" + "matd3": "off-policy", + "maddpg": "off-policy", } + def is_on_policy(algorithm_name: str) -> bool: """Check if algorithm is on-policy.""" return ALGORITHM_CATEGORIES.get(algorithm_name) == "on-policy" + def is_off_policy(algorithm_name: str) -> bool: """Check if algorithm is off-policy.""" return ALGORITHM_CATEGORIES.get(algorithm_name) == "off-policy" @@ -803,15 +802,25 @@ class OffPolicyConfig(AlgorithmConfig): episodes_collecting_initial_experience (int): The number of episodes at the start during which random actions are chosen instead of using the actor network. Default is 5. gradient_steps (int): The number of gradient descent steps performed during each training update. Default is 100. - noise_dt (int): The time step parameter for the Ornstein-Uhlenbeck process. Default is 1. - noise_scale (int): The scale factor multiplied by the noise drawn from the distribution. Default is 1. - noise_sigma (float): The standard deviation of the noise distribution for exploration. Default is 0.1. - action_noise_schedule (str | None): Which action noise decay schedule to use. Default is None. - policy_delay (int): The frequency (in gradient steps) at which the actor policy is updated. Default is 2. - tau (float): The soft update coefficient for updating target networks. Default is 0.005. - target_policy_noise (float): The standard deviation of noise added to target policy actions. Default is 0.2. - target_noise_clip (float): The maximum absolute value for clipping the target policy noise. Default is 0.5. + actor_architecture (str): The architecture of the neural networks used for the actors. Options include + "mlp" (Multi-Layer Perceptron) and "lstm" (Long Short-Term Memory). Default is "mlp". 
replay_buffer_size (int): The maximum number of transitions stored in the replay buffer. Default is 50000. + policy_delay (int): The frequency (in gradient steps) at which the actor policy is updated. + Some algorithms update the critic more frequently than the actor to stabilize training. Default is 2. + noise_sigma (float): The standard deviation of the Ornstein-Uhlenbeck or Gaussian noise distribution + used to generate exploration noise added to actions. Default is 0.1. + noise_scale (int): The scale factor multiplied by the noise drawn from the distribution. + Larger values increase exploration. Default is 1. + noise_dt (int): The time step parameter for the Ornstein-Uhlenbeck process, which determines how + quickly the noise decays over time. Used for noise scheduling. Default is 1. + action_noise_schedule (str | None): Which action noise decay schedule to use. Currently only "linear" + decay is available, which linearly decreases exploration noise over training. Default is "linear". + tau (float): The soft update coefficient for updating target networks. Controls how slowly target + networks track the main networks. Smaller values mean slower updates. Default is 0.005. + target_policy_noise (float): The standard deviation of noise added to target policy actions during + critic updates. This smoothing helps prevent overfitting to narrow policy peaks. Default is 0.2. + target_noise_clip (float): The maximum absolute value for clipping the target policy noise. + Prevents the noise from being too large. Default is 0.5. 
""" episodes_collecting_initial_experience: int = 5 @@ -827,6 +836,22 @@ class OffPolicyConfig(AlgorithmConfig): target_noise_clip: float = 0.5 replay_buffer_size: int = 50000 + def __post_init__(self): + # if we do not have initial experience collected we will get an error as no samples are available on the + # buffer from which we can draw experience to adapt the strategy, hence we set it to minimum one episode + if self.episodes_collecting_initial_experience < 1: + logger.warning( + f"episodes_collecting_initial_experience need to be at least 1 to sample from buffer, got {self.episodes_collecting_initial_experience}. setting to 1" + ) + + self.episodes_collecting_initial_experience = 1 + + # check that gradient_steps is positive + if self.gradient_steps <= 0: + raise ValueError( + f"gradient_steps need to be positive, got {self.gradient_steps}" + ) + @dataclass class OnPolicyConfig(AlgorithmConfig): @@ -843,6 +868,8 @@ class OnPolicyConfig(AlgorithmConfig): max_grad_norm (float): Maximum gradient norm for clipping. Default is 0.5. vf_coef (float): Coefficient for value function term in loss. Default is 0.5. n_epochs (int): Number of optimization epochs per rollout. Default is 10. + actor_architecture (str): The architecture of the neural networks used for the actors. Options include + "mlp" (Multi-Layer Perceptron) and "lstm" (Long Short-Term Memory). Default is "mlp". """ clip_ratio: float = 0.1 @@ -901,7 +928,7 @@ class LearningConfig: training is terminated early. Default is 0.05. algorithm (str): Specifies which reinforcement learning algorithm to use. Options include "matd3" - (Multi-Agent Twin Delayed Deep Deterministic Policy Gradient), "maddpg", and "mappo". Default is "matd3". + (Multi-Agent Twin Delayed Deep Deterministic Policy Gradient), "maddpg" (Multi-Agent Deep Deterministic Policy Gradient), and "mappo" (Multi-Agent Proximal Policy Optimization). Default is "matd3". gamma (float): The discount factor for future rewards, ranging from 0 to 1. 
Higher values give more weight to long-term rewards in decision-making. Default is 0.99. actor_architecture (str): The architecture of the neural networks used for the actors. Options include @@ -949,12 +976,11 @@ def __post_init__(self): self.on_policy = OnPolicyConfig(**self.on_policy) for config in [self.off_policy, self.on_policy]: - # config.actor_architecture = self.actor_architecture if config: config.batch_size = self.batch_size config.gamma = self.gamma config.train_freq = self.train_freq - + self.off_policy.actor_architecture = self.actor_architecture self.on_policy.actor_architecture = self.actor_architecture From fc88fd3f56aa041eb6ba1c8b10ddb40eeee27856 Mon Sep 17 00:00:00 2001 From: kim-mskw Date: Fri, 8 May 2026 10:14:58 +0200 Subject: [PATCH 36/44] - move off-policy get_actions behavior to respective algorithm file - make default behavior A2CAlgorithm class independent of off-/on-policy to avoid mistakes --- .../algorithms/base_algorithm.py | 189 +++++++----------- .../algorithms/maddpg.py | 111 +++++++--- .../algorithms/matd3.py | 65 +++++- 3 files changed, 218 insertions(+), 147 deletions(-) diff --git a/assume/reinforcement_learning/algorithms/base_algorithm.py b/assume/reinforcement_learning/algorithms/base_algorithm.py index c6900d564..b1769bbb3 100644 --- a/assume/reinforcement_learning/algorithms/base_algorithm.py +++ b/assume/reinforcement_learning/algorithms/base_algorithm.py @@ -8,44 +8,46 @@ import torch as th from torch.optim import AdamW +from assume.common.base import LearningStrategy from assume.reinforcement_learning.algorithms import actor_architecture_aliases from assume.reinforcement_learning.learning_utils import ( transfer_weights, ) -from assume.common.base import LearningStrategy - logger = logging.getLogger(__name__) class RLAlgorithm: """Base reinforcement learning algorithm class. - - This is the foundation class for all Reinforcement Learning algorithms in the framework. 
- To implement a custom RL algorithm, subclass this class and override the `update_policy` method. - + + This is the foundation class for all Reinforcement Learning algorithms in the framework. + To implement a custom RL algorithm, subclass this class and override the `update_policy` and `get_action` methods. + The class provides common functionality for: - Learning rate scheduling - Parameter saving/loading - Device management - + Attributes: learning_role: The learning role object containing configuration and strategies. learning_config: Configuration parameters from the learning role. device: The computation device (CPU/GPU) for tensors. float_type: The floating point precision type for computations. actor_architecture_class: The actor network architecture class. - + Example: >>> class CustomAlgorithm(RLAlgorithm): ... def update_policy(self): ... # Custom policy update logic ... pass + ... def get_action(self, strategy, obs): + ... # Custom action selection logic + ... pass """ def __init__(self, learning_role): """Initialize the RL algorithm. - + Args: learning_role: Learning role object containing configuration and strategies. Must be an instance of the Learning class. @@ -73,19 +75,19 @@ def update_learning_rate( learning_rate: float, ) -> None: """Update optimizer learning rates. - + Sets the learning rate for one or more optimizers. Handles both single optimizers and lists of optimizers uniformly. - + Args: optimizers: A single optimizer or list of optimizers to update. learning_rate: The new learning rate value to set. 
- + Note: Adapted from Stable Baselines 3: - https://github.com/DLR-RM/stable-baselines3/blob/512eea923afad6f6da4bb53d72b6ea4c6d856e59/stable_baselines3/common/base_class.py#L286 - https://github.com/DLR-RM/stable-baselines3/blob/512eea923afad6f6da4bb53d72b6ea4c6d856e59/stable_baselines3/common/utils.py#L68 - + Example: >>> optimizer = AdamW(model.parameters(), lr=0.001) >>> algorithm.update_learning_rate(optimizer, 0.0001) @@ -112,20 +114,18 @@ def get_action( Returns: A (action, noise) tuple, both tensors on the same device as strategy. """ - raise NotImplementedError( - f"{type(self).__name__} must implement get_action()" - ) + raise NotImplementedError(f"{type(self).__name__} must implement get_action()") def update_policy(self) -> None: """Update the policy parameters. - + This method must be overridden by subclasses to implement the specific policy update logic for each RL algorithm. The base implementation raises an error to enforce this requirement. - + Raises: NotImplementedError: If called on the base class without override. - + Example: >>> class CustomAlgorithm(RLAlgorithm): ... def update_policy(self): @@ -140,17 +140,17 @@ def update_policy(self) -> None: def load_obj(self, directory: str): """Load a serialized object from directory. - + Loads a PyTorch serialized object from the specified directory path. The object is loaded onto the device specified by the algorithm's configuration. - + Args: directory: Path to the directory containing the serialized object. Should point to a valid .pt file. - + Returns: object: The deserialized Python object. - + Example: >>> model_state = algorithm.load_obj('/path/to/checkpoint.pt') """ @@ -158,13 +158,13 @@ def load_obj(self, directory: str): def load_params(self, directory: str) -> None: """Load learning parameters from disk. - + Abstract method that should be implemented by subclasses to load algorithm-specific parameters from the specified directory. 
- + Args: directory: Path to the directory containing saved parameters. - + Note: This is an abstract method that must be overridden by subclasses. """ @@ -172,21 +172,21 @@ def load_params(self, directory: str) -> None: class A2CAlgorithm(RLAlgorithm): """Base actor-critic algorithm class. - + Provides shared functionality for actor-critic reinforcement learning algorithms including parameter management, network initialization, and saving/loading utilities. This serves as the foundation for algorithms like MATD3, MADDPG, and MAPPO. - + The class handles: - Actor and critic network creation and management - Target network synchronization (when applicable) - Parameter saving and loading - Weight transfer between different agent configurations - + Attributes: uses_target_networks: Whether this algorithm uses target networks. TD3 and DDPG use target networks (True), PPO does not (False). - + Example: >>> class ActorCriticAlgorithm(A2CAlgorithm): ... def update_policy(self): @@ -200,63 +200,22 @@ class A2CAlgorithm(RLAlgorithm): def __init__(self, learning_role): """Initialize the actor-critic algorithm. - + Args: learning_role: Learning role object containing configuration and strategies. """ super().__init__(learning_role) - def get_action( - self, strategy: "LearningStrategy", obs: th.Tensor - ) -> tuple[th.Tensor, th.Tensor]: - """Sample an action using the off-policy strategy. - - During learning mode the agent either performs pure-noise initial - exploration (first N episodes) or uses its deterministic actor plus - Gaussian action noise. During evaluation mode the actor is used - without any noise. - - This default implementation is shared by TD3 and DDPG. PPO overrides - it with its own stochastic Gaussian sampling. 
- """ - if strategy.learning_mode and not strategy.evaluation_mode: - if strategy.collect_initial_experience_mode: - # Pure Gaussian noise for initial random exploration - noise = th.normal( - mean=0.0, - std=strategy.exploration_noise_std, - size=(strategy.act_dim,), - dtype=strategy.float_type, - device=strategy.device, - ) - return noise, noise - - action = strategy.actor(obs).detach() - noise = strategy.action_noise.noise( - device=strategy.device, dtype=strategy.float_type - ) - action = th.clamp( - action + noise, - strategy.actor.min_output, - strategy.actor.max_output, - ) - return action, noise - - # Evaluation - action = strategy.actor(obs).detach() - noise = th.zeros(strategy.act_dim, dtype=strategy.float_type, device=strategy.device) - return action, noise - def save_params(self, directory: str) -> None: """Save actor and critic network parameters. - + Saves both actor and critic network parameters to separate subdirectories. Creates the directory structure if it doesn't exist. - + Args: directory: Base directory path where parameters will be saved. Will create 'actors/' and 'critics/' subdirectories. - + Example: >>> algorithm.save_params('/path/to/save/directory') # Creates: @@ -268,15 +227,15 @@ def save_params(self, directory: str) -> None: def save_critic_params(self, directory: str) -> None: """Save critic network parameters. - + Saves critic networks, their optimizers, and target critics (if applicable) for all registered learning strategies. Also saves agent ID ordering information to ensure proper loading. - + Args: directory: Directory path where critic parameters will be saved. Will be created if it doesn't exist. 
- + Example: >>> algorithm.save_critic_params('/path/to/critics/') """ @@ -289,7 +248,7 @@ def save_critic_params(self, directory: str) -> None: # Only save target critic if this algorithm uses target networks if self.uses_target_networks: obj["critic_target"] = strategy.target_critics.state_dict() - + path = f"{directory}/critic_{u_id}.pt" th.save(obj, path) @@ -302,14 +261,14 @@ def save_critic_params(self, directory: str) -> None: def save_actor_params(self, directory: str) -> None: """Save actor network parameters. - + Saves actor networks, their optimizers, and target actors (if applicable) for all registered learning strategies. - + Args: directory: Directory path where actor parameters will be saved. Will be created if it doesn't exist. - + Example: >>> algorithm.save_actor_params('/path/to/actors/') """ @@ -322,19 +281,19 @@ def save_actor_params(self, directory: str) -> None: # Only save target actor if this algorithm uses target networks if self.uses_target_networks: obj["actor_target"] = strategy.actor_target.state_dict() - + path = f"{directory}/actor_{u_id}.pt" th.save(obj, path) def load_params(self, directory: str) -> None: """Load actor and critic network parameters. - + Loads both actor and critic parameters from the specified directory. Calls load_critic_params() and load_actor_params() sequentially. - + Args: directory: Base directory containing 'actors/' and 'critics/' subdirectories. - + Example: >>> algorithm.load_params('/path/to/saved/parameters/') """ @@ -343,18 +302,18 @@ def load_params(self, directory: str) -> None: def load_critic_params(self, directory: str) -> None: """Load critic network parameters. - + Loads critic networks, target critics (if applicable), and optimizer states for each registered agent strategy. Handles cases where the number of agents differs between saved and current models by performing intelligent weight transfer. - + Args: directory: Base directory containing the 'critics/' subdirectory. 
- + Note: Automatically handles agent count mismatches through weight transfer. Preserves the order of agents using saved mapping information. - + Example: >>> algorithm.load_critic_params('/path/to/saved/parameters/') """ @@ -393,12 +352,12 @@ def load_critic_params(self, directory: str) -> None: try: critic_params = th.load(critic_path, weights_only=True) - + # Required keys depend on whether algorithm uses target networks required_keys = ["critic", "critic_optimizer"] if self.uses_target_networks: required_keys.append("critic_target") - + for key in required_keys: if key not in critic_params: logger.warning( @@ -427,9 +386,9 @@ def load_critic_params(self, directory: str) -> None: act_dim=strategy.act_dim, unique_obs=strategy.unique_obs_dim, ) - + strategy.critics.load_state_dict(critic_weights) - + # Only transfer target critic weights if this algorithm uses target networks if self.uses_target_networks and "critic_target" in critic_params: target_critic_weights = transfer_weights( @@ -442,8 +401,10 @@ def load_critic_params(self, directory: str) -> None: unique_obs=strategy.unique_obs_dim, ) if target_critic_weights is not None: - strategy.target_critics.load_state_dict(target_critic_weights) - + strategy.target_critics.load_state_dict( + target_critic_weights + ) + logger.debug(f"Critic weights transferred for {u_id}.") except Exception as e: @@ -451,13 +412,13 @@ def load_critic_params(self, directory: str) -> None: def load_actor_params(self, directory: str) -> None: """Load actor network parameters. - + Loads actor networks, target actors (if applicable), and optimizer states for each registered agent strategy from the specified directory. - + Args: directory: The directory containing the 'actors/' subdirectory where the parameters should be loaded. - + Example: >>> algorithm.load_actor_params('/path/to/saved/parameters/') """ @@ -499,11 +460,11 @@ def initialize_policy(self, actors_and_critics: dict = None) -> None: If None, creates new networks. 
If provided, assigns existing networks. Expected format includes 'actors', 'critics', and optionally 'actor_targets' and 'target_critics' keys. - + Example: >>> # Create new networks >>> algorithm.initialize_policy() - >>> + >>> >>> # Assign existing networks >>> algorithm.initialize_policy(existing_networks_dict) """ @@ -516,7 +477,7 @@ def initialize_policy(self, actors_and_critics: dict = None) -> None: for u_id, strategy in self.learning_role.rl_strats.items(): strategy.actor = actors_and_critics["actors"][u_id] strategy.critics = actors_and_critics["critics"][u_id] - + if self.uses_target_networks: strategy.actor_target = actors_and_critics["actor_targets"][u_id] strategy.target_critics = actors_and_critics["target_critics"][u_id] @@ -527,7 +488,7 @@ def initialize_policy(self, actors_and_critics: dict = None) -> None: def check_strategy_dimensions(self) -> None: """Validate learning strategy dimensions. - + Ensures all registered learning strategies have consistent dimensional properties required for centralized critic algorithms. Checks: - Observation dimensions @@ -538,10 +499,10 @@ def check_strategy_dimensions(self) -> None: If not consistent, raises a ValueError. This is important for centralized critic algorithms, as it uses a centralized critic that requires consistent dimensions across all agents. - + Raises: ValueError: If any dimension mismatch is detected across strategies. - + Note: This validation is crucial for centralized critic algorithms where all agents must have compatible observation and action spaces. @@ -597,15 +558,15 @@ def check_strategy_dimensions(self) -> None: def create_actors(self) -> None: """Create actor networks for all learning strategies. - + This method initializes actor networks and their corresponding target networks for each registered unit strategy. Actors map observations to actions. - + Note: All strategies must have the same observation dimension due to the centralized critic architecture. 
Units with different observation dimensions require separate learning roles with different critics. - + Example: >>> algorithm.create_actors() >>> # Creates actor and actor_target for each strategy @@ -642,15 +603,15 @@ def create_actors(self) -> None: def create_critics(self) -> None: """Create critic networks for all learning strategies. - + Initializes critic networks and their corresponding target networks for each registered agent strategy. Critics evaluate state-action pairs. - + Note: All strategies must have the same observation dimension due to the centralized critic architecture. Units with different observation dimensions require separate learning roles with different critics. - + Example: >>> algorithm.create_critics() >>> # Creates critics and target_critics for each strategy @@ -686,10 +647,10 @@ def create_critics(self) -> None: def extract_policy(self) -> dict: """Extract all policy networks. - + Collects actor and critic networks from all learning strategies into a structured dictionary. Includes both primary and target networks. 
- + Returns: Dictionary containing all network components organized by type: - 'actors': Primary actor networks @@ -697,7 +658,7 @@ def extract_policy(self) -> dict: - 'critics': Primary critic networks - 'target_critics': Target critic networks - Dimension information for reconstruction - + Example: >>> policy_dict = algorithm.extract_policy() >>> # Contains all networks ready for saving or transfer diff --git a/assume/reinforcement_learning/algorithms/maddpg.py b/assume/reinforcement_learning/algorithms/maddpg.py index 01743327c..7bd1fdad3 100644 --- a/assume/reinforcement_learning/algorithms/maddpg.py +++ b/assume/reinforcement_learning/algorithms/maddpg.py @@ -7,6 +7,7 @@ import torch as th from torch.nn import functional as F +from assume.common.base import LearningStrategy from assume.reinforcement_learning.algorithms.base_algorithm import A2CAlgorithm from assume.reinforcement_learning.learning_utils import ( polyak_update, @@ -18,21 +19,21 @@ class DDPG(A2CAlgorithm): """Deep Deterministic Policy Gradient (DDPG) Algorithm. - + An off-policy actor-critic algorithm that uses deterministic policy gradients for continuous action spaces. DDPG combines Q-learning with policy gradients, using: - + - A single critic network to estimate Q-values - Deterministic actor networks that map states to actions - Target networks updated via Polyak averaging for stability - Replay buffer for sample efficiency and decorrelation - + Attributes: n_updates: Counter for gradient updates performed. grad_clip_norm: Maximum gradient norm for clipping. critic_architecture_class: Critic network architecture (CriticDDPG). - + Example: >>> ddpg = DDPG(learning_role) >>> ddpg.update_policy() # Performs one training iteration @@ -40,34 +41,77 @@ class DDPG(A2CAlgorithm): def __init__(self, learning_role) -> None: """Initialize the DDPG algorithm. - + Sets up the algorithm with gradient counters, clipping parameters, and critic architecture. 
- + Args: learning_role: Learning role object managing agents and replay buffer. Must have off-policy configuration. """ super().__init__(learning_role) - + # Gradient step counter self.n_updates = 0 - + # Gradient clipping threshold self.grad_clip_norm = 1.0 # Define the critic architecture class for DDPG (single critic) self.critic_architecture_class = CriticDDPG + def get_action( + self, strategy: "LearningStrategy", obs: th.Tensor + ) -> tuple[th.Tensor, th.Tensor]: + """Sample an action using the off-policy strategy. + + During learning mode the agent either performs pure-noise initial + exploration (first N episodes) or uses its deterministic actor plus + Gaussian action noise. During evaluation mode the actor is used + without any noise. + + This default implementation is shared by TD3 and DDPG. PPO overrides + it with its own stochastic Gaussian sampling. + """ + if strategy.learning_mode and not strategy.evaluation_mode: + if strategy.collect_initial_experience_mode: + # Pure Gaussian noise for initial random exploration + noise = th.normal( + mean=0.0, + std=strategy.exploration_noise_std, + size=(strategy.act_dim,), + dtype=strategy.float_type, + device=strategy.device, + ) + return noise, noise + + action = strategy.actor(obs).detach() + noise = strategy.action_noise.noise( + device=strategy.device, dtype=strategy.float_type + ) + action = th.clamp( + action + noise, + strategy.actor.min_output, + strategy.actor.max_output, + ) + return action, noise + + # Evaluation + action = strategy.actor(obs).detach() + noise = th.zeros( + strategy.act_dim, dtype=strategy.float_type, device=strategy.device + ) + return action, noise + def update_policy(self) -> None: """Update actor and critic networks using DDPG algorithm. - + Performs one complete training iteration consisting of: 1. Sampling batches from replay buffer 2. Updating critic networks using MSE loss 3. Updating actor networks using policy gradient 4. 
Updating target networks via Polyak averaging - + """ logger.debug("Updating Policy (MADDPG/DDPG)") @@ -92,7 +136,9 @@ def update_policy(self) -> None: # Update noise decay and learning rate based on training progress progress_remaining = self.learning_role.get_progress_remaining() - updated_noise_decay = self.learning_role.calc_noise_from_progress(progress_remaining) + updated_noise_decay = self.learning_role.calc_noise_from_progress( + progress_remaining + ) learning_rate = self.learning_role.calc_lr_from_progress(progress_remaining) # Update learning rates and noise schedules for all strategies @@ -111,7 +157,7 @@ def update_policy(self) -> None: transitions = self.learning_role.buffer.sample( self.learning_config.batch_size ) - + states, actions, next_states, rewards = ( transitions.observations, transitions.actions, @@ -121,10 +167,12 @@ def update_policy(self) -> None: # Compute target actions using target actors with th.no_grad(): - next_actions = th.stack([ - strategy.actor_target(next_states[:, i, :]).clamp(-1, 1) - for i, strategy in enumerate(strategies) - ]) + next_actions = th.stack( + [ + strategy.actor_target(next_states[:, i, :]).clamp(-1, 1) + for i, strategy in enumerate(strategies) + ] + ) next_actions = next_actions.transpose(0, 1).contiguous() next_actions = next_actions.view(-1, n_rl_agents * self.act_dim) @@ -134,7 +182,7 @@ def update_policy(self) -> None: unique_obs_from_others = states[ :, :, self.obs_dim - self.unique_obs_dim : ].reshape(self.learning_config.batch_size, n_rl_agents, -1) - + next_unique_obs_from_others = next_states[ :, :, self.obs_dim - self.unique_obs_dim : ].reshape(self.learning_config.batch_size, n_rl_agents, -1) @@ -157,7 +205,10 @@ def update_policy(self) -> None: dim=1, ) other_next_unique_obs = th.cat( - (next_unique_obs_from_others[:, :i], next_unique_obs_from_others[:, i + 1 :]), + ( + next_unique_obs_from_others[:, :i], + next_unique_obs_from_others[:, i + 1 :], + ), dim=1, ) @@ -170,8 +221,12 @@ def 
update_policy(self) -> None: ) all_next_states = th.cat( ( - next_states[:, i, :].reshape(self.learning_config.batch_size, -1), - other_next_unique_obs.reshape(self.learning_config.batch_size, -1), + next_states[:, i, :].reshape( + self.learning_config.batch_size, -1 + ), + other_next_unique_obs.reshape( + self.learning_config.batch_size, -1 + ), ), dim=1, ) @@ -204,8 +259,12 @@ def update_policy(self) -> None: ) strategy.critics.optimizer.step() - unit_params[step][strategy.unit_id]["critic_total_grad_norm"] = total_norm - unit_params[step][strategy.unit_id]["critic_max_grad_norm"] = max_grad_norm + unit_params[step][strategy.unit_id]["critic_total_grad_norm"] = ( + total_norm + ) + unit_params[step][strategy.unit_id]["critic_max_grad_norm"] = ( + max_grad_norm + ) # ------------------------------------------------------------ # ACTOR UPDATE PHASE (updated every step) @@ -257,8 +316,12 @@ def update_policy(self) -> None: ) strategy.actor.optimizer.step() - unit_params[step][strategy.unit_id]["actor_total_grad_norm"] = total_norm - unit_params[step][strategy.unit_id]["actor_max_grad_norm"] = max_grad_norm + unit_params[step][strategy.unit_id]["actor_total_grad_norm"] = ( + total_norm + ) + unit_params[step][strategy.unit_id]["actor_max_grad_norm"] = ( + max_grad_norm + ) # ------------------------------------------------------------ # TARGET NETWORK UPDATE PHASE (Polyak averaging) diff --git a/assume/reinforcement_learning/algorithms/matd3.py b/assume/reinforcement_learning/algorithms/matd3.py index 47950dbad..89e7597c3 100644 --- a/assume/reinforcement_learning/algorithms/matd3.py +++ b/assume/reinforcement_learning/algorithms/matd3.py @@ -7,6 +7,7 @@ import torch as th from torch.nn import functional as F +from assume.common.base import LearningStrategy from assume.reinforcement_learning.algorithms.base_algorithm import A2CAlgorithm from assume.reinforcement_learning.learning_utils import ( polyak_update, @@ -25,12 +26,12 @@ class TD3(A2CAlgorithm): Open AI 
Spinning guide: https://spinningup.openai.com/en/latest/algorithms/td3.html Original paper: https://arxiv.org/pdf/1802.09477.pdf - + Attributes: n_updates: Counter for gradient updates performed. grad_clip_norm: Maximum gradient norm for clipping. critic_architecture_class: Critic network architecture class (CriticTD3). - + Example: >>> td3 = TD3(learning_role) >>> td3.update_policy() @@ -38,10 +39,10 @@ class TD3(A2CAlgorithm): def __init__(self, learning_role): """Initialize the TD3 algorithm. - + Sets up the algorithm with gradient counters, clipping parameters, and critic architecture. - + Args: learning_role: Learning role object managing agents and replay buffer. Must have off-policy configuration. @@ -54,12 +55,55 @@ def __init__(self, learning_role): # Define the critic architecture class for TD3 self.critic_architecture_class = CriticTD3 + def get_action( + self, strategy: "LearningStrategy", obs: th.Tensor + ) -> tuple[th.Tensor, th.Tensor]: + """Sample an action using the off-policy strategy. + + During learning mode the agent either performs pure-noise initial + exploration (first N episodes) or uses its deterministic actor plus + Gaussian action noise. During evaluation mode the actor is used + without any noise. + + This default implementation is shared by TD3 and DDPG. PPO overrides + it with its own stochastic Gaussian sampling. 
+ """ + if strategy.learning_mode and not strategy.evaluation_mode: + if strategy.collect_initial_experience_mode: + # Pure Gaussian noise for initial random exploration + noise = th.normal( + mean=0.0, + std=strategy.exploration_noise_std, + size=(strategy.act_dim,), + dtype=strategy.float_type, + device=strategy.device, + ) + return noise, noise + + action = strategy.actor(obs).detach() + noise = strategy.action_noise.noise( + device=strategy.device, dtype=strategy.float_type + ) + action = th.clamp( + action + noise, + strategy.actor.min_output, + strategy.actor.max_output, + ) + return action, noise + + # Evaluation + action = strategy.actor(obs).detach() + noise = th.zeros( + strategy.act_dim, dtype=strategy.float_type, device=strategy.device + ) + return action, noise + def update_policy(self): """Update the policy using the Twin Delayed Deep Deterministic Policy Gradients (TD3). - This method performs the policy update step, which involves updating the actor - (policy) and critic (Q-function) networks using the TD3 algorithm. It iterates - over the specified number of gradient steps and performs the following for each + This method performs the policy update step, which involves updating the actor + (policy) and critic (Q-function) networks using the TD3 algorithm. It iterates + over the specified number of gradient steps and performs the following for each learning strategy: 1. Sample a batch of transitions from the replay buffer. 
@@ -127,7 +171,8 @@ def update_policy(self): with th.no_grad(): # Select action according to policy and add clipped noise noise = ( - th.randn_like(actions) * self.learning_config.off_policy.target_policy_noise + th.randn_like(actions) + * self.learning_config.off_policy.target_policy_noise ) noise = noise.clamp( -self.learning_config.off_policy.target_noise_clip, @@ -359,7 +404,9 @@ def update_policy(self): self.learning_config.off_policy.tau, ) polyak_update( - all_actor_params, all_target_actor_params, self.learning_config.off_policy.tau + all_actor_params, + all_target_actor_params, + self.learning_config.off_policy.tau, ) self.learning_role.write_rl_grad_params_to_output(learning_rate, unit_params) From e66e9732a143cdace165843180e44a10e202b59a Mon Sep 17 00:00:00 2001 From: kim-mskw Date: Fri, 8 May 2026 10:47:55 +0200 Subject: [PATCH 37/44] - slight changes of error handling to ensure same flow as before - use "uses_target_networks" flag for actor and critic creation as well, otherwise were created but never used? - same conistency enforced for extract_policy --- .../algorithms/base_algorithm.py | 79 +++++++++++-------- 1 file changed, 47 insertions(+), 32 deletions(-) diff --git a/assume/reinforcement_learning/algorithms/base_algorithm.py b/assume/reinforcement_learning/algorithms/base_algorithm.py index b1769bbb3..0fcff0019 100644 --- a/assume/reinforcement_learning/algorithms/base_algorithm.py +++ b/assume/reinforcement_learning/algorithms/base_algorithm.py @@ -286,10 +286,11 @@ def save_actor_params(self, directory: str) -> None: th.save(obj, path) def load_params(self, directory: str) -> None: - """Load actor and critic network parameters. + """ + Load the parameters of both actor and critic networks. - Loads both actor and critic parameters from the specified directory. - Calls load_critic_params() and load_actor_params() sequentially. 
+ This method loads the parameters of both the actor and critic networks associated with the learning role from the specified + directory. It uses the `load_critic_params` and `load_actor_params` methods to load the respective parameters. Args: directory: Base directory containing 'actors/' and 'critics/' subdirectories. @@ -387,6 +388,12 @@ def load_critic_params(self, directory: str) -> None: unique_obs=strategy.unique_obs_dim, ) + if critic_weights is None: + logger.warning( + f"Critic weights transfer failed for {u_id}; skipping." + ) + continue + strategy.critics.load_state_dict(critic_weights) # Only transfer target critic weights if this algorithm uses target networks @@ -400,10 +407,14 @@ def load_critic_params(self, directory: str) -> None: act_dim=strategy.act_dim, unique_obs=strategy.unique_obs_dim, ) - if target_critic_weights is not None: - strategy.target_critics.load_state_dict( - target_critic_weights + + if target_critic_weights is None: + logger.warning( + f"Target critic weights transfer failed for {u_id}; skipping." 
) + continue + + strategy.target_critics.load_state_dict(target_critic_weights) logger.debug(f"Critic weights transferred for {u_id}.") @@ -581,16 +592,17 @@ def create_actors(self) -> None: num_timeseries_obs_dim=self.num_timeseries_obs_dim, ).to(self.device) - strategy.actor_target = self.actor_architecture_class( - obs_dim=self.obs_dim, - act_dim=self.act_dim, - float_type=self.float_type, - unique_obs_dim=self.unique_obs_dim, - num_timeseries_obs_dim=self.num_timeseries_obs_dim, - ).to(self.device) + if self.uses_target_networks: + strategy.actor_target = self.actor_architecture_class( + obs_dim=self.obs_dim, + act_dim=self.act_dim, + float_type=self.float_type, + unique_obs_dim=self.unique_obs_dim, + num_timeseries_obs_dim=self.num_timeseries_obs_dim, + ).to(self.device) - strategy.actor_target.load_state_dict(strategy.actor.state_dict()) - strategy.actor_target.train(mode=False) + strategy.actor_target.load_state_dict(strategy.actor.state_dict()) + strategy.actor_target.train(mode=False) strategy.actor.optimizer = AdamW( strategy.actor.parameters(), @@ -627,16 +639,17 @@ def create_critics(self) -> None: float_type=self.float_type, ).to(self.device) - strategy.target_critics = self.critic_architecture_class( - n_agents=n_agents, - obs_dim=self.obs_dim, - act_dim=self.act_dim, - unique_obs_dim=self.unique_obs_dim, - float_type=self.float_type, - ).to(self.device) + if self.uses_target_networks: + strategy.target_critics = self.critic_architecture_class( + n_agents=n_agents, + obs_dim=self.obs_dim, + act_dim=self.act_dim, + unique_obs_dim=self.unique_obs_dim, + float_type=self.float_type, + ).to(self.device) - strategy.target_critics.load_state_dict(strategy.critics.state_dict()) - strategy.target_critics.train(mode=False) + strategy.target_critics.load_state_dict(strategy.critics.state_dict()) + strategy.target_critics.train(mode=False) strategy.critics.optimizer = AdamW( strategy.critics.parameters(), @@ -664,26 +677,28 @@ def extract_policy(self) -> dict: 
>>> # Contains all networks ready for saving or transfer """ actors = {} - actor_targets = {} - critics = {} - target_critics = {} + if self.uses_target_networks: + actor_targets = {} + target_critics = {} for u_id, strategy in self.learning_role.rl_strats.items(): actors[u_id] = strategy.actor - actor_targets[u_id] = strategy.actor_target - critics[u_id] = strategy.critics - target_critics[u_id] = strategy.target_critics + if self.uses_target_networks: + actor_targets[u_id] = strategy.actor_target + target_critics[u_id] = strategy.target_critics actors_and_critics = { "actors": actors, - "actor_targets": actor_targets, "critics": critics, - "target_critics": target_critics, "obs_dim": self.obs_dim, "act_dim": self.act_dim, "unique_obs_dim": self.unique_obs_dim, } + if self.uses_target_networks: + actors_and_critics["actor_targets"] = actor_targets + actors_and_critics["target_critics"] = target_critics + return actors_and_critics From e7693c6c564e6768fa31125cf10fda708e2f8d34 Mon Sep 17 00:00:00 2001 From: kim-mskw Date: Fri, 8 May 2026 15:24:30 +0200 Subject: [PATCH 38/44] - change buffer logic to closely map each other refactor convert_to_tensor function --- assume/reinforcement_learning/buffer.py | 250 +++++++++--------------- 1 file changed, 92 insertions(+), 158 deletions(-) diff --git a/assume/reinforcement_learning/buffer.py b/assume/reinforcement_learning/buffer.py index 1a06232d6..2bb3ca71d 100644 --- a/assume/reinforcement_learning/buffer.py +++ b/assume/reinforcement_learning/buffer.py @@ -3,11 +3,14 @@ # SPDX-License-Identifier: AGPL-3.0-or-later import warnings -from typing import NamedTuple, Generator +from collections.abc import Generator +from typing import NamedTuple import numpy as np import torch as th +from assume.common.utils import convert_to_tensors + try: # Check memory used by replay buffer when possible import psutil @@ -16,6 +19,16 @@ class ReplayBufferSamples(NamedTuple): + """Container for replay buffer samples. 
+ + + Attributes: + observations: States/observations the agent saw. + actions: Actions the agent took. + next_observations: States/observations the agent saw after taking the action. + rewards: Rewards the agent received for taking the action. + """ + observations: th.Tensor actions: th.Tensor next_observations: th.Tensor @@ -33,17 +46,20 @@ def __init__( float_type, ): """Initialize the replay buffer. - + A class that represents a replay buffer for storing observations, actions, and rewards. The replay buffer is implemented as a circular buffer, where the oldest experiences are discarded when the buffer is full. - + Args: - buffer_size: The maximum size of the buffer. - obs_dim: The dimension of the observation space. - act_dim: The dimension of the action space. - n_rl_units: The number of reinforcement learning units. - device: The device to use for storing the data (e.g., 'cpu' or 'cuda'). - float_type: The data type to use for the stored data. + buffer_size (int): The maximum size of the buffer. + obs_dim (int): The dimension of the observation space. + act_dim (int): The dimension of the action space. + n_rl_units (int): The number of reinforcement learning units. + device (str): The device to use for storing the data (e.g., 'cpu' or 'cuda'). + float_type (torch.dtype): The data type to use for the stored data. + observations (numpy.ndarray): The stored observations. + actions (numpy.ndarray): The stored actions. + rewards (numpy.ndarray): The stored rewards. """ self.buffer_size = buffer_size @@ -90,31 +106,12 @@ def __init__( def size(self): """Return the current size of the buffer. - - Returns: - The current size of the buffer (i.e. number of transitions stored). - """ - return self.buffer_size if self.full else self.pos - def to_torch(self, array: np.array, copy=True): - """Convert a numpy array to a PyTorch tensor. - - Note: - It copies the data by default. - - Args: - array: The numpy array to convert. 
- copy: Whether to copy the data or not (may be useful to avoid changing - things by reference). Defaults to True. - Returns: - The converted PyTorch tensor. - """ - - if copy: - return th.tensor(array, dtype=self.th_float_type, device=self.device) + buffer_size(int): The current size of the buffer - return th.as_tensor(array, dtype=self.th_float_type, device=self.device) + """ + return self.buffer_size if self.full else self.pos def add( self, @@ -123,11 +120,11 @@ def add( reward: np.ndarray, ): """Add an observation, action, and reward of all agents to the replay buffer. - + Args: - obs: The observation to add. - actions: The actions to add. - reward: The reward to add. + obs (numpy.ndarray): The observation to add. + actions (numpy.ndarray): The actions to add. + reward (numpy.ndarray): The reward to add. """ # copying all to avoid modification len_obs = obs.shape[0] @@ -144,13 +141,13 @@ def add( def sample(self, batch_size: int) -> ReplayBufferSamples: """Sample a random batch of experiences from the replay buffer. - + Args: - batch_size: The number of experiences to sample. - + batch_size (int): The number of experiences to sample. + Returns: - A named tuple containing the sampled observations, actions, and rewards. - + ReplayBufferSamples: A named tuple containing the sampled observations, actions, and rewards. + Raises: Exception: If there are less than two entries in the buffer. """ @@ -167,13 +164,16 @@ def sample(self, batch_size: int) -> ReplayBufferSamples: self.rewards[batch_inds], ) - return ReplayBufferSamples(*tuple(map(self.to_torch, data))) + return ReplayBufferSamples( + *tuple(convert_to_tensors(x, self.device, self.th_float_type) for x in data) + ) + class RolloutBufferSamples(NamedTuple): """Container for rollout buffer samples. - + It holds one batch of training samples from PPO's rollout buffer. - + Attributes: observations: States/observations the agent saw. actions: Actions the agent took. 
@@ -182,22 +182,24 @@ class RolloutBufferSamples(NamedTuple): advantages: Generalized advantage estimates. returns: Expected returns. """ - observations: th.Tensor # states/observations the agent saw - actions: th.Tensor # actions the agent took - old_values: th.Tensor # critic's value estimates - old_log_probs: th.Tensor # log_probability of taking each action - advantages: th.Tensor # generalized advantage estimates - returns: th.Tensor # expected returns + + observations: th.Tensor # states/observations the agent saw + actions: th.Tensor # actions the agent took + old_values: th.Tensor # critic's value estimates + old_log_probs: th.Tensor # log_probability of taking each action + advantages: th.Tensor # generalized advantage estimates + returns: th.Tensor # expected returns + class RolloutBuffer: """Rollout buffer used in on-policy algorithms like PPO. - + It corresponds to the transitions collected using the current policy. This experience is discarded after the policy is updated. In order to use PPO, the current observations are needed to be stored. The observations include actions, rewards, values, log probabilities and done for each action. """ - + def __init__( self, buffer_size: int, @@ -207,10 +209,10 @@ def __init__( device: str | th.device, float_type: th.dtype, gamma: float = 0.99, - gae_lambda: float = 0.98 + gae_lambda: float = 0.98, ): """Initialize the rollout buffer. - + Args: buffer_size: Max number of elements allowed in the buffer. obs_dim: Dimension of the observation space. @@ -240,69 +242,25 @@ def __init__( def reset(self) -> None: """Reset the rollout buffer. - + Clearing the buffer and allocating new storage. 
""" self.observations = np.zeros( - ( - self.buffer_size, - self.n_rl_units, - self.obs_dim - ), - dtype = np.float32 + (self.buffer_size, self.n_rl_units, self.obs_dim), dtype=np.float32 ) self.actions = np.zeros( - ( - self.buffer_size, - self.n_rl_units, - self.act_dim - ), - dtype = np.float32 - ) - self.rewards = np.zeros( - ( - self.buffer_size, - self.n_rl_units - ), - dtype = np.float32 - ) - self.values = np.zeros( - ( - self.buffer_size, - self.n_rl_units - ), - dtype = np.float32 - ) - self.log_probs = np.zeros( - ( - self.buffer_size, - self.n_rl_units - ), - dtype = np.float32 - ) - self.dones = np.zeros( - ( - self.buffer_size, - self.n_rl_units - ), - dtype = np.float32 + (self.buffer_size, self.n_rl_units, self.act_dim), dtype=np.float32 ) + self.rewards = np.zeros((self.buffer_size, self.n_rl_units), dtype=np.float32) + self.values = np.zeros((self.buffer_size, self.n_rl_units), dtype=np.float32) + self.log_probs = np.zeros((self.buffer_size, self.n_rl_units), dtype=np.float32) + self.dones = np.zeros((self.buffer_size, self.n_rl_units), dtype=np.float32) # Computed after rollout self.advantages = np.zeros( - ( - self.buffer_size, - self.n_rl_units - ), - dtype = np.float32 - ) - self.returns = np.zeros( - ( - self.buffer_size, - self.n_rl_units - ), - dtype = np.float32 + (self.buffer_size, self.n_rl_units), dtype=np.float32 ) + self.returns = np.zeros((self.buffer_size, self.n_rl_units), dtype=np.float32) self.pos = 0 self.full = False @@ -315,10 +273,10 @@ def add( reward: np.ndarray, done: np.ndarray, value: np.ndarray, - log_prob: np.ndarray + log_prob: np.ndarray, ) -> None: """Add a transition to the buffer. - + Args: obs: Observation of the agents. action: Action taken by the agents. 
@@ -330,7 +288,7 @@ def add( if self.pos >= self.buffer_size: self.full = True return - + self.observations[self.pos] = np.array(obs).copy() self.actions[self.pos] = np.array(action).copy() self.rewards[self.pos] = np.array(reward).flatten().copy() @@ -342,14 +300,12 @@ def add( self.pos += 1 def compute_returns_and_advantages( - self, - last_values: np.ndarray, - dones: np.ndarray + self, last_values: np.ndarray, dones: np.ndarray ) -> None: """Use Generalized Advantage Estimation to compute the advantage. - + To obtain the lambda-return, the advantage is added to the value estimate. - + Args: last_values: Value estimation for the last step. dones: Whether the last step was terminal. @@ -384,8 +340,7 @@ def compute_returns_and_advantages( # GAE advantage last_gae_lam = ( - delta - + self.gamma * self.gae_lambda * next_non_terminal * last_gae_lam + delta + self.gamma * self.gae_lambda * next_non_terminal * last_gae_lam ) self.advantages[step] = last_gae_lam @@ -394,14 +349,13 @@ def compute_returns_and_advantages( self.generator_ready = True def get( - self, - batch_size: int | None = None + self, batch_size: int | None = None ) -> Generator[RolloutBufferSamples, None, None]: """Generate batches of transition samples for training. - + Args: batch_size: Number of samples to be accessed per batch. - + Yields: A generator yielding RolloutBufferSamples. """ @@ -409,7 +363,7 @@ def get( raise ValueError( "Must call compute_returns_and_advantages before sampling." 
) - + buffer_size = self.pos if not self.full else self.buffer_size indices = np.random.permutation(buffer_size) @@ -419,56 +373,36 @@ def get( start_idx = 0 while start_idx < buffer_size: batch_indices = indices[start_idx : start_idx + batch_size] - yield self._get_samples(batch_indices) + yield self.sample(batch_indices) start_idx += batch_size - def _get_samples(self, indices: np.ndarray) -> RolloutBufferSamples: + def sample(self, indices: np.ndarray) -> RolloutBufferSamples: """Sample data from the buffer for given indices. - + Converts numpy arrays to torch tensors for given indices. - + Args: indices: Indices of the samples to retrieve. - + Returns: The batch of samples converted to PyTorch tensors. """ + data = ( + self.observations[indices], + self.actions[indices], + self.values[indices], + self.log_probs[indices], + self.advantages[indices], + self.returns[indices], + ) + return RolloutBufferSamples( - observations = th.as_tensor( - self.observations[indices], - device = self.device, - dtype = self.float_type - ), - actions = th.as_tensor( - self.actions[indices], - device = self.device, - dtype = self.float_type - ), - old_values = th.as_tensor( - self.values[indices], - device = self.device, - dtype = self.float_type - ), - old_log_probs = th.as_tensor( - self.log_probs[indices], - device = self.device, - dtype = self.float_type - ), - advantages = th.as_tensor( - self.advantages[indices], - device = self.device, - dtype = self.float_type - ), - returns = th.as_tensor( - self.returns[indices], - device = self.device, - dtype = self.float_type - ) + *(convert_to_tensors(x, self.device, self.float_type) for x in data) ) - + def size(self) -> int: """Return the current number of stored transitions. - + Returns: The size of the buffer. 
        """

From 3b60e91b9bb6414235b0daa5d1203f976ae0441a Mon Sep 17 00:00:00 2001
From: kim-mskw
Date: Fri, 8 May 2026 15:25:12 +0200
Subject: [PATCH 39/44] forgot to commit refactored function

---
 assume/common/utils.py | 28 ++++++++++++++++++++++++++++
 1 file changed, 28 insertions(+)

diff --git a/assume/common/utils.py b/assume/common/utils.py
index 8c6cbe8ea..dc547050b 100644
--- a/assume/common/utils.py
+++ b/assume/common/utils.py
@@ -599,6 +599,34 @@ def rename_study_case(path: str, old_key: str, new_key: str):
         yaml.safe_dump(data, file, sort_keys=False)
 
 
+def convert_to_tensors(self, array: np.array, copy=True, dtype=None, device=None):
+    """Convert a numpy array to a PyTorch tensor.
+
+    Note:
+        It copies the data by default.
+
+    Args:
+        array (numpy.ndarray): The numpy array to convert.
+        copy (bool, optional): Whether to copy the data or not
+            (may be useful to avoid changing things by reference). Defaults to True.
+
+    Returns:
+        torch.Tensor: The converted PyTorch tensor.
+    """
+
+    try:
+        import torch as th
+
+        if copy:
+            return th.tensor(array, dtype=dtype, device=device)
+
+        return th.as_tensor(array, dtype=dtype, device=device)
+
+    except ImportError:
+        # If torch is not installed, return the array unchanged
+        return array
+
+
 def convert_tensors(data):
     """
     Recursively checks if the data contains PyTorch tensors and converts them to

From 1609b1df2f623ad3a350d8679fbe0daa63081917 Mon Sep 17 00:00:00 2001
From: kim-mskw
Date: Fri, 8 May 2026 15:52:24 +0200
Subject: [PATCH 40/44] delete unnecessary definitions and delete rl_strats
 sorting, as we use the order in learning role as single source of truth

---
 .../algorithms/mappo.py | 100 +++++++----
 .../reinforcement_learning/learning_role.py | 170 ++++++++----------
 2 files changed, 141 insertions(+), 129 deletions(-)

diff --git a/assume/reinforcement_learning/algorithms/mappo.py b/assume/reinforcement_learning/algorithms/mappo.py
index affda23c2..6357fde92 100644
---
a/assume/reinforcement_learning/algorithms/mappo.py +++ b/assume/reinforcement_learning/algorithms/mappo.py @@ -22,11 +22,11 @@ class PPO(A2CAlgorithm): """ Proximal Policy Optimization (PPO) Algorithm. - - A policy gradient method that alternates between sampling data through - interaction with the environment, and optimizing a surrogate objective + + A policy gradient method that alternates between sampling data through + interaction with the environment, and optimizing a surrogate objective function using stochastic gradient ascent. It is an on-policy algorithm. - + Attributes: clip_range: The epsilon parameter for PPO clipping. clip_range_vf: The epsilon parameter for value function clipping. @@ -37,7 +37,7 @@ class PPO(A2CAlgorithm): n_updates: Counter for gradient updates performed. actor_architecture_class: Actor network architecture class. critic_architecture_class: Critic network architecture class. - + Example: >>> ppo = PPO(learning_role) >>> ppo.update_policy() @@ -54,7 +54,7 @@ def __init__( max_grad_norm=None, ): """Initialize PPO algorithm with specific hyperparameters. - + Args: learning_role: The primary learning role object. clip_range: The epsilon parameter for PPO policy clipping. 
@@ -80,9 +80,7 @@ def __init__( self.clip_range_vf = clip_range_vf self.n_epochs = n_epochs if n_epochs is not None else on_policy_config.n_epochs self.entropy_coef = ( - entropy_coef - if entropy_coef is not None - else on_policy_config.entropy_coef + entropy_coef if entropy_coef is not None else on_policy_config.entropy_coef ) self.vf_coef = vf_coef if vf_coef is not None else on_policy_config.vf_coef self.max_grad_norm = ( @@ -103,9 +101,7 @@ def __init__( # Note: save_params, save_critic_params, save_actor_params, load_params, # load_critic_params, load_actor_params, initialize_policy are inherited from A2CAlgorithm - def get_action( - self, strategy, obs: th.Tensor - ) -> tuple[th.Tensor, th.Tensor]: + def get_action(self, strategy, obs: th.Tensor) -> tuple[th.Tensor, th.Tensor]: """Sample a stochastic action. In learning mode the actor's Gaussian policy is sampled and the @@ -131,10 +127,10 @@ def get_action( def create_actors(self) -> None: """Create stochastic actor networks for all agents. - + Initializes the ActorPPO or LSTMActorPPO network based on the configuration, as well as its optimizer for each agent strategy. - + Example: >>> ppo.create_actors() >>> # Creates actor network and optimizer for each strategy @@ -168,10 +164,10 @@ def create_actors(self) -> None: def create_critics(self) -> None: """Create value networks for all agents. - - Initializes the CriticPPO network (Centralized Critic) and its optimizer + + Initializes the CriticPPO network (Centralized Critic) and its optimizer for each registered agent strategy. - + Example: >>> ppo.create_critics() >>> # Creates critic networks and optimizers for each strategy @@ -195,16 +191,16 @@ def create_critics(self) -> None: def extract_policy(self) -> dict: """Extract all actor and critic networks into a dictionary. - + Collects actor and critic networks from all learning strategies into a structured dictionary. 
- + Returns: Dictionary containing all network components organized by type: - 'actors': Primary actor networks - 'critics': Primary critic networks - Dimension information for reconstruction - + Example: >>> policy_dict = ppo.extract_policy() >>> # Contains all networks ready for saving or transfer @@ -230,7 +226,7 @@ def extract_policy(self) -> dict: def update_policy(self) -> None: """Update actor and critic networks using Proximal Policy Optimization (PPO). - + Performs one complete training iteration consisting of: 1. Checking if enough data is collected in the rollout buffer. 2. Computing Generalized Advantage Estimation (GAE) and Returns using the last value estimate. @@ -240,11 +236,10 @@ def update_policy(self) -> None: 6. Logging metrics and gradients. 7. Clearing the on-policy buffer after the update. """ - logger.debug("Updating Policy") + logger.debug("Updating Policy (PPO)") # Keeping strategy order aligned with rollout-buffer column order. - sorted_unit_ids = sorted(self.learning_role.rl_strats.keys()) - strategies = [self.learning_role.rl_strats[u_id] for u_id in sorted_unit_ids] + strategies = [self.learning_role.rl_strats] n_rl_agents = len(strategies) # Getting the buffer, this will be a RolloutBuffer for on-policy algorithms. @@ -278,7 +273,11 @@ def update_policy(self) -> None: dones = np.zeros(n_rl_agents) # Get the buffer size to index into the last stored state - buffer_size = rollout_buffer.pos if not rollout_buffer.full else rollout_buffer.buffer_size + buffer_size = ( + rollout_buffer.pos + if not rollout_buffer.full + else rollout_buffer.buffer_size + ) if buffer_size > 0: # Use the LAST observation as the bootstrap for the REST of the buffer. 
@@ -314,7 +313,9 @@ def update_policy(self) -> None: dtype=self.float_type, ) # Get value estimate from critic - last_values[i] = strategy.critics(obs_tensor).cpu().numpy().flatten()[0] + last_values[i] = ( + strategy.critics(obs_tensor).cpu().numpy().flatten()[0] + ) dones[i] = last_dones[i] # Compute advantages and returns @@ -346,7 +347,9 @@ def create_step_entry(): effective_batch_size = min( self.learning_config.batch_size, - rollout_buffer.pos if not rollout_buffer.full else rollout_buffer.buffer_size, + rollout_buffer.pos + if not rollout_buffer.full + else rollout_buffer.buffer_size, ) for epoch in range(self.n_epochs): @@ -366,7 +369,10 @@ def create_step_entry(): # Construct centralized state other_unique_obs = th.cat( - (unique_obs_from_others[:, :i], unique_obs_from_others[:, i + 1 :]), + ( + unique_obs_from_others[:, :i], + unique_obs_from_others[:, i + 1 :], + ), dim=1, ) all_states = th.cat( @@ -431,11 +437,19 @@ def create_step_entry(): critic_params = list(critic.parameters()) actor_max_grad_norm = max( - (p.grad.norm().item() for p in actor_params if p.grad is not None), + ( + p.grad.norm().item() + for p in actor_params + if p.grad is not None + ), default=0.0, ) critic_max_grad_norm = max( - (p.grad.norm().item() for p in critic_params if p.grad is not None), + ( + p.grad.norm().item() + for p in critic_params + if p.grad is not None + ), default=0.0, ) @@ -460,20 +474,32 @@ def create_step_entry(): unit_params.append(create_step_entry()) # Store per-unit gradient params for this step - unit_params[step_count][strategy.unit_id]["actor_loss"] = policy_loss.item() - unit_params[step_count][strategy.unit_id]["critic_loss"] = value_loss.item() - unit_params[step_count][strategy.unit_id]["actor_total_grad_norm"] = ( + unit_params[step_count][strategy.unit_id]["actor_loss"] = ( + policy_loss.item() + ) + unit_params[step_count][strategy.unit_id]["critic_loss"] = ( + value_loss.item() + ) + unit_params[step_count][strategy.unit_id][ + 
"actor_total_grad_norm" + ] = ( actor_total_grad_norm.item() if isinstance(actor_total_grad_norm, th.Tensor) else actor_total_grad_norm ) - unit_params[step_count][strategy.unit_id]["actor_max_grad_norm"] = actor_max_grad_norm - unit_params[step_count][strategy.unit_id]["critic_total_grad_norm"] = ( + unit_params[step_count][strategy.unit_id]["actor_max_grad_norm"] = ( + actor_max_grad_norm + ) + unit_params[step_count][strategy.unit_id][ + "critic_total_grad_norm" + ] = ( critic_total_grad_norm.item() if isinstance(critic_total_grad_norm, th.Tensor) else critic_total_grad_norm ) - unit_params[step_count][strategy.unit_id]["critic_max_grad_norm"] = critic_max_grad_norm + unit_params[step_count][strategy.unit_id][ + "critic_max_grad_norm" + ] = critic_max_grad_norm step_count += 1 @@ -488,4 +514,4 @@ def create_step_entry(): logger.debug( f"PPO update complete. Actor loss: {np.mean(all_actor_losses):.4f}, " f"Value loss: {np.mean(all_critic_losses):.4f}" - ) \ No newline at end of file + ) diff --git a/assume/reinforcement_learning/learning_role.py b/assume/reinforcement_learning/learning_role.py index 6904349ba..4d04f2321 100644 --- a/assume/reinforcement_learning/learning_role.py +++ b/assume/reinforcement_learning/learning_role.py @@ -24,13 +24,9 @@ timestamp2datetime, ) from assume.reinforcement_learning.algorithms.base_algorithm import RLAlgorithm -from assume.reinforcement_learning.algorithms.matd3 import TD3 from assume.reinforcement_learning.algorithms.maddpg import DDPG from assume.reinforcement_learning.algorithms.mappo import PPO -from assume.reinforcement_learning.buffer import ( - ReplayBuffer, - RolloutBuffer -) +from assume.reinforcement_learning.algorithms.matd3 import TD3 from assume.reinforcement_learning.learning_utils import ( linear_schedule_func, transform_buffer_data, @@ -42,9 +38,9 @@ class Learning(Role): """Manages the learning process of reinforcement learning agents. 
- + This class handles the initialization of key components such as neural networks, - replay buffer, and learning hyperparameters. It handles both training and evaluation + replay buffer, and learning hyperparameters. It handles both training and evaluation modes based on the provided learning configuration. Args: @@ -110,7 +106,9 @@ def __init__( self.learning_config.off_policy.noise_dt ) else: - self.calc_noise_from_progress = lambda x: self.learning_config.off_policy.noise_dt + self.calc_noise_from_progress = ( + lambda x: self.learning_config.off_policy.noise_dt + ) # For on-policy algorithms, no noise schedule needed self.eval_episodes_done = 0 @@ -148,11 +146,11 @@ def on_ready(self): Note: This method prepares the learning role for the reinforcement learning training process. - It subscribes to relevant messages for handling the training process and schedules + It subscribes to relevant messages for handling the training process and schedules recurrent tasks for policy updates based on the specified training frequency. - This cannot happen in the init since the context (compare mango agents) is not - yet available there. To avoid inconsistent replay buffer states (e.g. observation - and action has been stored but not the reward), this slightly shifts the timing + This cannot happen in the init since the context (compare mango agents) is not + yet available there. To avoid inconsistent replay buffer states (e.g. observation + and action has been stored but not the reward), this slightly shifts the timing of the buffer updates. """ super().on_ready() @@ -175,7 +173,7 @@ def on_ready(self): def sync_train_freq_with_simulation_horizon(self) -> str | None: """Ensure self.train_freq evenly divides the simulation length. - + If not, adjust self.train_freq (in-place) and return the new string, otherwise return None. Uses self.start_datetime/self.end_datetime when available, otherwise falls back to timestamp fields. 
""" @@ -232,7 +230,7 @@ def determine_validation_interval(self) -> int: self.learning_config.off_policy.episodes_collecting_initial_experience + validation_interval ) - + if self.learning_config.training_episodes < min_required_episodes: raise ValueError( f"Training episodes ({training_episodes}) must be greater than the sum of initial experience episodes ({self.learning_config.off_policy.episodes_collecting_initial_experience}) and evaluation interval ({validation_interval})." @@ -240,7 +238,7 @@ def determine_validation_interval(self) -> int: else: # For on-policy algorithms, no initial experience collection needed min_required_episodes = validation_interval - + if self.learning_config.training_episodes < min_required_episodes: raise ValueError( f"Training episodes ({training_episodes}) must be greater than evaluation interval ({validation_interval})." @@ -316,7 +314,7 @@ async def store_to_buffer_and_update(self) -> None: "profit": {t: current_profits[t] for t in timestamps_to_process}, "values": {t: current_values[t] for t in timestamps_to_process}, "log_probs": {t: current_log_probs[t] for t in timestamps_to_process}, - "dones": {t: current_dones[t] for t in timestamps_to_process} + "dones": {t: current_dones[t] for t in timestamps_to_process}, } # write data to output agent @@ -331,8 +329,8 @@ async def store_to_buffer_and_update(self) -> None: async def _store_to_buffer_and_update_sync(self, cache, device) -> None: """Process strategy data into the buffer and trigger policy update. - - This function takes all the information that the strategies wrote into the + + This function takes all the information that the strategies wrote into the learning_role cache dicts and post-processes them to fit into the buffer. 
""" first_start = next(iter(cache["obs"])) @@ -358,34 +356,29 @@ async def _store_to_buffer_and_update_sync(self, cache, device) -> None: n_rl_agents = len(sorted_unit_ids) obs_data = transform_buffer_data( - { - timestamp: cache["obs"][timestamp] - }, + {timestamp: cache["obs"][timestamp]}, device, sorted_unit_ids, ) actions_data = transform_buffer_data( - { - timestamp: cache["actions"][timestamp] - }, + {timestamp: cache["actions"][timestamp]}, device, sorted_unit_ids, ) rewards_data = transform_buffer_data( - { - timestamp: cache["rewards"][timestamp] - }, + {timestamp: cache["rewards"][timestamp]}, device, sorted_unit_ids, ) # Computing MAPPO value targets with the centralized critic - #using the joint observation available at this timestamp. + # using the joint observation available at this timestamp. if self.learning_config.algorithm == "mappo": values_data = np.zeros((1, n_rl_agents, 1), dtype=np.float32) obs_step = obs_data[0] unique_obs_all = obs_step[ - :, self.rl_algorithm.obs_dim - self.rl_algorithm.unique_obs_dim : + :, + self.rl_algorithm.obs_dim - self.rl_algorithm.unique_obs_dim :, ] with th.no_grad(): @@ -406,52 +399,52 @@ async def _store_to_buffer_and_update_sync(self, cache, device) -> None: dtype=self.float_type, ) values_data[0, i, 0] = ( - strategy.critics(obs_tensor).cpu().numpy().reshape(-1)[0] + strategy.critics(obs_tensor) + .cpu() + .numpy() + .reshape(-1)[0] ) else: values_data = transform_buffer_data( - { - timestamp: cache["values"][timestamp] - }, + {timestamp: cache["values"][timestamp]}, device, sorted_unit_ids, ) - + log_probs_data = transform_buffer_data( - { - timestamp: cache["log_probs"][timestamp] - }, + {timestamp: cache["log_probs"][timestamp]}, device, sorted_unit_ids, ) dones_data = transform_buffer_data( - { - timestamp: cache["dones"][timestamp] - }, + {timestamp: cache["dones"][timestamp]}, device, sorted_unit_ids, ) # Adding data to the rollout buffer. 
self.buffer.add( - obs = obs_data, - action = actions_data, - reward = rewards_data, - done = dones_data, - value = values_data, - log_prob = log_probs_data + obs=obs_data, + action=actions_data, + reward=rewards_data, + done=dones_data, + value=values_data, + log_prob=log_probs_data, ) added_timestamps += 1 else: # Using ReplayBuffer for off-policy algorithms (TD3/DDPG). - # Rewriting the dict so obs.shape == (n_rl_units, obs_dim), sorting by keys, and storing it in the buffer. - sorted_unit_ids = sorted(self.rl_strats.keys()) + # Rewriting the dict so obs.shape == (n_rl_units, obs_dim), used by keys in learning role. self.buffer.add( - obs = transform_buffer_data(cache["obs"], device, sorted_unit_ids), - actions = transform_buffer_data(cache["actions"], device, sorted_unit_ids), - reward = transform_buffer_data(cache["rewards"], device, sorted_unit_ids), + obs=transform_buffer_data(cache["obs"], device, self.rl_strats.keys()), + actions=transform_buffer_data( + cache["actions"], device, self.rl_strats.keys() + ), + reward=transform_buffer_data( + cache["rewards"], device, self.rl_strats.keys() + ), ) # Only update policy after initial experience for off-policy algorithms @@ -540,12 +533,7 @@ def add_reward_to_cache(self, unit_id, start, reward, regret, profit) -> None: self.all_profits[start][unit_id].append(profit) def add_ppo_data_to_cache( - self, - unit_id, - start, - value, - log_prob, - done=False + self, unit_id, start, value, log_prob, done=False ) -> None: """Add PPO specific data to the cache dict, per unit_id. @@ -624,7 +612,7 @@ def turn_off_initial_exploration(self, loaded_only=False) -> None: def get_progress_remaining(self) -> float: """Get the remaining learning progress from the simulation run. - + Returns: The remaining progress as a float between 0 and 1. 
""" @@ -633,25 +621,20 @@ def get_progress_remaining(self) -> float: # Only calculate progress for off-policy algorithms if is_off_policy(self.learning_config.algorithm): - initial_experience_episodes = self.learning_config.off_policy.episodes_collecting_initial_experience + initial_experience_episodes = ( + self.learning_config.off_policy.episodes_collecting_initial_experience + ) learning_episodes = ( - self.learning_config.training_episodes - - initial_experience_episodes + self.learning_config.training_episodes - initial_experience_episodes ) - if ( - self.episodes_done - < initial_experience_episodes - ): + if self.episodes_done < initial_experience_episodes: progress_remaining = 1 else: progress_remaining = ( 1 - ( - ( - self.episodes_done - - initial_experience_episodes - ) + (self.episodes_done - initial_experience_episodes) / learning_episodes ) - ((1 / learning_episodes) * (elapsed_duration / total_duration)) @@ -659,15 +642,19 @@ def get_progress_remaining(self) -> float: else: # For on-policy algorithms, simpler progress calculation total_episodes = self.learning_config.training_episodes - progress_remaining = 1 - (self.episodes_done / total_episodes) - (elapsed_duration / total_duration) + progress_remaining = ( + 1 + - (self.episodes_done / total_episodes) + - (elapsed_duration / total_duration) + ) return progress_remaining def create_learning_algorithm(self, algorithm: RLAlgorithm): """Create and initialize the reinforcement learning algorithm. - This method creates and initializes the reinforcement learning algorithm based on - the specified algorithm name. The algorithm is associated with the learning role + This method creates and initializes the reinforcement learning algorithm based on + the specified algorithm name. The algorithm is associated with the learning role and configured with relevant hyperparameters. 
Args: @@ -686,11 +673,11 @@ def initialize_policy(self, actors_and_critics: dict = None) -> None: """ Initialize the policy of the reinforcement learning agent considering the respective algorithm. - This method initializes the policy (actor) of the reinforcement learning agent. It - tests if we want to continue the learning process with stored policies from a former - training process. If so, it loads the policies from the specified directory. + This method initializes the policy (actor) of the reinforcement learning agent. It + tests if we want to continue the learning process with stored policies from a former + training process. If so, it loads the policies from the specified directory. Otherwise, it initializes the respective new policies. - + Args: actors_and_critics: The pre-initialized actor and critic policies. """ @@ -713,19 +700,19 @@ def initialize_policy(self, actors_and_critics: dict = None) -> None: def compare_and_save_policies(self, metrics: dict) -> bool: """Compare evaluation metrics and save best performing policies. - This method compares the evaluation metrics, such as reward, profit, and regret, - and saves the policies if they achieve the best performance in their respective + This method compares the evaluation metrics, such as reward, profit, and regret, + and saves the policies if they achieve the best performance in their respective categories. It iterates through the specified modes, compares the current evaluation - value with the previous best, and updates the best value if necessary. If an improvement + value with the previous best, and updates the best value if necessary. If an improvement is detected, it saves the policy and associated parameters. - Metrics contain a metric key like "reward" and the current value. This function - stores the policies with the highest metric. If minimize is required, one should + Metrics contain a metric key like "reward" and the current value. 
This function + stores the policies with the highest metric. If minimize is required, one should add for example "minus_regret" which is then maximized. Args: metrics: Dictionary of metrics evaluated. - + Returns: True if early stopping criteria is triggered, False otherwise. @@ -820,12 +807,12 @@ def init_logging( It also initializes the parameters required for sending data to the output role. Args: - simulation_id: The unique identifier for the simulation. - episode: The current training episode number. - eval_episode: The current evaluation episode number. - db_uri: URI for connecting to the database. - output_agent_addr: The address of the output agent. - train_start: The start time of simulation. + simulation_id (str): The unique identifier for the simulation. + episode (int): The current training episode number. + eval_episode (int): The current evaluation episode number. + db_uri (str): URI for connecting to the database. + output_agent_addr (str): The address of the output agent. + train_start (str): The start time of simulation. """ self.tensor_board_logger = TensorBoardLogger( @@ -903,7 +890,7 @@ def write_rl_grad_params_to_output( Args: learning_rate: The current learning rate used in training. - unit_params_list: A list of dictionaries containing critic losses for each + unit_params_list: A list of dictionaries containing critic losses for each time step (mapping critic names to their losses in dict). 
        """
        # gradient steps performed in previous training episodes
@@ -922,9 +909,8 @@
             )
             current_gradient_steps = self.learning_config.off_policy.gradient_steps
         else:
-            # For on-policy, no gradient steps concept - use 1 for calculation purposes
+            # For on-policy, no gradient steps concept - use 0 for calculation purposes
             gradient_steps_done = 0
-            current_gradient_steps = 1
 
         # Handle different parameter structures for on-policy vs off-policy
         if self.learning_config.algorithm == "mappo":
@@ -937,7 +923,7 @@
             # For off-policy: use configured gradient_steps
             actual_gradient_steps = self.learning_config.off_policy.gradient_steps
             gradient_step_range = range(actual_gradient_steps)
-    
+
             # gradient steps performed in previous training episodes
             gradient_steps_done = (
                 max(

From 22fcd62170e9ea4b3d3fdd00be2de2c1ff79c0d5 Mon Sep 17 00:00:00 2001
From: kim-mskw
Date: Fri, 8 May 2026 16:31:29 +0200
Subject: [PATCH 41/44] refactor initialize buffer into learning role

---
 .../reinforcement_learning/learning_role.py | 38 +++++++++++++++++++
 1 file changed, 38 insertions(+)

diff --git a/assume/reinforcement_learning/learning_role.py b/assume/reinforcement_learning/learning_role.py
index 4d04f2321..ef015eb11 100644
--- a/assume/reinforcement_learning/learning_role.py
+++ b/assume/reinforcement_learning/learning_role.py
@@ -27,6 +27,7 @@
 from assume.reinforcement_learning.algorithms.maddpg import DDPG
 from assume.reinforcement_learning.algorithms.mappo import PPO
 from assume.reinforcement_learning.algorithms.matd3 import TD3
+from assume.reinforcement_learning.buffer import ReplayBuffer, RolloutBuffer
 from assume.reinforcement_learning.learning_utils import (
     linear_schedule_func,
     transform_buffer_data,
@@ -171,6 +172,43 @@ def on_ready(self):
             src="no_wait",
         )
 
+    def intialize_buffer(self, time_step, validation_interval):
+        """Initialize the replay buffer for reinforcement learning training.
+
+        Args:
+            time_step: Simulation time step, used to size the rollout buffer; validation_interval: number of episodes between validations.
+        """
+        if is_off_policy(self.learning_config.algorithm):
+            buffer = ReplayBuffer(
+                buffer_size=self.learning_config.off_policy.replay_buffer_size,
+                obs_dim=self.rl_algorithm.obs_dim,
+                act_dim=self.rl_algorithm.act_dim,
+                n_rl_units=len(self.rl_strats),
+                device=self.device,
+                float_type=self.float_type,
+            )
+            min_episode_for_eval = (
+                self.learning_config.off_policy.episodes_collecting_initial_experience
+                + validation_interval
+            )
+        else:
+            train_freq = pd.Timedelta(str(self.learning_config.train_freq))
+            time_step = pd.Timedelta(time_step)
+            rollout_buffer_size = max(2, int(train_freq / time_step))
+            buffer = RolloutBuffer(
+                buffer_size=rollout_buffer_size,
+                obs_dim=self.rl_algorithm.obs_dim,
+                act_dim=self.rl_algorithm.act_dim,
+                n_rl_units=len(self.rl_strats),
+                device=self.device,
+                float_type=self.float_type,
+                gamma=self.learning_config.gamma,
+                gae_lambda=self.learning_config.on_policy.gae_lambda,
+            )
+            min_episode_for_eval = validation_interval
+
+        return buffer, min_episode_for_eval
+
     def sync_train_freq_with_simulation_horizon(self) -> str | None:
         """Ensure self.train_freq evenly divides the simulation length.
From e1f358ad3d2fcdc6f3ef4111f9054d7691be2f99 Mon Sep 17 00:00:00 2001 From: kim-mskw Date: Fri, 8 May 2026 16:40:20 +0200 Subject: [PATCH 42/44] ruff formatting --- .../algorithms/__init__.py | 14 +- .../reinforcement_learning/learning_role.py | 1 - .../reinforcement_learning/learning_utils.py | 26 ++-- .../neural_network_architecture.py | 141 +++++++---------- assume/scenario/loader_csv.py | 46 +----- assume/strategies/learning_strategies.py | 5 +- examples/inputs/example_02a/config.yaml | 2 +- tests/test_maddpg.py | 20 ++- tests/test_mappo.py | 146 +----------------- tests/test_public_api.py | 49 +++--- tests/test_rl_rolloutbuffer.py | 59 +++++-- 11 files changed, 180 insertions(+), 329 deletions(-) diff --git a/assume/reinforcement_learning/algorithms/__init__.py b/assume/reinforcement_learning/algorithms/__init__.py index 8fa7dae2e..e30fa3ffc 100644 --- a/assume/reinforcement_learning/algorithms/__init__.py +++ b/assume/reinforcement_learning/algorithms/__init__.py @@ -8,19 +8,19 @@ LSTMActor, MLPActor, ) +from assume.reinforcement_learning.algorithms.base_algorithm import ( + A2CAlgorithm, + RLAlgorithm, +) +from assume.reinforcement_learning.algorithms.maddpg import DDPG +from assume.reinforcement_learning.algorithms.mappo import PPO +from assume.reinforcement_learning.algorithms.matd3 import TD3 actor_architecture_aliases: dict[str, type[nn.Module]] = { "mlp": MLPActor, "lstm": LSTMActor, } -from assume.reinforcement_learning.algorithms.base_algorithm import ( - A2CAlgorithm, - RLAlgorithm, -) -from assume.reinforcement_learning.algorithms.maddpg import DDPG -from assume.reinforcement_learning.algorithms.mappo import PPO -from assume.reinforcement_learning.algorithms.matd3 import TD3 __all__ = [ # Base classes diff --git a/assume/reinforcement_learning/learning_role.py b/assume/reinforcement_learning/learning_role.py index ef015eb11..02c568c7a 100644 --- a/assume/reinforcement_learning/learning_role.py +++ b/assume/reinforcement_learning/learning_role.py @@ 
-945,7 +945,6 @@ def write_rl_grad_params_to_output( ) * self.learning_config.off_policy.gradient_steps ) - current_gradient_steps = self.learning_config.off_policy.gradient_steps else: # For on-policy, no gradient steps concept - use 0 for calculation purposes gradient_steps_done = 0 diff --git a/assume/reinforcement_learning/learning_utils.py b/assume/reinforcement_learning/learning_utils.py index 822f451d9..97f393bcc 100644 --- a/assume/reinforcement_learning/learning_utils.py +++ b/assume/reinforcement_learning/learning_utils.py @@ -87,11 +87,11 @@ def noise(self, device=None, dtype=th.float): """Generate noise using torch.normal() ensuring efficient execution on GPU if needed. Args: - device: Target device (e.g., 'cuda' or 'cpu'). - dtype: Data type of the tensor (default: torch.float32). + - device (torch.device, optional): Target device (e.g., 'cuda' or 'cpu'). + - dtype (torch.dtype, optional): Data type of the tensor (default: torch.float32). Returns: - Noise tensor on the specified device. + - torch.Tensor: Noise tensor on the specified device. """ return ( self.dt @@ -111,7 +111,7 @@ def update_noise_decay(self, updated_decay: float): def polyak_update(params, target_params, tau: float): """Perform a Polyak average update on ``target_params`` using ``params``. - + Target parameters are slowly updated towards the main parameters. ``tau``, the soft update coefficient controls the interpolation: ``tau=1`` corresponds to copying the parameters to the target ones whereas nothing happens when ``tau=0``. @@ -135,8 +135,8 @@ def linear_schedule_func( start: float, end: float = 0, end_fraction: float = 1 ) -> Schedule: """Create a function that interpolates linearly between start and end. - - Interpolates linearly between start and end between ``progress_remaining`` = 1 + + Interpolates linearly between start and end between ``progress_remaining`` = 1 and ``progress_remaining`` = 1 - ``end_fraction``. 
Args: @@ -163,13 +163,13 @@ def func(progress_remaining: float) -> float: def constant_schedule(val: float) -> Schedule: - """Create a function that returns a constant. - + """Create a function that returns a constant. + It is useful for learning rate schedule (to avoid code duplication). Args: val: Constant value. - + Returns: Constant schedule function. @@ -273,11 +273,9 @@ def transfer_weights( act_dim: int, unique_obs: int, ) -> dict | None: - """Transfer weights from loaded model to new model. - - Copy only those obs- and action-slices for matching IDs. New IDs keep their - original (random) weights. Function only works if the neural network architecture - remained stable besides the input layer, namely with the same hidden layers. + """ + Transfer weights from loaded model to new model. Copy only those obs- and action-slices for matching IDs. + New IDs keep their original (random) weights. Function only works if the neural network architeczture remained stable besides the input layer, namely with the same hidden layers. Args: model (th.nn.Module): The model to transfer weights to. 
diff --git a/assume/reinforcement_learning/neural_network_architecture.py b/assume/reinforcement_learning/neural_network_architecture.py index 0ecd5c2d0..5b9e94422 100644 --- a/assume/reinforcement_learning/neural_network_architecture.py +++ b/assume/reinforcement_learning/neural_network_architecture.py @@ -2,17 +2,16 @@ # # SPDX-License-Identifier: AGPL-3.0-or-later + import numpy as np import torch as th from torch import nn from torch.nn import functional as F -from typing import List, Tuple, Type, Optional, Union - from assume.reinforcement_learning.learning_utils import ( activation_function_limit, - xavier_init_weights, orthogonal_init_weights, + xavier_init_weights, ) @@ -26,6 +25,7 @@ class Critic(nn.Module): float_type: Data type for parameters unique_obs_dim: Dimension of agent-specific observations """ + def __init__( self, n_agents: int, @@ -45,9 +45,7 @@ def __init__( # Dynamic Architecture Definition self.hidden_sizes = self._get_architecture(n_agents) - def _get_architecture( - self, n_agents: int - ) -> List[int]: + def _get_architecture(self, n_agents: int) -> list[int]: """Returns hidden layer sizes based on the number of agents.""" if n_agents <= 20: hidden_sizes = [256, 128] # Shallow network for small `n_agents` @@ -62,13 +60,13 @@ def _build_q_network(self) -> nn.ModuleList: layers = nn.ModuleList() input_dim = ( self.obs_dim + self.act_dim - ) # Input includes all observations and actions + ) # Input includes all observations and actions for h in self.hidden_sizes: layers.append(nn.Linear(input_dim, h, dtype=self.float_type)) layers.append(nn.ReLU()) input_dim = h - layers.append(nn.Linear(input_dim, 1, dtype=self.float_type)) # Output Q-value + layers.append(nn.Linear(input_dim, 1, dtype=self.float_type)) # Output Q-value return layers @@ -87,21 +85,11 @@ class CriticTD3(Critic): float_type: Data type for parameters. unique_obs_dim: Dimension of agent-specific observations. 
""" + def __init__( - self, - n_agents: int, - obs_dim: int, - act_dim: int, - float_type, - unique_obs_dim: int + self, n_agents: int, obs_dim: int, act_dim: int, float_type, unique_obs_dim: int ): - super().__init__( - n_agents, - obs_dim, - act_dim, - float_type, - unique_obs_dim - ) + super().__init__(n_agents, obs_dim, act_dim, float_type, unique_obs_dim) # First Q-network (Q1) self.q1_layers = self._build_q_network() @@ -110,30 +98,33 @@ def __init__( self.q2_layers = self._build_q_network() def forward( - self, - obs: th.Tensor, - actions: th.Tensor - ) -> Tuple[th.Tensor, th.Tensor]: + self, obs: th.Tensor, actions: th.Tensor + ) -> tuple[th.Tensor, th.Tensor]: """Forward pass through both Q-networks.""" - xu = th.cat([obs, actions], dim=1) # Concatenate obs & actions + xu = th.cat([obs, actions], dim=1) # Concatenate obs & actions # Compute Q1 - x1 = nn.Sequential(*self.q1_layers)(xu) + x1 = xu + for layer in self.q1_layers[:-1]: # All hidden layers + x1 = F.relu(layer(x1)) + x1 = self.q1_layers[-1](x1) # Output layer (no activation) # Compute Q2 - x2 = nn.Sequential(*self.q2_layers)(xu) + x2 = xu + for layer in self.q2_layers[:-1]: # All hidden layers + x2 = F.relu(layer(x2)) + x2 = self.q2_layers[-1](x2) # Output layer (no activation) return x1, x2 - def q1_forward( - self, - obs: th.Tensor, - actions: th.Tensor - ) -> th.Tensor: + def q1_forward(self, obs: th.Tensor, actions: th.Tensor) -> th.Tensor: """Compute only Q1 (used during actor updates).""" x = th.cat([obs, actions], dim=1) - x = nn.Sequential(*self.q1_layers)(x) + for layer in self.q1_layers[:-1]: # All hidden layers + x = F.relu(layer(x)) + + x = self.q1_layers[-1](x) # Output layer (no activation) return x @@ -148,6 +139,7 @@ class CriticDDPG(Critic): float_type: Data type for parameters unique_obs_dim: Dimension of agent-specific observations """ + def __init__( self, n_agents: int, @@ -156,30 +148,23 @@ def __init__( float_type: th.dtype, unique_obs_dim: int, ): - super().__init__( - 
n_agents, - obs_dim, - act_dim, - float_type, - unique_obs_dim - ) + super().__init__(n_agents, obs_dim, act_dim, float_type, unique_obs_dim) # Q-network self.q_layers = self._build_q_network() - + # Initialize weights properly self._init_weights() - def forward( - self, - obs: th.Tensor, - actions: th.Tensor - ) -> th.Tensor: + def forward(self, obs: th.Tensor, actions: th.Tensor) -> th.Tensor: """Returns Q value.""" - xu = th.cat([obs, actions], dim=1) # Concatenate obs & actions + xu = th.cat([obs, actions], dim=1) # Concatenate obs & actions # Compute Q - x = nn.Sequential(*self.q_layers)(xu) + for layer in self.q_layers[:-1]: # All hidden layers + xu = F.relu(layer(xu)) + + x = self.q_layers[-1](xu) return x @@ -194,19 +179,13 @@ class CriticPPO(Critic): unique_obs_dim: Dimension of agent-specific observations. """ - def __init__( - self, - n_agents: int, - obs_dim: int, - float_type, - unique_obs_dim: int - ): + def __init__(self, n_agents: int, obs_dim: int, float_type, unique_obs_dim: int): super().__init__( n_agents=n_agents, obs_dim=obs_dim, act_dim=0, float_type=float_type, - unique_obs_dim=unique_obs_dim + unique_obs_dim=unique_obs_dim, ) # V-network @@ -253,7 +232,7 @@ class MLPActor(Actor): def __init__(self, obs_dim: int, act_dim: int, float_type, *args, **kwargs): super().__init__() - + self.FC1 = nn.Linear(obs_dim, 256, dtype=float_type) self.FC2 = nn.Linear(256, 128, dtype=float_type) self.FC3 = nn.Linear(128, act_dim, dtype=float_type) @@ -280,8 +259,8 @@ class LSTMActor(Actor): Based on "Multi-Period and Multi-Spatial Equilibrium Analysis in Imperfect Electricity Markets" by Ye at al. (2019) - Note: - The original source code was not available, therefore this implementation was derived from the published paper. + Note: + The original source code was not available, therefore this implementation was derived from the published paper. 
Adjustments to resemble final layers from MLPActor: - dense layer 2 was omitted - single output layer with softsign activation function to output actions directly instead of two output layers for mean and stddev @@ -295,7 +274,7 @@ def __init__( unique_obs_dim: int, num_timeseries_obs_dim: int, *args, - **kwargs + **kwargs, ): super().__init__() self.float_type = float_type @@ -349,7 +328,7 @@ def forward(self, obs): outputs = th.cat(outputs, dim=1) x = th.cat((outputs, x2), dim=1) - + x = F.relu(self.FC1(x)) x = self.activation_function(self.FC2(x)) @@ -392,9 +371,7 @@ def __init__( self.mean_layer = nn.Linear(128, act_dim, dtype=float_type) # Learnable log standard deviation - self.log_std = nn.Parameter( - th.ones(act_dim, dtype=float_type) * log_std_init - ) + self.log_std = nn.Parameter(th.ones(act_dim, dtype=float_type) * log_std_init) self._init_weights() @@ -409,16 +386,16 @@ def forward(self, obs: th.Tensor, deterministic: bool = False) -> th.Tensor: x = F.relu(self.FC1(obs)) x = F.relu(self.FC2(x)) mean = th.tanh(self.mean_layer(x)) # Bounded to [-1, 1] - + if deterministic: return mean - + # Sample from Gaussian during training log_std = self.log_std.expand_as(mean) std = log_std.exp() noise = th.randn_like(mean) action = mean + std * noise - + # Clamp to valid range return th.clamp(action, -1.0, 1.0) @@ -428,7 +405,7 @@ def get_distribution(self, obs: th.Tensor) -> tuple[th.Tensor, th.Tensor]: x = F.relu(self.FC2(x)) mean = th.tanh(self.mean_layer(x)) # Bounded to [-1, 1] log_std = self.log_std.expand_as(mean) - + return mean, log_std def get_action_and_log_prob( @@ -437,11 +414,11 @@ def get_action_and_log_prob( deterministic: bool = False, ) -> tuple[th.Tensor, th.Tensor]: """Sample action and compute log probability. - + Args: obs: Observations. deterministic: If True, return mean action. - + Returns: Tuple of (action, log_prob). 
""" @@ -469,13 +446,13 @@ def evaluate_actions( actions: th.Tensor, ) -> tuple[th.Tensor, th.Tensor, th.Tensor]: """Evaluate log probability and entropy for given actions. - + Used during PPO update to compute importance ratio. - + Args: obs: Observations. actions: Actions to evaluate. - + Returns: Tuple of (log_prob, entropy, values). """ @@ -543,13 +520,13 @@ def __init__( self.LSTM2 = nn.LSTMCell(8, 16, dtype=float_type) # Fully Connected Layers - self.FC1 = nn.Linear(self.timeseries_len * 16 + unique_obs_dim, 128, dtype=float_type) + self.FC1 = nn.Linear( + self.timeseries_len * 16 + unique_obs_dim, 128, dtype=float_type + ) self.mean_layer = nn.Linear(128, act_dim, dtype=float_type) # Learnable log standard deviation - self.log_std = nn.Parameter( - th.ones(act_dim, dtype=float_type) * log_std_init - ) + self.log_std = nn.Parameter(th.ones(act_dim, dtype=float_type) * log_std_init) self._init_weights() @@ -602,10 +579,10 @@ def _compute_mean(self, obs: th.Tensor) -> th.Tensor: # Concatenate LSTM outputs outputs = th.cat(outputs, dim=1) - + # Concatenate with stationary observations x = th.cat((outputs, x2), dim=1) - + # FC Layers x = F.relu(self.FC1(x)) mean = th.tanh(self.mean_layer(x)) # Bounded to [-1, 1] @@ -629,7 +606,7 @@ def forward(self, obs: th.Tensor, deterministic: bool = False) -> th.Tensor: std = std + 1e-6 noise = th.randn_like(mean) action = mean + std * noise - + # Clamp to valid range return th.clamp(action, -1.0, 1.0) @@ -669,4 +646,4 @@ def evaluate_actions( std = log_std.exp() + 1e-6 log_prob = self._compute_log_prob(actions, mean, std) entropy = 0.5 * (1.0 + th.log(2 * th.pi * std.pow(2))).sum(dim=-1) - return log_prob, entropy \ No newline at end of file + return log_prob, entropy diff --git a/assume/scenario/loader_csv.py b/assume/scenario/loader_csv.py index 77694e8b6..16249aeec 100644 --- a/assume/scenario/loader_csv.py +++ b/assume/scenario/loader_csv.py @@ -1062,8 +1062,6 @@ def run_learning( - Upon completion of training, the 
function performs an evaluation run using the last policy learned during training. - The best policies are chosen based on the average reward obtained during the evaluation runs, and they are saved for future use. """ - from assume.common.base import is_off_policy - from assume.reinforcement_learning.buffer import ReplayBuffer, RolloutBuffer if not verbose: logger.setLevel(logging.WARNING) @@ -1085,7 +1083,6 @@ def run_learning( if os.path.exists(tensorboard_path): shutil.rmtree(tensorboard_path, ignore_errors=True) - learning_config = world.learning_role.learning_config validation_interval = world.learning_role.determine_validation_interval() # sync train frequency with simulation horizon once at the beginning of training and overwrite scenario data @@ -1094,34 +1091,10 @@ def run_learning( ) # Build the appropriate buffer for the selected algorithm category. - if is_off_policy(learning_config.algorithm): - buffer = ReplayBuffer( - buffer_size=learning_config.off_policy.replay_buffer_size, - obs_dim=world.learning_role.rl_algorithm.obs_dim, - act_dim=world.learning_role.rl_algorithm.act_dim, - n_rl_units=len(world.learning_role.rl_strats), - device=world.learning_role.device, - float_type=world.learning_role.float_type, - ) - min_episode_for_eval = ( - learning_config.off_policy.episodes_collecting_initial_experience - + validation_interval - ) - else: - train_freq = pd.Timedelta(str(learning_config.train_freq)) - time_step = pd.Timedelta(str(world.scenario_data["config"]["time_step"])) - rollout_buffer_size = max(2, int(train_freq / time_step)) - buffer = RolloutBuffer( - buffer_size=rollout_buffer_size, - obs_dim=world.learning_role.rl_algorithm.obs_dim, - act_dim=world.learning_role.rl_algorithm.act_dim, - n_rl_units=len(world.learning_role.rl_strats), - device=world.learning_role.device, - float_type=world.learning_role.float_type, - gamma=learning_config.gamma, - gae_lambda=learning_config.on_policy.gae_lambda, - ) - min_episode_for_eval = validation_interval 
+ buffer, min_episode_for_eval = world.learning_role.initialize_buffer( + time_step=world.scenario_data["config"]["time_step"], + validation_interval=validation_interval, + ) # ----------------------------------------- # Information that needs to be stored across episodes, aka one simulation run @@ -1162,11 +1135,7 @@ def run_learning( inter_episodic_data["episodes_done"] = episode # evaluation run: - if ( - episode % validation_interval == 0 - and episode - >= min_episode_for_eval - ): + if episode % validation_interval == 0 and episode >= min_episode_for_eval: world.reset() # load evaluation run @@ -1210,10 +1179,7 @@ def run_learning( world.reset() # save the policies after each episode in case the simulation is stopped or crashes - if ( - episode - >= min_episode_for_eval - ): + if episode >= min_episode_for_eval: world.learning_role.rl_algorithm.save_params( directory=f"{world.learning_role.learning_config.trained_policies_save_path}/last_policies" ) diff --git a/assume/strategies/learning_strategies.py b/assume/strategies/learning_strategies.py index 036ff8097..c6396b2ec 100644 --- a/assume/strategies/learning_strategies.py +++ b/assume/strategies/learning_strategies.py @@ -16,13 +16,13 @@ MinMaxStrategy, SupportsMinMax, SupportsMinMaxCharge, + is_off_policy, ) from assume.common.fast_pandas import FastSeries from assume.common.market_objects import MarketConfig, Orderbook, Product from assume.common.utils import min_max_scale from assume.reinforcement_learning.algorithms import actor_architecture_aliases from assume.reinforcement_learning.learning_utils import NormalActionNoise -from assume.common.base import is_off_policy logger = logging.getLogger(__name__) @@ -74,7 +74,6 @@ def __init__(self, *args, **kwargs): self.learning_config.algorithm ) - if is_off_policy(self.learning_config.algorithm): self.action_noise = NormalActionNoise( mu=0.0, @@ -251,7 +250,7 @@ def get_actions(self, next_observation): """Determine action and exploration noise for the 
current observation. All algorithm-specific sampling logic lives in the - algorithm class via get_action. + algorithm class via get_action. Args ---- diff --git a/examples/inputs/example_02a/config.yaml b/examples/inputs/example_02a/config.yaml index d402d90d1..2254d9d95 100644 --- a/examples/inputs/example_02a/config.yaml +++ b/examples/inputs/example_02a/config.yaml @@ -177,4 +177,4 @@ tiny: maximum_bid_price: 3000 minimum_bid_price: -500 price_unit: EUR/MWh - market_mechanism: pay_as_clear \ No newline at end of file + market_mechanism: pay_as_clear diff --git a/tests/test_maddpg.py b/tests/test_maddpg.py index e95893faa..4de547496 100644 --- a/tests/test_maddpg.py +++ b/tests/test_maddpg.py @@ -17,7 +17,6 @@ from assume.common.base import LearningStrategy from assume.reinforcement_learning.algorithms.maddpg import DDPG from assume.reinforcement_learning.learning_role import Learning - from assume.reinforcement_learning.neural_network_architecture import CriticDDPG except ImportError: pass @@ -26,6 +25,7 @@ start = datetime(2023, 7, 1) end = datetime(2023, 7, 2) + @pytest.fixture def base_learning_config() -> dict: foresight = 2 @@ -55,7 +55,7 @@ def base_learning_config() -> dict: episodes_collecting_initial_experience=0, gradient_steps=1, tau=0.005, - policy_delay=2, + policy_delay=2, target_policy_noise=0.2, target_noise_clip=0.5, ), @@ -225,7 +225,9 @@ def state_dict(self): baseline_new[f"{prefix}.0.weight"] = th.randn(hidden_dims[0], new_input_dim) baseline_new[f"{prefix}.0.bias"] = th.randn(hidden_dims[0]) for i in range(1, len(hidden_dims)): - baseline_new[f"{prefix}.{i}.weight"] = th.randn(hidden_dims[i], hidden_dims[i - 1]) + baseline_new[f"{prefix}.{i}.weight"] = th.randn( + hidden_dims[i], hidden_dims[i - 1] + ) baseline_new[f"{prefix}.{i}.bias"] = th.randn(hidden_dims[i]) # Build old_state with matching dims @@ -264,10 +266,12 @@ def test_ddpg_load_transfer_n_plus_m( learning_role_n_plus_m.rl_algorithm.load_params(directory=save_dir) post_state = 
learning_role_n_plus_m.rl_strats["agent_0"].critics.state_dict() - post_target = learning_role_n_plus_m.rl_strats["agent_0"].target_critics.state_dict() - post_opt_state = ( - learning_role_n_plus_m.rl_strats["agent_0"].critics.optimizer.state_dict() - ) + post_target = learning_role_n_plus_m.rl_strats[ + "agent_0" + ].target_critics.state_dict() + post_opt_state = learning_role_n_plus_m.rl_strats[ + "agent_0" + ].critics.optimizer.state_dict() assert not compare_state_dicts(pre_state, post_state) @@ -505,4 +509,4 @@ def test_initialize_policy_all_dimensions_match(base_learning_config): try: learn.rl_algorithm.initialize_policy() except Exception as e: - pytest.fail(f"initialize_policy raised an unexpected error: {e}") \ No newline at end of file + pytest.fail(f"initialize_policy raised an unexpected error: {e}") diff --git a/tests/test_mappo.py b/tests/test_mappo.py index 73ea73b3a..551d3e5c4 100644 --- a/tests/test_mappo.py +++ b/tests/test_mappo.py @@ -19,10 +19,7 @@ from assume.reinforcement_learning.algorithms.mappo import PPO from assume.reinforcement_learning.buffer import RolloutBuffer from assume.reinforcement_learning.learning_role import Learning - from assume.reinforcement_learning.neural_network_architecture import ( - ActorPPO, - CriticPPO, - ) + except ImportError: pass @@ -74,16 +71,15 @@ def learning_role_n(base_learning_config): config = copy(base_learning_config) learn = Learning(config["learning_config"], start, end) for agent_id in ("agent_0", "agent_1"): - strat = LearningStrategy(**config, learning_role=learn) - strat.unit_id = agent_id - learn.rl_strats[agent_id] = strat + strategy = LearningStrategy(**config, learning_role=learn) + strategy.unit_id = agent_id + learn.rl_strats[agent_id] = strategy return learn @pytest.fixture(scope="function") def saved_n_agent_model(learning_role_n, tmp_path) -> tuple[str, dict]: - """Save a 2-agent PPO model; return (save_dir, state_dict_snapshot). 
- """ + """Save a 2-agent PPO model; return (save_dir, state_dict_snapshot).""" learning_role_n.initialize_policy() save_dir = tmp_path / "saved_model_n" save_dir.mkdir(parents=True, exist_ok=True) @@ -146,8 +142,7 @@ def _make_rollout_buffer( def _setup_for_update(learning_role) -> None: - """Setting minimal attributes needed. - """ + """Setting minimal attributes needed.""" learning_role.update_steps = 0 learning_role.db_addr = None # disables the context.schedule_instant_message path @@ -213,135 +208,6 @@ def test_mappo_load_matching_n(base_learning_config, saved_n_agent_model): ) -# @pytest.mark.require_learning -# def test_mappo_update_policy_skips_none_buffer(learning_role_n, monkeypatch): -# learning_role_n.initialize_policy() -# _setup_for_update(learning_role_n) -# monkeypatch.setattr(learning_role_n, "get_progress_remaining", lambda: 1.0) - -# learning_role_n.buffer = None -# learning_role_n.rl_algorithm.update_policy() -# assert learning_role_n.rl_algorithm.n_updates == 0 - - -# @pytest.mark.require_learning -# def test_mappo_update_policy_skips_empty_buffer(learning_role_n, monkeypatch): -# learning_role_n.initialize_policy() -# _setup_for_update(learning_role_n) -# monkeypatch.setattr(learning_role_n, "get_progress_remaining", lambda: 1.0) - -# s = learning_role_n.rl_strats["agent_0"] -# learning_role_n.buffer = RolloutBuffer( -# buffer_size=50, -# obs_dim=s.obs_dim, -# act_dim=s.act_dim, -# n_rl_units=2, -# device="cpu", -# float_type=th.float32, -# ) -# learning_role_n.rl_algorithm.update_policy() -# assert learning_role_n.rl_algorithm.n_updates == 0 - - -# @pytest.mark.require_learning -# def test_mappo_update_policy_skips_insufficient_data(learning_role_n, monkeypatch): -# learning_role_n.initialize_policy() -# _setup_for_update(learning_role_n) -# monkeypatch.setattr(learning_role_n, "get_progress_remaining", lambda: 1.0) - -# s = learning_role_n.rl_strats["agent_0"] -# learning_role_n.buffer = _make_rollout_buffer( -# obs_dim=s.obs_dim, 
act_dim=s.act_dim, n_agents=2, n_steps=1 -# ) -# learning_role_n.rl_algorithm.update_policy() -# assert learning_role_n.rl_algorithm.n_updates == 0 - - -# @pytest.mark.require_learning -# def test_mappo_update_policy_increments_n_updates(learning_role_n, monkeypatch): -# learning_role_n.initialize_policy() -# _setup_for_update(learning_role_n) -# monkeypatch.setattr(learning_role_n, "get_progress_remaining", lambda: 1.0) - -# s = learning_role_n.rl_strats["agent_0"] -# learning_role_n.buffer = _make_rollout_buffer( -# obs_dim=s.obs_dim, act_dim=s.act_dim, n_agents=2, n_steps=20 -# ) -# learning_role_n.rl_algorithm.update_policy() -# assert learning_role_n.rl_algorithm.n_updates == 1 - - -# @pytest.mark.require_learning -# def test_mappo_update_policy_resets_buffer(learning_role_n, monkeypatch): -# learning_role_n.initialize_policy() -# _setup_for_update(learning_role_n) -# monkeypatch.setattr(learning_role_n, "get_progress_remaining", lambda: 1.0) - -# s = learning_role_n.rl_strats["agent_0"] -# learning_role_n.buffer = _make_rollout_buffer( -# obs_dim=s.obs_dim, act_dim=s.act_dim, n_agents=2, n_steps=20 -# ) -# assert learning_role_n.buffer.pos > 0 - -# learning_role_n.rl_algorithm.update_policy() -# assert learning_role_n.buffer.pos == 0, ( -# "RolloutBuffer.reset() must be called at the end of every PPO update" -# ) - - -# @pytest.mark.require_learning -# def test_mappo_update_policy_multiple_epochs(base_learning_config, monkeypatch): -# config = copy(base_learning_config) -# config["learning_config"].on_policy.n_epochs = 3 - -# learn = Learning(config["learning_config"], start, end) -# for agent_id in ("agent_0", "agent_1"): -# strat = LearningStrategy(**config, learning_role=learn) -# strat.unit_id = agent_id -# learn.rl_strats[agent_id] = strat -# learn.initialize_policy() -# _setup_for_update(learn) -# monkeypatch.setattr(learn, "get_progress_remaining", lambda: 1.0) - -# s = learn.rl_strats["agent_0"] -# learn.buffer = _make_rollout_buffer( -# 
obs_dim=s.obs_dim, act_dim=s.act_dim, n_agents=2, n_steps=30 -# ) - -# algo = learn.rl_algorithm -# assert algo.n_epochs == 3 -# algo.update_policy() -# assert algo.n_updates == 1 - - -# @pytest.mark.require_learning -# def test_mappo_update_policy_actor_weights_change(learning_role_n, monkeypatch): -# learning_role_n.initialize_policy() -# _setup_for_update(learning_role_n) -# monkeypatch.setattr(learning_role_n, "get_progress_remaining", lambda: 1.0) - -# s = learning_role_n.rl_strats["agent_0"] -# pre_actor = deepcopy(s.actor.state_dict()) -# pre_critic = deepcopy(s.critics.state_dict()) - -# learning_role_n.buffer = _make_rollout_buffer( -# obs_dim=s.obs_dim, act_dim=s.act_dim, n_agents=2, n_steps=20 -# ) -# learning_role_n.rl_algorithm.update_policy() - -# post_actor = s.actor.state_dict() -# post_critic = s.critics.state_dict() - -# actor_changed = any( -# not th.equal(pre_actor[k], post_actor[k]) for k in pre_actor -# ) -# critic_changed = any( -# not th.equal(pre_critic[k], post_critic[k]) for k in pre_critic -# ) -# assert actor_changed, "Actor weights must change after a PPO update" -# assert critic_changed, "Critic weights must change after a PPO update" - - @pytest.mark.require_learning def test_mappo_initialize_policy_dimension_mismatch(base_learning_config): config = copy(base_learning_config) diff --git a/tests/test_public_api.py b/tests/test_public_api.py index 76c844006..a11e047b8 100644 --- a/tests/test_public_api.py +++ b/tests/test_public_api.py @@ -12,14 +12,6 @@ import pytest -try: - import torch as th - - TORCH_AVAILABLE = True -except ImportError: - TORCH_AVAILABLE = False - - # --------------------------------------------------------------------------- # Layer 1 – assume.reinforcement_learning.algorithms # --------------------------------------------------------------------------- @@ -73,16 +65,20 @@ def test_import_actor_architecture_aliases(self): def test_algorithm_hierarchy(self): """TD3, DDPG, PPO must all be subclasses of A2CAlgorithm 
→ RLAlgorithm.""" from assume.reinforcement_learning.algorithms import ( - A2CAlgorithm, DDPG, PPO, - RLAlgorithm, TD3, + A2CAlgorithm, + RLAlgorithm, ) for cls in (TD3, DDPG, PPO): - assert issubclass(cls, A2CAlgorithm), f"{cls.__name__} not subclass of A2CAlgorithm" - assert issubclass(cls, RLAlgorithm), f"{cls.__name__} not subclass of RLAlgorithm" + assert issubclass(cls, A2CAlgorithm), ( + f"{cls.__name__} not subclass of A2CAlgorithm" + ) + assert issubclass(cls, RLAlgorithm), ( + f"{cls.__name__} not subclass of RLAlgorithm" + ) def test_actor_aliases_map_to_nn_modules(self): from torch import nn @@ -90,7 +86,9 @@ def test_actor_aliases_map_to_nn_modules(self): from assume.reinforcement_learning.algorithms import actor_architecture_aliases for name, cls in actor_architecture_aliases.items(): - assert issubclass(cls, nn.Module), f"alias '{name}' does not map to an nn.Module" + assert issubclass(cls, nn.Module), ( + f"alias '{name}' does not map to an nn.Module" + ) # --------------------------------------------------------------------------- @@ -180,7 +178,10 @@ def test_replay_buffer_and_rollout_buffer_are_distinct(self): assert ReplayBuffer is not RolloutBuffer def test_buffer_samples_are_distinct(self): - from assume.reinforcement_learning import ReplayBufferSamples, RolloutBufferSamples + from assume.reinforcement_learning import ( + ReplayBufferSamples, + RolloutBufferSamples, + ) assert ReplayBufferSamples is not RolloutBufferSamples @@ -273,8 +274,12 @@ def test_rl_symbols_consistent_across_layers(self): from assume.reinforcement_learning.algorithms import DDPG, PPO, TD3 for name, algo_cls in [("TD3", TD3), ("DDPG", DDPG), ("PPO", PPO)]: - assert getattr(rl, name) is algo_cls, f"rl.{name} is not the same object as algorithms.{name}" - assert getattr(assume, name) is algo_cls, f"assume.{name} is not the same object as algorithms.{name}" + assert getattr(rl, name) is algo_cls, ( + f"rl.{name} is not the same object as algorithms.{name}" + ) + assert 
getattr(assume, name) is algo_cls, ( + f"assume.{name} is not the same object as algorithms.{name}" + ) def test_version_still_present(self): import assume @@ -286,5 +291,13 @@ def test_non_rl_symbols_unchanged(self): """Core non-RL exports (World, MarketConfig, etc.) must still be present.""" import assume - for name in ("World", "MarketConfig", "MarketProduct", "load_scenario_folder", "run_learning"): - assert hasattr(assume, name), f"Pre-existing export '{name}' missing after __init__ update" + for name in ( + "World", + "MarketConfig", + "MarketProduct", + "load_scenario_folder", + "run_learning", + ): + assert hasattr(assume, name), ( + f"Pre-existing export '{name}' missing after __init__ update" + ) diff --git a/tests/test_rl_rolloutbuffer.py b/tests/test_rl_rolloutbuffer.py index e903e2e11..19fe5c19d 100644 --- a/tests/test_rl_rolloutbuffer.py +++ b/tests/test_rl_rolloutbuffer.py @@ -15,6 +15,7 @@ except ImportError: pass + def make_rollout_buffer( buffer_size=8, obs_dim=3, @@ -148,8 +149,12 @@ def test_gae_single_step_non_terminal(): """For 1 step, 1 agent, non-terminal: advantage = TD error.""" gamma, gae_lambda = 0.99, 0.95 buf = make_rollout_buffer( - buffer_size=1, obs_dim=1, act_dim=1, n_rl_units=1, - gamma=gamma, gae_lambda=gae_lambda, + buffer_size=1, + obs_dim=1, + act_dim=1, + n_rl_units=1, + gamma=gamma, + gae_lambda=gae_lambda, ) r, v, v_next = 1.0, 0.5, 0.8 buf.add( @@ -178,8 +183,12 @@ def test_gae_single_step_terminal(): """For a terminal episode end, bootstrap value must not propagate.""" gamma, gae_lambda = 0.99, 0.95 buf = make_rollout_buffer( - buffer_size=1, obs_dim=1, act_dim=1, n_rl_units=1, - gamma=gamma, gae_lambda=gae_lambda, + buffer_size=1, + obs_dim=1, + act_dim=1, + n_rl_units=1, + gamma=gamma, + gae_lambda=gae_lambda, ) r, v = 2.0, 1.0 buf.add( @@ -209,8 +218,12 @@ def test_gae_multi_step_manual(): """Manually verify 2-step GAE for a single agent.""" gamma, gae_lambda = 0.99, 0.95 buf = make_rollout_buffer( - buffer_size=2, 
obs_dim=1, act_dim=1, n_rl_units=1, - gamma=gamma, gae_lambda=gae_lambda, + buffer_size=2, + obs_dim=1, + act_dim=1, + n_rl_units=1, + gamma=gamma, + gae_lambda=gae_lambda, ) r0, v0 = 1.0, 0.4 r1, v1 = 0.5, 0.6 @@ -248,8 +261,12 @@ def test_gae_lambda_zero_equals_td_error(): """gae_lambda=0 reduces GAE to a 1-step TD advantage per step.""" gamma, gae_lambda = 0.99, 0.0 buf = make_rollout_buffer( - buffer_size=3, obs_dim=1, act_dim=1, n_rl_units=1, - gamma=gamma, gae_lambda=gae_lambda, + buffer_size=3, + obs_dim=1, + act_dim=1, + n_rl_units=1, + gamma=gamma, + gae_lambda=gae_lambda, ) rewards = [1.0, 0.5, 2.0] values = [0.4, 0.6, 0.3] @@ -282,8 +299,12 @@ def test_gae_lambda_one_gamma_one_monte_carlo(): gamma, gae_lambda = 1.0, 1.0 T = 4 buf = make_rollout_buffer( - buffer_size=T, obs_dim=1, act_dim=1, n_rl_units=1, - gamma=gamma, gae_lambda=gae_lambda, + buffer_size=T, + obs_dim=1, + act_dim=1, + n_rl_units=1, + gamma=gamma, + gae_lambda=gae_lambda, ) rewards = [1.0, 1.0, 1.0, 1.0] values = [0.1] * T @@ -312,8 +333,12 @@ def test_gae_multi_agent_independence(): """One agent's rewards must not cause issue with another agent's advantages.""" gamma, gae_lambda = 0.99, 0.95 buf = make_rollout_buffer( - buffer_size=3, obs_dim=1, act_dim=1, n_rl_units=2, - gamma=gamma, gae_lambda=gae_lambda, + buffer_size=3, + obs_dim=1, + act_dim=1, + n_rl_units=2, + gamma=gamma, + gae_lambda=gae_lambda, ) for _ in range(3): @@ -410,7 +435,7 @@ def test_rollout_buffer_get_mini_batches_cover_all_steps(): def test_rollout_buffer_get_partial_fill(): """A partially-filled buffer must only yield the filled steps.""" buf = make_rollout_buffer(buffer_size=10, obs_dim=2, act_dim=1, n_rl_units=1) - fill_buffer(buf, n_steps=4) + fill_buffer(buf, n_steps=4) buf.compute_returns_and_advantages( last_values=np.zeros(1, dtype=np.float32), dones=np.zeros(1, dtype=np.float32), @@ -425,8 +450,12 @@ def test_full_episode_rollout(): """fill -> GAE -> mini-batch epochs -> reset""" T, obs_dim, act_dim, 
n_agents = 16, 5, 3, 2 buf = make_rollout_buffer( - buffer_size=T, obs_dim=obs_dim, act_dim=act_dim, n_rl_units=n_agents, - gamma=0.99, gae_lambda=0.95, + buffer_size=T, + obs_dim=obs_dim, + act_dim=act_dim, + n_rl_units=n_agents, + gamma=0.99, + gae_lambda=0.95, ) rng = np.random.default_rng(42) From b0b8457156465a7f6bf8c2f159d65ff2b5e9e93e Mon Sep 17 00:00:00 2001 From: Harshul-18 Date: Sat, 16 May 2026 23:08:11 +0200 Subject: [PATCH 43/44] Fixed update_policy's strategies listing, temporarily fixed circular import issue, fixed convert_to_tensors function calling in buffer.py --- assume/common/utils.py | 2 +- assume/reinforcement_learning/algorithms/__init__.py | 11 ++++++----- assume/reinforcement_learning/algorithms/mappo.py | 2 +- assume/reinforcement_learning/buffer.py | 4 ++-- 4 files changed, 10 insertions(+), 9 deletions(-) diff --git a/assume/common/utils.py b/assume/common/utils.py index dc547050b..e4dec8f8d 100644 --- a/assume/common/utils.py +++ b/assume/common/utils.py @@ -599,7 +599,7 @@ def rename_study_case(path: str, old_key: str, new_key: str): yaml.safe_dump(data, file, sort_keys=False) -def convert_to_tensors(self, array: np.array, copy=True, dtype=None, device=None): +def convert_to_tensors(array: np.array, copy=True, dtype=None, device=None): """Convert a numpy array to a PyTorch tensor. 
Note: diff --git a/assume/reinforcement_learning/algorithms/__init__.py b/assume/reinforcement_learning/algorithms/__init__.py index e30fa3ffc..363a58edd 100644 --- a/assume/reinforcement_learning/algorithms/__init__.py +++ b/assume/reinforcement_learning/algorithms/__init__.py @@ -8,6 +8,12 @@ LSTMActor, MLPActor, ) + +actor_architecture_aliases: dict[str, type[nn.Module]] = { + "mlp": MLPActor, + "lstm": LSTMActor, +} + from assume.reinforcement_learning.algorithms.base_algorithm import ( A2CAlgorithm, RLAlgorithm, @@ -16,11 +22,6 @@ from assume.reinforcement_learning.algorithms.mappo import PPO from assume.reinforcement_learning.algorithms.matd3 import TD3 -actor_architecture_aliases: dict[str, type[nn.Module]] = { - "mlp": MLPActor, - "lstm": LSTMActor, -} - __all__ = [ # Base classes diff --git a/assume/reinforcement_learning/algorithms/mappo.py b/assume/reinforcement_learning/algorithms/mappo.py index 6357fde92..f41082dab 100644 --- a/assume/reinforcement_learning/algorithms/mappo.py +++ b/assume/reinforcement_learning/algorithms/mappo.py @@ -239,7 +239,7 @@ def update_policy(self) -> None: logger.debug("Updating Policy (PPO)") # Keeping strategy order aligned with rollout-buffer column order. - strategies = [self.learning_role.rl_strats] + strategies = [strategy for strategy in self.learning_role.rl_strats.values()] n_rl_agents = len(strategies) # Getting the buffer, this will be a RolloutBuffer for on-policy algorithms. 
diff --git a/assume/reinforcement_learning/buffer.py b/assume/reinforcement_learning/buffer.py index 2bb3ca71d..af6230f9d 100644 --- a/assume/reinforcement_learning/buffer.py +++ b/assume/reinforcement_learning/buffer.py @@ -165,7 +165,7 @@ def sample(self, batch_size: int) -> ReplayBufferSamples: ) return ReplayBufferSamples( - *tuple(convert_to_tensors(x, self.device, self.th_float_type) for x in data) + *tuple(convert_to_tensors(array=x, dtype=self.th_float_type, device=self.device) for x in data) ) @@ -397,7 +397,7 @@ def sample(self, indices: np.ndarray) -> RolloutBufferSamples: ) return RolloutBufferSamples( - *(convert_to_tensors(x, self.device, self.float_type) for x in data) + *(convert_to_tensors(array=x, dtype=self.float_type, device=self.device) for x in data) ) def size(self) -> int: From eb3990a059cf9d64de5c565e609eaeb788273bbe Mon Sep 17 00:00:00 2001 From: Harshul-18 Date: Sun, 17 May 2026 13:05:25 +0200 Subject: [PATCH 44/44] Removed sorting from MAPPO pipeline, Removed adding default values instead warning generation, fixed last_idx/last_dones in buffer handling (mappo.py), Reverted back to the main/upstream branch's transform_buffer_data --- .../algorithms/mappo.py | 10 +- assume/reinforcement_learning/buffer.py | 21 ++- .../reinforcement_learning/learning_role.py | 56 ++++++-- .../reinforcement_learning/learning_utils.py | 29 ++-- tests/test_mappo.py | 131 ++++++++++++++++++ tests/test_rl_rolloutbuffer.py | 11 +- 6 files changed, 222 insertions(+), 36 deletions(-) diff --git a/assume/reinforcement_learning/algorithms/mappo.py b/assume/reinforcement_learning/algorithms/mappo.py index f41082dab..b8b51e5f0 100644 --- a/assume/reinforcement_learning/algorithms/mappo.py +++ b/assume/reinforcement_learning/algorithms/mappo.py @@ -282,11 +282,15 @@ def update_policy(self) -> None: if buffer_size > 0: # Use the LAST observation as the bootstrap for the REST of the buffer. # We sacrifice the last step (pos-1) to serve as s_{t+1} for the step before it. 
- # This ensures V(s_{t+1}) is calculating using the REAL next state, not self-referential. - + # This ensures V(s_{t+1}) is calculated using the REAL next state, not a self- + # referential V(s_{t}). last_idx = buffer_size - 1 last_obs = rollout_buffer.observations[last_idx] - last_dones = rollout_buffer.dones[last_idx] + + if last_idx > 0: + last_dones = rollout_buffer.dones[last_idx - 1] + else: + last_dones = rollout_buffer.dones[last_idx] # Reduce buffer size by 1 so as to not train on the bootstrap step rollout_buffer.pos -= 1 diff --git a/assume/reinforcement_learning/buffer.py b/assume/reinforcement_learning/buffer.py index af6230f9d..3bbcca66d 100644 --- a/assume/reinforcement_learning/buffer.py +++ b/assume/reinforcement_learning/buffer.py @@ -2,6 +2,7 @@ # # SPDX-License-Identifier: AGPL-3.0-or-later +import logging import warnings from collections.abc import Generator from typing import NamedTuple @@ -18,6 +19,9 @@ psutil = None +logger = logging.getLogger(__name__) + + class ReplayBufferSamples(NamedTuple): """Container for replay buffer samples. @@ -284,10 +288,23 @@ def add( done: Whether the episode ended. value: Value estimate from the critic. log_prob: Log probability of the action. + + Raises: + OverflowError: If the buffer is already full. The buffer must be either + resized or cleared before adding another transition. """ if self.pos >= self.buffer_size: self.full = True - return + logger.error( + "RolloutBuffer is full (size=%d). Refusing to silently drop a " + "transition. Increase buffer_size or call reset() before adding " + "more data.", + self.buffer_size, + ) + raise OverflowError( + f"RolloutBuffer of size {self.buffer_size} is full; cannot add " + "another transition without losing data." 
+ ) self.observations[self.pos] = np.array(obs).copy() self.actions[self.pos] = np.array(action).copy() @@ -298,6 +315,8 @@ def add( # flattening the rewards, dones, values, log_probs array to (n_units,) size self.pos += 1 + if self.pos >= self.buffer_size: + self.full = True def compute_returns_and_advantages( self, last_values: np.ndarray, dones: np.ndarray diff --git a/assume/reinforcement_learning/learning_role.py b/assume/reinforcement_learning/learning_role.py index 02c568c7a..f3e50550f 100644 --- a/assume/reinforcement_learning/learning_role.py +++ b/assume/reinforcement_learning/learning_role.py @@ -336,11 +336,20 @@ async def store_to_buffer_and_update(self) -> None: timestamps_to_process = [ ts for ts in all_timestamps if ts not in incomplete_timestamps ] - # Carry over incomplete timesteps to new cache dicts + # Carry over incomplete timesteps to new cache dicts so they are + # not lost when the cache is reset below. + on_policy_active = is_on_policy(self.learning_config.algorithm) for ts in incomplete_timestamps: self.all_obs[ts] = current_obs[ts] self.all_actions[ts] = current_actions[ts] self.all_noises[ts] = current_noises[ts] + if on_policy_active: + if ts in current_values: + self.all_values[ts] = current_values[ts] + if ts in current_log_probs: + self.all_log_probs[ts] = current_log_probs[ts] + if ts in current_dones: + self.all_dones[ts] = current_dones[ts] # Create filtered cache (only complete timesteps) cache = { @@ -388,25 +397,45 @@ async def _store_to_buffer_and_update_sync(self, cache, device) -> None: # Add data to buffer - type depends on algorithm category if is_on_policy(self.learning_config.algorithm): # Using RolloutBuffer for on-policy algorithms (PPO/MAPPO). 
+ unit_id_order = list(self.rl_strats.keys()) + n_rl_agents = len(unit_id_order) added_timestamps = 0 + for timestamp in sorted(cache["obs"].keys()): - sorted_unit_ids = sorted(cache["obs"][timestamp].keys()) - n_rl_agents = len(sorted_unit_ids) + missing_units = [ + u + for u in unit_id_order + if u not in cache["obs"][timestamp] + or u not in cache["actions"][timestamp] + or u not in cache["rewards"][timestamp] + or u not in cache["log_probs"][timestamp] + or u not in cache["dones"][timestamp] + ] + if missing_units: + logger.warning( + "Skipping on-policy rollout step at %s: missing data for units %s. " + "This usually means a learning unit failed to report an " + "observation/action/reward/log_prob/done for this timestep, " + "and we refuse to fill the buffer with zeros.", + timestamp, + missing_units, + ) + continue obs_data = transform_buffer_data( {timestamp: cache["obs"][timestamp]}, device, - sorted_unit_ids, + unit_id_order, ) actions_data = transform_buffer_data( {timestamp: cache["actions"][timestamp]}, device, - sorted_unit_ids, + unit_id_order, ) rewards_data = transform_buffer_data( {timestamp: cache["rewards"][timestamp]}, device, - sorted_unit_ids, + unit_id_order, ) # Computing MAPPO value targets with the centralized critic @@ -420,7 +449,7 @@ async def _store_to_buffer_and_update_sync(self, cache, device) -> None: ] with th.no_grad(): - for i, unit_id in enumerate(sorted_unit_ids): + for i, unit_id in enumerate(unit_id_order): strategy = self.rl_strats[unit_id] obs_i = obs_step[i : i + 1] other_unique = np.concatenate( @@ -446,19 +475,19 @@ async def _store_to_buffer_and_update_sync(self, cache, device) -> None: values_data = transform_buffer_data( {timestamp: cache["values"][timestamp]}, device, - sorted_unit_ids, + unit_id_order, ) log_probs_data = transform_buffer_data( {timestamp: cache["log_probs"][timestamp]}, device, - sorted_unit_ids, + unit_id_order, ) dones_data = transform_buffer_data( {timestamp: cache["dones"][timestamp]}, device, - 
sorted_unit_ids, + unit_id_order, ) # Adding data to the rollout buffer. @@ -475,13 +504,14 @@ async def _store_to_buffer_and_update_sync(self, cache, device) -> None: else: # Using ReplayBuffer for off-policy algorithms (TD3/DDPG). # Rewriting the dict so obs.shape == (n_rl_units, obs_dim), used by keys in learning role. + unit_id_order = list(self.rl_strats.keys()) self.buffer.add( - obs=transform_buffer_data(cache["obs"], device, self.rl_strats.keys()), + obs=transform_buffer_data(cache["obs"], device, unit_id_order), actions=transform_buffer_data( - cache["actions"], device, self.rl_strats.keys() + cache["actions"], device, unit_id_order ), reward=transform_buffer_data( - cache["rewards"], device, self.rl_strats.keys() + cache["rewards"], device, unit_id_order ), ) diff --git a/assume/reinforcement_learning/learning_utils.py b/assume/reinforcement_learning/learning_utils.py index 97f393bcc..5afb8aed3 100644 --- a/assume/reinforcement_learning/learning_utils.py +++ b/assume/reinforcement_learning/learning_utils.py @@ -206,7 +206,7 @@ def copy_layer_data(dst, src): def transform_buffer_data( - nested_dict: dict, device: th.device, keys_unit_order: list | None = None + nested_dict: dict, device: th.device, keys_unit_order: list ) -> np.ndarray: """ Transform nested dict {datetime -> {unit_id -> [values]}} into @@ -216,22 +216,14 @@ def transform_buffer_data( Args: nested_dict: Dict with structure {datetime -> {unit_id -> list[tensor]}}. device: PyTorch device config. + keys_unit_order: Ordered iterable of unit ids defining the agent + axis of the returned tensor. Returns: - Shape (n_timesteps, n_powerplants, feature_dim). + np.ndarray: Shape (n_timesteps, n_powerplants, feature_dim). 
""" - if not nested_dict: - return np.zeros((0, 0, 1), dtype=np.float32) - - # Get sorted lists of units and timestamps (for consistent ordering) all_times = sorted(nested_dict.keys()) - if keys_unit_order is None: - unit_ids = set() - for unit_data in nested_dict.values(): - unit_ids.update(unit_data.keys()) - keys_unit_order = sorted(unit_ids) - # Get feature dimension from first non-empty value feature_dim = None for unit_data in nested_dict.values(): for values in unit_data.values(): @@ -244,21 +236,22 @@ def transform_buffer_data( if feature_dim is not None: break - # Some on-policy fields (e.g. log_probs/values) can be empty for some timesteps. - # Keep zeros in that case instead of failing the entire training loop. if feature_dim is None: - feature_dim = 1 + raise ValueError( + "Error, while transforming RL data for buffer: No data found " + "to determine feature dimension. Callers must filter out empty " + "timesteps before calling transform_buffer_data (see " + "learning_role._store_to_buffer_and_update_sync)." 
+ ) - # Pre-allocate tensor (keep on same device as input data) result = th.zeros( (len(all_times), len(keys_unit_order), feature_dim), device=device ) - # Fill tensor with values (stays on same device as input so if on GPU it stays there during filling) for t, timestamp in enumerate(all_times): for u, unit_id in enumerate(keys_unit_order): values = nested_dict[timestamp].get(unit_id, []) - if values: # if we have values for this timestamp + if values: result[t, u] = values[0] return result.cpu().numpy() diff --git a/tests/test_mappo.py b/tests/test_mappo.py index 551d3e5c4..cd69550ef 100644 --- a/tests/test_mappo.py +++ b/tests/test_mappo.py @@ -240,3 +240,134 @@ def test_mappo_initialize_policy_all_dimensions_match(base_learning_config): learn.rl_algorithm.initialize_policy() except Exception as e: pytest.fail(f"initialize_policy raised an unexpected error: {e}") + + +@pytest.mark.require_learning +def test_mappo_buffer_storage_uses_rl_strats_order(base_learning_config): + """Regression test for the agent-ordering bug. + + The on-policy buffer-storage path used to call + ``sorted(cache["obs"][timestamp].keys())`` to order agents, while + ``mappo.PPO.update_policy`` iterates ``self.rl_strats.values()``. When + the unit ids do not happen to be alphabetically sorted (e.g. + ``pp_6, pp_7, pp_8, pp_9, pp_10``) the two orders diverge and every + agent is trained on a different agent's observations / actions / values, + silently degrading MAPPO to noise. + + This test pins ``learning_role`` to use the ``rl_strats`` insertion order + when filling the rollout buffer, exactly like the off-policy algorithms + already do. 
+ """ + import asyncio + from collections import defaultdict + + config = copy(base_learning_config) + + learn = Learning(config["learning_config"], start, end) + insertion_order = ("pp_6", "pp_7", "pp_8", "pp_9", "pp_10") + assert sorted(insertion_order) != list(insertion_order), ( + "test scenario must use unit ids whose sort order differs from " + "insertion order; otherwise this regression test is trivially passing" + ) + + for agent_id in insertion_order: + strat = LearningStrategy(**config, learning_role=learn) + strat.unit_id = agent_id + learn.rl_strats[agent_id] = strat + + learn.initialize_policy() + + n_agents = len(insertion_order) + # ``LearningStrategy`` computes ``self.obs_dim`` from + # ``num_timeseries_obs_dim * foresight + unique_obs_dim``, so we must + # match that here for the fake centralized-critic input to align. + obs_dim = ( + config["num_timeseries_obs_dim"] * config["foresight"] + + config["unique_obs_dim"] + ) + act_dim = config["act_dim"] + + # Build a fake rollout buffer large enough to hold one fake timestep. + learn.buffer = RolloutBuffer( + buffer_size=4, + obs_dim=obs_dim, + act_dim=act_dim, + n_rl_units=n_agents, + device="cpu", + float_type=th.float32, + gamma=0.99, + gae_lambda=0.95, + ) + + # Craft a cache where each unit's observation/action/reward is a unique + # constant equal to (1+i)*10, so we can assert that the row for agent i in + # the buffer matches the i-th *insertion-order* unit, not the i-th + # *sorted-order* unit. 
+ timestamp = "2023-07-01 00:00:00" + cache = { + "obs": {timestamp: {}}, + "actions": {timestamp: {}}, + "rewards": {timestamp: {}}, + "noises": {timestamp: {}}, + "regret": {timestamp: {}}, + "profit": {timestamp: {}}, + "values": {timestamp: defaultdict(list)}, + "log_probs": {timestamp: {}}, + "dones": {timestamp: {}}, + } + for i, unit_id in enumerate(insertion_order): + marker = float(i + 1) + cache["obs"][timestamp][unit_id] = [ + th.full((obs_dim,), marker, dtype=th.float32) + ] + cache["actions"][timestamp][unit_id] = [ + th.full((act_dim,), marker, dtype=th.float32) + ] + cache["rewards"][timestamp][unit_id] = [marker] + cache["noises"][timestamp][unit_id] = [ + th.zeros(act_dim, dtype=th.float32) + ] + cache["regret"][timestamp][unit_id] = [0.0] + cache["profit"][timestamp][unit_id] = [0.0] + cache["log_probs"][timestamp][unit_id] = [-marker] + cache["dones"][timestamp][unit_id] = [0.0] + # leave cache["values"][timestamp] empty - mappo recomputes values + + # Stash db_addr/update_steps so the logging path inside the algorithm is + # safe to call. We do NOT need an actual policy update for this test, so + # we monkey-patch update_policy to a no-op. + learn.db_addr = None + learn.update_steps = 0 + learn.rl_algorithm.update_policy = lambda: None + + asyncio.run( + learn._store_to_buffer_and_update_sync(cache, learn.device) + ) + + buf = learn.buffer + # One timestamp -> one row in the buffer. 
+ assert buf.pos == 1, f"expected 1 transition, got {buf.pos}" + + stored_obs = buf.observations[0] + stored_actions = buf.actions[0] + stored_rewards = buf.rewards[0] + stored_log_probs = buf.log_probs[0] + + for i in range(n_agents): + expected = float(i + 1) + assert np.allclose(stored_obs[i], expected), ( + f"row {i} of buffer.observations should match insertion-order " + f"agent {insertion_order[i]} (value {expected}); got {stored_obs[i]}" + ) + assert np.allclose(stored_actions[i], expected), ( + f"row {i} of buffer.actions should match insertion-order " + f"agent {insertion_order[i]} (value {expected}); got {stored_actions[i]}" + ) + assert np.allclose(stored_rewards[i], expected), ( + f"row {i} of buffer.rewards should match insertion-order " + f"agent {insertion_order[i]} (value {expected}); got {stored_rewards[i]}" + ) + assert np.allclose(stored_log_probs[i], -expected), ( + f"row {i} of buffer.log_probs should match insertion-order " + f"agent {insertion_order[i]} (value {-expected}); got {stored_log_probs[i]}" + ) diff --git a/tests/test_rl_rolloutbuffer.py b/tests/test_rl_rolloutbuffer.py index 19fe5c19d..50410a4be 100644 --- a/tests/test_rl_rolloutbuffer.py +++ b/tests/test_rl_rolloutbuffer.py @@ -125,6 +125,13 @@ def test_rollout_buffer_add_stores_correct_values(): @pytest.mark.require_learning def test_rollout_buffer_add_beyond_capacity_sets_full(): + """The buffer becomes ``full`` after the last in-bounds add and refuses + to silently drop further transitions. + + Silent drops mask configuration errors (e.g. an over-sized rollout window) + and were one of the bugs that hid the MAPPO ordering issue. The buffer + must instead raise ``OverflowError`` so the caller has a chance to react. 
+ """ buf = make_rollout_buffer(buffer_size=3) obs = np.zeros((buf.n_rl_units, buf.obs_dim), dtype=np.float32) act = np.zeros((buf.n_rl_units, buf.act_dim), dtype=np.float32) @@ -138,8 +145,10 @@ def test_rollout_buffer_add_beyond_capacity_sets_full(): assert buf.pos == 3 assert buf.size() == 3 + assert buf.full is True - buf.add(obs, act, rew, done, val, lp) + with pytest.raises(OverflowError): + buf.add(obs, act, rew, done, val, lp) assert buf.full is True assert buf.size() == 3