From 2b928eedd4bf88b2d84efe58c8cbdb5b7d1390bc Mon Sep 17 00:00:00 2001 From: Alexander Soare Date: Tue, 2 Apr 2024 19:11:53 +0100 Subject: [PATCH 01/80] backup wip --- lerobot/common/datasets/factory.py | 26 ++++ lerobot/common/policies/act/detr_vae.py | 81 ++++++---- lerobot/common/policies/act/policy.py | 40 ++++- lerobot/common/policies/factory.py | 2 +- lerobot/configs/policy/act.yaml | 3 +- lerobot/scripts/train.py | 1 + poetry.lock | 197 +++++++++++++++++++++++- pyproject.toml | 1 + 8 files changed, 314 insertions(+), 37 deletions(-) diff --git a/lerobot/common/datasets/factory.py b/lerobot/common/datasets/factory.py index 04077034..47a15ea4 100644 --- a/lerobot/common/datasets/factory.py +++ b/lerobot/common/datasets/factory.py @@ -125,6 +125,32 @@ def make_offline_buffer( # TODO(rcadene): remove this and put it in config. Ideally we want to reproduce SOTA results just with mean_std normalization_mode = "mean_std" if cfg.env.name == "aloha" else "min_max" + + # TODO(now): These stats are needed to use their pretrained model for sim_transfer_cube_human. 
+ # (Pdb) stats['observation']['state']['mean'] + # tensor([-0.0071, -0.6293, 1.0351, -0.0517, -0.4642, -0.0754, 0.4751, -0.0373, + # -0.3324, 0.9034, -0.2258, -0.3127, -0.2412, 0.6866]) + stats['observation', 'state', 'mean'] = torch.tensor([-0.00740268, -0.63187766, 1.0356655 , -0.05027218, -0.46199223, + -0.07467502, 0.47467607, -0.03615446, -0.33203387, 0.9038929 , + -0.22060776, -0.31011587, -0.23484458, 0.6842416 ]) + # (Pdb) stats['observation']['state']['std'] + # tensor([0.0022, 0.0520, 0.0291, 0.0092, 0.0267, 0.0145, 0.0563, 0.0179, 0.0494, + # 0.0326, 0.0476, 0.0535, 0.0956, 0.0513]) + stats['observation', 'state', 'std'] = torch.tensor([0.01219023, 0.2975381 , 0.16728032, 0.04733803, 0.1486037 , + 0.08788499, 0.31752336, 0.1049916 , 0.27933604, 0.18094037, + 0.26604933, 0.30466506, 0.5298686 , 0.25505227]) + # (Pdb) stats['action']['mean'] + # tensor([-0.0075, -0.6346, 1.0353, -0.0465, -0.4686, -0.0738, 0.3723, -0.0396, + # -0.3184, 0.8991, -0.2065, -0.3182, -0.2338, 0.5593]) + stats['action']['mean'] = torch.tensor([-0.00756444, -0.6281845 , 1.0312834 , -0.04664314, -0.47211358, + -0.074527 , 0.37389806, -0.03718753, -0.3261143 , 0.8997205 , + -0.21371077, -0.31840396, -0.23360962, 0.551947]) + # (Pdb) stats['action']['std'] + # tensor([0.0023, 0.0514, 0.0290, 0.0086, 0.0263, 0.0143, 0.0593, 0.0185, 0.0510, + # 0.0328, 0.0478, 0.0531, 0.0945, 0.0794]) + stats['action']['std'] = torch.tensor([0.01252818, 0.2957442 , 0.16701928, 0.04584508, 0.14833844, + 0.08763024, 0.30665937, 0.10600077, 0.27572668, 0.1805853 , + 0.26304692, 0.30708534, 0.5305411 , 0.38381037]) transforms.append(NormalizeTransform(stats, in_keys, mode=normalization_mode)) offline_buffer.set_transform(transforms) diff --git a/lerobot/common/policies/act/detr_vae.py b/lerobot/common/policies/act/detr_vae.py index 0f2626f7..4d5525f2 100644 --- a/lerobot/common/policies/act/detr_vae.py +++ b/lerobot/common/policies/act/detr_vae.py @@ -2,6 +2,7 @@ import numpy as np import torch from torch 
import nn from torch.autograd import Variable +from transformers import DetrForObjectDetection from .backbone import build_backbone from .transformer import TransformerEncoder, TransformerEncoderLayer, build_transformer @@ -24,31 +25,57 @@ def get_sinusoid_encoding_table(n_position, d_hid): return torch.FloatTensor(sinusoid_table).unsqueeze(0) -class DETRVAE(nn.Module): - """This is the DETR module that performs object detection""" +class ActionChunkingTransformer(nn.Module): + """ + Action Chunking Transformer as per Learning Fine-Grained Bimanual Manipulation with Low-Cost Hardware + (https://arxiv.org/abs/2304.13705). + + Note: In this code we use the symbols `vae_encoder`, 'encoder', `decoder`. The meanings are as follows. + - The `vae_encoder` is, as per the literature around conditional variational auto-encoders (cVAE), the + part of the model that encodes the target data (here, a sequence of actions), and the condition + (here, we include the robot joint-space state as an input to the encoder). + - The `transformer` is the cVAE's decoder. But since we have an option to train this model without the + variational objective (in which case we drop the `vae_encoder` altogether), we don't call it the + `vae_decoder`. + # TODO(now): remove the following + - The `encoder` is actually a component of the cVAE's "decoder". But we refer to it as an "encoder" + because, in terms of the transformer with cross-attention that forms the cVAE's decoder, it is the + "encoder" part. We drop the `vae_` prefix because we have an option to train this model without the + variational objective (in which case we drop the `vae_encoder` altogether), and nothing about this + model has anything to do with a VAE). + - The `decoder` is a building block of the VAE decoder, and is just the "decoder" part of a + transformer with cross-attention. For the same reasoning behind the naming of `encoder`, we make + this term agnostic to the option to use a variational objective for training. 
+ + """ def __init__( - self, backbones, transformer, encoder, state_dim, action_dim, num_queries, camera_names, vae + self, backbones, transformer, vae_encoder, state_dim, action_dim, horizon, camera_names, vae ): """Initializes the model. Parameters: backbones: torch module of the backbone to be used. See backbone.py transformer: torch module of the transformer architecture. See transformer.py state_dim: robot state dimension of the environment - num_queries: number of object queries, ie detection slot. This is the maximal number of objects + horizon: number of object queries, ie detection slot. This is the maximal number of objects DETR can detect in a single image. For COCO, we recommend 100 queries. - aux_loss: True if auxiliary decoding losses (loss at each decoder layer) are to be used. + + Args: + state_dim: Robot positional state dimension. + action_dim: Action dimension. + horizon: The number of actions to generate in one forward pass. + vae: Whether to use the variational objective. TODO(now): Give more details. 
""" super().__init__() - self.num_queries = num_queries self.camera_names = camera_names self.transformer = transformer - self.encoder = encoder + self.vae_encoder = vae_encoder self.vae = vae hidden_dim = transformer.d_model self.action_head = nn.Linear(hidden_dim, action_dim) self.is_pad_head = nn.Linear(hidden_dim, 1) - self.query_embed = nn.Embedding(num_queries, hidden_dim) + # Positional embedding to be used as input to the latent vae_encoder (if applicable) and for the + self.pos_embed = nn.Embedding(horizon, hidden_dim) if backbones is not None: self.input_proj = nn.Conv2d(backbones[0].num_channels, hidden_dim, kernel_size=1) self.backbones = nn.ModuleList(backbones) @@ -61,16 +88,16 @@ class DETRVAE(nn.Module): self.pos = torch.nn.Embedding(2, hidden_dim) self.backbones = None - # encoder extra parameters + # vae_encoder extra parameters self.latent_dim = 32 # final size of latent z # TODO tune self.cls_embed = nn.Embedding(1, hidden_dim) # extra cls token embedding - self.encoder_action_proj = nn.Linear(14, hidden_dim) # project action to embedding - self.encoder_joint_proj = nn.Linear(14, hidden_dim) # project qpos to embedding + self.vae_encoder_action_proj = nn.Linear(14, hidden_dim) # project action to embedding + self.vae_encoder_joint_proj = nn.Linear(14, hidden_dim) # project qpos to embedding self.latent_proj = nn.Linear( hidden_dim, self.latent_dim * 2 ) # project hidden state to latent std, var self.register_buffer( - "pos_table", get_sinusoid_encoding_table(1 + 1 + num_queries, hidden_dim) + "pos_table", get_sinusoid_encoding_table(1 + 1 + horizon, hidden_dim) ) # [CLS], qpos, a_seq # decoder extra parameters @@ -91,15 +118,15 @@ class DETRVAE(nn.Module): ### Obtain latent z from action sequence if self.vae and is_training: # project action sequence to embedding dim, and concat with a CLS token - action_embed = self.encoder_action_proj(actions) # (bs, seq, hidden_dim) - qpos_embed = self.encoder_joint_proj(qpos) # (bs, hidden_dim) + 
action_embed = self.vae_encoder_action_proj(actions) # (bs, seq, hidden_dim) + qpos_embed = self.vae_encoder_joint_proj(qpos) # (bs, hidden_dim) qpos_embed = torch.unsqueeze(qpos_embed, axis=1) # (bs, 1, hidden_dim) cls_embed = self.cls_embed.weight # (1, hidden_dim) cls_embed = torch.unsqueeze(cls_embed, axis=0).repeat(bs, 1, 1) # (bs, 1, hidden_dim) - encoder_input = torch.cat( + vae_encoder_input = torch.cat( [cls_embed, qpos_embed, action_embed], axis=1 ) # (bs, seq+1, hidden_dim) - encoder_input = encoder_input.permute(1, 0, 2) # (seq+1, bs, hidden_dim) + vae_encoder_input = vae_encoder_input.permute(1, 0, 2) # (seq+1, bs, hidden_dim) # do not mask cls token # cls_joint_is_pad = torch.full((bs, 2), False).to(qpos.device) # False: not a padding # is_pad = torch.cat([cls_joint_is_pad, is_pad], axis=1) # (bs, seq+1) @@ -107,9 +134,9 @@ class DETRVAE(nn.Module): pos_embed = self.pos_table.clone().detach() pos_embed = pos_embed.permute(1, 0, 2) # (seq+1, 1, hidden_dim) # query model - encoder_output = self.encoder(encoder_input, pos=pos_embed) # , src_key_padding_mask=is_pad) - encoder_output = encoder_output[0] # take cls output only - latent_info = self.latent_proj(encoder_output) + vae_encoder_output = self.vae_encoder(vae_encoder_input, pos=pos_embed) # , src_key_padding_mask=is_pad) + vae_encoder_output = vae_encoder_output[0] # take cls output only + latent_info = self.latent_proj(vae_encoder_output) mu = latent_info[:, : self.latent_dim] logvar = latent_info[:, self.latent_dim :] latent_sample = reparametrize(mu, logvar) @@ -137,7 +164,7 @@ class DETRVAE(nn.Module): hs = self.transformer( src, None, - self.query_embed.weight, + self.pos_embed.weight, pos, latent_input, proprio_input, @@ -147,7 +174,7 @@ class DETRVAE(nn.Module): qpos = self.input_proj_robot_state(qpos) env_state = self.input_proj_env_state(env_state) transformer_input = torch.cat([qpos, env_state], axis=1) # seq length = 2 - hs = self.transformer(transformer_input, None, 
self.query_embed.weight, self.pos.weight)[0] + hs = self.transformer(transformer_input, None, self.pos_embed.weight, self.pos.weight)[0] a_hat = self.action_head(hs) is_pad_hat = self.is_pad_head(hs) return a_hat, is_pad_hat, [mu, logvar] @@ -165,7 +192,7 @@ def mlp(input_dim, hidden_dim, output_dim, hidden_depth): return trunk -def build_encoder(args): +def build_vae_encoder(args): d_model = args.hidden_dim # 256 dropout = args.dropout # 0.1 nhead = args.nheads # 8 @@ -192,16 +219,16 @@ def build(args): backbones.append(backbone) transformer = build_transformer(args) + + vae_encoder = build_vae_encoder(args) - encoder = build_encoder(args) - - model = DETRVAE( + model = ActionChunkingTransformer( backbones, transformer, - encoder, + vae_encoder, state_dim=args.state_dim, action_dim=args.action_dim, - num_queries=args.num_queries, + horizon=args.num_queries, camera_names=args.camera_names, vae=args.vae, ) diff --git a/lerobot/common/policies/act/policy.py b/lerobot/common/policies/act/policy.py index ae4f7320..a88f7640 100644 --- a/lerobot/common/policies/act/policy.py +++ b/lerobot/common/policies/act/policy.py @@ -42,9 +42,28 @@ def kl_divergence(mu, logvar): class ActionChunkingTransformerPolicy(AbstractPolicy): + """ + Action Chunking Transformer as per Learning Fine-Grained Bimanual Manipulation with Low-Cost Hardware + (https://arxiv.org/abs/2304.13705). + """ + name = "act" def __init__(self, cfg, device, n_action_steps=1): + """ + Args: + vae: Whether to use the variational objective. TODO(now): Give more details. + temporal_agg: Whether to do temporal aggregation. For each timestep during rollout, the action + returned as an exponential moving average of previously generated actions for that timestep. + n_obs_steps: Number of time steps worth of observation to use as input. + horizon: The number of actions to generate in one forward pass. + kl_weight: Weight for KL divergence. Defaults to None. Only applicable when using the variational + objective. 
+ batch_size: Training batch size. + grad_clip_norm: Optionally clip the gradients to have this value as the norm at most. Defaults to + None meaning gradient clipping is not applied. + lr: Learning rate. + """ super().__init__(n_action_steps) self.cfg = cfg self.n_action_steps = n_action_steps @@ -57,8 +76,6 @@ class ActionChunkingTransformerPolicy(AbstractPolicy): def update(self, replay_buffer, step): del step - start_time = time.time() - self.train() num_slices = self.cfg.batch_size @@ -103,11 +120,14 @@ class ActionChunkingTransformerPolicy(AbstractPolicy): "action": action.to(self.device, non_blocking=True), } return out + + start_time = time.time() batch = replay_buffer.sample(batch_size) batch = process_batch(batch, self.cfg.horizon, num_slices) data_s = time.time() - start_time + print(data_s) loss = self.compute_loss(batch) loss.backward() @@ -151,9 +171,6 @@ class ActionChunkingTransformerPolicy(AbstractPolicy): @torch.no_grad() def select_actions(self, observation, step_count): - if observation["image"].shape[0] != 1: - raise NotImplementedError("Batch size > 1 not handled") - # TODO(rcadene): remove unused step_count del step_count @@ -167,7 +184,17 @@ class ActionChunkingTransformerPolicy(AbstractPolicy): "image": observation["image", "top"], "agent_pos": observation["state"], } - action = self._forward(qpos=obs_dict["agent_pos"], image=obs_dict["image"]) + # qpos = obs_dict["agent_pos"] + # img = obs_dict["image"] + # qpos_ = torch.load('/tmp/qpos.pth') + # img_ = torch.load('/tmp/curr_image.pth') + # out_ = torch.load('/tmp/out.pth') + # import cv2, numpy as np + # cv2.imwrite("ours.png", (obs_dict["image"][0, 0].permute(1, 2, 0).cpu().numpy() * 255).astype(np.uint8)) + # cv2.imwrite("theirs.png", (img_[0, 0].permute(1, 2, 0).cpu().numpy() * 255).astype(np.uint8)) + # out = self._forward(qpos_, img_) + # breakpoint() + action = self._forward(qpos=obs_dict["agent_pos"] * 0.182, image=obs_dict["image"]) if self.cfg.temporal_agg: # TODO(rcadene): 
implement temporal aggregation @@ -197,6 +224,7 @@ class ActionChunkingTransformerPolicy(AbstractPolicy): if is_pad is not None: is_pad = is_pad[:, : self.model.num_queries] + breakpoint() a_hat, is_pad_hat, (mu, logvar) = self.model(qpos, image, env_state, actions, is_pad) all_l1 = F.l1_loss(actions, a_hat, reduction="none") diff --git a/lerobot/common/policies/factory.py b/lerobot/common/policies/factory.py index 934f0962..577ccf75 100644 --- a/lerobot/common/policies/factory.py +++ b/lerobot/common/policies/factory.py @@ -1,5 +1,5 @@ def make_policy(cfg): - if cfg.policy.name != "diffusion" and cfg.rollout_batch_size > 1: + if cfg.policy.name not in ["diffusion", "act"] and cfg.rollout_batch_size > 1: raise NotImplementedError("Only diffusion policy supports rollout_batch_size > 1 for the time being.") if cfg.policy.name == "tdmpc": diff --git a/lerobot/configs/policy/act.yaml b/lerobot/configs/policy/act.yaml index a52c3f54..0244944b 100644 --- a/lerobot/configs/policy/act.yaml +++ b/lerobot/configs/policy/act.yaml @@ -1,6 +1,6 @@ # @package _global_ -offline_steps: 1344000 +offline_steps: 2000 online_steps: 0 eval_episodes: 1 @@ -24,7 +24,6 @@ policy: weight_decay: 1e-4 grad_clip_norm: 10 backbone: resnet18 - num_queries: ${horizon} # chunk_size horizon: ${horizon} # chunk_size kl_weight: 10 hidden_dim: 512 diff --git a/lerobot/scripts/train.py b/lerobot/scripts/train.py index 18c3715b..454adf1a 100644 --- a/lerobot/scripts/train.py +++ b/lerobot/scripts/train.py @@ -151,6 +151,7 @@ def train(cfg: dict, out_dir=None, job_name=None): logging.info("make_policy") policy = make_policy(cfg) + policy.save("act.pt") num_learnable_params = sum(p.numel() for p in policy.parameters() if p.requires_grad) num_total_params = sum(p.numel() for p in policy.parameters()) diff --git a/poetry.lock b/poetry.lock index 72397001..9766051c 100644 --- a/poetry.lock +++ b/poetry.lock @@ -3248,6 +3248,133 @@ numpy = "*" [package.extras] all = ["defusedxml", "fsspec", "imagecodecs 
(>=2023.8.12)", "lxml", "matplotlib", "zarr"] +[[package]] +name = "tokenizers" +version = "0.15.2" +description = "" +optional = false +python-versions = ">=3.7" +files = [ + {file = "tokenizers-0.15.2-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:52f6130c9cbf70544287575a985bf44ae1bda2da7e8c24e97716080593638012"}, + {file = "tokenizers-0.15.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:054c1cc9c6d68f7ffa4e810b3d5131e0ba511b6e4be34157aa08ee54c2f8d9ee"}, + {file = "tokenizers-0.15.2-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:a9b9b070fdad06e347563b88c278995735292ded1132f8657084989a4c84a6d5"}, + {file = "tokenizers-0.15.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ea621a7eef4b70e1f7a4e84dd989ae3f0eeb50fc8690254eacc08acb623e82f1"}, + {file = "tokenizers-0.15.2-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:cf7fd9a5141634fa3aa8d6b7be362e6ae1b4cda60da81388fa533e0b552c98fd"}, + {file = "tokenizers-0.15.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:44f2a832cd0825295f7179eaf173381dc45230f9227ec4b44378322d900447c9"}, + {file = "tokenizers-0.15.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8b9ec69247a23747669ec4b0ca10f8e3dfb3545d550258129bd62291aabe8605"}, + {file = "tokenizers-0.15.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:40b6a4c78da863ff26dbd5ad9a8ecc33d8a8d97b535172601cf00aee9d7ce9ce"}, + {file = "tokenizers-0.15.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:5ab2a4d21dcf76af60e05af8063138849eb1d6553a0d059f6534357bce8ba364"}, + {file = "tokenizers-0.15.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:a47acfac7e511f6bbfcf2d3fb8c26979c780a91e06fb5b9a43831b2c0153d024"}, + {file = "tokenizers-0.15.2-cp310-none-win32.whl", hash = "sha256:064ff87bb6acdbd693666de9a4b692add41308a2c0ec0770d6385737117215f2"}, + {file = 
"tokenizers-0.15.2-cp310-none-win_amd64.whl", hash = "sha256:3b919afe4df7eb6ac7cafd2bd14fb507d3f408db7a68c43117f579c984a73843"}, + {file = "tokenizers-0.15.2-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:89cd1cb93e4b12ff39bb2d626ad77e35209de9309a71e4d3d4672667b4b256e7"}, + {file = "tokenizers-0.15.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:cfed5c64e5be23d7ee0f0e98081a25c2a46b0b77ce99a4f0605b1ec43dd481fa"}, + {file = "tokenizers-0.15.2-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:a907d76dcfda37023ba203ab4ceeb21bc5683436ebefbd895a0841fd52f6f6f2"}, + {file = "tokenizers-0.15.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:20ea60479de6fc7b8ae756b4b097572372d7e4032e2521c1bbf3d90c90a99ff0"}, + {file = "tokenizers-0.15.2-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:48e2b9335be2bc0171df9281385c2ed06a15f5cf121c44094338306ab7b33f2c"}, + {file = "tokenizers-0.15.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:112a1dd436d2cc06e6ffdc0b06d55ac019a35a63afd26475205cb4b1bf0bfbff"}, + {file = "tokenizers-0.15.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4620cca5c2817177ee8706f860364cc3a8845bc1e291aaf661fb899e5d1c45b0"}, + {file = "tokenizers-0.15.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ccd73a82751c523b3fc31ff8194702e4af4db21dc20e55b30ecc2079c5d43cb7"}, + {file = "tokenizers-0.15.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:107089f135b4ae7817affe6264f8c7a5c5b4fd9a90f9439ed495f54fcea56fb4"}, + {file = "tokenizers-0.15.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:0ff110ecc57b7aa4a594396525a3451ad70988e517237fe91c540997c4e50e29"}, + {file = "tokenizers-0.15.2-cp311-none-win32.whl", hash = "sha256:6d76f00f5c32da36c61f41c58346a4fa7f0a61be02f4301fd30ad59834977cc3"}, + {file = "tokenizers-0.15.2-cp311-none-win_amd64.whl", hash = 
"sha256:cc90102ed17271cf0a1262babe5939e0134b3890345d11a19c3145184b706055"}, + {file = "tokenizers-0.15.2-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:f86593c18d2e6248e72fb91c77d413a815153b8ea4e31f7cd443bdf28e467670"}, + {file = "tokenizers-0.15.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:0774bccc6608eca23eb9d620196687c8b2360624619623cf4ba9dc9bd53e8b51"}, + {file = "tokenizers-0.15.2-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:d0222c5b7c9b26c0b4822a82f6a7011de0a9d3060e1da176f66274b70f846b98"}, + {file = "tokenizers-0.15.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3835738be1de66624fff2f4f6f6684775da4e9c00bde053be7564cbf3545cc66"}, + {file = "tokenizers-0.15.2-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:0143e7d9dcd811855c1ce1ab9bf5d96d29bf5e528fd6c7824d0465741e8c10fd"}, + {file = "tokenizers-0.15.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:db35825f6d54215f6b6009a7ff3eedee0848c99a6271c870d2826fbbedf31a38"}, + {file = "tokenizers-0.15.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3f5e64b0389a2be47091d8cc53c87859783b837ea1a06edd9d8e04004df55a5c"}, + {file = "tokenizers-0.15.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9e0480c452217edd35eca56fafe2029fb4d368b7c0475f8dfa3c5c9c400a7456"}, + {file = "tokenizers-0.15.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:a33ab881c8fe70474980577e033d0bc9a27b7ab8272896e500708b212995d834"}, + {file = "tokenizers-0.15.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:a308a607ca9de2c64c1b9ba79ec9a403969715a1b8ba5f998a676826f1a7039d"}, + {file = "tokenizers-0.15.2-cp312-none-win32.whl", hash = "sha256:b8fcfa81bcb9447df582c5bc96a031e6df4da2a774b8080d4f02c0c16b42be0b"}, + {file = "tokenizers-0.15.2-cp312-none-win_amd64.whl", hash = "sha256:38d7ab43c6825abfc0b661d95f39c7f8af2449364f01d331f3b51c94dcff7221"}, + {file = 
"tokenizers-0.15.2-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:38bfb0204ff3246ca4d5e726e8cc8403bfc931090151e6eede54d0e0cf162ef0"}, + {file = "tokenizers-0.15.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:9c861d35e8286a53e06e9e28d030b5a05bcbf5ac9d7229e561e53c352a85b1fc"}, + {file = "tokenizers-0.15.2-cp313-cp313-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:936bf3842db5b2048eaa53dade907b1160f318e7c90c74bfab86f1e47720bdd6"}, + {file = "tokenizers-0.15.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:620beacc3373277700d0e27718aa8b25f7b383eb8001fba94ee00aeea1459d89"}, + {file = "tokenizers-0.15.2-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2735ecbbf37e52db4ea970e539fd2d450d213517b77745114f92867f3fc246eb"}, + {file = "tokenizers-0.15.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:473c83c5e2359bb81b0b6fde870b41b2764fcdd36d997485e07e72cc3a62264a"}, + {file = "tokenizers-0.15.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:968fa1fb3c27398b28a4eca1cbd1e19355c4d3a6007f7398d48826bbe3a0f728"}, + {file = "tokenizers-0.15.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:865c60ae6eaebdde7da66191ee9b7db52e542ed8ee9d2c653b6d190a9351b980"}, + {file = "tokenizers-0.15.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:7c0d8b52664ab2d4a8d6686eb5effc68b78608a9008f086a122a7b2996befbab"}, + {file = "tokenizers-0.15.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:f33dfbdec3784093a9aebb3680d1f91336c56d86cc70ddf88708251da1fe9064"}, + {file = "tokenizers-0.15.2-cp37-cp37m-macosx_10_12_x86_64.whl", hash = "sha256:d44ba80988ff9424e33e0a49445072ac7029d8c0e1601ad25a0ca5f41ed0c1d6"}, + {file = "tokenizers-0.15.2-cp37-cp37m-macosx_11_0_arm64.whl", hash = "sha256:dce74266919b892f82b1b86025a613956ea0ea62a4843d4c4237be2c5498ed3a"}, + {file = 
"tokenizers-0.15.2-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:0ef06b9707baeb98b316577acb04f4852239d856b93e9ec3a299622f6084e4be"}, + {file = "tokenizers-0.15.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c73e2e74bbb07910da0d37c326869f34113137b23eadad3fc00856e6b3d9930c"}, + {file = "tokenizers-0.15.2-cp37-cp37m-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:4eeb12daf02a59e29f578a865f55d87cd103ce62bd8a3a5874f8fdeaa82e336b"}, + {file = "tokenizers-0.15.2-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9ba9f6895af58487ca4f54e8a664a322f16c26bbb442effd01087eba391a719e"}, + {file = "tokenizers-0.15.2-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ccec77aa7150e38eec6878a493bf8c263ff1fa8a62404e16c6203c64c1f16a26"}, + {file = "tokenizers-0.15.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f3f40604f5042ff210ba82743dda2b6aa3e55aa12df4e9f2378ee01a17e2855e"}, + {file = "tokenizers-0.15.2-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:5645938a42d78c4885086767c70923abad047163d809c16da75d6b290cb30bbe"}, + {file = "tokenizers-0.15.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:05a77cbfebe28a61ab5c3891f9939cc24798b63fa236d84e5f29f3a85a200c00"}, + {file = "tokenizers-0.15.2-cp37-none-win32.whl", hash = "sha256:361abdc068e8afe9c5b818769a48624687fb6aaed49636ee39bec4e95e1a215b"}, + {file = "tokenizers-0.15.2-cp37-none-win_amd64.whl", hash = "sha256:7ef789f83eb0f9baeb4d09a86cd639c0a5518528f9992f38b28e819df397eb06"}, + {file = "tokenizers-0.15.2-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:4fe1f74a902bee74a3b25aff180fbfbf4f8b444ab37c4d496af7afd13a784ed2"}, + {file = "tokenizers-0.15.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:4c4b89038a684f40a6b15d6b09f49650ac64d951ad0f2a3ea9169687bbf2a8ba"}, + {file = "tokenizers-0.15.2-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.whl", hash = 
"sha256:d05a1b06f986d41aed5f2de464c003004b2df8aaf66f2b7628254bcbfb72a438"}, + {file = "tokenizers-0.15.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:508711a108684111ec8af89d3a9e9e08755247eda27d0ba5e3c50e9da1600f6d"}, + {file = "tokenizers-0.15.2-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:daa348f02d15160cb35439098ac96e3a53bacf35885072611cd9e5be7d333daa"}, + {file = "tokenizers-0.15.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:494fdbe5932d3416de2a85fc2470b797e6f3226c12845cadf054dd906afd0442"}, + {file = "tokenizers-0.15.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c2d60f5246f4da9373f75ff18d64c69cbf60c3bca597290cea01059c336d2470"}, + {file = "tokenizers-0.15.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:93268e788825f52de4c7bdcb6ebc1fcd4a5442c02e730faa9b6b08f23ead0e24"}, + {file = "tokenizers-0.15.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:6fc7083ab404019fc9acafe78662c192673c1e696bd598d16dc005bd663a5cf9"}, + {file = "tokenizers-0.15.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:41e39b41e5531d6b2122a77532dbea60e171ef87a3820b5a3888daa847df4153"}, + {file = "tokenizers-0.15.2-cp38-none-win32.whl", hash = "sha256:06cd0487b1cbfabefb2cc52fbd6b1f8d4c37799bd6c6e1641281adaa6b2504a7"}, + {file = "tokenizers-0.15.2-cp38-none-win_amd64.whl", hash = "sha256:5179c271aa5de9c71712e31cb5a79e436ecd0d7532a408fa42a8dbfa4bc23fd9"}, + {file = "tokenizers-0.15.2-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:82f8652a74cc107052328b87ea8b34291c0f55b96d8fb261b3880216a9f9e48e"}, + {file = "tokenizers-0.15.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:02458bee6f5f3139f1ebbb6d042b283af712c0981f5bc50edf771d6b762d5e4f"}, + {file = "tokenizers-0.15.2-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:c9a09cd26cca2e1c349f91aa665309ddb48d71636370749414fbf67bc83c5343"}, + {file = 
"tokenizers-0.15.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:158be8ea8554e5ed69acc1ce3fbb23a06060bd4bbb09029431ad6b9a466a7121"}, + {file = "tokenizers-0.15.2-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:1ddba9a2b0c8c81633eca0bb2e1aa5b3a15362b1277f1ae64176d0f6eba78ab1"}, + {file = "tokenizers-0.15.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3ef5dd1d39797044642dbe53eb2bc56435308432e9c7907728da74c69ee2adca"}, + {file = "tokenizers-0.15.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:454c203164e07a860dbeb3b1f4a733be52b0edbb4dd2e5bd75023ffa8b49403a"}, + {file = "tokenizers-0.15.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0cf6b7f1d4dc59af960e6ffdc4faffe6460bbfa8dce27a58bf75755ffdb2526d"}, + {file = "tokenizers-0.15.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:2ef09bbc16519f6c25d0c7fc0c6a33a6f62923e263c9d7cca4e58b8c61572afb"}, + {file = "tokenizers-0.15.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:c9a2ebdd2ad4ec7a68e7615086e633857c85e2f18025bd05d2a4399e6c5f7169"}, + {file = "tokenizers-0.15.2-cp39-none-win32.whl", hash = "sha256:918fbb0eab96fe08e72a8c2b5461e9cce95585d82a58688e7f01c2bd546c79d0"}, + {file = "tokenizers-0.15.2-cp39-none-win_amd64.whl", hash = "sha256:524e60da0135e106b254bd71f0659be9f89d83f006ea9093ce4d1fab498c6d0d"}, + {file = "tokenizers-0.15.2-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:6a9b648a58281c4672212fab04e60648fde574877d0139cd4b4f93fe28ca8944"}, + {file = "tokenizers-0.15.2-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:7c7d18b733be6bbca8a55084027f7be428c947ddf871c500ee603e375013ffba"}, + {file = "tokenizers-0.15.2-pp310-pypy310_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:13ca3611de8d9ddfbc4dc39ef54ab1d2d4aaa114ac8727dfdc6a6ec4be017378"}, + {file = 
"tokenizers-0.15.2-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:237d1bf3361cf2e6463e6c140628e6406766e8b27274f5fcc62c747ae3c6f094"}, + {file = "tokenizers-0.15.2-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:67a0fe1e49e60c664915e9fb6b0cb19bac082ab1f309188230e4b2920230edb3"}, + {file = "tokenizers-0.15.2-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:4e022fe65e99230b8fd89ebdfea138c24421f91c1a4f4781a8f5016fd5cdfb4d"}, + {file = "tokenizers-0.15.2-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:d857be2df69763362ac699f8b251a8cd3fac9d21893de129bc788f8baaef2693"}, + {file = "tokenizers-0.15.2-pp37-pypy37_pp73-macosx_10_12_x86_64.whl", hash = "sha256:708bb3e4283177236309e698da5fcd0879ce8fd37457d7c266d16b550bcbbd18"}, + {file = "tokenizers-0.15.2-pp37-pypy37_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:64c35e09e9899b72a76e762f9854e8750213f67567787d45f37ce06daf57ca78"}, + {file = "tokenizers-0.15.2-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c1257f4394be0d3b00de8c9e840ca5601d0a4a8438361ce9c2b05c7d25f6057b"}, + {file = "tokenizers-0.15.2-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:02272fe48280e0293a04245ca5d919b2c94a48b408b55e858feae9618138aeda"}, + {file = "tokenizers-0.15.2-pp37-pypy37_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:dc3ad9ebc76eabe8b1d7c04d38be884b8f9d60c0cdc09b0aa4e3bcf746de0388"}, + {file = "tokenizers-0.15.2-pp37-pypy37_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:32e16bdeffa7c4f46bf2152172ca511808b952701d13e7c18833c0b73cb5c23f"}, + {file = "tokenizers-0.15.2-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:fb16ba563d59003028b678d2361a27f7e4ae0ab29c7a80690efa20d829c81fdb"}, + {file = "tokenizers-0.15.2-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:2277c36d2d6cdb7876c274547921a42425b6810d38354327dd65a8009acf870c"}, + {file = 
"tokenizers-0.15.2-pp38-pypy38_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:1cf75d32e8d250781940d07f7eece253f2fe9ecdb1dc7ba6e3833fa17b82fcbc"}, + {file = "tokenizers-0.15.2-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f1b3b31884dc8e9b21508bb76da80ebf7308fdb947a17affce815665d5c4d028"}, + {file = "tokenizers-0.15.2-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b10122d8d8e30afb43bb1fe21a3619f62c3e2574bff2699cf8af8b0b6c5dc4a3"}, + {file = "tokenizers-0.15.2-pp38-pypy38_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:d88b96ff0fe8e91f6ef01ba50b0d71db5017fa4e3b1d99681cec89a85faf7bf7"}, + {file = "tokenizers-0.15.2-pp38-pypy38_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:37aaec5a52e959892870a7c47cef80c53797c0db9149d458460f4f31e2fb250e"}, + {file = "tokenizers-0.15.2-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:e2ea752f2b0fe96eb6e2f3adbbf4d72aaa1272079b0dfa1145507bd6a5d537e6"}, + {file = "tokenizers-0.15.2-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:4b19a808d8799fda23504a5cd31d2f58e6f52f140380082b352f877017d6342b"}, + {file = "tokenizers-0.15.2-pp39-pypy39_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:64c86e5e068ac8b19204419ed8ca90f9d25db20578f5881e337d203b314f4104"}, + {file = "tokenizers-0.15.2-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:de19c4dc503c612847edf833c82e9f73cd79926a384af9d801dcf93f110cea4e"}, + {file = "tokenizers-0.15.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ea09acd2fe3324174063d61ad620dec3bcf042b495515f27f638270a7d466e8b"}, + {file = "tokenizers-0.15.2-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:cf27fd43472e07b57cf420eee1e814549203d56de00b5af8659cb99885472f1f"}, + {file = "tokenizers-0.15.2-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:7ca22bd897537a0080521445d91a58886c8c04084a6a19e6c78c586e0cfa92a5"}, + {file = 
"tokenizers-0.15.2.tar.gz", hash = "sha256:e6e9c6e019dd5484be5beafc775ae6c925f4c69a3487040ed09b45e13df2cb91"}, +] + +[package.dependencies] +huggingface_hub = ">=0.16.4,<1.0" + +[package.extras] +dev = ["tokenizers[testing]"] +docs = ["setuptools_rust", "sphinx", "sphinx_rtd_theme"] +testing = ["black (==22.3)", "datasets", "numpy", "pytest", "requests"] + [[package]] name = "tomli" version = "2.0.1" @@ -3413,6 +3540,74 @@ notebook = ["ipywidgets (>=6)"] slack = ["slack-sdk"] telegram = ["requests"] +[[package]] +name = "transformers" +version = "4.39.3" +description = "State-of-the-art Machine Learning for JAX, PyTorch and TensorFlow" +optional = false +python-versions = ">=3.8.0" +files = [ + {file = "transformers-4.39.3-py3-none-any.whl", hash = "sha256:7838034a12cca3168247f9d2d1dba6724c9de3ae0f73a108258c6b8fc5912601"}, + {file = "transformers-4.39.3.tar.gz", hash = "sha256:2586e5ff4150f122716fc40f5530e92871befc051848fbe82600969c535b762d"}, +] + +[package.dependencies] +filelock = "*" +huggingface-hub = ">=0.19.3,<1.0" +numpy = ">=1.17" +packaging = ">=20.0" +pyyaml = ">=5.1" +regex = "!=2019.12.17" +requests = "*" +safetensors = ">=0.4.1" +tokenizers = ">=0.14,<0.19" +tqdm = ">=4.27" + +[package.extras] +accelerate = ["accelerate (>=0.21.0)"] +agents = ["Pillow (>=10.0.1,<=15.0)", "accelerate (>=0.21.0)", "datasets (!=2.5.0)", "diffusers", "opencv-python", "sentencepiece (>=0.1.91,!=0.1.92)", "torch"] +all = ["Pillow (>=10.0.1,<=15.0)", "accelerate (>=0.21.0)", "av (==9.2.0)", "codecarbon (==1.2.0)", "decord (==0.6.0)", "flax (>=0.4.1,<=0.7.0)", "jax (>=0.4.1,<=0.4.13)", "jaxlib (>=0.4.1,<=0.4.13)", "kenlm", "keras-nlp (>=0.3.1)", "librosa", "onnxconverter-common", "optax (>=0.0.8,<=0.1.4)", "optuna", "phonemizer", "protobuf", "pyctcdecode (>=0.4.0)", "ray[tune] (>=2.7.0)", "sentencepiece (>=0.1.91,!=0.1.92)", "sigopt", "tensorflow (>=2.6,<2.16)", "tensorflow-text (<2.16)", "tf2onnx", "timm", "tokenizers (>=0.14,<0.19)", "torch", "torchaudio", "torchvision"] 
+audio = ["kenlm", "librosa", "phonemizer", "pyctcdecode (>=0.4.0)"] +codecarbon = ["codecarbon (==1.2.0)"] +deepspeed = ["accelerate (>=0.21.0)", "deepspeed (>=0.9.3)"] +deepspeed-testing = ["GitPython (<3.1.19)", "accelerate (>=0.21.0)", "beautifulsoup4", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "deepspeed (>=0.9.3)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "hf-doc-builder (>=0.3.0)", "nltk", "optuna", "parameterized", "protobuf", "psutil", "pydantic", "pytest (>=7.2.0,<8.0.0)", "pytest-timeout", "pytest-xdist", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "ruff (==0.1.5)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "sentencepiece (>=0.1.91,!=0.1.92)", "tensorboard", "timeout-decorator"] +dev = ["GitPython (<3.1.19)", "Pillow (>=10.0.1,<=15.0)", "accelerate (>=0.21.0)", "av (==9.2.0)", "beautifulsoup4", "codecarbon (==1.2.0)", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "decord (==0.6.0)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "flax (>=0.4.1,<=0.7.0)", "fugashi (>=1.0)", "hf-doc-builder", "hf-doc-builder (>=0.3.0)", "ipadic (>=1.0.0,<2.0)", "isort (>=5.5.4)", "jax (>=0.4.1,<=0.4.13)", "jaxlib (>=0.4.1,<=0.4.13)", "kenlm", "keras-nlp (>=0.3.1)", "librosa", "nltk", "onnxconverter-common", "optax (>=0.0.8,<=0.1.4)", "optuna", "parameterized", "phonemizer", "protobuf", "psutil", "pyctcdecode (>=0.4.0)", "pydantic", "pytest (>=7.2.0,<8.0.0)", "pytest-timeout", "pytest-xdist", "ray[tune] (>=2.7.0)", "rhoknp (>=1.1.0,<1.3.1)", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "ruff (==0.1.5)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "scikit-learn", "sentencepiece (>=0.1.91,!=0.1.92)", "sigopt", "sudachidict-core (>=20220729)", "sudachipy (>=0.6.6)", "tensorboard", "tensorflow (>=2.6,<2.16)", "tensorflow-text (<2.16)", "tf2onnx", "timeout-decorator", "timm", "tokenizers (>=0.14,<0.19)", "torch", "torchaudio", "torchvision", "unidic (>=1.0.2)", "unidic-lite (>=1.0.7)", "urllib3 (<2.0.0)"] +dev-tensorflow = 
["GitPython (<3.1.19)", "Pillow (>=10.0.1,<=15.0)", "beautifulsoup4", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "hf-doc-builder", "hf-doc-builder (>=0.3.0)", "isort (>=5.5.4)", "kenlm", "keras-nlp (>=0.3.1)", "librosa", "nltk", "onnxconverter-common", "onnxruntime (>=1.4.0)", "onnxruntime-tools (>=1.4.2)", "parameterized", "phonemizer", "protobuf", "psutil", "pyctcdecode (>=0.4.0)", "pydantic", "pytest (>=7.2.0,<8.0.0)", "pytest-timeout", "pytest-xdist", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "ruff (==0.1.5)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "scikit-learn", "sentencepiece (>=0.1.91,!=0.1.92)", "tensorboard", "tensorflow (>=2.6,<2.16)", "tensorflow-text (<2.16)", "tf2onnx", "timeout-decorator", "tokenizers (>=0.14,<0.19)", "urllib3 (<2.0.0)"] +dev-torch = ["GitPython (<3.1.19)", "Pillow (>=10.0.1,<=15.0)", "accelerate (>=0.21.0)", "beautifulsoup4", "codecarbon (==1.2.0)", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "fugashi (>=1.0)", "hf-doc-builder", "hf-doc-builder (>=0.3.0)", "ipadic (>=1.0.0,<2.0)", "isort (>=5.5.4)", "kenlm", "librosa", "nltk", "onnxruntime (>=1.4.0)", "onnxruntime-tools (>=1.4.2)", "optuna", "parameterized", "phonemizer", "protobuf", "psutil", "pyctcdecode (>=0.4.0)", "pydantic", "pytest (>=7.2.0,<8.0.0)", "pytest-timeout", "pytest-xdist", "ray[tune] (>=2.7.0)", "rhoknp (>=1.1.0,<1.3.1)", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "ruff (==0.1.5)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "scikit-learn", "sentencepiece (>=0.1.91,!=0.1.92)", "sigopt", "sudachidict-core (>=20220729)", "sudachipy (>=0.6.6)", "tensorboard", "timeout-decorator", "timm", "tokenizers (>=0.14,<0.19)", "torch", "torchaudio", "torchvision", "unidic (>=1.0.2)", "unidic-lite (>=1.0.7)", "urllib3 (<2.0.0)"] +docs = ["Pillow (>=10.0.1,<=15.0)", "accelerate (>=0.21.0)", "av (==9.2.0)", "codecarbon (==1.2.0)", 
"decord (==0.6.0)", "flax (>=0.4.1,<=0.7.0)", "hf-doc-builder", "jax (>=0.4.1,<=0.4.13)", "jaxlib (>=0.4.1,<=0.4.13)", "kenlm", "keras-nlp (>=0.3.1)", "librosa", "onnxconverter-common", "optax (>=0.0.8,<=0.1.4)", "optuna", "phonemizer", "protobuf", "pyctcdecode (>=0.4.0)", "ray[tune] (>=2.7.0)", "sentencepiece (>=0.1.91,!=0.1.92)", "sigopt", "tensorflow (>=2.6,<2.16)", "tensorflow-text (<2.16)", "tf2onnx", "timm", "tokenizers (>=0.14,<0.19)", "torch", "torchaudio", "torchvision"] +docs-specific = ["hf-doc-builder"] +flax = ["flax (>=0.4.1,<=0.7.0)", "jax (>=0.4.1,<=0.4.13)", "jaxlib (>=0.4.1,<=0.4.13)", "optax (>=0.0.8,<=0.1.4)"] +flax-speech = ["kenlm", "librosa", "phonemizer", "pyctcdecode (>=0.4.0)"] +ftfy = ["ftfy"] +integrations = ["optuna", "ray[tune] (>=2.7.0)", "sigopt"] +ja = ["fugashi (>=1.0)", "ipadic (>=1.0.0,<2.0)", "rhoknp (>=1.1.0,<1.3.1)", "sudachidict-core (>=20220729)", "sudachipy (>=0.6.6)", "unidic (>=1.0.2)", "unidic-lite (>=1.0.7)"] +modelcreation = ["cookiecutter (==1.7.3)"] +natten = ["natten (>=0.14.6,<0.15.0)"] +onnx = ["onnxconverter-common", "onnxruntime (>=1.4.0)", "onnxruntime-tools (>=1.4.2)", "tf2onnx"] +onnxruntime = ["onnxruntime (>=1.4.0)", "onnxruntime-tools (>=1.4.2)"] +optuna = ["optuna"] +quality = ["GitPython (<3.1.19)", "datasets (!=2.5.0)", "hf-doc-builder (>=0.3.0)", "isort (>=5.5.4)", "ruff (==0.1.5)", "urllib3 (<2.0.0)"] +ray = ["ray[tune] (>=2.7.0)"] +retrieval = ["datasets (!=2.5.0)", "faiss-cpu"] +sagemaker = ["sagemaker (>=2.31.0)"] +sentencepiece = ["protobuf", "sentencepiece (>=0.1.91,!=0.1.92)"] +serving = ["fastapi", "pydantic", "starlette", "uvicorn"] +sigopt = ["sigopt"] +sklearn = ["scikit-learn"] +speech = ["kenlm", "librosa", "phonemizer", "pyctcdecode (>=0.4.0)", "torchaudio"] +testing = ["GitPython (<3.1.19)", "beautifulsoup4", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "hf-doc-builder (>=0.3.0)", "nltk", "parameterized", "protobuf", "psutil", 
"pydantic", "pytest (>=7.2.0,<8.0.0)", "pytest-timeout", "pytest-xdist", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "ruff (==0.1.5)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "tensorboard", "timeout-decorator"] +tf = ["keras-nlp (>=0.3.1)", "onnxconverter-common", "tensorflow (>=2.6,<2.16)", "tensorflow-text (<2.16)", "tf2onnx"] +tf-cpu = ["keras-nlp (>=0.3.1)", "onnxconverter-common", "tensorflow-cpu (>=2.6,<2.16)", "tensorflow-text (<2.16)", "tf2onnx"] +tf-speech = ["kenlm", "librosa", "phonemizer", "pyctcdecode (>=0.4.0)"] +timm = ["timm"] +tokenizers = ["tokenizers (>=0.14,<0.19)"] +torch = ["accelerate (>=0.21.0)", "torch"] +torch-speech = ["kenlm", "librosa", "phonemizer", "pyctcdecode (>=0.4.0)", "torchaudio"] +torch-vision = ["Pillow (>=10.0.1,<=15.0)", "torchvision"] +torchhub = ["filelock", "huggingface-hub (>=0.19.3,<1.0)", "importlib-metadata", "numpy (>=1.17)", "packaging (>=20.0)", "protobuf", "regex (!=2019.12.17)", "requests", "sentencepiece (>=0.1.91,!=0.1.92)", "tokenizers (>=0.14,<0.19)", "torch", "tqdm (>=4.27)"] +video = ["av (==9.2.0)", "decord (==0.6.0)"] +vision = ["Pillow (>=10.0.1,<=15.0)"] + [[package]] name = "triton" version = "2.2.0" @@ -3589,4 +3784,4 @@ testing = ["big-O", "jaraco.functools", "jaraco.itertools", "more-itertools", "p [metadata] lock-version = "2.0" python-versions = "^3.10" -content-hash = "174c7d42f8039eedd2c447a4e6cae5169782cbd94346b5606572a0010194ca05" +content-hash = "5ebd02dac0322efe1236eb9fec84c471edd0c5373cc8967b1982314164b3bf50" diff --git a/pyproject.toml b/pyproject.toml index 972c1b61..b2526e5c 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -52,6 +52,7 @@ robomimic = "0.2.0" gymnasium-robotics = "^1.2.4" gymnasium = "^0.29.1" cmake = "^3.29.0.1" +transformers = "^4.39.3" [tool.poetry.group.dev.dependencies] From 65ef8c30d03fd5c8904f2f914870447c712387a9 Mon Sep 17 00:00:00 2001 From: Alexander Soare Date: Tue, 2 Apr 2024 19:13:49 +0100 Subject: [PATCH 02/80] backup wip --- 
lerobot/common/datasets/factory.py | 86 +++++++++++++++++++++---- lerobot/common/policies/act/detr_vae.py | 9 +-- lerobot/common/policies/act/policy.py | 4 +- 3 files changed, 80 insertions(+), 19 deletions(-) diff --git a/lerobot/common/datasets/factory.py b/lerobot/common/datasets/factory.py index 47a15ea4..b394e830 100644 --- a/lerobot/common/datasets/factory.py +++ b/lerobot/common/datasets/factory.py @@ -125,32 +125,92 @@ def make_offline_buffer( # TODO(rcadene): remove this and put it in config. Ideally we want to reproduce SOTA results just with mean_std normalization_mode = "mean_std" if cfg.env.name == "aloha" else "min_max" - + # TODO(now): These stats are needed to use their pretrained model for sim_transfer_cube_human. # (Pdb) stats['observation']['state']['mean'] # tensor([-0.0071, -0.6293, 1.0351, -0.0517, -0.4642, -0.0754, 0.4751, -0.0373, # -0.3324, 0.9034, -0.2258, -0.3127, -0.2412, 0.6866]) - stats['observation', 'state', 'mean'] = torch.tensor([-0.00740268, -0.63187766, 1.0356655 , -0.05027218, -0.46199223, - -0.07467502, 0.47467607, -0.03615446, -0.33203387, 0.9038929 , - -0.22060776, -0.31011587, -0.23484458, 0.6842416 ]) + stats["observation", "state", "mean"] = torch.tensor( + [ + -0.00740268, + -0.63187766, + 1.0356655, + -0.05027218, + -0.46199223, + -0.07467502, + 0.47467607, + -0.03615446, + -0.33203387, + 0.9038929, + -0.22060776, + -0.31011587, + -0.23484458, + 0.6842416, + ] + ) # (Pdb) stats['observation']['state']['std'] # tensor([0.0022, 0.0520, 0.0291, 0.0092, 0.0267, 0.0145, 0.0563, 0.0179, 0.0494, # 0.0326, 0.0476, 0.0535, 0.0956, 0.0513]) - stats['observation', 'state', 'std'] = torch.tensor([0.01219023, 0.2975381 , 0.16728032, 0.04733803, 0.1486037 , - 0.08788499, 0.31752336, 0.1049916 , 0.27933604, 0.18094037, - 0.26604933, 0.30466506, 0.5298686 , 0.25505227]) + stats["observation", "state", "std"] = torch.tensor( + [ + 0.01219023, + 0.2975381, + 0.16728032, + 0.04733803, + 0.1486037, + 0.08788499, + 0.31752336, + 0.1049916, + 
0.27933604, + 0.18094037, + 0.26604933, + 0.30466506, + 0.5298686, + 0.25505227, + ] + ) # (Pdb) stats['action']['mean'] # tensor([-0.0075, -0.6346, 1.0353, -0.0465, -0.4686, -0.0738, 0.3723, -0.0396, # -0.3184, 0.8991, -0.2065, -0.3182, -0.2338, 0.5593]) - stats['action']['mean'] = torch.tensor([-0.00756444, -0.6281845 , 1.0312834 , -0.04664314, -0.47211358, - -0.074527 , 0.37389806, -0.03718753, -0.3261143 , 0.8997205 , - -0.21371077, -0.31840396, -0.23360962, 0.551947]) + stats["action"]["mean"] = torch.tensor( + [ + -0.00756444, + -0.6281845, + 1.0312834, + -0.04664314, + -0.47211358, + -0.074527, + 0.37389806, + -0.03718753, + -0.3261143, + 0.8997205, + -0.21371077, + -0.31840396, + -0.23360962, + 0.551947, + ] + ) # (Pdb) stats['action']['std'] # tensor([0.0023, 0.0514, 0.0290, 0.0086, 0.0263, 0.0143, 0.0593, 0.0185, 0.0510, # 0.0328, 0.0478, 0.0531, 0.0945, 0.0794]) - stats['action']['std'] = torch.tensor([0.01252818, 0.2957442 , 0.16701928, 0.04584508, 0.14833844, - 0.08763024, 0.30665937, 0.10600077, 0.27572668, 0.1805853 , - 0.26304692, 0.30708534, 0.5305411 , 0.38381037]) + stats["action"]["std"] = torch.tensor( + [ + 0.01252818, + 0.2957442, + 0.16701928, + 0.04584508, + 0.14833844, + 0.08763024, + 0.30665937, + 0.10600077, + 0.27572668, + 0.1805853, + 0.26304692, + 0.30708534, + 0.5305411, + 0.38381037, + ] + ) transforms.append(NormalizeTransform(stats, in_keys, mode=normalization_mode)) offline_buffer.set_transform(transforms) diff --git a/lerobot/common/policies/act/detr_vae.py b/lerobot/common/policies/act/detr_vae.py index 4d5525f2..f21308ad 100644 --- a/lerobot/common/policies/act/detr_vae.py +++ b/lerobot/common/policies/act/detr_vae.py @@ -2,7 +2,6 @@ import numpy as np import torch from torch import nn from torch.autograd import Variable -from transformers import DetrForObjectDetection from .backbone import build_backbone from .transformer import TransformerEncoder, TransformerEncoderLayer, build_transformer @@ -74,7 +73,7 @@ class 
ActionChunkingTransformer(nn.Module): hidden_dim = transformer.d_model self.action_head = nn.Linear(hidden_dim, action_dim) self.is_pad_head = nn.Linear(hidden_dim, 1) - # Positional embedding to be used as input to the latent vae_encoder (if applicable) and for the + # Positional embedding to be used as input to the latent vae_encoder (if applicable) and for the self.pos_embed = nn.Embedding(horizon, hidden_dim) if backbones is not None: self.input_proj = nn.Conv2d(backbones[0].num_channels, hidden_dim, kernel_size=1) @@ -134,7 +133,9 @@ class ActionChunkingTransformer(nn.Module): pos_embed = self.pos_table.clone().detach() pos_embed = pos_embed.permute(1, 0, 2) # (seq+1, 1, hidden_dim) # query model - vae_encoder_output = self.vae_encoder(vae_encoder_input, pos=pos_embed) # , src_key_padding_mask=is_pad) + vae_encoder_output = self.vae_encoder( + vae_encoder_input, pos=pos_embed + ) # , src_key_padding_mask=is_pad) vae_encoder_output = vae_encoder_output[0] # take cls output only latent_info = self.latent_proj(vae_encoder_output) mu = latent_info[:, : self.latent_dim] @@ -219,7 +220,7 @@ def build(args): backbones.append(backbone) transformer = build_transformer(args) - + vae_encoder = build_vae_encoder(args) model = ActionChunkingTransformer( diff --git a/lerobot/common/policies/act/policy.py b/lerobot/common/policies/act/policy.py index a88f7640..5cf74ae5 100644 --- a/lerobot/common/policies/act/policy.py +++ b/lerobot/common/policies/act/policy.py @@ -54,7 +54,7 @@ class ActionChunkingTransformerPolicy(AbstractPolicy): Args: vae: Whether to use the variational objective. TODO(now): Give more details. temporal_agg: Whether to do temporal aggregation. For each timestep during rollout, the action - returned as an exponential moving average of previously generated actions for that timestep. + returned as an exponential moving average of previously generated actions for that timestep. n_obs_steps: Number of time steps worth of observation to use as input. 
horizon: The number of actions to generate in one forward pass. kl_weight: Weight for KL divergence. Defaults to None. Only applicable when using the variational @@ -120,7 +120,7 @@ class ActionChunkingTransformerPolicy(AbstractPolicy): "action": action.to(self.device, non_blocking=True), } return out - + start_time = time.time() batch = replay_buffer.sample(batch_size) From 110ac5ffa123c64eb61a313eb08638ed6efe84ee Mon Sep 17 00:00:00 2001 From: Alexander Soare Date: Wed, 3 Apr 2024 14:21:07 +0100 Subject: [PATCH 03/80] backup wip --- lerobot/common/envs/aloha/env.py | 1 - lerobot/common/policies/act/detr_vae.py | 216 ++++++++++----------- lerobot/common/policies/act/policy.py | 5 +- lerobot/common/policies/act/transformer.py | 85 ++------ lerobot/configs/policy/act.yaml | 2 +- scripts/convert_act_weights.py | 64 ++++++ 6 files changed, 182 insertions(+), 191 deletions(-) create mode 100644 scripts/convert_act_weights.py diff --git a/lerobot/common/envs/aloha/env.py b/lerobot/common/envs/aloha/env.py index 8f907650..ad8087d0 100644 --- a/lerobot/common/envs/aloha/env.py +++ b/lerobot/common/envs/aloha/env.py @@ -191,7 +191,6 @@ class AlohaEnv(AbstractEnv): { "observation": TensorDict(obs, batch_size=[]), "reward": torch.tensor([reward], dtype=torch.float32), - # success and done are true when coverage > self.success_threshold in env "done": torch.tensor([done], dtype=torch.bool), "success": torch.tensor([success], dtype=torch.bool), }, diff --git a/lerobot/common/policies/act/detr_vae.py b/lerobot/common/policies/act/detr_vae.py index f21308ad..ff137a34 100644 --- a/lerobot/common/policies/act/detr_vae.py +++ b/lerobot/common/policies/act/detr_vae.py @@ -1,18 +1,12 @@ +import einops import numpy as np import torch from torch import nn -from torch.autograd import Variable from .backbone import build_backbone from .transformer import TransformerEncoder, TransformerEncoderLayer, build_transformer -def reparametrize(mu, logvar): - std = logvar.div(2).exp() - eps = 
Variable(std.data.new(std.size()).normal_()) - return mu + std * eps - - def get_sinusoid_encoding_table(n_position, d_hid): def get_position_angle_vec(position): return [position / np.power(10000, 2 * (hid_j // 2) / d_hid) for hid_j in range(d_hid)] @@ -27,7 +21,7 @@ def get_sinusoid_encoding_table(n_position, d_hid): class ActionChunkingTransformer(nn.Module): """ Action Chunking Transformer as per Learning Fine-Grained Bimanual Manipulation with Low-Cost Hardware - (https://arxiv.org/abs/2304.13705). + (paper: https://arxiv.org/abs/2304.13705, code: https://github.com/tonyzhaozh/act) Note: In this code we use the symbols `vae_encoder`, 'encoder', `decoder`. The meanings are as follows. - The `vae_encoder` is, as per the literature around conditional variational auto-encoders (cVAE), the @@ -49,7 +43,7 @@ class ActionChunkingTransformer(nn.Module): """ def __init__( - self, backbones, transformer, vae_encoder, state_dim, action_dim, horizon, camera_names, vae + self, backbones, transformer, vae_encoder, state_dim, action_dim, horizon, camera_names, use_vae ): """Initializes the model. Parameters: @@ -63,134 +57,124 @@ class ActionChunkingTransformer(nn.Module): state_dim: Robot positional state dimension. action_dim: Action dimension. horizon: The number of actions to generate in one forward pass. - vae: Whether to use the variational objective. TODO(now): Give more details. + use_vae: Whether to use the variational objective. TODO(now): Give more details. 
""" super().__init__() + self.camera_names = camera_names self.transformer = transformer self.vae_encoder = vae_encoder - self.vae = vae + self.use_vae = use_vae hidden_dim = transformer.d_model - self.action_head = nn.Linear(hidden_dim, action_dim) - self.is_pad_head = nn.Linear(hidden_dim, 1) - # Positional embedding to be used as input to the latent vae_encoder (if applicable) and for the - self.pos_embed = nn.Embedding(horizon, hidden_dim) - if backbones is not None: - self.input_proj = nn.Conv2d(backbones[0].num_channels, hidden_dim, kernel_size=1) - self.backbones = nn.ModuleList(backbones) - self.input_proj_robot_state = nn.Linear(state_dim, hidden_dim) - else: - # input_dim = 14 + 7 # robot_state + env_state - self.input_proj_robot_state = nn.Linear(state_dim, hidden_dim) - # TODO(rcadene): understand what is env_state, and why it needs to be 7 - self.input_proj_env_state = nn.Linear(state_dim // 2, hidden_dim) - self.pos = torch.nn.Embedding(2, hidden_dim) - self.backbones = None - # vae_encoder extra parameters - self.latent_dim = 32 # final size of latent z # TODO tune - self.cls_embed = nn.Embedding(1, hidden_dim) # extra cls token embedding - self.vae_encoder_action_proj = nn.Linear(14, hidden_dim) # project action to embedding - self.vae_encoder_joint_proj = nn.Linear(14, hidden_dim) # project qpos to embedding - self.latent_proj = nn.Linear( - hidden_dim, self.latent_dim * 2 - ) # project hidden state to latent std, var - self.register_buffer( - "pos_table", get_sinusoid_encoding_table(1 + 1 + horizon, hidden_dim) - ) # [CLS], qpos, a_seq + # BERT style VAE encoder with input [cls, *joint_space_configuration, *action_sequence]. + # The cls token forms parameters of the latent's distribution (like this [*means, *log_variances]). + if use_vae: + self.cls_embed = nn.Embedding(1, hidden_dim) + # Projection layer for joint-space configuration to hidden dimension. 
+ self.vae_encoder_robot_state_input_proj = nn.Linear(state_dim, hidden_dim) + # Projection layer for action (joint-space target) to hidden dimension. + self.vae_encoder_action_input_proj = nn.Linear(state_dim, hidden_dim) + # Final size of latent z. TODO(now): Add to hyperparams. + self.latent_dim = 32 + # Projection layer from the VAE encoder's output to the latent distribution's parameter space. + self.vae_encoder_latent_output_proj = nn.Linear(hidden_dim, self.latent_dim * 2) + # Fixed sinusoidal positional embedding the whole input to the VAE encoder. + self.register_buffer( + "vae_encoder_pos_enc", get_sinusoid_encoding_table(1 + 1 + horizon, hidden_dim) + ) - # decoder extra parameters - self.latent_out_proj = nn.Linear(self.latent_dim, hidden_dim) # project latent sample to embedding + # Transformer encoder input projections. The tokens will be structured like + # [latent, robot_state, image_feature_map_pixels]. + self.backbones = nn.ModuleList(backbones) + self.encoder_img_feat_input_proj = nn.Conv2d(backbones[0].num_channels, hidden_dim, kernel_size=1) + self.encoder_robot_state_input_proj = nn.Linear(state_dim, hidden_dim) + self.encoder_latent_input_proj = nn.Linear(self.latent_dim, hidden_dim) + # TODO(now): Fix this nonsense. One positional embedding is needed. We should extract the image + # feature dimension with a dry run. self.additional_pos_embed = nn.Embedding( 2, hidden_dim ) # learned position embedding for proprio and latent - def forward(self, qpos, image, env_state, actions=None, is_pad=None): + # Transformer decoder. + # Learnable positional embedding for the transformer's decoder (in the style of DETR object queries). + self.decoder_pos_embed = nn.Embedding(horizon, hidden_dim) + # Final action regression head on the output of the transformer's decoder. 
+ self.action_head = nn.Linear(hidden_dim, action_dim) + + def forward(self, robot_state, image, actions=None): """ - qpos: batch, qpos_dim - image: batch, num_cam, channel, height, width - env_state: None - actions: batch, seq, action_dim + Args: + robot_state: (B, J) batch of robot joint configurations. + image: (B, N, C, H, W) batch of N camera frames. + actions: (B, S, A) batch of actions from the target dataset which must be provided if the + VAE is enabled and the model is in training mode. """ - is_training = actions is not None # train or val - bs, _ = qpos.shape - ### Obtain latent z from action sequence - if self.vae and is_training: - # project action sequence to embedding dim, and concat with a CLS token - action_embed = self.vae_encoder_action_proj(actions) # (bs, seq, hidden_dim) - qpos_embed = self.vae_encoder_joint_proj(qpos) # (bs, hidden_dim) - qpos_embed = torch.unsqueeze(qpos_embed, axis=1) # (bs, 1, hidden_dim) - cls_embed = self.cls_embed.weight # (1, hidden_dim) - cls_embed = torch.unsqueeze(cls_embed, axis=0).repeat(bs, 1, 1) # (bs, 1, hidden_dim) - vae_encoder_input = torch.cat( - [cls_embed, qpos_embed, action_embed], axis=1 - ) # (bs, seq+1, hidden_dim) - vae_encoder_input = vae_encoder_input.permute(1, 0, 2) # (seq+1, bs, hidden_dim) - # do not mask cls token - # cls_joint_is_pad = torch.full((bs, 2), False).to(qpos.device) # False: not a padding - # is_pad = torch.cat([cls_joint_is_pad, is_pad], axis=1) # (bs, seq+1) - # obtain position embedding - pos_embed = self.pos_table.clone().detach() - pos_embed = pos_embed.permute(1, 0, 2) # (seq+1, 1, hidden_dim) - # query model + if self.use_vae and self.training: + assert ( + actions is not None + ), "actions must be provided when using the variational objective in training mode." + + batch_size, _ = robot_state.shape + + # Prepare the latent for input to the transformer. 
+ if self.use_vae and actions is not None: + # Prepare the input to the VAE encoder: [cls, *joint_space_configuration, *action_sequence]. + cls_embed = einops.repeat(self.cls_embed.weight, "1 d -> b 1 d", b=batch_size) # (B, 1, D) + robot_state_embed = self.vae_encoder_robot_state_input_proj(robot_state).unsqueeze(1) # (B, 1, D) + action_embed = self.vae_encoder_action_input_proj(actions) # (B, S, D) + vae_encoder_input = torch.cat([cls_embed, robot_state_embed, action_embed], axis=1) # (B, S+2, D) + vae_encoder_input = vae_encoder_input.permute(1, 0, 2) # (S+2, B, D) + # Note: detach() shouldn't be necessary but leaving it the same as the original code just in case. + # Prepare fixed positional embedding. + pos_embed = self.vae_encoder_pos_enc.clone().detach().permute(1, 0, 2) # (S+2, 1, D) + # Forward pass through VAE encoder and sample the latent with the reparameterization trick. vae_encoder_output = self.vae_encoder( vae_encoder_input, pos=pos_embed - ) # , src_key_padding_mask=is_pad) + ) # , src_key_padding_mask=is_pad) # TODO(now) vae_encoder_output = vae_encoder_output[0] # take cls output only - latent_info = self.latent_proj(vae_encoder_output) - mu = latent_info[:, : self.latent_dim] - logvar = latent_info[:, self.latent_dim :] - latent_sample = reparametrize(mu, logvar) - latent_input = self.latent_out_proj(latent_sample) + latent_pdf_params = self.vae_encoder_latent_output_proj(vae_encoder_output) + mu = latent_pdf_params[:, : self.latent_dim] + logvar = latent_pdf_params[:, self.latent_dim :] + # Use reparameterization trick to sample from the latent's PDF. + latent_sample = mu + logvar.div(2).exp() * torch.randn_like(mu) else: + # When not using the VAE encoder, we set the latent to be all zeros. 
mu = logvar = None - latent_sample = torch.zeros([bs, self.latent_dim], dtype=torch.float32).to(qpos.device) - latent_input = self.latent_out_proj(latent_sample) + latent_sample = torch.zeros([batch_size, self.latent_dim], dtype=robot_state.dtype).to( + robot_state.device + ) - if self.backbones is not None: - # Image observation features and position embeddings - all_cam_features = [] - all_cam_pos = [] - for cam_id, _ in enumerate(self.camera_names): - features, pos = self.backbones[0](image[:, cam_id]) # HARDCODED - features = features[0] # take the last layer feature - pos = pos[0] - all_cam_features.append(self.input_proj(features)) - all_cam_pos.append(pos) - # proprioception features - proprio_input = self.input_proj_robot_state(qpos) - # fold camera dimension into width dimension - src = torch.cat(all_cam_features, axis=3) - pos = torch.cat(all_cam_pos, axis=3) - hs = self.transformer( - src, - None, - self.pos_embed.weight, - pos, - latent_input, - proprio_input, - self.additional_pos_embed.weight, - )[0] - else: - qpos = self.input_proj_robot_state(qpos) - env_state = self.input_proj_env_state(env_state) - transformer_input = torch.cat([qpos, env_state], axis=1) # seq length = 2 - hs = self.transformer(transformer_input, None, self.pos_embed.weight, self.pos.weight)[0] - a_hat = self.action_head(hs) - is_pad_hat = self.is_pad_head(hs) - return a_hat, is_pad_hat, [mu, logvar] + # Prepare all other transformer inputs. + # Image observation features and position embeddings. + all_cam_features = [] + all_cam_pos = [] + for cam_id, _ in enumerate(self.camera_names): + # TODO(now): remove the positional embedding from the backbones. + features, pos = self.backbones[0](image[:, cam_id]) # HARDCODED + features = features[0] # take the last layer feature + pos = pos[0] + all_cam_features.append(self.encoder_img_feat_input_proj(features)) + all_cam_pos.append(pos) + # Concatenate image observation feature maps along the width dimension. 
+ transformer_input = torch.cat(all_cam_features, axis=3) + # TODO(now): remove the positional embedding from the backbones. + pos = torch.cat(all_cam_pos, axis=3) + robot_state_embed = self.encoder_robot_state_input_proj(robot_state) + latent_embed = self.encoder_latent_input_proj(latent_sample) + # Run the transformer and project the outputs to the action space. + transformer_output = self.transformer( + transformer_input, + query_embed=self.decoder_pos_embed.weight, + pos_embed=pos, + latent_input=latent_embed, + proprio_input=robot_state_embed, + additional_pos_embed=self.additional_pos_embed.weight, + ) + a_hat = self.action_head(transformer_output) -def mlp(input_dim, hidden_dim, output_dim, hidden_depth): - if hidden_depth == 0: - mods = [nn.Linear(input_dim, output_dim)] - else: - mods = [nn.Linear(input_dim, hidden_dim), nn.ReLU(inplace=True)] - for _ in range(hidden_depth - 1): - mods += [nn.Linear(hidden_dim, hidden_dim), nn.ReLU(inplace=True)] - mods.append(nn.Linear(hidden_dim, output_dim)) - trunk = nn.Sequential(*mods) - return trunk + return a_hat, [mu, logvar] def build_vae_encoder(args): @@ -231,7 +215,7 @@ def build(args): action_dim=args.action_dim, horizon=args.num_queries, camera_names=args.camera_names, - vae=args.vae, + use_vae=args.vae, ) n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad) diff --git a/lerobot/common/policies/act/policy.py b/lerobot/common/policies/act/policy.py index 5cf74ae5..7d24620a 100644 --- a/lerobot/common/policies/act/policy.py +++ b/lerobot/common/policies/act/policy.py @@ -224,8 +224,7 @@ class ActionChunkingTransformerPolicy(AbstractPolicy): if is_pad is not None: is_pad = is_pad[:, : self.model.num_queries] - breakpoint() - a_hat, is_pad_hat, (mu, logvar) = self.model(qpos, image, env_state, actions, is_pad) + a_hat, (mu, logvar) = self.model(qpos, image, env_state, actions, is_pad) all_l1 = F.l1_loss(actions, a_hat, reduction="none") l1 = all_l1.mean() if is_pad is None else (all_l1 * 
~is_pad.unsqueeze(-1)).mean() @@ -240,5 +239,5 @@ class ActionChunkingTransformerPolicy(AbstractPolicy): loss_dict["loss"] = loss_dict["l1"] return loss_dict else: - action, _, (_, _) = self.model(qpos, image, env_state) # no action, sample from prior + action, _ = self.model(qpos, image, env_state) # no action, sample from prior return action diff --git a/lerobot/common/policies/act/transformer.py b/lerobot/common/policies/act/transformer.py index 20cfc815..11d5a013 100644 --- a/lerobot/common/policies/act/transformer.py +++ b/lerobot/common/policies/act/transformer.py @@ -26,10 +26,8 @@ class Transformer(nn.Module): dropout=0.1, activation="relu", normalize_before=False, - return_intermediate_dec=False, ): super().__init__() - encoder_layer = TransformerEncoderLayer( d_model, nhead, dim_feedforward, dropout, activation, normalize_before ) @@ -40,9 +38,7 @@ class Transformer(nn.Module): d_model, nhead, dim_feedforward, dropout, activation, normalize_before ) decoder_norm = nn.LayerNorm(d_model) - self.decoder = TransformerDecoder( - decoder_layer, num_decoder_layers, decoder_norm, return_intermediate=return_intermediate_dec - ) + self.decoder = TransformerDecoder(decoder_layer, num_decoder_layers, decoder_norm) self._reset_parameters() @@ -57,7 +53,6 @@ class Transformer(nn.Module): def forward( self, src, - mask, query_embed, pos_embed, latent_input=None, @@ -68,10 +63,10 @@ class Transformer(nn.Module): if len(src.shape) == 4: # has H and W # flatten NxCxHxW to HWxNxC bs, c, h, w = src.shape + # Each "pixel" on the feature maps will form a token. 
src = src.flatten(2).permute(2, 0, 1) pos_embed = pos_embed.flatten(2).permute(2, 0, 1).repeat(1, bs, 1) query_embed = query_embed.unsqueeze(1).repeat(1, bs, 1) - # mask = mask.flatten(1) additional_pos_embed = additional_pos_embed.unsqueeze(1).repeat(1, bs, 1) # seq, bs, dim pos_embed = torch.cat([additional_pos_embed, pos_embed], axis=0) @@ -87,9 +82,9 @@ class Transformer(nn.Module): query_embed = query_embed.unsqueeze(1).repeat(1, bs, 1) tgt = torch.zeros_like(query_embed) - memory = self.encoder(src, src_key_padding_mask=mask, pos=pos_embed) - hs = self.decoder(tgt, memory, memory_key_padding_mask=mask, pos=pos_embed, query_pos=query_embed) - hs = hs.transpose(1, 2) + memory = self.encoder(src, pos=pos_embed) + hs = self.decoder(tgt, memory, pos=pos_embed, query_pos=query_embed) + hs = hs.transpose(0, 1) return hs @@ -103,14 +98,12 @@ class TransformerEncoder(nn.Module): def forward( self, src, - mask: Optional[Tensor] = None, - src_key_padding_mask: Optional[Tensor] = None, pos: Optional[Tensor] = None, ): output = src for layer in self.layers: - output = layer(output, src_mask=mask, src_key_padding_mask=src_key_padding_mask, pos=pos) + output = layer(output, pos=pos) if self.norm is not None: output = self.norm(output) @@ -119,52 +112,33 @@ class TransformerEncoder(nn.Module): class TransformerDecoder(nn.Module): - def __init__(self, decoder_layer, num_layers, norm=None, return_intermediate=False): + def __init__(self, decoder_layer, num_layers, norm=None): super().__init__() self.layers = _get_clones(decoder_layer, num_layers) self.num_layers = num_layers self.norm = norm - self.return_intermediate = return_intermediate def forward( self, tgt, memory, - tgt_mask: Optional[Tensor] = None, - memory_mask: Optional[Tensor] = None, - tgt_key_padding_mask: Optional[Tensor] = None, - memory_key_padding_mask: Optional[Tensor] = None, pos: Optional[Tensor] = None, query_pos: Optional[Tensor] = None, ): output = tgt - intermediate = [] - for layer in self.layers: 
output = layer( output, memory, - tgt_mask=tgt_mask, - memory_mask=memory_mask, - tgt_key_padding_mask=tgt_key_padding_mask, - memory_key_padding_mask=memory_key_padding_mask, pos=pos, query_pos=query_pos, ) - if self.return_intermediate: - intermediate.append(self.norm(output)) if self.norm is not None: output = self.norm(output) - if self.return_intermediate: - intermediate.pop() - intermediate.append(output) - if self.return_intermediate: - return torch.stack(intermediate) - - return output.unsqueeze(0) + return output class TransformerEncoderLayer(nn.Module): @@ -192,12 +166,10 @@ class TransformerEncoderLayer(nn.Module): def forward_post( self, src, - src_mask: Optional[Tensor] = None, - src_key_padding_mask: Optional[Tensor] = None, pos: Optional[Tensor] = None, ): q = k = self.with_pos_embed(src, pos) - src2 = self.self_attn(q, k, value=src, attn_mask=src_mask, key_padding_mask=src_key_padding_mask)[0] + src2 = self.self_attn(q, k, value=src)[0] src = src + self.dropout1(src2) src = self.norm1(src) src2 = self.linear2(self.dropout(self.activation(self.linear1(src)))) @@ -208,13 +180,11 @@ class TransformerEncoderLayer(nn.Module): def forward_pre( self, src, - src_mask: Optional[Tensor] = None, - src_key_padding_mask: Optional[Tensor] = None, pos: Optional[Tensor] = None, ): src2 = self.norm1(src) q = k = self.with_pos_embed(src2, pos) - src2 = self.self_attn(q, k, value=src2, attn_mask=src_mask, key_padding_mask=src_key_padding_mask)[0] + src2 = self.self_attn(q, k, value=src2)[0] src = src + self.dropout1(src2) src2 = self.norm2(src) src2 = self.linear2(self.dropout(self.activation(self.linear1(src2)))) @@ -224,13 +194,11 @@ class TransformerEncoderLayer(nn.Module): def forward( self, src, - src_mask: Optional[Tensor] = None, - src_key_padding_mask: Optional[Tensor] = None, pos: Optional[Tensor] = None, ): if self.normalize_before: - return self.forward_pre(src, src_mask, src_key_padding_mask, pos) - return self.forward_post(src, src_mask, 
src_key_padding_mask, pos) + return self.forward_pre(src, pos) + return self.forward_post(src, pos) class TransformerDecoderLayer(nn.Module): @@ -262,23 +230,17 @@ class TransformerDecoderLayer(nn.Module): self, tgt, memory, - tgt_mask: Optional[Tensor] = None, - memory_mask: Optional[Tensor] = None, - tgt_key_padding_mask: Optional[Tensor] = None, - memory_key_padding_mask: Optional[Tensor] = None, pos: Optional[Tensor] = None, query_pos: Optional[Tensor] = None, ): q = k = self.with_pos_embed(tgt, query_pos) - tgt2 = self.self_attn(q, k, value=tgt, attn_mask=tgt_mask, key_padding_mask=tgt_key_padding_mask)[0] + tgt2 = self.self_attn(q, k, value=tgt)[0] tgt = tgt + self.dropout1(tgt2) tgt = self.norm1(tgt) tgt2 = self.multihead_attn( query=self.with_pos_embed(tgt, query_pos), key=self.with_pos_embed(memory, pos), value=memory, - attn_mask=memory_mask, - key_padding_mask=memory_key_padding_mask, )[0] tgt = tgt + self.dropout2(tgt2) tgt = self.norm2(tgt) @@ -291,24 +253,18 @@ class TransformerDecoderLayer(nn.Module): self, tgt, memory, - tgt_mask: Optional[Tensor] = None, - memory_mask: Optional[Tensor] = None, - tgt_key_padding_mask: Optional[Tensor] = None, - memory_key_padding_mask: Optional[Tensor] = None, pos: Optional[Tensor] = None, query_pos: Optional[Tensor] = None, ): tgt2 = self.norm1(tgt) q = k = self.with_pos_embed(tgt2, query_pos) - tgt2 = self.self_attn(q, k, value=tgt2, attn_mask=tgt_mask, key_padding_mask=tgt_key_padding_mask)[0] + tgt2 = self.self_attn(q, k, value=tgt2)[0] tgt = tgt + self.dropout1(tgt2) tgt2 = self.norm2(tgt) tgt2 = self.multihead_attn( query=self.with_pos_embed(tgt2, query_pos), key=self.with_pos_embed(memory, pos), value=memory, - attn_mask=memory_mask, - key_padding_mask=memory_key_padding_mask, )[0] tgt = tgt + self.dropout2(tgt2) tgt2 = self.norm3(tgt) @@ -320,10 +276,6 @@ class TransformerDecoderLayer(nn.Module): self, tgt, memory, - tgt_mask: Optional[Tensor] = None, - memory_mask: Optional[Tensor] = None, - 
tgt_key_padding_mask: Optional[Tensor] = None, - memory_key_padding_mask: Optional[Tensor] = None, pos: Optional[Tensor] = None, query_pos: Optional[Tensor] = None, ): @@ -331,16 +283,10 @@ class TransformerDecoderLayer(nn.Module): return self.forward_pre( tgt, memory, - tgt_mask, - memory_mask, - tgt_key_padding_mask, - memory_key_padding_mask, pos, query_pos, ) - return self.forward_post( - tgt, memory, tgt_mask, memory_mask, tgt_key_padding_mask, memory_key_padding_mask, pos, query_pos - ) + return self.forward_post(tgt, memory, pos, query_pos) def _get_clones(module, n): @@ -356,7 +302,6 @@ def build_transformer(args): num_encoder_layers=args.enc_layers, num_decoder_layers=args.dec_layers, normalize_before=args.pre_norm, - return_intermediate_dec=True, ) diff --git a/lerobot/configs/policy/act.yaml b/lerobot/configs/policy/act.yaml index 0244944b..1086b595 100644 --- a/lerobot/configs/policy/act.yaml +++ b/lerobot/configs/policy/act.yaml @@ -29,7 +29,7 @@ policy: hidden_dim: 512 dim_feedforward: 3200 enc_layers: 4 - dec_layers: 7 + dec_layers: 1 nheads: 8 #camera_names: [top, front_close, left_pillar, right_pillar] camera_names: [top] diff --git a/scripts/convert_act_weights.py b/scripts/convert_act_weights.py new file mode 100644 index 00000000..d0c0c3e7 --- /dev/null +++ b/scripts/convert_act_weights.py @@ -0,0 +1,64 @@ +import torch + +from lerobot.common.policies.factory import make_policy +from lerobot.common.utils import init_hydra_config + +cfg = init_hydra_config( + "/home/alexander/Projects/lerobot/outputs/train/act_aloha_sim_transfer_cube_human/.hydra/config.yaml" +) + +policy = make_policy(cfg) + +state_dict = torch.load("/home/alexander/Projects/act/outputs/sim_transfer_cube_human_vae/policy_last.ckpt") + + +# Replace keys based on what they start with. 
+ +start_replacements = [ + ("model.query_embed.weight", "model.pos_embed.weight"), + ("model.pos_table", "model.vae_encoder_pos_enc"), + ("model.pos_embed.weight", "model.decoder_pos_embed.weight"), + ("model.encoder.", "model.vae_encoder."), + ("model.encoder_action_proj.", "model.vae_encoder_action_input_proj."), + ("model.encoder_joint_proj.", "model.vae_encoder_robot_state_input_proj."), + ("model.latent_proj.", "model.vae_encoder_latent_output_proj."), + ("model.latent_proj.", "model.vae_encoder_latent_output_proj."), + ("model.input_proj.", "model.encoder_img_feat_input_proj."), + ("model.input_proj_robot_state", "model.encoder_robot_state_input_proj"), + ("model.latent_out_proj.", "model.encoder_latent_input_proj."), +] + +for to_replace, replace_with in start_replacements: + for k in list(state_dict.keys()): + if k.startswith(to_replace): + k_ = replace_with + k.removeprefix(to_replace) + state_dict[k_] = state_dict[k] + del state_dict[k] + +# Remove keys based on what they start with. + +start_removals = [ + # There is a bug that means the pretrained model doesn't even use the final decoder layers. 
+ *[f"model.transformer.decoder.layers.{i}" for i in range(1, 7)], + "model.is_pad_head.", +] + +for to_remove in start_removals: + for k in list(state_dict.keys()): + if k.startswith(to_remove): + del state_dict[k] + +missing_keys, unexpected_keys = policy.load_state_dict(state_dict, strict=False) + +if len(missing_keys) != 0: + print("MISSING KEYS") + print(missing_keys) +if len(unexpected_keys) != 0: + print("UNEXPECTED KEYS") + print(unexpected_keys) + +# if len(missing_keys) != 0 or len(unexpected_keys) != 0: +# print("Failed due to mismatch in state dicts.") +# exit() + +policy.save("/tmp/weights.pth") From 278336a39a32ec0a7f7af87dac5b65c21368e488 Mon Sep 17 00:00:00 2001 From: Alexander Soare Date: Wed, 3 Apr 2024 19:23:22 +0100 Subject: [PATCH 04/80] backup wip --- lerobot/common/policies/act/detr_vae.py | 85 ++--- lerobot/common/policies/act/transformer.py | 350 ++++++++------------- pyproject.toml | 3 + 3 files changed, 185 insertions(+), 253 deletions(-) diff --git a/lerobot/common/policies/act/detr_vae.py b/lerobot/common/policies/act/detr_vae.py index ff137a34..aaf4d098 100644 --- a/lerobot/common/policies/act/detr_vae.py +++ b/lerobot/common/policies/act/detr_vae.py @@ -4,7 +4,7 @@ import torch from torch import nn from .backbone import build_backbone -from .transformer import TransformerEncoder, TransformerEncoderLayer, build_transformer +from .transformer import Transformer, TransformerEncoder def get_sinusoid_encoding_table(n_position, d_hid): @@ -124,16 +124,14 @@ class ActionChunkingTransformer(nn.Module): robot_state_embed = self.vae_encoder_robot_state_input_proj(robot_state).unsqueeze(1) # (B, 1, D) action_embed = self.vae_encoder_action_input_proj(actions) # (B, S, D) vae_encoder_input = torch.cat([cls_embed, robot_state_embed, action_embed], axis=1) # (B, S+2, D) - vae_encoder_input = vae_encoder_input.permute(1, 0, 2) # (S+2, B, D) # Note: detach() shouldn't be necessary but leaving it the same as the original code just in case. 
# Prepare fixed positional embedding. - pos_embed = self.vae_encoder_pos_enc.clone().detach().permute(1, 0, 2) # (S+2, 1, D) + pos_embed = self.vae_encoder_pos_enc.clone().detach() # (1, S+2, D) # Forward pass through VAE encoder and sample the latent with the reparameterization trick. - vae_encoder_output = self.vae_encoder( - vae_encoder_input, pos=pos_embed - ) # , src_key_padding_mask=is_pad) # TODO(now) - vae_encoder_output = vae_encoder_output[0] # take cls output only - latent_pdf_params = self.vae_encoder_latent_output_proj(vae_encoder_output) + cls_token_out = self.vae_encoder( + vae_encoder_input.permute(1, 0, 2), pos=pos_embed.permute(1, 0, 2) + )[0] # (B, D) + latent_pdf_params = self.vae_encoder_latent_output_proj(cls_token_out) mu = latent_pdf_params[:, : self.latent_dim] logvar = latent_pdf_params[:, self.latent_dim :] # Use reparameterization trick to sample from the latent's PDF. @@ -151,10 +149,11 @@ class ActionChunkingTransformer(nn.Module): all_cam_pos = [] for cam_id, _ in enumerate(self.camera_names): # TODO(now): remove the positional embedding from the backbones. - features, pos = self.backbones[0](image[:, cam_id]) # HARDCODED - features = features[0] # take the last layer feature + cam_features, pos = self.backbones[0](image[:, cam_id]) # HARDCODED + cam_features = cam_features[0] # take the last layer feature pos = pos[0] - all_cam_features.append(self.encoder_img_feat_input_proj(features)) + cam_features = self.encoder_img_feat_input_proj(cam_features) # (B, C, h, w) + all_cam_features.append(cam_features) all_cam_pos.append(pos) # Concatenate image observation feature maps along the width dimension. transformer_input = torch.cat(all_cam_features, axis=3) @@ -163,36 +162,25 @@ class ActionChunkingTransformer(nn.Module): robot_state_embed = self.encoder_robot_state_input_proj(robot_state) latent_embed = self.encoder_latent_input_proj(latent_sample) + # TODO(now): Explain all of this madness. 
+ transformer_input = torch.cat( + [ + torch.stack([latent_embed, robot_state_embed], axis=0), + transformer_input.flatten(2).permute(2, 0, 1), + ] + ) + pos_embed = torch.cat( + [self.additional_pos_embed.weight.unsqueeze(1), pos.flatten(2).permute(2, 0, 1)], axis=0 + ) + # Run the transformer and project the outputs to the action space. transformer_output = self.transformer( transformer_input, - query_embed=self.decoder_pos_embed.weight, - pos_embed=pos, - latent_input=latent_embed, - proprio_input=robot_state_embed, - additional_pos_embed=self.additional_pos_embed.weight, - ) - a_hat = self.action_head(transformer_output) - - return a_hat, [mu, logvar] - - -def build_vae_encoder(args): - d_model = args.hidden_dim # 256 - dropout = args.dropout # 0.1 - nhead = args.nheads # 8 - dim_feedforward = args.dim_feedforward # 2048 - num_encoder_layers = args.enc_layers # 4 # TODO shared with VAE decoder - normalize_before = args.pre_norm # False - activation = "relu" - - encoder_layer = TransformerEncoderLayer( - d_model, nhead, dim_feedforward, dropout, activation, normalize_before - ) - encoder_norm = nn.LayerNorm(d_model) if normalize_before else None - encoder = TransformerEncoder(encoder_layer, num_encoder_layers, encoder_norm) - - return encoder + encoder_pos=pos_embed, + decoder_pos=self.decoder_pos_embed.weight.unsqueeze(1), + ).transpose(0, 1) # back to (B, S, C) + actions = self.action_head(transformer_output) + return actions, [mu, logvar] def build(args): @@ -203,9 +191,26 @@ def build(args): backbone = build_backbone(args) backbones.append(backbone) - transformer = build_transformer(args) + transformer = Transformer( + d_model=args.hidden_dim, + dropout=args.dropout, + nhead=args.nheads, + dim_feedforward=args.dim_feedforward, + num_encoder_layers=args.enc_layers, + num_decoder_layers=args.dec_layers, + normalize_before=args.pre_norm, + ) - vae_encoder = build_vae_encoder(args) + # TODO(now): args.enc_layers shouldn't be shared with the transformer decoder + 
vae_encoder = TransformerEncoder( + num_layers=args.enc_layers, + d_model=args.hidden_dim, + nhead=args.nheads, + dim_feedforward=args.dim_feedforward, + dropout=args.dropout, + activation="relu", + normalize_before=args.pre_norm, + ) model = ActionChunkingTransformer( backbones, diff --git a/lerobot/common/policies/act/transformer.py b/lerobot/common/policies/act/transformer.py index 11d5a013..7e71f3ea 100644 --- a/lerobot/common/policies/act/transformer.py +++ b/lerobot/common/policies/act/transformer.py @@ -1,13 +1,7 @@ """ -DETR Transformer class. - -Copy-paste from torch.nn.Transformer with modifications: - * positional encodings are passed in MHattention - * extra LN at the end of encoder is removed - * decoder returns a stack of activations from all decoding layers +TODO(now) """ -import copy from typing import Optional import torch @@ -28,117 +22,68 @@ class Transformer(nn.Module): normalize_before=False, ): super().__init__() - encoder_layer = TransformerEncoderLayer( - d_model, nhead, dim_feedforward, dropout, activation, normalize_before + self.encoder = TransformerEncoder( + num_encoder_layers, d_model, nhead, dim_feedforward, dropout, activation, normalize_before ) - encoder_norm = nn.LayerNorm(d_model) if normalize_before else None - self.encoder = TransformerEncoder(encoder_layer, num_encoder_layers, encoder_norm) - - decoder_layer = TransformerDecoderLayer( - d_model, nhead, dim_feedforward, dropout, activation, normalize_before + self.decoder = TransformerDecoder( + num_decoder_layers, d_model, nhead, dim_feedforward, dropout, activation, normalize_before ) - decoder_norm = nn.LayerNorm(d_model) - self.decoder = TransformerDecoder(decoder_layer, num_decoder_layers, decoder_norm) - - self._reset_parameters() - self.d_model = d_model self.nhead = nhead + self._init_params() # TODO(now): move to somewhere common - def _reset_parameters(self): + def _init_params(self): for p in self.parameters(): if p.dim() > 1: nn.init.xavier_uniform_(p) - def 
forward( - self, - src, - query_embed, - pos_embed, - latent_input=None, - proprio_input=None, - additional_pos_embed=None, - ): + def forward(self, x, encoder_pos, decoder_pos): + """ + Args: + x: ((E)ncoder (S)equence, (B)atch, (C)hannels) + decoder_pos: (Decoder Sequence, C) tensor for the decoder's positional embedding. + encoder_pos: (ES, C) tenso + """ # TODO flatten only when input has H and W - if len(src.shape) == 4: # has H and W - # flatten NxCxHxW to HWxNxC - bs, c, h, w = src.shape - # Each "pixel" on the feature maps will form a token. - src = src.flatten(2).permute(2, 0, 1) - pos_embed = pos_embed.flatten(2).permute(2, 0, 1).repeat(1, bs, 1) - query_embed = query_embed.unsqueeze(1).repeat(1, bs, 1) + bs = x.shape[1] - additional_pos_embed = additional_pos_embed.unsqueeze(1).repeat(1, bs, 1) # seq, bs, dim - pos_embed = torch.cat([additional_pos_embed, pos_embed], axis=0) - - addition_input = torch.stack([latent_input, proprio_input], axis=0) - src = torch.cat([addition_input, src], axis=0) - else: - assert len(src.shape) == 3 - # flatten NxHWxC to HWxNxC - bs, hw, c = src.shape - src = src.permute(1, 0, 2) - pos_embed = pos_embed.unsqueeze(1).repeat(1, bs, 1) - query_embed = query_embed.unsqueeze(1).repeat(1, bs, 1) - - tgt = torch.zeros_like(query_embed) - memory = self.encoder(src, pos=pos_embed) - hs = self.decoder(tgt, memory, pos=pos_embed, query_pos=query_embed) - hs = hs.transpose(0, 1) - return hs + encoder_out = self.encoder(x, pos=encoder_pos) + decoder_in = torch.zeros( + (decoder_pos.shape[0], bs, decoder_pos.shape[2]), + dtype=decoder_pos.dtype, + device=decoder_pos.device, + ) + decoder_out = self.decoder(decoder_in, encoder_out, encoder_pos=encoder_pos, decoder_pos=decoder_pos) + return decoder_out class TransformerEncoder(nn.Module): - def __init__(self, encoder_layer, num_layers, norm=None): - super().__init__() - self.layers = _get_clones(encoder_layer, num_layers) - self.num_layers = num_layers - self.norm = norm - - def forward( + 
def __init__( self, - src, - pos: Optional[Tensor] = None, + num_layers, + d_model, + nhead, + dim_feedforward=2048, + dropout=0.1, + activation="relu", + normalize_before=False, ): - output = src - - for layer in self.layers: - output = layer(output, pos=pos) - - if self.norm is not None: - output = self.norm(output) - - return output - - -class TransformerDecoder(nn.Module): - def __init__(self, decoder_layer, num_layers, norm=None): super().__init__() - self.layers = _get_clones(decoder_layer, num_layers) - self.num_layers = num_layers - self.norm = norm - - def forward( - self, - tgt, - memory, - pos: Optional[Tensor] = None, - query_pos: Optional[Tensor] = None, - ): - output = tgt + self.layers = nn.ModuleList( + [ + TransformerEncoderLayer( + d_model, nhead, dim_feedforward, dropout, activation, normalize_before + ) + for _ in range(num_layers) + ] + ) + self.norm = nn.LayerNorm(d_model) if normalize_before else nn.Identity() + def forward(self, x, pos: Optional[Tensor] = None): for layer in self.layers: - output = layer( - output, - memory, - pos=pos, - query_pos=query_pos, - ) - - if self.norm is not None: - output = self.norm(output) - - return output + x = layer(x, pos=pos) + x = self.norm(x) + return x class TransformerEncoderLayer(nn.Module): @@ -160,45 +105,55 @@ class TransformerEncoderLayer(nn.Module): self.activation = _get_activation_fn(activation) self.normalize_before = normalize_before - def with_pos_embed(self, tensor, pos: Optional[Tensor]): - return tensor if pos is None else tensor + pos - - def forward_post( - self, - src, - pos: Optional[Tensor] = None, - ): - q = k = self.with_pos_embed(src, pos) - src2 = self.self_attn(q, k, value=src)[0] - src = src + self.dropout1(src2) - src = self.norm1(src) - src2 = self.linear2(self.dropout(self.activation(self.linear1(src)))) - src = src + self.dropout2(src2) - src = self.norm2(src) - return src - - def forward_pre( - self, - src, - pos: Optional[Tensor] = None, - ): - src2 = self.norm1(src) - q 
= k = self.with_pos_embed(src2, pos) - src2 = self.self_attn(q, k, value=src2)[0] - src = src + self.dropout1(src2) - src2 = self.norm2(src) - src2 = self.linear2(self.dropout(self.activation(self.linear1(src2)))) - src = src + self.dropout2(src2) - return src - - def forward( - self, - src, - pos: Optional[Tensor] = None, - ): + def forward(self, x, pos: Optional[Tensor] = None): + skip = x if self.normalize_before: - return self.forward_pre(src, pos) - return self.forward_post(src, pos) + x = self.norm1(x) + q = k = x if pos is None else x + pos + x = self.self_attn(q, k, value=x)[0] + x = skip + self.dropout1(x) + if self.normalize_before: + skip = x + x = self.norm2(x) + else: + x = self.norm1(x) + skip = x + x = self.linear2(self.dropout(self.activation(self.linear1(x)))) + x = skip + self.dropout2(x) + if not self.normalize_before: + x = self.norm2(x) + return x + + +class TransformerDecoder(nn.Module): + def __init__( + self, + num_layers, + d_model, + nhead, + dim_feedforward=2048, + dropout=0.1, + activation="relu", + normalize_before=False, + ): + super().__init__() + self.layers = nn.ModuleList( + [ + TransformerDecoderLayer( + d_model, nhead, dim_feedforward, dropout, activation, normalize_before + ) + for _ in range(num_layers) + ] + ) + self.num_layers = num_layers + self.norm = nn.LayerNorm(d_model) + + def forward(self, x, encoder_out, decoder_pos: Tensor | None = None, encoder_pos: Tensor | None = None): + for layer in self.layers: + x = layer(x, encoder_out, decoder_pos=decoder_pos, encoder_pos=encoder_pos) + if self.norm is not None: + x = self.norm(x) + return x class TransformerDecoderLayer(nn.Module): @@ -223,86 +178,55 @@ class TransformerDecoderLayer(nn.Module): self.activation = _get_activation_fn(activation) self.normalize_before = normalize_before - def with_pos_embed(self, tensor, pos: Optional[Tensor]): + def maybe_add_pos_embed(self, tensor: Tensor, pos: Tensor | None) -> Tensor: return tensor if pos is None else tensor + pos - def 
forward_post( - self, - tgt, - memory, - pos: Optional[Tensor] = None, - query_pos: Optional[Tensor] = None, - ): - q = k = self.with_pos_embed(tgt, query_pos) - tgt2 = self.self_attn(q, k, value=tgt)[0] - tgt = tgt + self.dropout1(tgt2) - tgt = self.norm1(tgt) - tgt2 = self.multihead_attn( - query=self.with_pos_embed(tgt, query_pos), - key=self.with_pos_embed(memory, pos), - value=memory, - )[0] - tgt = tgt + self.dropout2(tgt2) - tgt = self.norm2(tgt) - tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt)))) - tgt = tgt + self.dropout3(tgt2) - tgt = self.norm3(tgt) - return tgt - - def forward_pre( - self, - tgt, - memory, - pos: Optional[Tensor] = None, - query_pos: Optional[Tensor] = None, - ): - tgt2 = self.norm1(tgt) - q = k = self.with_pos_embed(tgt2, query_pos) - tgt2 = self.self_attn(q, k, value=tgt2)[0] - tgt = tgt + self.dropout1(tgt2) - tgt2 = self.norm2(tgt) - tgt2 = self.multihead_attn( - query=self.with_pos_embed(tgt2, query_pos), - key=self.with_pos_embed(memory, pos), - value=memory, - )[0] - tgt = tgt + self.dropout2(tgt2) - tgt2 = self.norm3(tgt) - tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt2)))) - tgt = tgt + self.dropout3(tgt2) - return tgt - def forward( self, - tgt, - memory, - pos: Optional[Tensor] = None, - query_pos: Optional[Tensor] = None, - ): + x: Tensor, + encoder_out: Tensor, + decoder_pos: Tensor | None = None, + encoder_pos: Tensor | None = None, + ) -> Tensor: + """ + Args: + x: (Decoder Sequence, Batch, Channel) tensor of input tokens. + encoder_out: (Encoder Sequence, B, C) output features from the last layer of the encoder we are + cross-attending with. + decoder_pos: (ES, 1, C) positional embedding for keys (from the encoder). + encoder_pos: (DS, 1, C) Positional_embedding for the queries (from the decoder). + Returns: + (DS, B, C) tensor of decoder output features. 
+ """ + skip = x if self.normalize_before: - return self.forward_pre( - tgt, - memory, - pos, - query_pos, - ) - return self.forward_post(tgt, memory, pos, query_pos) - - -def _get_clones(module, n): - return nn.ModuleList([copy.deepcopy(module) for _ in range(n)]) - - -def build_transformer(args): - return Transformer( - d_model=args.hidden_dim, - dropout=args.dropout, - nhead=args.nheads, - dim_feedforward=args.dim_feedforward, - num_encoder_layers=args.enc_layers, - num_decoder_layers=args.dec_layers, - normalize_before=args.pre_norm, - ) + x = self.norm1(x) + q = k = self.maybe_add_pos_embed(x, decoder_pos) + x = self.self_attn(q, k, value=x)[0] + x = skip + self.dropout1(x) + if self.normalize_before: + skip = x + x = self.norm2(x) + else: + x = self.norm1(x) + skip = x + x = self.multihead_attn( + query=self.maybe_add_pos_embed(x, decoder_pos), + key=self.maybe_add_pos_embed(encoder_out, encoder_pos), + value=encoder_out, + )[0] + x = skip + self.dropout2(x) + if self.normalize_before: + skip = x + x = self.norm3(x) + else: + x = self.norm2(x) + skip = x + x = self.linear2(self.dropout(self.activation(self.linear1(x)))) + x = skip + self.dropout3(x) + if not self.normalize_before: + x = self.norm3(x) + return x def _get_activation_fn(activation): @@ -313,4 +237,4 @@ def _get_activation_fn(activation): return F.gelu if activation == "glu": return F.glu - raise RuntimeError(f"activation should be relu/gelu, not {activation}.") + raise RuntimeError(f"activation should be relu/gelu/glu, not {activation}.") diff --git a/pyproject.toml b/pyproject.toml index b2526e5c..6d76cffc 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -101,3 +101,6 @@ enable = true [build-system] requires = ["poetry-core>=1.0.0", "poetry-dynamic-versioning>=1.0.0,<2.0.0"] build-backend = "poetry_dynamic_versioning.backend" + +[tool.black] +line-length = 110 From 1cdfbc8b52fc7a14d148db74acc29cd02f087982 Mon Sep 17 00:00:00 2001 From: Cadene Date: Sun, 31 Mar 2024 15:05:25 +0000 Subject: 
[PATCH 05/80] WIP WIP WIP train.py works, loss going down WIP eval.py Fix WIP (eval running, TODO: verify results reproduced) Eval works! (testing reproducibility) WIP pretrained model pusht reproduces same results as torchrl pretrained model pusht reproduces same results as torchrl Remove AbstractPolicy, Move all queues in select_action WIP test_datasets passed (TODO: re-enable NormalizeTransform) --- lerobot/common/datasets/aloha.py | 168 ++++++++------ lerobot/common/datasets/factory.py | 134 ++++------- lerobot/common/datasets/pusht.py | 165 ++++++++----- lerobot/common/datasets/simxarm.py | 169 ++++++++------ lerobot/common/datasets/utils.py | 69 ++++++ lerobot/common/envs/factory.py | 74 ++---- lerobot/common/envs/simxarm/env.py | 4 +- lerobot/common/policies/diffusion/policy.py | 108 ++++----- lerobot/common/transforms.py | 123 +++++----- lerobot/scripts/eval.py | 245 +++++++++++++------- lerobot/scripts/train.py | 96 ++++---- lerobot/scripts/visualize_dataset.py | 20 +- poetry.lock | 28 ++- pyproject.toml | 3 + tests/test_datasets.py | 23 +- tests/test_envs.py | 10 +- tests/test_policies.py | 8 +- 17 files changed, 826 insertions(+), 621 deletions(-) diff --git a/lerobot/common/datasets/aloha.py b/lerobot/common/datasets/aloha.py index 031c2cd3..2744f595 100644 --- a/lerobot/common/datasets/aloha.py +++ b/lerobot/common/datasets/aloha.py @@ -1,26 +1,13 @@ import logging from pathlib import Path -from typing import Callable import einops import gdown import h5py import torch -import torchrl import tqdm -from tensordict import TensorDict -from torchrl.data.replay_buffers.samplers import Sampler -from torchrl.data.replay_buffers.storages import TensorStorage -from torchrl.data.replay_buffers.writers import Writer -from lerobot.common.datasets.abstract import AbstractDataset - -DATASET_IDS = [ - "aloha_sim_insertion_human", - "aloha_sim_insertion_scripted", - "aloha_sim_transfer_cube_human", - "aloha_sim_transfer_cube_scripted", -] +from 
lerobot.common.datasets.utils import load_data_with_delta_timestamps FOLDER_URLS = { "aloha_sim_insertion_human": "https://drive.google.com/drive/folders/1RgyD0JgTX30H4IM5XZn8I3zSV_mr8pyF", @@ -66,7 +53,6 @@ CAMERAS = { def download(data_dir, dataset_id): - assert dataset_id in DATASET_IDS assert dataset_id in FOLDER_URLS assert dataset_id in EP48_URLS assert dataset_id in EP49_URLS @@ -80,51 +66,78 @@ def download(data_dir, dataset_id): gdown.download(EP49_URLS[dataset_id], output=str(data_dir / "episode_49.hdf5"), fuzzy=True) -class AlohaDataset(AbstractDataset): - available_datasets = DATASET_IDS +class AlohaDataset(torch.utils.data.Dataset): + available_datasets = [ + "aloha_sim_insertion_human", + "aloha_sim_insertion_scripted", + "aloha_sim_transfer_cube_human", + "aloha_sim_transfer_cube_scripted", + ] + fps = 50 + image_keys = ["observation.images.top"] def __init__( self, dataset_id: str, version: str | None = "v1.2", - batch_size: int | None = None, - *, - shuffle: bool = True, root: Path | None = None, - pin_memory: bool = False, - prefetch: int = None, - sampler: Sampler | None = None, - collate_fn: Callable | None = None, - writer: Writer | None = None, - transform: "torchrl.envs.Transform" = None, + transform: callable = None, + delta_timestamps: dict[list[float]] | None = None, ): - super().__init__( - dataset_id, - version, - batch_size, - shuffle=shuffle, - root=root, - pin_memory=pin_memory, - prefetch=prefetch, - sampler=sampler, - collate_fn=collate_fn, - writer=writer, - transform=transform, - ) + super().__init__() + self.dataset_id = dataset_id + self.version = version + self.root = root + self.transform = transform + self.delta_timestamps = delta_timestamps + + data_dir = self.root / f"{self.dataset_id}" + if (data_dir / "data_dict.pth").exists() and (data_dir / "data_ids_per_episode.pth").exists(): + self.data_dict = torch.load(data_dir / "data_dict.pth") + self.data_ids_per_episode = torch.load(data_dir / "data_ids_per_episode.pth") + 
else: + self._download_and_preproc_obsolete() + data_dir.mkdir(parents=True, exist_ok=True) + torch.save(self.data_dict, data_dir / "data_dict.pth") + torch.save(self.data_ids_per_episode, data_dir / "data_ids_per_episode.pth") @property - def stats_patterns(self) -> dict: - d = { - ("observation", "state"): "b c -> c", - ("action",): "b c -> c", - } - for cam in CAMERAS[self.dataset_id]: - d[("observation", "image", cam)] = "b c h w -> c 1 1" - return d + def num_samples(self) -> int: + return len(self.data_dict["index"]) @property - def image_keys(self) -> list: - return [("observation", "image", cam) for cam in CAMERAS[self.dataset_id]] + def num_episodes(self) -> int: + return len(self.data_ids_per_episode) + + def __len__(self): + return self.num_samples + + def __getitem__(self, idx): + item = {} + + # get episode id and timestamp of the sampled frame + current_ts = self.data_dict["timestamp"][idx].item() + episode = self.data_dict["episode"][idx].item() + + for key in self.data_dict: + if self.delta_timestamps is not None and key in self.delta_timestamps: + data, is_pad = load_data_with_delta_timestamps( + self.data_dict, + self.data_ids_per_episode, + self.delta_timestamps, + key, + current_ts, + episode, + ) + item[key] = data + item[f"{key}_is_pad"] = is_pad + else: + item[key] = self.data_dict[key][idx] + + if self.transform is not None: + item = self.transform(item) + + return item def _download_and_preproc_obsolete(self): assert self.root is not None @@ -132,54 +145,55 @@ class AlohaDataset(AbstractDataset): if not raw_dir.is_dir(): download(raw_dir, self.dataset_id) - total_num_frames = 0 + total_frames = 0 logging.info("Compute total number of frames to initialize offline buffer") for ep_id in range(NUM_EPISODES[self.dataset_id]): ep_path = raw_dir / f"episode_{ep_id}.hdf5" with h5py.File(ep_path, "r") as ep: - total_num_frames += ep["/action"].shape[0] - 1 - logging.info(f"{total_num_frames=}") + total_frames += ep["/action"].shape[0] - 1 + 
logging.info(f"{total_frames=}") + + self.data_ids_per_episode = {} + ep_dicts = [] logging.info("Initialize and feed offline buffer") - idxtd = 0 for ep_id in tqdm.tqdm(range(NUM_EPISODES[self.dataset_id])): ep_path = raw_dir / f"episode_{ep_id}.hdf5" with h5py.File(ep_path, "r") as ep: - ep_num_frames = ep["/action"].shape[0] + num_frames = ep["/action"].shape[0] # last step of demonstration is considered done - done = torch.zeros(ep_num_frames, 1, dtype=torch.bool) + done = torch.zeros(num_frames, 1, dtype=torch.bool) done[-1] = True state = torch.from_numpy(ep["/observations/qpos"][:]) action = torch.from_numpy(ep["/action"][:]) - ep_td = TensorDict( - { - ("observation", "state"): state[:-1], - "action": action[:-1], - "episode": torch.tensor([ep_id] * (ep_num_frames - 1)), - "frame_id": torch.arange(0, ep_num_frames - 1, 1), - ("next", "observation", "state"): state[1:], - # TODO: compute reward and success - # ("next", "reward"): reward[1:], - ("next", "done"): done[1:], - # ("next", "success"): success[1:], - }, - batch_size=ep_num_frames - 1, - ) + ep_dict = { + "observation.state": state, + "action": action, + "episode": torch.tensor([ep_id] * num_frames), + "frame_id": torch.arange(0, num_frames, 1), + "timestamp": torch.arange(0, num_frames, 1) / self.fps, + # "next.observation.state": state, + # TODO(rcadene): compute reward and success + # "next.reward": reward[1:], + "next.done": done[1:], + # "next.success": success[1:], + } for cam in CAMERAS[self.dataset_id]: image = torch.from_numpy(ep[f"/observations/images/{cam}"][:]) image = einops.rearrange(image, "b h w c -> b c h w").contiguous() - ep_td["observation", "image", cam] = image[:-1] - ep_td["next", "observation", "image", cam] = image[1:] + ep_dict[f"observation.images.{cam}"] = image[:-1] + # ep_dict[f"next.observation.images.{cam}"] = image[1:] - if ep_id == 0: - # hack to initialize tensordict data structure to store episodes - td_data = 
ep_td[0].expand(total_num_frames).memmap_like(self.root / f"{self.dataset_id}") + ep_dicts.append(ep_dict) - td_data[idxtd : idxtd + len(ep_td)] = ep_td - idxtd = idxtd + len(ep_td) + self.data_dict = {} - return TensorStorage(td_data.lock_()) + keys = ep_dicts[0].keys() + for key in keys: + self.data_dict[key] = torch.cat([x[key] for x in ep_dicts]) + + self.data_dict["index"] = torch.arange(0, total_frames, 1) diff --git a/lerobot/common/datasets/factory.py b/lerobot/common/datasets/factory.py index 04077034..94ac8ca4 100644 --- a/lerobot/common/datasets/factory.py +++ b/lerobot/common/datasets/factory.py @@ -1,11 +1,10 @@ -import logging import os from pathlib import Path import torch -from torchrl.data.replay_buffers import PrioritizedSliceSampler, SliceSampler +from torchvision.transforms import v2 -from lerobot.common.transforms import NormalizeTransform, Prod +from lerobot.common.transforms import Prod # DATA_DIR specifies to location where datasets are loaded. By default, DATA_DIR is None and # we load from `$HOME/.cache/huggingface/hub/datasets`. 
For our unit tests, we set `DATA_DIR=tests/data` @@ -13,57 +12,12 @@ from lerobot.common.transforms import NormalizeTransform, Prod DATA_DIR = Path(os.environ["DATA_DIR"]) if "DATA_DIR" in os.environ else None -def make_offline_buffer( +def make_dataset( cfg, - overwrite_sampler=None, # set normalize=False to remove all transformations and keep images unnormalized in [0,255] normalize=True, - overwrite_batch_size=None, - overwrite_prefetch=None, stats_path=None, ): - if cfg.policy.balanced_sampling: - assert cfg.online_steps > 0 - batch_size = None - pin_memory = False - prefetch = None - else: - assert cfg.online_steps == 0 - num_slices = cfg.policy.batch_size - batch_size = cfg.policy.horizon * num_slices - pin_memory = cfg.device == "cuda" - prefetch = cfg.prefetch - - if overwrite_batch_size is not None: - batch_size = overwrite_batch_size - - if overwrite_prefetch is not None: - prefetch = overwrite_prefetch - - if overwrite_sampler is None: - # TODO(rcadene): move batch_size outside - num_traj_per_batch = cfg.policy.batch_size # // cfg.horizon - # TODO(rcadene): Sampler outputs a batch_size <= cfg.batch_size. - # We would need to add a transform to pad the tensordict to ensure batch_size == cfg.batch_size. 
- - if cfg.offline_prioritized_sampler: - logging.info("use prioritized sampler for offline dataset") - sampler = PrioritizedSliceSampler( - max_capacity=100_000, - alpha=cfg.policy.per_alpha, - beta=cfg.policy.per_beta, - num_slices=num_traj_per_batch, - strict_length=False, - ) - else: - logging.info("use simple sampler for offline dataset") - sampler = SliceSampler( - num_slices=num_traj_per_batch, - strict_length=False, - ) - else: - sampler = overwrite_sampler - if cfg.env.name == "simxarm": from lerobot.common.datasets.simxarm import SimxarmDataset @@ -81,56 +35,56 @@ def make_offline_buffer( else: raise ValueError(cfg.env.name) - offline_buffer = clsfunc( - dataset_id=cfg.dataset_id, - sampler=sampler, - batch_size=batch_size, - root=DATA_DIR, - pin_memory=pin_memory, - prefetch=prefetch if isinstance(prefetch, int) else None, - ) - - if cfg.policy.name == "tdmpc": - img_keys = [] - for key in offline_buffer.image_keys: - img_keys.append(("next", *key)) - img_keys += offline_buffer.image_keys - else: - img_keys = offline_buffer.image_keys - + transforms = None if normalize: - transforms = [Prod(in_keys=img_keys, prod=1 / 255)] - # TODO(rcadene): make normalization strategy configurable between mean_std, min_max, manual_min_max, # min_max_from_spec - stats = offline_buffer.compute_or_load_stats() if stats_path is None else torch.load(stats_path) + # stats = dataset.compute_or_load_stats() if stats_path is None else torch.load(stats_path) - # we only normalize the state and action, since the images are usually normalized inside the model for - # now (except for tdmpc: see the following) - in_keys = [("observation", "state"), ("action")] - - if cfg.policy.name == "tdmpc": - # TODO(rcadene): we add img_keys to the keys to normalize for tdmpc only, since diffusion and act policies normalize the image inside the model for now - in_keys += img_keys - # TODO(racdene): since we use next observations in tdmpc, we also add them to the normalization. 
We are wasting a bit of compute on this for now. - in_keys += [("next", *key) for key in img_keys] - in_keys.append(("next", "observation", "state")) + stats = {} if cfg.policy.name == "diffusion" and cfg.env.name == "pusht": # TODO(rcadene): we overwrite stats to have the same as pretrained model, but we should remove this - stats["observation", "state", "min"] = torch.tensor([13.456424, 32.938293], dtype=torch.float32) - stats["observation", "state", "max"] = torch.tensor([496.14618, 510.9579], dtype=torch.float32) - stats["action", "min"] = torch.tensor([12.0, 25.0], dtype=torch.float32) - stats["action", "max"] = torch.tensor([511.0, 511.0], dtype=torch.float32) + stats["observation.state"] = {} + stats["observation.state"]["min"] = torch.tensor([13.456424, 32.938293], dtype=torch.float32) + stats["observation.state"]["max"] = torch.tensor([496.14618, 510.9579], dtype=torch.float32) + stats["action"] = {} + stats["action"]["min"] = torch.tensor([12.0, 25.0], dtype=torch.float32) + stats["action"]["max"] = torch.tensor([511.0, 511.0], dtype=torch.float32) # TODO(rcadene): remove this and put it in config. 
Ideally we want to reproduce SOTA results just with mean_std - normalization_mode = "mean_std" if cfg.env.name == "aloha" else "min_max" - transforms.append(NormalizeTransform(stats, in_keys, mode=normalization_mode)) + # normalization_mode = "mean_std" if cfg.env.name == "aloha" else "min_max" - offline_buffer.set_transform(transforms) + transforms = v2.Compose( + [ + # TODO(rcadene): we need to do something about image_keys + Prod(in_keys=clsfunc.image_keys, prod=1 / 255.0), + # NormalizeTransform( + # stats, + # in_keys=[ + # "observation.state", + # "action", + # ], + # mode=normalization_mode, + # ), + ] + ) - if not overwrite_sampler: - index = torch.arange(0, offline_buffer.num_samples, 1) - sampler.extend(index) + if cfg.policy.name == "diffusion" and cfg.env.name == "pusht": + # TODO(rcadene): implement delta_timestamps in config + delta_timestamps = { + "observation.image": [-0.1, 0], + "observation.state": [-0.1, 0], + "action": [-0.1] + [i / clsfunc.fps for i in range(15)], + } + else: + delta_timestamps = None - return offline_buffer + dataset = clsfunc( + dataset_id=cfg.dataset_id, + root=DATA_DIR, + delta_timestamps=delta_timestamps, + transform=transforms, + ) + + return dataset diff --git a/lerobot/common/datasets/pusht.py b/lerobot/common/datasets/pusht.py index 624fb140..3de70b1f 100644 --- a/lerobot/common/datasets/pusht.py +++ b/lerobot/common/datasets/pusht.py @@ -1,20 +1,13 @@ from pathlib import Path -from typing import Callable import einops import numpy as np import pygame import pymunk import torch -import torchrl import tqdm -from tensordict import TensorDict -from torchrl.data.replay_buffers.samplers import Sampler -from torchrl.data.replay_buffers.storages import TensorStorage -from torchrl.data.replay_buffers.writers import Writer -from lerobot.common.datasets.abstract import AbstractDataset -from lerobot.common.datasets.utils import download_and_extract_zip +from lerobot.common.datasets.utils import download_and_extract_zip, 
load_data_with_delta_timestamps from lerobot.common.envs.pusht.pusht_env import pymunk_to_shapely from lerobot.common.policies.diffusion.replay_buffer import ReplayBuffer as DiffusionPolicyReplayBuffer @@ -83,37 +76,82 @@ def add_tee( return body -class PushtDataset(AbstractDataset): +class PushtDataset(torch.utils.data.Dataset): + """ + + Arguments + ---------- + delta_timestamps : dict[list[float]] | None, optional + Loads data from frames with a shift in timestamps with a different strategy for each data key (e.g. state, action or image) + If `None`, no shift is applied to current timestamp and the data from the current frame is loaded. + """ + available_datasets = ["pusht"] + fps = 10 + image_keys = ["observation.image"] def __init__( self, dataset_id: str, version: str | None = "v1.2", - batch_size: int | None = None, - *, - shuffle: bool = True, root: Path | None = None, - pin_memory: bool = False, - prefetch: int = None, - sampler: Sampler | None = None, - collate_fn: Callable | None = None, - writer: Writer | None = None, - transform: "torchrl.envs.Transform" = None, + transform: callable = None, + delta_timestamps: dict[list[float]] | None = None, ): - super().__init__( - dataset_id, - version, - batch_size, - shuffle=shuffle, - root=root, - pin_memory=pin_memory, - prefetch=prefetch, - sampler=sampler, - collate_fn=collate_fn, - writer=writer, - transform=transform, - ) + super().__init__() + self.dataset_id = dataset_id + self.version = version + self.root = root + self.transform = transform + self.delta_timestamps = delta_timestamps + + data_dir = self.root / f"{self.dataset_id}" + if (data_dir / "data_dict.pth").exists() and (data_dir / "data_ids_per_episode.pth").exists(): + self.data_dict = torch.load(data_dir / "data_dict.pth") + self.data_ids_per_episode = torch.load(data_dir / "data_ids_per_episode.pth") + else: + self._download_and_preproc_obsolete() + data_dir.mkdir(parents=True, exist_ok=True) + torch.save(self.data_dict, data_dir / 
"data_dict.pth") + torch.save(self.data_ids_per_episode, data_dir / "data_ids_per_episode.pth") + + @property + def num_samples(self) -> int: + return len(self.data_dict["index"]) + + @property + def num_episodes(self) -> int: + return len(self.data_ids_per_episode) + + def __len__(self): + return self.num_samples + + def __getitem__(self, idx): + item = {} + + # get episode id and timestamp of the sampled frame + current_ts = self.data_dict["timestamp"][idx].item() + episode = self.data_dict["episode"][idx].item() + + for key in self.data_dict: + if self.delta_timestamps is not None and key in self.delta_timestamps: + data, is_pad = load_data_with_delta_timestamps( + self.data_dict, + self.data_ids_per_episode, + self.delta_timestamps, + key, + current_ts, + episode, + ) + item[key] = data + item[f"{key}_is_pad"] = is_pad + else: + item[key] = self.data_dict[key][idx] + + if self.transform is not None: + item = self.transform(item) + + return item def _download_and_preproc_obsolete(self): assert self.root is not None @@ -147,8 +185,10 @@ class PushtDataset(AbstractDataset): states = torch.from_numpy(dataset_dict["state"]) actions = torch.from_numpy(dataset_dict["action"]) + self.data_ids_per_episode = {} + ep_dicts = [] + idx0 = 0 - idxtd = 0 for episode_id in tqdm.tqdm(range(num_episodes)): idx1 = dataset_dict.meta["episode_ends"][episode_id] # to create test artifact @@ -194,30 +234,45 @@ class PushtDataset(AbstractDataset): # last step of demonstration is considered done done[-1] = True - ep_td = TensorDict( - { - ("observation", "image"): image[:-1], - ("observation", "state"): agent_pos[:-1], - "action": actions[idx0:idx1][:-1], - "episode": episode_ids[idx0:idx1][:-1], - "frame_id": torch.arange(0, num_frames - 1, 1), - ("next", "observation", "image"): image[1:], - ("next", "observation", "state"): agent_pos[1:], - # TODO: verify that reward and done are aligned with image and agent_pos - ("next", "reward"): reward[1:], - ("next", "done"): done[1:], - 
("next", "success"): success[1:], - }, - batch_size=num_frames - 1, - ) + ep_dict = { + "observation.image": image, + "observation.state": agent_pos, + "action": actions[idx0:idx1], + "episode": torch.tensor([episode_id] * num_frames, dtype=torch.int), + "frame_id": torch.arange(0, num_frames, 1), + "timestamp": torch.arange(0, num_frames, 1) / self.fps, + # "next.observation.image": image[1:], + # "next.observation.state": agent_pos[1:], + # TODO(rcadene): verify that reward and done are aligned with image and agent_pos + "next.reward": torch.cat([reward[1:], reward[[-1]]]), + "next.done": torch.cat([done[1:], done[[-1]]]), + "next.success": torch.cat([success[1:], success[[-1]]]), + } + ep_dicts.append(ep_dict) - if episode_id == 0: - # hack to initialize tensordict data structure to store episodes - td_data = ep_td[0].expand(total_frames).memmap_like(self.root / f"{self.dataset_id}") - - td_data[idxtd : idxtd + len(ep_td)] = ep_td + assert isinstance(episode_id, int) + self.data_ids_per_episode[episode_id] = torch.arange(idx0, idx1, 1) + assert len(self.data_ids_per_episode[episode_id]) == num_frames idx0 = idx1 - idxtd = idxtd + len(ep_td) - return TensorStorage(td_data.lock_()) + self.data_dict = {} + + keys = ep_dicts[0].keys() + for key in keys: + self.data_dict[key] = torch.cat([x[key] for x in ep_dicts]) + + self.data_dict["index"] = torch.arange(0, total_frames, 1) + + +if __name__ == "__main__": + dataset = PushtDataset( + "pusht", + root=Path("data"), + delta_timestamps={ + "observation.image": [0, -1, -0.2, -0.1], + "observation.state": [0, -1, -0.2, -0.1], + "action": [-0.1, 0, 1, 2, 3], + }, + ) + dataset[10] diff --git a/lerobot/common/datasets/simxarm.py b/lerobot/common/datasets/simxarm.py index dc30e69e..4b2c68ad 100644 --- a/lerobot/common/datasets/simxarm.py +++ b/lerobot/common/datasets/simxarm.py @@ -1,75 +1,104 @@ import pickle import zipfile from pathlib import Path -from typing import Callable import torch -import torchrl import tqdm -from 
tensordict import TensorDict -from torchrl.data.replay_buffers.samplers import ( - Sampler, -) -from torchrl.data.replay_buffers.storages import TensorStorage -from torchrl.data.replay_buffers.writers import Writer -from lerobot.common.datasets.abstract import AbstractDataset +from lerobot.common.datasets.utils import load_data_with_delta_timestamps -def download(): - raise NotImplementedError() +def download(raw_dir): import gdown + raw_dir.mkdir(parents=True, exist_ok=True) url = "https://drive.google.com/uc?id=1nhxpykGtPDhmQKm-_B8zBSywVRdgeVya" - download_path = "data.zip" - gdown.download(url, download_path, quiet=False) + zip_path = raw_dir / "data.zip" + gdown.download(url, str(zip_path), quiet=False) print("Extracting...") - with zipfile.ZipFile(download_path, "r") as zip_f: + with zipfile.ZipFile(str(zip_path), "r") as zip_f: for member in zip_f.namelist(): if member.startswith("data/xarm") and member.endswith(".pkl"): print(member) zip_f.extract(member=member) - Path(download_path).unlink() + zip_path.unlink() -class SimxarmDataset(AbstractDataset): +class SimxarmDataset(torch.utils.data.Dataset): available_datasets = [ "xarm_lift_medium", ] + fps = 15 + image_keys = ["observation.image"] def __init__( self, dataset_id: str, version: str | None = "v1.1", - batch_size: int | None = None, - *, - shuffle: bool = True, root: Path | None = None, - pin_memory: bool = False, - prefetch: int = None, - sampler: Sampler | None = None, - collate_fn: Callable | None = None, - writer: Writer | None = None, - transform: "torchrl.envs.Transform" = None, + transform: callable = None, + delta_timestamps: dict[list[float]] | None = None, ): - super().__init__( - dataset_id, - version, - batch_size, - shuffle=shuffle, - root=root, - pin_memory=pin_memory, - prefetch=prefetch, - sampler=sampler, - collate_fn=collate_fn, - writer=writer, - transform=transform, - ) + super().__init__() + self.dataset_id = dataset_id + self.version = version + self.root = root + self.transform = 
transform + self.delta_timestamps = delta_timestamps + + data_dir = self.root / f"{self.dataset_id}" + if (data_dir / "data_dict.pth").exists() and (data_dir / "data_ids_per_episode.pth").exists(): + self.data_dict = torch.load(data_dir / "data_dict.pth") + self.data_ids_per_episode = torch.load(data_dir / "data_ids_per_episode.pth") + else: + self._download_and_preproc_obsolete() + data_dir.mkdir(parents=True, exist_ok=True) + torch.save(self.data_dict, data_dir / "data_dict.pth") + torch.save(self.data_ids_per_episode, data_dir / "data_ids_per_episode.pth") + + @property + def num_samples(self) -> int: + return len(self.data_dict["index"]) + + @property + def num_episodes(self) -> int: + return len(self.data_ids_per_episode) + + def __len__(self): + return self.num_samples + + def __getitem__(self, idx): + item = {} + + # get episode id and timestamp of the sampled frame + current_ts = self.data_dict["timestamp"][idx].item() + episode = self.data_dict["episode"][idx].item() + + for key in self.data_dict: + if self.delta_timestamps is not None and key in self.delta_timestamps: + data, is_pad = load_data_with_delta_timestamps( + self.data_dict, + self.data_ids_per_episode, + self.delta_timestamps, + key, + current_ts, + episode, + ) + item[key] = data + item[f"{key}_is_pad"] = is_pad + else: + item[key] = self.data_dict[key][idx] + + if self.transform is not None: + item = self.transform(item) + + return item def _download_and_preproc_obsolete(self): - # assert self.root is not None - # TODO(rcadene): finish download - # download() + assert self.root is not None + raw_dir = self.root / f"{self.dataset_id}_raw" + if not raw_dir.exists(): + download(raw_dir) dataset_path = self.root / f"{self.dataset_id}" / "buffer.pkl" print(f"Using offline dataset '{dataset_path}'") @@ -78,6 +107,9 @@ class SimxarmDataset(AbstractDataset): total_frames = dataset_dict["actions"].shape[0] + self.data_ids_per_episode = {} + ep_dicts = [] + idx0 = 0 idx1 = 0 episode_id = 0 @@ -91,37 
+123,38 @@ class SimxarmDataset(AbstractDataset): image = torch.tensor(dataset_dict["observations"]["rgb"][idx0:idx1]) state = torch.tensor(dataset_dict["observations"]["state"][idx0:idx1]) - next_image = torch.tensor(dataset_dict["next_observations"]["rgb"][idx0:idx1]) - next_state = torch.tensor(dataset_dict["next_observations"]["state"][idx0:idx1]) + action = torch.tensor(dataset_dict["actions"][idx0:idx1]) + # TODO(rcadene): concat the last "next_observations" to "observations" + # next_image = torch.tensor(dataset_dict["next_observations"]["rgb"][idx0:idx1]) + # next_state = torch.tensor(dataset_dict["next_observations"]["state"][idx0:idx1]) next_reward = torch.tensor(dataset_dict["rewards"][idx0:idx1]) next_done = torch.tensor(dataset_dict["dones"][idx0:idx1]) - episode = TensorDict( - { - ("observation", "image"): image, - ("observation", "state"): state, - "action": torch.tensor(dataset_dict["actions"][idx0:idx1]), - "episode": torch.tensor([episode_id] * num_frames, dtype=torch.int), - "frame_id": torch.arange(0, num_frames, 1), - ("next", "observation", "image"): next_image, - ("next", "observation", "state"): next_state, - ("next", "reward"): next_reward, - ("next", "done"): next_done, - }, - batch_size=num_frames, - ) + ep_dict = { + "observation.image": image, + "observation.state": state, + "action": action, + "episode": torch.tensor([episode_id] * num_frames, dtype=torch.int), + "frame_id": torch.arange(0, num_frames, 1), + "timestamp": torch.arange(0, num_frames, 1) / self.fps, + # "next.observation.image": next_image, + # "next.observation.state": next_state, + "next.reward": next_reward, + "next.done": next_done, + } + ep_dicts.append(ep_dict) - if episode_id == 0: - # hack to initialize tensordict data structure to store episodes - td_data = ( - episode[0] - .expand(total_frames) - .memmap_like(self.root / f"{self.dataset_id}" / "replay_buffer") - ) + assert isinstance(episode_id, int) + self.data_ids_per_episode[episode_id] = torch.arange(idx0, 
idx1, 1) + assert len(self.data_ids_per_episode[episode_id]) == num_frames - td_data[idx0:idx1] = episode - - episode_id += 1 idx0 = idx1 + episode_id += 1 - return TensorStorage(td_data.lock_()) + self.data_dict = {} + + keys = ep_dicts[0].keys() + for key in keys: + self.data_dict[key] = torch.cat([x[key] for x in ep_dicts]) + + self.data_dict["index"] = torch.arange(0, total_frames, 1) diff --git a/lerobot/common/datasets/utils.py b/lerobot/common/datasets/utils.py index 0ad43a65..c8840169 100644 --- a/lerobot/common/datasets/utils.py +++ b/lerobot/common/datasets/utils.py @@ -3,6 +3,7 @@ import zipfile from pathlib import Path import requests +import torch import tqdm @@ -28,3 +29,71 @@ def download_and_extract_zip(url: str, destination_folder: Path) -> bool: return True else: return False + + +def euclidean_distance_matrix(mat0, mat1): + # Compute the square of the distance matrix + sq0 = torch.sum(mat0**2, dim=1, keepdim=True) + sq1 = torch.sum(mat1**2, dim=1, keepdim=True) + distance_sq = sq0 + sq1.transpose(0, 1) - 2 * mat0 @ mat1.transpose(0, 1) + + # Taking the square root to get the euclidean distance + distance = torch.sqrt(torch.clamp(distance_sq, min=0)) + return distance + + +def is_contiguously_true_or_false(bool_vector): + assert bool_vector.ndim == 1 + assert bool_vector.dtype == torch.bool + + # Compare each element with its neighbor to find changes + changes = bool_vector[1:] != bool_vector[:-1] + + # Count the number of changes + num_changes = changes.sum().item() + + # If there's more than one change, the list is not contiguous + return num_changes <= 1 + + # examples = [ + # ([True, False, True, False, False, False], False), + # ([True, True, True, False, False, False], True), + # ([False, False, False, False, False, False], True) + # ] + # for bool_list, expected in examples: + # result = is_contiguously_true_or_false(bool_list) + + +def load_data_with_delta_timestamps( + data_dict, data_ids_per_episode, delta_timestamps, key, current_ts, 
episode +): + # get indices of the frames associated to the episode, and their timestamps + ep_data_ids = data_ids_per_episode[episode] + ep_timestamps = data_dict["timestamp"][ep_data_ids] + + # get timestamps used as query to retrieve data of previous/future frames + delta_ts = delta_timestamps[key] + query_ts = current_ts + torch.tensor(delta_ts) + + # compute distances between each query timestamp and all timestamps of all the frames belonging to the episode + dist = euclidean_distance_matrix(query_ts[:, None], ep_timestamps[:, None]) + min_, argmin_ = dist.min(1) + + # get the indices of the data that are closest to the query timestamps + data_ids = ep_data_ids[argmin_] + # closest_ts = ep_timestamps[argmin_] + + # get the data + data = data_dict[key][data_ids].clone() + + # TODO(rcadene): synchronize timestamps + interpolation if needed + + tol = 0.02 + is_pad = min_ > tol + + assert is_contiguously_true_or_false(is_pad), ( + "One or several timestamps unexpectedly violate the tolerance." + "This might be due to synchronization issues with timestamps during data collection." + ) + + return data, is_pad diff --git a/lerobot/common/envs/factory.py b/lerobot/common/envs/factory.py index 855e073b..788af3cb 100644 --- a/lerobot/common/envs/factory.py +++ b/lerobot/common/envs/factory.py @@ -1,64 +1,40 @@ -from torchrl.envs import SerialEnv -from torchrl.envs.transforms import Compose, StepCounter, Transform, TransformedEnv +import gymnasium as gym -def make_env(cfg, transform=None): +def make_env(cfg, num_parallel_envs=0) -> gym.Env | gym.vector.SyncVectorEnv: """ - Note: The returned environment is wrapped in a torchrl.SerialEnv with cfg.rollout_batch_size underlying - environments. The env therefore returns batches.` + Note: When `num_parallel_envs > 0`, this function returns a `SyncVectorEnv` which takes batched action as input and + returns batched observation, reward, terminated, truncated of `num_parallel_envs` items. 
""" - - kwargs = { - "frame_skip": cfg.env.action_repeat, - "from_pixels": cfg.env.from_pixels, - "pixels_only": cfg.env.pixels_only, - "image_size": cfg.env.image_size, - "num_prev_obs": cfg.n_obs_steps - 1, - } + kwargs = {} if cfg.env.name == "simxarm": - from lerobot.common.envs.simxarm.env import SimxarmEnv - kwargs["task"] = cfg.env.task - clsfunc = SimxarmEnv elif cfg.env.name == "pusht": - from lerobot.common.envs.pusht.env import PushtEnv + import gym_pusht # noqa # assert kwargs["seed"] > 200, "Seed 0-200 are used for the demonstration dataset, so we don't want to seed the eval env with this range." - - clsfunc = PushtEnv + kwargs.update( + { + "obs_type": "pixels_agent_pos", + "render_action": False, + } + ) + env_fn = lambda: gym.make( # noqa: E731 + "gym_pusht/PushTPixels-v0", + render_mode="rgb_array", + max_episode_steps=cfg.env.episode_length, + **kwargs, + ) elif cfg.env.name == "aloha": - from lerobot.common.envs.aloha.env import AlohaEnv - kwargs["task"] = cfg.env.task - clsfunc = AlohaEnv else: raise ValueError(cfg.env.name) - def _make_env(seed): - nonlocal kwargs - kwargs["seed"] = seed - env = clsfunc(**kwargs) - - # limit rollout to max_steps - env = TransformedEnv(env, StepCounter(max_steps=cfg.env.episode_length)) - - if transform is not None: - # useful to add normalization - if isinstance(transform, Compose): - for tf in transform: - env.append_transform(tf.clone()) - elif isinstance(transform, Transform): - env.append_transform(transform.clone()) - else: - raise NotImplementedError() - - return env - - return SerialEnv( - cfg.rollout_batch_size, - create_env_fn=_make_env, - create_env_kwargs=[ - {"seed": env_seed} for env_seed in range(cfg.seed, cfg.seed + cfg.rollout_batch_size) - ], - ) + if num_parallel_envs == 0: + # non-batched version of the env that returns an observation of shape (c) + env = env_fn() + else: + # batched version of the env that returns an observation of shape (b, c) + env = gym.vector.SyncVectorEnv([env_fn for _ 
in range(num_parallel_envs)]) + return env diff --git a/lerobot/common/envs/simxarm/env.py b/lerobot/common/envs/simxarm/env.py index b81bf499..8ce6b24c 100644 --- a/lerobot/common/envs/simxarm/env.py +++ b/lerobot/common/envs/simxarm/env.py @@ -55,7 +55,7 @@ class SimxarmEnv(AbstractEnv): if not _has_gym: raise ImportError("Cannot import gymnasium.") - import gymnasium + import gymnasium as gym from lerobot.common.envs.simxarm.simxarm import TASKS @@ -65,7 +65,7 @@ class SimxarmEnv(AbstractEnv): self._env = TASKS[self.task]["env"]() num_actions = len(TASKS[self.task]["action_space"]) - self._action_space = gymnasium.spaces.Box(low=-1.0, high=1.0, shape=(num_actions,)) + self._action_space = gym.spaces.Box(low=-1.0, high=1.0, shape=(num_actions,)) self._action_padding = np.zeros((MAX_NUM_ACTIONS - num_actions), dtype=np.float32) if "w" not in TASKS[self.task]["action_space"]: self._action_padding[-1] = 1.0 diff --git a/lerobot/common/policies/diffusion/policy.py b/lerobot/common/policies/diffusion/policy.py index 82f39b28..a0fe0eba 100644 --- a/lerobot/common/policies/diffusion/policy.py +++ b/lerobot/common/policies/diffusion/policy.py @@ -1,18 +1,20 @@ import copy import logging import time +from collections import deque import hydra import torch +from torch import nn -from lerobot.common.policies.abstract import AbstractPolicy from lerobot.common.policies.diffusion.diffusion_unet_image_policy import DiffusionUnetImagePolicy from lerobot.common.policies.diffusion.model.lr_scheduler import get_scheduler from lerobot.common.policies.diffusion.model.multi_image_obs_encoder import MultiImageObsEncoder, RgbEncoder +from lerobot.common.policies.utils import populate_queues from lerobot.common.utils import get_safe_torch_device -class DiffusionPolicy(AbstractPolicy): +class DiffusionPolicy(nn.Module): name = "diffusion" def __init__( @@ -38,8 +40,12 @@ class DiffusionPolicy(AbstractPolicy): # parameters passed to step **kwargs, ): - super().__init__(n_action_steps) + 
super().__init__() self.cfg = cfg + self.n_obs_steps = n_obs_steps + self.n_action_steps = n_action_steps + # queues are populated during rollout of the policy, they contain the n latest observations and actions + self._queues = None noise_scheduler = hydra.utils.instantiate(cfg_noise_scheduler) rgb_model_input_shape = copy.deepcopy(shape_meta.obs.image.shape) @@ -100,76 +106,58 @@ class DiffusionPolicy(AbstractPolicy): last_epoch=self.global_step - 1, ) + def reset(self): + """ + Clear observation and action queues. Should be called on `env.reset()` + """ + self._queues = { + "observation.image": deque(maxlen=self.n_obs_steps), + "observation.state": deque(maxlen=self.n_obs_steps), + "action": deque(maxlen=self.n_action_steps), + } + @torch.no_grad() - def select_actions(self, observation, step_count): + def select_action(self, batch, step): """ Note: this uses the ema model weights if self.training == False, otherwise the non-ema model weights. """ - # TODO(rcadene): remove unused step_count - del step_count + # TODO(rcadene): remove unused step + del step + assert "observation.image" in batch + assert "observation.state" in batch + assert len(batch) == 2 - obs_dict = { - "image": observation["image"], - "agent_pos": observation["state"], - } - if self.training: - out = self.diffusion.predict_action(obs_dict) - else: - out = self.ema_diffusion.predict_action(obs_dict) - action = out["action"] + self._queues = populate_queues(self._queues, batch) + + if len(self._queues["action"]) == 0: + # stack n latest observations from the queue + batch = {key: torch.stack(list(self._queues[key]), dim=1) for key in batch} + + obs_dict = { + "image": batch["observation.image"], + "agent_pos": batch["observation.state"], + } + if self.training: + out = self.diffusion.predict_action(obs_dict) + else: + out = self.ema_diffusion.predict_action(obs_dict) + self._queues["action"].extend(out["action"].transpose(0, 1)) + + action = self._queues["action"].popleft() return action - def 
update(self, replay_buffer, step): + def forward(self, batch, step): start_time = time.time() self.diffusion.train() - num_slices = self.cfg.batch_size - batch_size = self.cfg.horizon * num_slices - - assert batch_size % self.cfg.horizon == 0 - assert batch_size % num_slices == 0 - - def process_batch(batch, horizon, num_slices): - # trajectory t = 64, horizon h = 16 - # (t h) ... -> t h ... - batch = batch.reshape(num_slices, horizon) # .transpose(1, 0).contiguous() - - # |-1|0|1|2|3|4|5|6|7|8|9|10|11|12|13|14| timestamps: 16 - # |o|o| observations: 2 - # | |a|a|a|a|a|a|a|a| actions executed: 8 - # |p|p|p|p|p|p|p|p|p|p|p| p| p| p| p| p| actions predicted: 16 - # note: we predict the action needed to go from t=-1 to t=0 similarly to an inverse kinematic model - - image = batch["observation", "image"] - state = batch["observation", "state"] - action = batch["action"] - assert image.shape[1] == horizon - assert state.shape[1] == horizon - assert action.shape[1] == horizon - - if not (horizon == 16 and self.cfg.n_obs_steps == 2): - raise NotImplementedError() - - # keep first 2 observations of the slice corresponding to t=[-1,0] - image = image[:, : self.cfg.n_obs_steps] - state = state[:, : self.cfg.n_obs_steps] - - out = { - "obs": { - "image": image.to(self.device, non_blocking=True), - "agent_pos": state.to(self.device, non_blocking=True), - }, - "action": action.to(self.device, non_blocking=True), - } - return out - - batch = replay_buffer.sample(batch_size) - batch = process_batch(batch, self.cfg.horizon, num_slices) - data_s = time.time() - start_time - loss = self.diffusion.compute_loss(batch) + obs_dict = { + "image": batch["observation.image"], + "agent_pos": batch["observation.state"], + } + loss = self.diffusion.compute_loss(obs_dict) loss.backward() grad_norm = torch.nn.utils.clip_grad_norm_( diff --git a/lerobot/common/transforms.py b/lerobot/common/transforms.py index 4832c91b..ec967614 100644 --- a/lerobot/common/transforms.py +++ 
b/lerobot/common/transforms.py @@ -1,53 +1,49 @@ -from typing import Sequence - import torch -from tensordict import TensorDictBase -from tensordict.nn import dispatch -from tensordict.utils import NestedKey -from torchrl.envs.transforms import ObservationTransform, Transform +from torchvision.transforms.v2 import Compose, Transform -class Prod(ObservationTransform): +def apply_inverse_transform(item, transform): + transforms = transform.transforms if isinstance(transform, Compose) else [transform] + for tf in transforms[::-1]: + if tf.invertible: + item = tf.inverse_transform(item) + else: + raise ValueError(f"Inverse transform called on a non invertible transform ({tf}).") + return item + + +class Prod(Transform): invertible = True - def __init__(self, in_keys: Sequence[NestedKey], prod: float): + def __init__(self, in_keys: list[str], prod: float): super().__init__() self.in_keys = in_keys self.prod = prod self.original_dtypes = {} - def _reset(self, tensordict: TensorDictBase, tensordict_reset: TensorDictBase) -> TensorDictBase: - # _reset is called once when the environment reset to normalize the first observation - tensordict_reset = self._call(tensordict_reset) - return tensordict_reset - - @dispatch(source="in_keys", dest="out_keys") - def forward(self, tensordict: TensorDictBase) -> TensorDictBase: - return self._call(tensordict) - - def _call(self, td): + def forward(self, item): for key in self.in_keys: - if td.get(key, None) is None: + if key not in item: continue - self.original_dtypes[key] = td[key].dtype - td[key] = td[key].type(torch.float32) * self.prod - return td + self.original_dtypes[key] = item[key].dtype + item[key] = item[key].type(torch.float32) * self.prod + return item - def _inv_call(self, td: TensorDictBase) -> TensorDictBase: + def inverse_transform(self, item): for key in self.in_keys: - if td.get(key, None) is None: + if key not in item: continue - td[key] = (td[key] / self.prod).type(self.original_dtypes[key]) - return td + 
item[key] = (item[key] / self.prod).type(self.original_dtypes[key]) + return item - def transform_observation_spec(self, obs_spec): - for key in self.in_keys: - if obs_spec.get(key, None) is None: - continue - obs_spec[key].space.high = obs_spec[key].space.high.type(torch.float32) * self.prod - obs_spec[key].space.low = obs_spec[key].space.low.type(torch.float32) * self.prod - obs_spec[key].dtype = torch.float32 - return obs_spec + # def transform_observation_spec(self, obs_spec): + # for key in self.in_keys: + # if obs_spec.get(key, None) is None: + # continue + # obs_spec[key].space.high = obs_spec[key].space.high.type(torch.float32) * self.prod + # obs_spec[key].space.low = obs_spec[key].space.low.type(torch.float32) * self.prod + # obs_spec[key].dtype = torch.float32 + # return obs_spec class NormalizeTransform(Transform): @@ -55,65 +51,50 @@ class NormalizeTransform(Transform): def __init__( self, - stats: TensorDictBase, - in_keys: Sequence[NestedKey] = None, - out_keys: Sequence[NestedKey] | None = None, - in_keys_inv: Sequence[NestedKey] | None = None, - out_keys_inv: Sequence[NestedKey] | None = None, + stats: dict, + in_keys: list[str] = None, + out_keys: list[str] | None = None, + in_keys_inv: list[str] | None = None, + out_keys_inv: list[str] | None = None, mode="mean_std", ): - if out_keys is None: - out_keys = in_keys - if in_keys_inv is None: - in_keys_inv = out_keys - if out_keys_inv is None: - out_keys_inv = in_keys - super().__init__( - in_keys=in_keys, out_keys=out_keys, in_keys_inv=in_keys_inv, out_keys_inv=out_keys_inv - ) + super().__init__() + self.in_keys = in_keys + self.out_keys = in_keys if out_keys is None else out_keys + self.in_keys_inv = self.out_keys if in_keys_inv is None else in_keys_inv + self.out_keys_inv = self.in_keys if out_keys_inv is None else out_keys_inv self.stats = stats assert mode in ["mean_std", "min_max"] self.mode = mode - def _reset(self, tensordict: TensorDictBase, tensordict_reset: TensorDictBase) -> 
TensorDictBase: - # _reset is called once when the environment reset to normalize the first observation - tensordict_reset = self._call(tensordict_reset) - return tensordict_reset - - @dispatch(source="in_keys", dest="out_keys") - def forward(self, tensordict: TensorDictBase) -> TensorDictBase: - return self._call(tensordict) - - def _call(self, td: TensorDictBase) -> TensorDictBase: + def forward(self, item): for inkey, outkey in zip(self.in_keys, self.out_keys, strict=False): - # TODO(rcadene): don't know how to do `inkey not in td` - if td.get(inkey, None) is None: + if inkey not in item: continue if self.mode == "mean_std": mean = self.stats[inkey]["mean"] std = self.stats[inkey]["std"] - td[outkey] = (td[inkey] - mean) / (std + 1e-8) + item[outkey] = (item[inkey] - mean) / (std + 1e-8) else: min = self.stats[inkey]["min"] max = self.stats[inkey]["max"] # normalize to [0,1] - td[outkey] = (td[inkey] - min) / (max - min) + item[outkey] = (item[inkey] - min) / (max - min) # normalize to [-1, 1] - td[outkey] = td[outkey] * 2 - 1 - return td + item[outkey] = item[outkey] * 2 - 1 + return item - def _inv_call(self, td: TensorDictBase) -> TensorDictBase: + def inverse_transform(self, item): for inkey, outkey in zip(self.in_keys_inv, self.out_keys_inv, strict=False): - # TODO(rcadene): don't know how to do `inkey not in td` - if td.get(inkey, None) is None: + if inkey not in item: continue if self.mode == "mean_std": mean = self.stats[inkey]["mean"] std = self.stats[inkey]["std"] - td[outkey] = td[inkey] * std + mean + item[outkey] = item[inkey] * std + mean else: min = self.stats[inkey]["min"] max = self.stats[inkey]["max"] - td[outkey] = (td[inkey] + 1) / 2 - td[outkey] = td[outkey] * (max - min) + min - return td + item[outkey] = (item[inkey] + 1) / 2 + item[outkey] = item[outkey] * (max - min) + min + return item diff --git a/lerobot/scripts/eval.py b/lerobot/scripts/eval.py index 216769d6..fe0f7bb2 100644 --- a/lerobot/scripts/eval.py +++ 
b/lerobot/scripts/eval.py @@ -36,111 +36,196 @@ from datetime import datetime as dt from pathlib import Path import einops +import gymnasium as gym +import hydra import imageio import numpy as np import torch -import tqdm from huggingface_hub import snapshot_download -from tensordict.nn import TensorDictModule -from torchrl.envs import EnvBase -from torchrl.envs.batched_envs import BatchedEnvBase -from lerobot.common.datasets.factory import make_offline_buffer +from lerobot.common.datasets.factory import make_dataset from lerobot.common.envs.factory import make_env from lerobot.common.logger import log_output_dir -from lerobot.common.policies.abstract import AbstractPolicy from lerobot.common.policies.factory import make_policy from lerobot.common.utils import get_safe_torch_device, init_hydra_config, init_logging, set_global_seed +from lerobot.common.transforms import apply_inverse_transform def write_video(video_path, stacked_frames, fps): imageio.mimsave(video_path, stacked_frames, fps=fps) +def preprocess_observation(observation, transform=None): + # map to expected inputs for the policy + obs = { + "observation.image": torch.from_numpy(observation["pixels"]).float(), + "observation.state": torch.from_numpy(observation["agent_pos"]).float(), + } + # convert to (b c h w) torch format + obs["observation.image"] = einops.rearrange(obs["observation.image"], "b h w c -> b c h w") + + # apply same transforms as in training + if transform is not None: + for key in obs: + obs[key] = torch.stack([transform({key: item})[key] for item in obs[key]]) + + return obs + + +def postprocess_action(action, transform=None): + action = action.to("cpu") + # action is a batch (num_env,action_dim) instead of an item (action_dim), + # we assume applying inverse transform on a batch works the same + action = apply_inverse_transform({"action": action}, transform)["action"].numpy() + assert ( + action.ndim == 2 + ), "we assume dimensions are respectively the number of parallel envs, 
action dimensions" + return action + + def eval_policy( - env: BatchedEnvBase, - policy: AbstractPolicy, - num_episodes: int = 10, - max_steps: int = 30, + env: gym.vector.VectorEnv, + policy, save_video: bool = False, video_dir: Path = None, + # TODO(rcadene): make it possible to overwrite fps? we should use env.fps fps: int = 15, return_first_video: bool = False, + transform: callable = None, ): if policy is not None: policy.eval() start = time.time() sum_rewards = [] max_rewards = [] - successes = [] + all_successes = [] seeds = [] threads = [] # for video saving threads episode_counter = 0 # for saving the correct number of videos + num_episodes = len(env.envs) + # TODO(alexander-soare): if num_episodes is not evenly divisible by the batch size, this will do more work than # needed as I'm currently taking a ceil. - for i in tqdm.tqdm(range(-(-num_episodes // env.batch_size[0]))): - ep_frames = [] + ep_frames = [] - def maybe_render_frame(env: EnvBase, _): - if save_video or (return_first_video and i == 0): # noqa: B023 - ep_frames.append(env.render()) # noqa: B023 + def maybe_render_frame(env): + if save_video: # noqa: B023 + if return_first_video: + visu = env.envs[0].render() + visu = visu[None, ...] # add batch dim + else: + visu = np.stack([env.render() for env in env.envs]) + ep_frames.append(visu) # noqa: B023 - # Clear the policy's action queue before the start of a new rollout. - if policy is not None: - policy.clear_action_queue() + for _ in range(num_episodes): + seeds.append("TODO") - if env.is_closed: - env.start() # needed to be able to get the seeds the first time as BatchedEnvs are lazy - seeds.extend(env._next_seed) + if hasattr(policy, "reset"): + policy.reset() + else: + logging.warning( + f"Policy {policy} doesnt have a `reset` method. This find if the policy doesnt rely on an internal state during rollout." 
+ ) + + # reset the environment + observation, info = env.reset(seed=cfg.seed) + maybe_render_frame(env) + + rewards = [] + successes = [] + dones = [] + + done = torch.tensor([False for _ in env.envs]) + step = 0 + do_rollout = True + while do_rollout: + # apply transform to normalize the observations + observation = preprocess_observation(observation, transform) + + # send observation to device/gpu + observation = {key: observation[key].to(cfg.device, non_blocking=True) for key in observation} + + # get the next action for the environment with torch.inference_mode(): - # TODO(alexander-soare): When `break_when_any_done == False` this rolls out for max_steps even when all - # envs are done the first time. But we only use the first rollout. This is a waste of compute. - rollout = env.rollout( - max_steps=max_steps, - policy=policy, - auto_cast_to_device=True, - callback=maybe_render_frame, - break_when_any_done=env.batch_size[0] == 1, - ) - # Figure out where in each rollout sequence the first done condition was encountered (results after - # this won't be included). - # Note: this assumes that the shape of the done key is (batch_size, max_steps, 1). - # Note: this relies on a property of argmax: that it returns the first occurrence as a tiebreaker. 
- rollout_steps = rollout["next", "done"].shape[1] - done_indices = torch.argmax(rollout["next", "done"].to(int), axis=1) # (batch_size, rollout_steps) - mask = (torch.arange(rollout_steps) <= done_indices).unsqueeze(-1) # (batch_size, rollout_steps, 1) - batch_sum_reward = einops.reduce((rollout["next", "reward"] * mask), "b n 1 -> b", "sum") - batch_max_reward = einops.reduce((rollout["next", "reward"] * mask), "b n 1 -> b", "max") - batch_success = einops.reduce((rollout["next", "success"] * mask), "b n 1 -> b", "any") - sum_rewards.extend(batch_sum_reward.tolist()) - max_rewards.extend(batch_max_reward.tolist()) - successes.extend(batch_success.tolist()) + action = policy.select_action(observation, step) - if save_video or (return_first_video and i == 0): - batch_stacked_frames = np.stack(ep_frames) # (t, b, *) - batch_stacked_frames = batch_stacked_frames.transpose( - 1, 0, *range(2, batch_stacked_frames.ndim) - ) # (b, t, *) + # apply inverse transform to unnormalize the action + action = postprocess_action(action, transform) - if save_video: - for stacked_frames, done_index in zip( - batch_stacked_frames, done_indices.flatten().tolist(), strict=False - ): - if episode_counter >= num_episodes: - continue - video_dir.mkdir(parents=True, exist_ok=True) - video_path = video_dir / f"eval_episode_{episode_counter}.mp4" - thread = threading.Thread( - target=write_video, - args=(str(video_path), stacked_frames[:done_index], fps), - ) - thread.start() - threads.append(thread) - episode_counter += 1 + # apply the next + observation, reward, terminated, truncated, info = env.step(action) + maybe_render_frame(env) - if return_first_video and i == 0: - first_video = batch_stacked_frames[0].transpose(0, 3, 1, 2) + # TODO(rcadene): implement a wrapper over env to return torch tensors in float32 (and cuda?) 
+ reward = torch.from_numpy(reward) + terminated = torch.from_numpy(terminated) + truncated = torch.from_numpy(truncated) + # environment is considered done (no more steps), when success state is reached (terminated is True), + # or time limit is reached (truncated is True), or it was previsouly done. + done = terminated | truncated | done + + if "final_info" in info: + # VectorEnv stores is_success into `info["final_info"][env_id]["is_success"]` instead of `info["is_success"]` + success = [ + env_info["is_success"] if env_info is not None else False for env_info in info["final_info"] + ] + else: + success = [False for _ in env.envs] + success = torch.tensor(success) + + rewards.append(reward) + dones.append(done) + successes.append(success) + + step += 1 + + if done.all(): + do_rollout = False + break + + rewards = torch.stack(rewards, dim=1) + successes = torch.stack(successes, dim=1) + dones = torch.stack(dones, dim=1) + + # Figure out where in each rollout sequence the first done condition was encountered (results after + # this won't be included). + # Note: this assumes that the shape of the done key is (batch_size, max_steps). + # Note: this relies on a property of argmax: that it returns the first occurrence as a tiebreaker. 
+ done_indices = torch.argmax(dones.to(int), axis=1) # (batch_size, rollout_steps) + expand_done_indices = done_indices[:, None].expand(-1, step) + expand_step_indices = torch.arange(step)[None, :].expand(num_episodes, -1) + mask = (expand_step_indices <= expand_done_indices).int() # (batch_size, rollout_steps) + batch_sum_reward = einops.reduce((rewards * mask), "b n -> b", "sum") + batch_max_reward = einops.reduce((rewards * mask), "b n -> b", "max") + batch_success = einops.reduce((successes * mask), "b n -> b", "any") + sum_rewards.extend(batch_sum_reward.tolist()) + max_rewards.extend(batch_max_reward.tolist()) + all_successes.extend(batch_success.tolist()) + + env.close() + + if save_video or return_first_video: + batch_stacked_frames = np.stack(ep_frames, 1) # (b, t, *) + + if save_video: + for stacked_frames, done_index in zip( + batch_stacked_frames, done_indices.flatten().tolist(), strict=False + ): + if episode_counter >= num_episodes: + continue + video_dir.mkdir(parents=True, exist_ok=True) + video_path = video_dir / f"eval_episode_{episode_counter}.mp4" + thread = threading.Thread( + target=write_video, + args=(str(video_path), stacked_frames[:done_index], fps), + ) + thread.start() + threads.append(thread) + episode_counter += 1 + + if return_first_video: + first_video = batch_stacked_frames[0].transpose(0, 3, 1, 2) for thread in threads: thread.join() @@ -158,16 +243,16 @@ def eval_policy( zip( sum_rewards[:num_episodes], max_rewards[:num_episodes], - successes[:num_episodes], + all_successes[:num_episodes], seeds[:num_episodes], strict=True, ) ) ], "aggregated": { - "avg_sum_reward": np.nanmean(sum_rewards[:num_episodes]), - "avg_max_reward": np.nanmean(max_rewards[:num_episodes]), - "pc_success": np.nanmean(successes[:num_episodes]) * 100, + "avg_sum_reward": float(np.nanmean(sum_rewards[:num_episodes])), + "avg_max_reward": float(np.nanmean(max_rewards[:num_episodes])), + "pc_success": float(np.nanmean(all_successes[:num_episodes]) * 100), 
"eval_s": time.time() - start, "eval_ep_s": (time.time() - start) / num_episodes, }, @@ -194,21 +279,13 @@ def eval(cfg: dict, out_dir=None, stats_path=None): logging.info("Making transforms.") # TODO(alexander-soare): Completely decouple datasets from evaluation. - offline_buffer = make_offline_buffer(cfg, stats_path=stats_path) + dataset = make_dataset(cfg, stats_path=stats_path) logging.info("Making environment.") - env = make_env(cfg, transform=offline_buffer.transform) + env = make_env(cfg, num_parallel_envs=cfg.eval_episodes) - if cfg.policy.pretrained_model_path: - policy = make_policy(cfg) - policy = TensorDictModule( - policy, - in_keys=["observation", "step_count"], - out_keys=["action"], - ) - else: - # when policy is None, rollout a random policy - policy = None + # when policy is None, rollout a random policy + policy = make_policy(cfg) if cfg.policy.pretrained_model_path else None info = eval_policy( env, @@ -216,8 +293,8 @@ def eval(cfg: dict, out_dir=None, stats_path=None): save_video=True, video_dir=Path(out_dir) / "eval", fps=cfg.env.fps, - max_steps=cfg.env.episode_length, - num_episodes=cfg.eval_episodes, + # TODO(rcadene): what should we do with the transform? 
+ transform=dataset.transform, ) print(info["aggregated"]) diff --git a/lerobot/scripts/train.py b/lerobot/scripts/train.py index 18c3715b..5e9cd361 100644 --- a/lerobot/scripts/train.py +++ b/lerobot/scripts/train.py @@ -1,14 +1,12 @@ import logging +from itertools import cycle from pathlib import Path import hydra import numpy as np import torch -from tensordict.nn import TensorDictModule -from torchrl.data import LazyMemmapStorage, TensorDictReplayBuffer -from torchrl.data.replay_buffers import PrioritizedSliceSampler -from lerobot.common.datasets.factory import make_offline_buffer +from lerobot.common.datasets.factory import make_dataset from lerobot.common.envs.factory import make_env from lerobot.common.logger import Logger, log_output_dir from lerobot.common.policies.factory import make_policy @@ -34,7 +32,7 @@ def train_notebook(out_dir=None, job_name=None, config_name="default", config_pa train(cfg, out_dir=out_dir, job_name=job_name) -def log_train_info(logger, info, step, cfg, offline_buffer, is_offline): +def log_train_info(logger, info, step, cfg, dataset, is_offline): loss = info["loss"] grad_norm = info["grad_norm"] lr = info["lr"] @@ -44,9 +42,9 @@ def log_train_info(logger, info, step, cfg, offline_buffer, is_offline): # A sample is an (observation,action) pair, where observation and action # can be on multiple timestamps. In a batch, we have `batch_size`` number of samples. 
num_samples = (step + 1) * cfg.policy.batch_size - avg_samples_per_ep = offline_buffer.num_samples / offline_buffer.num_episodes + avg_samples_per_ep = dataset.num_samples / dataset.num_episodes num_episodes = num_samples / avg_samples_per_ep - num_epochs = num_samples / offline_buffer.num_samples + num_epochs = num_samples / dataset.num_samples log_items = [ f"step:{format_big_number(step)}", # number of samples seen during training @@ -73,7 +71,7 @@ def log_train_info(logger, info, step, cfg, offline_buffer, is_offline): logger.log_dict(info, step, mode="train") -def log_eval_info(logger, info, step, cfg, offline_buffer, is_offline): +def log_eval_info(logger, info, step, cfg, dataset, is_offline): eval_s = info["eval_s"] avg_sum_reward = info["avg_sum_reward"] pc_success = info["pc_success"] @@ -81,9 +79,9 @@ def log_eval_info(logger, info, step, cfg, offline_buffer, is_offline): # A sample is an (observation,action) pair, where observation and action # can be on multiple timestamps. In a batch, we have `batch_size`` number of samples. 
num_samples = (step + 1) * cfg.policy.batch_size - avg_samples_per_ep = offline_buffer.num_samples / offline_buffer.num_episodes + avg_samples_per_ep = dataset.num_samples / dataset.num_episodes num_episodes = num_samples / avg_samples_per_ep - num_epochs = num_samples / offline_buffer.num_samples + num_epochs = num_samples / dataset.num_samples log_items = [ f"step:{format_big_number(step)}", # number of samples seen during training @@ -124,30 +122,30 @@ def train(cfg: dict, out_dir=None, job_name=None): torch.backends.cuda.matmul.allow_tf32 = True set_global_seed(cfg.seed) - logging.info("make_offline_buffer") - offline_buffer = make_offline_buffer(cfg) + logging.info("make_dataset") + dataset = make_dataset(cfg) # TODO(rcadene): move balanced_sampling, per_alpha, per_beta outside policy - if cfg.policy.balanced_sampling: - logging.info("make online_buffer") - num_traj_per_batch = cfg.policy.batch_size + # if cfg.policy.balanced_sampling: + # logging.info("make online_buffer") + # num_traj_per_batch = cfg.policy.batch_size - online_sampler = PrioritizedSliceSampler( - max_capacity=100_000, - alpha=cfg.policy.per_alpha, - beta=cfg.policy.per_beta, - num_slices=num_traj_per_batch, - strict_length=True, - ) + # online_sampler = PrioritizedSliceSampler( + # max_capacity=100_000, + # alpha=cfg.policy.per_alpha, + # beta=cfg.policy.per_beta, + # num_slices=num_traj_per_batch, + # strict_length=True, + # ) - online_buffer = TensorDictReplayBuffer( - storage=LazyMemmapStorage(100_000), - sampler=online_sampler, - transform=offline_buffer.transform, - ) + # online_buffer = TensorDictReplayBuffer( + # storage=LazyMemmapStorage(100_000), + # sampler=online_sampler, + # transform=dataset.transform, + # ) logging.info("make_env") - env = make_env(cfg, transform=offline_buffer.transform) + env = make_env(cfg) logging.info("make_policy") policy = make_policy(cfg) @@ -155,8 +153,6 @@ def train(cfg: dict, out_dir=None, job_name=None): num_learnable_params = sum(p.numel() for p in 
policy.parameters() if p.requires_grad) num_total_params = sum(p.numel() for p in policy.parameters()) - td_policy = TensorDictModule(policy, in_keys=["observation", "step_count"], out_keys=["action"]) - # log metrics to terminal and wandb logger = Logger(out_dir, job_name, cfg) @@ -165,8 +161,8 @@ def train(cfg: dict, out_dir=None, job_name=None): logging.info(f"{cfg.offline_steps=} ({format_big_number(cfg.offline_steps)})") logging.info(f"{cfg.online_steps=}") logging.info(f"{cfg.env.action_repeat=}") - logging.info(f"{offline_buffer.num_samples=} ({format_big_number(offline_buffer.num_samples)})") - logging.info(f"{offline_buffer.num_episodes=}") + logging.info(f"{dataset.num_samples=} ({format_big_number(dataset.num_samples)})") + logging.info(f"{dataset.num_episodes=}") logging.info(f"{num_learnable_params=} ({format_big_number(num_learnable_params)})") logging.info(f"{num_total_params=} ({format_big_number(num_total_params)})") @@ -176,14 +172,15 @@ def train(cfg: dict, out_dir=None, job_name=None): logging.info(f"Eval policy at step {step}") eval_info, first_video = eval_policy( env, - td_policy, + policy, num_episodes=cfg.eval_episodes, max_steps=cfg.env.episode_length, return_first_video=True, video_dir=Path(out_dir) / "eval", save_video=True, + transform=dataset.transform, ) - log_eval_info(logger, eval_info["aggregated"], step, cfg, offline_buffer, is_offline) + log_eval_info(logger, eval_info["aggregated"], step, cfg, dataset, is_offline) if cfg.wandb.enable: logger.log_video(first_video, step, mode="eval") logging.info("Resume training") @@ -196,14 +193,29 @@ def train(cfg: dict, out_dir=None, job_name=None): step = 0 # number of policy update (forward + backward + optim) is_offline = True + dataloader = torch.utils.data.DataLoader( + dataset, + num_workers=4, + batch_size=cfg.policy.batch_size, + shuffle=True, + pin_memory=cfg.device != "cpu", + drop_last=True, + ) + dl_iter = cycle(dataloader) for offline_step in range(cfg.offline_steps): if 
offline_step == 0: logging.info("Start offline training on a fixed dataset") - # TODO(rcadene): is it ok if step_t=0 = 0 and not 1 as previously done? policy.train() - train_info = policy.update(offline_buffer, step) + batch = next(dl_iter) + + for key in batch: + batch[key] = batch[key].to(cfg.device, non_blocking=True) + + train_info = policy.update(batch, step) + + # TODO(rcadene): is it ok if step_t=0 = 0 and not 1 as previously done? if step % cfg.log_freq == 0: - log_train_info(logger, train_info, step, cfg, offline_buffer, is_offline) + log_train_info(logger, train_info, step, cfg, dataset, is_offline) # Note: _maybe_eval_and_maybe_save happens **after** the `step`th training update has completed, so we pass in # step + 1. @@ -211,7 +223,7 @@ def train(cfg: dict, out_dir=None, job_name=None): step += 1 - demo_buffer = offline_buffer if cfg.policy.balanced_sampling else None + demo_buffer = dataset if cfg.policy.balanced_sampling else None online_step = 0 is_offline = False for env_step in range(cfg.online_steps): @@ -221,7 +233,7 @@ def train(cfg: dict, out_dir=None, job_name=None): with torch.no_grad(): rollout = env.rollout( max_steps=cfg.env.episode_length, - policy=td_policy, + policy=policy, auto_cast_to_device=True, ) @@ -242,7 +254,7 @@ def train(cfg: dict, out_dir=None, job_name=None): # set same episode index for all time steps contained in this rollout rollout["episode"] = torch.tensor([env_step] * len(rollout), dtype=torch.int) - online_buffer.extend(rollout) + # online_buffer.extend(rollout) ep_sum_reward = rollout["next", "reward"].sum() ep_max_reward = rollout["next", "reward"].max() @@ -257,13 +269,13 @@ def train(cfg: dict, out_dir=None, job_name=None): for _ in range(cfg.policy.utd): train_info = policy.update( - online_buffer, + # online_buffer, step, demo_buffer=demo_buffer, ) if step % cfg.log_freq == 0: train_info.update(rollout_info) - log_train_info(logger, train_info, step, cfg, offline_buffer, is_offline) + log_train_info(logger, 
train_info, step, cfg, dataset, is_offline) # Note: _maybe_eval_and_maybe_save happens **after** the `step`th training update has completed, so we pass # in step + 1. diff --git a/lerobot/scripts/visualize_dataset.py b/lerobot/scripts/visualize_dataset.py index 3dd7cdfa..93315e90 100644 --- a/lerobot/scripts/visualize_dataset.py +++ b/lerobot/scripts/visualize_dataset.py @@ -10,7 +10,7 @@ from torchrl.data.replay_buffers import ( SamplerWithoutReplacement, ) -from lerobot.common.datasets.factory import make_offline_buffer +from lerobot.common.datasets.factory import make_dataset from lerobot.common.logger import log_output_dir from lerobot.common.utils import init_logging @@ -44,8 +44,8 @@ def visualize_dataset(cfg: dict, out_dir=None): shuffle=False, ) - logging.info("make_offline_buffer") - offline_buffer = make_offline_buffer( + logging.info("make_dataset") + dataset = make_dataset( cfg, overwrite_sampler=sampler, # remove all transformations such as rescale images from [0,255] to [0,1] or normalization @@ -55,12 +55,12 @@ def visualize_dataset(cfg: dict, out_dir=None): ) logging.info("Start rendering episodes from offline buffer") - video_paths = render_dataset(offline_buffer, out_dir, MAX_NUM_STEPS * NUM_EPISODES_TO_RENDER, cfg.fps) + video_paths = render_dataset(dataset, out_dir, MAX_NUM_STEPS * NUM_EPISODES_TO_RENDER, cfg.fps) for video_path in video_paths: logging.info(video_path) -def render_dataset(offline_buffer, out_dir, max_num_samples, fps): +def render_dataset(dataset, out_dir, max_num_samples, fps): out_dir = Path(out_dir) video_paths = [] threads = [] @@ -69,17 +69,17 @@ def render_dataset(offline_buffer, out_dir, max_num_samples, fps): logging.info(f"Visualizing episode {current_ep_idx}") for i in range(max_num_samples): # TODO(rcadene): make it work with bsize > 1 - ep_td = offline_buffer.sample(1) + ep_td = dataset.sample(1) ep_idx = ep_td["episode"][FIRST_FRAME].item() - # TODO(rcadene): modify offline_buffer._sampler._sample_list or sampler to 
randomly sample an episode, but sequentially sample frames - num_frames_left = offline_buffer._sampler._sample_list.numel() + # TODO(rcadene): modify dataset._sampler._sample_list or sampler to randomly sample an episode, but sequentially sample frames + num_frames_left = dataset._sampler._sample_list.numel() episode_is_done = ep_idx != current_ep_idx if episode_is_done: logging.info(f"Rendering episode {current_ep_idx}") - for im_key in offline_buffer.image_keys: + for im_key in dataset.image_keys: if not episode_is_done and num_frames_left > 0 and i < (max_num_samples - 1): # when first frame of episode, initialize frames dict if im_key not in frames: @@ -93,7 +93,7 @@ def render_dataset(offline_buffer, out_dir, max_num_samples, fps): frames[im_key].append(ep_td["next"][im_key]) out_dir.mkdir(parents=True, exist_ok=True) - if len(offline_buffer.image_keys) > 1: + if len(dataset.image_keys) > 1: camera = im_key[-1] video_path = out_dir / f"episode_{current_ep_idx}_{camera}.mp4" else: diff --git a/poetry.lock b/poetry.lock index 72397001..8fb6b7a7 100644 --- a/poetry.lock +++ b/poetry.lock @@ -879,6 +879,29 @@ files = [ [package.extras] protobuf = ["grpcio-tools (>=1.62.1)"] +[[package]] +name = "gym-pusht" +version = "0.1.0" +description = "PushT environment for LeRobot" +optional = true +python-versions = "^3.10" +files = [] +develop = false + +[package.dependencies] +gymnasium = "^0.29.1" +opencv-python = "^4.9.0.80" +pygame = "^2.5.2" +pymunk = "^6.6.0" +scikit-image = "^0.22.0" +shapely = "^2.0.3" + +[package.source] +type = "git" +url = "git@github.com:huggingface/gym-pusht.git" +reference = "HEAD" +resolved_reference = "d7e1a39a31b1368741e9674791007d7cccf046a3" + [[package]] name = "gymnasium" version = "0.29.1" @@ -3586,7 +3609,10 @@ files = [ docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] testing = ["big-O", "jaraco.functools", "jaraco.itertools", "more-itertools", "pytest 
(>=6)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-ignore-flaky", "pytest-mypy", "pytest-ruff (>=0.2.1)"] +[extras] +pusht = ["gym_pusht"] + [metadata] lock-version = "2.0" python-versions = "^3.10" -content-hash = "174c7d42f8039eedd2c447a4e6cae5169782cbd94346b5606572a0010194ca05" +content-hash = "3eee17e4bf2b7a570f41ef9c400ec5a24a3113f62a13162229cf43504ca0d005" diff --git a/pyproject.toml b/pyproject.toml index 972c1b61..d0fc7c0d 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -52,7 +52,10 @@ robomimic = "0.2.0" gymnasium-robotics = "^1.2.4" gymnasium = "^0.29.1" cmake = "^3.29.0.1" +gym_pusht = { git = "git@github.com:huggingface/gym-pusht.git", optional = true} +[tool.poetry.extras] +pusht = ["gym_pusht"] [tool.poetry.group.dev.dependencies] pre-commit = "^3.6.2" diff --git a/tests/test_datasets.py b/tests/test_datasets.py index df41b03f..f7f80a42 100644 --- a/tests/test_datasets.py +++ b/tests/test_datasets.py @@ -6,6 +6,8 @@ from torchrl.data.replay_buffers.samplers import SamplerWithoutReplacement from lerobot.common.datasets.factory import make_offline_buffer from lerobot.common.utils import init_hydra_config +import logging +from lerobot.common.datasets.factory import make_dataset from .utils import DEVICE, DEFAULT_CONFIG_PATH @@ -26,14 +28,29 @@ def test_factory(env_name, dataset_id): DEFAULT_CONFIG_PATH, overrides=[f"env={env_name}", f"env.task={dataset_id}", f"device={DEVICE}"] ) - offline_buffer = make_offline_buffer(cfg) - for key in offline_buffer.image_keys: - img = offline_buffer[0].get(key) + dataset = make_dataset(cfg) + + item = dataset[0] + + assert "action" in item + assert "episode" in item + assert "frame_id" in item + assert "timestamp" in item + assert "next.done" in item + # TODO(rcadene): should we rename it agent_pos? 
+ assert "observation.state" in item + for key in dataset.image_keys: + img = item.get(key) assert img.dtype == torch.float32 # TODO(rcadene): we assume for now that image normalization takes place in the model assert img.max() <= 1.0 assert img.min() >= 0.0 + if "next.reward" not in item: + logging.warning(f'Missing "next.reward" key in dataset {dataset}.') + if "next.done" not in item: + logging.warning(f'Missing "next.done" key in dataset {dataset}.') + def test_compute_stats(): """Check that the statistics are computed correctly according to the stats_patterns property. diff --git a/tests/test_envs.py b/tests/test_envs.py index eb3746db..0c56f4fc 100644 --- a/tests/test_envs.py +++ b/tests/test_envs.py @@ -2,7 +2,7 @@ import pytest from tensordict import TensorDict import torch from torchrl.envs.utils import check_env_specs, step_mdp -from lerobot.common.datasets.factory import make_offline_buffer +from lerobot.common.datasets.factory import make_dataset from lerobot.common.envs.aloha.env import AlohaEnv from lerobot.common.envs.factory import make_env @@ -116,15 +116,15 @@ def test_factory(env_name): overrides=[f"env={env_name}", f"device={DEVICE}"], ) - offline_buffer = make_offline_buffer(cfg) + dataset = make_dataset(cfg) env = make_env(cfg) - for key in offline_buffer.image_keys: + for key in dataset.image_keys: assert env.reset().get(key).dtype == torch.uint8 check_env_specs(env) - env = make_env(cfg, transform=offline_buffer.transform) - for key in offline_buffer.image_keys: + env = make_env(cfg, transform=dataset.transform) + for key in dataset.image_keys: img = env.reset().get(key) assert img.dtype == torch.float32 # TODO(rcadene): we assume for now that image normalization takes place in the model diff --git a/tests/test_policies.py b/tests/test_policies.py index 5d6b46d0..a46c6025 100644 --- a/tests/test_policies.py +++ b/tests/test_policies.py @@ -7,7 +7,7 @@ from torchrl.envs import EnvBase from lerobot.common.policies.factory import make_policy 
from lerobot.common.envs.factory import make_env -from lerobot.common.datasets.factory import make_offline_buffer +from lerobot.common.datasets.factory import make_dataset from lerobot.common.policies.abstract import AbstractPolicy from lerobot.common.utils import init_hydra_config from .utils import DEVICE, DEFAULT_CONFIG_PATH @@ -45,13 +45,13 @@ def test_concrete_policy(env_name, policy_name, extra_overrides): # Check that we can make the policy object. policy = make_policy(cfg) # Check that we run select_actions and get the appropriate output. - offline_buffer = make_offline_buffer(cfg) - env = make_env(cfg, transform=offline_buffer.transform) + dataset = make_dataset(cfg) + env = make_env(cfg, transform=dataset.transform) if env_name != "aloha": # TODO(alexander-soare): Fix this part of the test. PrioritizedSliceSampler raises NotImplementedError: # seq_length as a list is not supported for now. - policy.update(offline_buffer, torch.tensor(0, device=DEVICE)) + policy.update(dataset, torch.tensor(0, device=DEVICE)) action = policy( env.observation_spec.rand()["observation"].to(DEVICE), From c93ce35d8c403db9933ae3bcf1fe23683e485d99 Mon Sep 17 00:00:00 2001 From: Cadene Date: Thu, 4 Apr 2024 16:36:03 +0000 Subject: [PATCH 06/80] WIP stats (TODO: run tests on stats + cmpute them) --- lerobot/common/datasets/abstract.py | 234 ---------------------------- lerobot/common/datasets/factory.py | 32 ++-- lerobot/common/datasets/utils.py | 101 ++++++++++++ lerobot/common/transforms.py | 16 +- tests/test_datasets.py | 60 ++++--- 5 files changed, 157 insertions(+), 286 deletions(-) delete mode 100644 lerobot/common/datasets/abstract.py diff --git a/lerobot/common/datasets/abstract.py b/lerobot/common/datasets/abstract.py deleted file mode 100644 index e9e9c610..00000000 --- a/lerobot/common/datasets/abstract.py +++ /dev/null @@ -1,234 +0,0 @@ -import logging -from copy import deepcopy -from math import ceil -from pathlib import Path -from typing import Callable - -import 
einops -import torch -import torchrl -import tqdm -from huggingface_hub import snapshot_download -from tensordict import TensorDict -from torchrl.data.replay_buffers.replay_buffers import TensorDictReplayBuffer -from torchrl.data.replay_buffers.samplers import Sampler, SamplerWithoutReplacement -from torchrl.data.replay_buffers.storages import TensorStorage, _collate_id -from torchrl.data.replay_buffers.writers import ImmutableDatasetWriter, Writer -from torchrl.envs.transforms.transforms import Compose - -HF_USER = "lerobot" - - -class AbstractDataset(TensorDictReplayBuffer): - """ - AbstractDataset represents a dataset in the context of imitation learning or reinforcement learning. - This class is designed to be subclassed by concrete implementations that specify particular types of datasets. - These implementations can vary based on the source of the data, the environment the data pertains to, - or the specific kind of data manipulation applied. - - Note: - - `TensorDictReplayBuffer` is the base class from which `AbstractDataset` inherits. It provides the foundational - functionality for storing and retrieving `TensorDict`-like data. - - `available_datasets` should be overridden by concrete subclasses to list the specific dataset variants supported. - It is expected that these variants correspond to a HuggingFace dataset on the hub. - For instance, the `AlohaDataset` which inherites from `AbstractDataset` has 4 available dataset variants: - - [aloha_sim_transfer_cube_scripted](https://huggingface.co/datasets/lerobot/aloha_sim_transfer_cube_scripted) - - [aloha_sim_insertion_scripted](https://huggingface.co/datasets/lerobot/aloha_sim_insertion_scripted) - - [aloha_sim_transfer_cube_human](https://huggingface.co/datasets/lerobot/aloha_sim_transfer_cube_human) - - [aloha_sim_insertion_human](https://huggingface.co/datasets/lerobot/aloha_sim_insertion_human) - - When implementing a concrete class (e.g. 
`AlohaDataset`, `PushtEnv`, `DiffusionPolicy`), you need to: - 1. set the required class attributes: - - for classes inheriting from `AbstractDataset`: `available_datasets` - - for classes inheriting from `AbstractEnv`: `name`, `available_tasks` - - for classes inheriting from `AbstractPolicy`: `name` - 2. update variables in `lerobot/__init__.py` (e.g. `available_envs`, `available_datasets_per_envs`, `available_policies`) - 3. update variables in `tests/test_available.py` by importing your new class - """ - - available_datasets: list[str] | None = None - - def __init__( - self, - dataset_id: str, - version: str | None = None, - batch_size: int | None = None, - *, - shuffle: bool = True, - root: Path | None = None, - pin_memory: bool = False, - prefetch: int = None, - sampler: Sampler | None = None, - collate_fn: Callable | None = None, - writer: Writer | None = None, - transform: "torchrl.envs.Transform" = None, - ): - assert ( - self.available_datasets is not None - ), "Subclasses of `AbstractDataset` should set the `available_datasets` class attribute." - assert ( - dataset_id in self.available_datasets - ), f"The provided dataset ({dataset_id}) is not on the list of available datasets {self.available_datasets}." - - self.dataset_id = dataset_id - self.version = version - self.shuffle = shuffle - self.root = root if root is None else Path(root) - - if self.root is not None and self.version is not None: - logging.warning( - f"The version of the dataset ({self.version}) is not enforced when root is provided ({self.root})." 
- ) - - storage = self._download_or_load_dataset() - - super().__init__( - storage=storage, - sampler=sampler, - writer=ImmutableDatasetWriter() if writer is None else writer, - collate_fn=_collate_id if collate_fn is None else collate_fn, - pin_memory=pin_memory, - prefetch=prefetch, - batch_size=batch_size, - transform=transform, - ) - - @property - def stats_patterns(self) -> dict: - return { - ("observation", "state"): "b c -> c", - ("observation", "image"): "b c h w -> c 1 1", - ("action",): "b c -> c", - } - - @property - def image_keys(self) -> list: - return [("observation", "image")] - - @property - def num_cameras(self) -> int: - return len(self.image_keys) - - @property - def num_samples(self) -> int: - return len(self) - - @property - def num_episodes(self) -> int: - return len(self._storage._storage["episode"].unique()) - - @property - def transform(self): - return self._transform - - def set_transform(self, transform): - if not isinstance(transform, Compose): - # required since torchrl calls `len(self._transform)` downstream - if isinstance(transform, list): - self._transform = Compose(*transform) - else: - self._transform = Compose(transform) - else: - self._transform = transform - - def compute_or_load_stats(self, batch_size: int = 32) -> TensorDict: - stats_path = self.data_dir / "stats.pth" - if stats_path.exists(): - stats = torch.load(stats_path) - else: - logging.info(f"compute_stats and save to {stats_path}") - stats = self._compute_stats(batch_size) - torch.save(stats, stats_path) - return stats - - def _download_or_load_dataset(self) -> torch.StorageBase: - if self.root is None: - self.data_dir = Path( - snapshot_download( - repo_id=f"{HF_USER}/{self.dataset_id}", repo_type="dataset", revision=self.version - ) - ) - else: - self.data_dir = self.root / self.dataset_id - return TensorStorage(TensorDict.load_memmap(self.data_dir / "replay_buffer")) - - def _compute_stats(self, batch_size: int = 32): - """Compute dataset statistics including 
minimum, maximum, mean, and standard deviation. - - TODO(alexander-soare): Add a num_batches argument which essentially allows one to use a subset of the - full dataset (for handling very large datasets). The sampling would then have to be random - (preferably without replacement). Both stats computation loops would ideally sample the same - items. - """ - rb = TensorDictReplayBuffer( - storage=self._storage, - batch_size=32, - prefetch=True, - # Note: Due to be refactored soon. The point is that we should go through the whole dataset. - sampler=SamplerWithoutReplacement(drop_last=False, shuffle=False), - ) - - # mean and std will be computed incrementally while max and min will track the running value. - mean, std, max, min = {}, {}, {}, {} - for key in self.stats_patterns: - mean[key] = torch.tensor(0.0).float() - std[key] = torch.tensor(0.0).float() - max[key] = torch.tensor(-float("inf")).float() - min[key] = torch.tensor(float("inf")).float() - - # Compute mean, min, max. - # Note: Due to be refactored soon. The point of storing `first_batch` is to make sure we don't get - # surprises when rerunning the sampler. - first_batch = None - running_item_count = 0 # for online mean computation - for _ in tqdm.tqdm(range(ceil(len(rb) / batch_size))): - batch = rb.sample() - this_batch_size = batch.batch_size[0] - running_item_count += this_batch_size - if first_batch is None: - first_batch = deepcopy(batch) - for key, pattern in self.stats_patterns.items(): - batch[key] = batch[key].float() - # Numerically stable update step for mean computation. - batch_mean = einops.reduce(batch[key], pattern, "mean") - # Hint: to update the mean we need x̄ₙ = (Nₙ₋₁x̄ₙ₋₁ + Bₙxₙ) / Nₙ, where the subscript represents - # the update step, N is the running item count, B is this batch size, x̄ is the running mean, - # and x is the current batch mean. Some rearrangement is then required to avoid risking - # numerical overflow. Another hint: Nₙ₋₁ = Nₙ - Bₙ. 
Rearrangement yields - # x̄ₙ = x̄ₙ₋₁ + Bₙ * (xₙ - x̄ₙ₋₁) / Nₙ - mean[key] = mean[key] + this_batch_size * (batch_mean - mean[key]) / running_item_count - max[key] = torch.maximum(max[key], einops.reduce(batch[key], pattern, "max")) - min[key] = torch.minimum(min[key], einops.reduce(batch[key], pattern, "min")) - - # Compute std. - first_batch_ = None - running_item_count = 0 # for online std computation - for _ in tqdm.tqdm(range(ceil(len(rb) / batch_size))): - batch = rb.sample() - this_batch_size = batch.batch_size[0] - running_item_count += this_batch_size - # Sanity check to make sure the batches are still in the same order as before. - if first_batch_ is None: - first_batch_ = deepcopy(batch) - for key in self.stats_patterns: - assert torch.equal(first_batch_[key], first_batch[key]) - for key, pattern in self.stats_patterns.items(): - batch[key] = batch[key].float() - # Numerically stable update step for mean computation (where the mean is over squared - # residuals).See notes in the mean computation loop above. 
- batch_std = einops.reduce((batch[key] - mean[key]) ** 2, pattern, "mean") - std[key] = std[key] + this_batch_size * (batch_std - std[key]) / running_item_count - - for key in self.stats_patterns: - std[key] = torch.sqrt(std[key]) - - stats = TensorDict({}, batch_size=[]) - for key in self.stats_patterns: - stats[(*key, "mean")] = mean[key] - stats[(*key, "std")] = std[key] - stats[(*key, "max")] = max[key] - stats[(*key, "min")] = min[key] - - if key[0] == "observation": - # use same stats for the next observations - stats[("next", *key)] = stats[key] - return stats diff --git a/lerobot/common/datasets/factory.py b/lerobot/common/datasets/factory.py index 94ac8ca4..32d76a50 100644 --- a/lerobot/common/datasets/factory.py +++ b/lerobot/common/datasets/factory.py @@ -4,7 +4,8 @@ from pathlib import Path import torch from torchvision.transforms import v2 -from lerobot.common.transforms import Prod +from lerobot.common.datasets.utils import compute_or_load_stats +from lerobot.common.transforms import NormalizeTransform, Prod # DATA_DIR specifies to location where datasets are loaded. By default, DATA_DIR is None and # we load from `$HOME/.cache/huggingface/hub/datasets`. 
For our unit tests, we set `DATA_DIR=tests/data` @@ -41,9 +42,8 @@ def make_dataset( # min_max_from_spec # stats = dataset.compute_or_load_stats() if stats_path is None else torch.load(stats_path) - stats = {} - if cfg.policy.name == "diffusion" and cfg.env.name == "pusht": + stats = {} # TODO(rcadene): we overwrite stats to have the same as pretrained model, but we should remove this stats["observation.state"] = {} stats["observation.state"]["min"] = torch.tensor([13.456424, 32.938293], dtype=torch.float32) @@ -51,22 +51,30 @@ def make_dataset( stats["action"] = {} stats["action"]["min"] = torch.tensor([12.0, 25.0], dtype=torch.float32) stats["action"]["max"] = torch.tensor([511.0, 511.0], dtype=torch.float32) + else: + # instantiate a one frame dataset with light transform + stats_dataset = clsfunc( + dataset_id=cfg.dataset_id, + root=DATA_DIR, + transform=Prod(in_keys=clsfunc.image_keys, prod=1 / 255.0), + ) + stats = compute_or_load_stats(stats_dataset) # TODO(rcadene): remove this and put it in config. 
Ideally we want to reproduce SOTA results just with mean_std - # normalization_mode = "mean_std" if cfg.env.name == "aloha" else "min_max" + normalization_mode = "mean_std" if cfg.env.name == "aloha" else "min_max" transforms = v2.Compose( [ # TODO(rcadene): we need to do something about image_keys Prod(in_keys=clsfunc.image_keys, prod=1 / 255.0), - # NormalizeTransform( - # stats, - # in_keys=[ - # "observation.state", - # "action", - # ], - # mode=normalization_mode, - # ), + NormalizeTransform( + stats, + in_keys=[ + "observation.state", + "action", + ], + mode=normalization_mode, + ), ] ) diff --git a/lerobot/common/datasets/utils.py b/lerobot/common/datasets/utils.py index c8840169..522227d7 100644 --- a/lerobot/common/datasets/utils.py +++ b/lerobot/common/datasets/utils.py @@ -1,7 +1,11 @@ import io +import logging import zipfile +from copy import deepcopy +from math import ceil from pathlib import Path +import einops import requests import torch import tqdm @@ -97,3 +101,100 @@ def load_data_with_delta_timestamps( ) return data, is_pad + + +def compute_or_load_stats(dataset, batch_size=32, max_num_samples=None): + stats_path = dataset.data_dir / "stats.pth" + if stats_path.exists(): + return torch.load(stats_path) + + logging.info(f"compute_stats and save to {stats_path}") + + if max_num_samples is None: + max_num_samples = len(dataset) + + dataloader = torch.utils.data.DataLoader( + dataset, + num_workers=4, + batch_size=batch_size, + shuffle=True, + # pin_memory=cfg.device != "cpu", + drop_last=False, + ) + + stats_patterns = { + "action": "b c -> c", + "observation.state": "b c -> c", + } + for key in dataset.image_keys: + stats_patterns[key] = "b c h w -> c 1 1" + + # mean and std will be computed incrementally while max and min will track the running value. 
+ mean, std, max, min = {}, {}, {}, {} + for key in stats_patterns: + mean[key] = torch.tensor(0.0).float() + std[key] = torch.tensor(0.0).float() + max[key] = torch.tensor(-float("inf")).float() + min[key] = torch.tensor(float("inf")).float() + + # Note: Due to be refactored soon. The point of storing `first_batch` is to make sure we don't get + # surprises when rerunning the sampler. + first_batch = None + running_item_count = 0 # for online mean computation + for i, batch in enumerate( + tqdm(dataloader, total=ceil(max_num_samples / batch_size), desc="Compute mean, min, max") + ): + this_batch_size = batch.batch_size[0] + running_item_count += this_batch_size + if first_batch is None: + first_batch = deepcopy(batch) + for key, pattern in stats_patterns.items(): + batch[key] = batch[key].float() + # Numerically stable update step for mean computation. + batch_mean = einops.reduce(batch[key], pattern, "mean") + # Hint: to update the mean we need x̄ₙ = (Nₙ₋₁x̄ₙ₋₁ + Bₙxₙ) / Nₙ, where the subscript represents + # the update step, N is the running item count, B is this batch size, x̄ is the running mean, + # and x is the current batch mean. Some rearrangement is then required to avoid risking + # numerical overflow. Another hint: Nₙ₋₁ = Nₙ - Bₙ. Rearrangement yields + # x̄ₙ = x̄ₙ₋₁ + Bₙ * (xₙ - x̄ₙ₋₁) / Nₙ + mean[key] = mean[key] + this_batch_size * (batch_mean - mean[key]) / running_item_count + max[key] = torch.maximum(max[key], einops.reduce(batch[key], pattern, "max")) + min[key] = torch.minimum(min[key], einops.reduce(batch[key], pattern, "min")) + + if i == ceil(max_num_samples / batch_size) - 1: + break + + first_batch_ = None + running_item_count = 0 # for online std computation + for i, batch in enumerate(tqdm(dataloader, total=ceil(max_num_samples / batch_size), desc="Compute std")): + this_batch_size = batch.batch_size[0] + running_item_count += this_batch_size + # Sanity check to make sure the batches are still in the same order as before. 
+ if first_batch_ is None: + first_batch_ = deepcopy(batch) + for key in stats_patterns: + assert torch.equal(first_batch_[key], first_batch[key]) + for key, pattern in stats_patterns.items(): + batch[key] = batch[key].float() + # Numerically stable update step for mean computation (where the mean is over squared + # residuals).See notes in the mean computation loop above. + batch_std = einops.reduce((batch[key] - mean[key]) ** 2, pattern, "mean") + std[key] = std[key] + this_batch_size * (batch_std - std[key]) / running_item_count + + if i == ceil(max_num_samples / batch_size) - 1: + break + + for key in stats_patterns: + std[key] = torch.sqrt(std[key]) + + stats = {} + for key in stats_patterns: + stats[key] = { + "mean": mean[key], + "std": std[key], + "max": max[key], + "min": min[key], + } + + torch.save(stats, stats_path) + return stats diff --git a/lerobot/common/transforms.py b/lerobot/common/transforms.py index ec967614..4974c086 100644 --- a/lerobot/common/transforms.py +++ b/lerobot/common/transforms.py @@ -72,12 +72,12 @@ class NormalizeTransform(Transform): if inkey not in item: continue if self.mode == "mean_std": - mean = self.stats[inkey]["mean"] - std = self.stats[inkey]["std"] + mean = self.stats[f"{inkey}.mean"] + std = self.stats[f"{inkey}.std"] item[outkey] = (item[inkey] - mean) / (std + 1e-8) else: - min = self.stats[inkey]["min"] - max = self.stats[inkey]["max"] + min = self.stats[f"{inkey}.min"] + max = self.stats[f"{inkey}.max"] # normalize to [0,1] item[outkey] = (item[inkey] - min) / (max - min) # normalize to [-1, 1] @@ -89,12 +89,12 @@ class NormalizeTransform(Transform): if inkey not in item: continue if self.mode == "mean_std": - mean = self.stats[inkey]["mean"] - std = self.stats[inkey]["std"] + mean = self.stats[f"{inkey}.mean"] + std = self.stats[f"{inkey}.std"] item[outkey] = item[inkey] * std + mean else: - min = self.stats[inkey]["min"] - max = self.stats[inkey]["max"] + min = self.stats[f"{inkey}.min"] + max = 
self.stats[f"{inkey}.max"] item[outkey] = (item[inkey] + 1) / 2 item[outkey] = item[outkey] * (max - min) + min return item diff --git a/tests/test_datasets.py b/tests/test_datasets.py index f7f80a42..00008259 100644 --- a/tests/test_datasets.py +++ b/tests/test_datasets.py @@ -1,10 +1,6 @@ -import einops import pytest import torch -from torchrl.data.replay_buffers.replay_buffers import TensorDictReplayBuffer -from torchrl.data.replay_buffers.samplers import SamplerWithoutReplacement -from lerobot.common.datasets.factory import make_offline_buffer from lerobot.common.utils import init_hydra_config import logging from lerobot.common.datasets.factory import make_dataset @@ -52,32 +48,32 @@ def test_factory(env_name, dataset_id): logging.warning(f'Missing "next.done" key in dataset {dataset}.') -def test_compute_stats(): - """Check that the statistics are computed correctly according to the stats_patterns property. +# def test_compute_stats(): +# """Check that the statistics are computed correctly according to the stats_patterns property. - We compare with taking a straight min, mean, max, std of all the data in one pass (which we can do - because we are working with a small dataset). - """ - cfg = init_hydra_config( - DEFAULT_CONFIG_PATH, overrides=["env=aloha", "env.task=sim_transfer_cube_human"] - ) - buffer = make_offline_buffer(cfg) - # Get all of the data. - all_data = TensorDictReplayBuffer( - storage=buffer._storage, - batch_size=len(buffer), - sampler=SamplerWithoutReplacement(), - ).sample().float() - # Note: we set the batch size to be smaller than the whole dataset to make sure we are testing batched - # computation of the statistics. While doing this, we also make sure it works when we don't divide the - # dataset into even batches. 
- computed_stats = buffer._compute_stats(batch_size=int(len(all_data) * 0.75)) - for k, pattern in buffer.stats_patterns.items(): - expected_mean = einops.reduce(all_data[k], pattern, "mean") - assert torch.allclose(computed_stats[k]["mean"], expected_mean) - assert torch.allclose( - computed_stats[k]["std"], - torch.sqrt(einops.reduce((all_data[k] - expected_mean) ** 2, pattern, "mean")) - ) - assert torch.allclose(computed_stats[k]["min"], einops.reduce(all_data[k], pattern, "min")) - assert torch.allclose(computed_stats[k]["max"], einops.reduce(all_data[k], pattern, "max")) +# We compare with taking a straight min, mean, max, std of all the data in one pass (which we can do +# because we are working with a small dataset). +# """ +# cfg = init_hydra_config( +# DEFAULT_CONFIG_PATH, overrides=["env=aloha", "env.task=sim_transfer_cube_human"] +# ) +# dataset = make_dataset(cfg) +# # Get all of the data. +# all_data = TensorDictReplayBuffer( +# storage=buffer._storage, +# batch_size=len(buffer), +# sampler=SamplerWithoutReplacement(), +# ).sample().float() +# # Note: we set the batch size to be smaller than the whole dataset to make sure we are testing batched +# # computation of the statistics. While doing this, we also make sure it works when we don't divide the +# # dataset into even batches. 
+# computed_stats = buffer._compute_stats(batch_size=int(len(all_data) * 0.75)) +# for k, pattern in buffer.stats_patterns.items(): +# expected_mean = einops.reduce(all_data[k], pattern, "mean") +# assert torch.allclose(computed_stats[k]["mean"], expected_mean) +# assert torch.allclose( +# computed_stats[k]["std"], +# torch.sqrt(einops.reduce((all_data[k] - expected_mean) ** 2, pattern, "mean")) +# ) +# assert torch.allclose(computed_stats[k]["min"], einops.reduce(all_data[k], pattern, "min")) +# assert torch.allclose(computed_stats[k]["max"], einops.reduce(all_data[k], pattern, "max")) From 3a4dfa82fe8393e0a401f28d97efa3fd2cac9a05 Mon Sep 17 00:00:00 2001 From: Alexander Soare Date: Thu, 4 Apr 2024 18:34:41 +0100 Subject: [PATCH 07/80] backup wip --- lerobot/common/policies/act/backbone.py | 115 ---- lerobot/common/policies/act/detr_vae.py | 229 ------- lerobot/common/policies/act/policy.py | 570 ++++++++++++++++-- .../common/policies/act/position_encoding.py | 102 ---- lerobot/common/policies/act/transformer.py | 240 -------- lerobot/common/policies/act/utils.py | 478 --------------- lerobot/configs/policy/act.yaml | 3 +- scripts/convert_act_weights.py | 28 +- 8 files changed, 538 insertions(+), 1227 deletions(-) delete mode 100644 lerobot/common/policies/act/backbone.py delete mode 100644 lerobot/common/policies/act/detr_vae.py delete mode 100644 lerobot/common/policies/act/position_encoding.py delete mode 100644 lerobot/common/policies/act/transformer.py delete mode 100644 lerobot/common/policies/act/utils.py diff --git a/lerobot/common/policies/act/backbone.py b/lerobot/common/policies/act/backbone.py deleted file mode 100644 index 6399d339..00000000 --- a/lerobot/common/policies/act/backbone.py +++ /dev/null @@ -1,115 +0,0 @@ -from typing import List - -import torch -import torchvision -from torch import nn -from torchvision.models._utils import IntermediateLayerGetter - -from .position_encoding import build_position_encoding -from .utils import NestedTensor, 
is_main_process - - -class FrozenBatchNorm2d(torch.nn.Module): - """ - BatchNorm2d where the batch statistics and the affine parameters are fixed. - - Copy-paste from torchvision.misc.ops with added eps before rqsrt, - without which any other policy_models than torchvision.policy_models.resnet[18,34,50,101] - produce nans. - """ - - def __init__(self, n): - super().__init__() - self.register_buffer("weight", torch.ones(n)) - self.register_buffer("bias", torch.zeros(n)) - self.register_buffer("running_mean", torch.zeros(n)) - self.register_buffer("running_var", torch.ones(n)) - - def _load_from_state_dict( - self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs - ): - num_batches_tracked_key = prefix + "num_batches_tracked" - if num_batches_tracked_key in state_dict: - del state_dict[num_batches_tracked_key] - - super()._load_from_state_dict( - state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs - ) - - def forward(self, x): - # move reshapes to the beginning - # to make it fuser-friendly - w = self.weight.reshape(1, -1, 1, 1) - b = self.bias.reshape(1, -1, 1, 1) - rv = self.running_var.reshape(1, -1, 1, 1) - rm = self.running_mean.reshape(1, -1, 1, 1) - eps = 1e-5 - scale = w * (rv + eps).rsqrt() - bias = b - rm * scale - return x * scale + bias - - -class BackboneBase(nn.Module): - def __init__( - self, backbone: nn.Module, train_backbone: bool, num_channels: int, return_interm_layers: bool - ): - super().__init__() - # for name, parameter in backbone.named_parameters(): # only train later layers # TODO do we want this? 
- # if not train_backbone or 'layer2' not in name and 'layer3' not in name and 'layer4' not in name: - # parameter.requires_grad_(False) - if return_interm_layers: - return_layers = {"layer1": "0", "layer2": "1", "layer3": "2", "layer4": "3"} - else: - return_layers = {"layer4": "0"} - self.body = IntermediateLayerGetter(backbone, return_layers=return_layers) - self.num_channels = num_channels - - def forward(self, tensor): - xs = self.body(tensor) - return xs - # out: Dict[str, NestedTensor] = {} - # for name, x in xs.items(): - # m = tensor_list.mask - # assert m is not None - # mask = F.interpolate(m[None].float(), size=x.shape[-2:]).to(torch.bool)[0] - # out[name] = NestedTensor(x, mask) - # return out - - -class Backbone(BackboneBase): - """ResNet backbone with frozen BatchNorm.""" - - def __init__(self, name: str, train_backbone: bool, return_interm_layers: bool, dilation: bool): - backbone = getattr(torchvision.models, name)( - replace_stride_with_dilation=[False, False, dilation], - pretrained=is_main_process(), - norm_layer=FrozenBatchNorm2d, - ) # pretrained # TODO do we want frozen batch_norm?? 
- num_channels = 512 if name in ("resnet18", "resnet34") else 2048 - super().__init__(backbone, train_backbone, num_channels, return_interm_layers) - - -class Joiner(nn.Sequential): - def __init__(self, backbone, position_embedding): - super().__init__(backbone, position_embedding) - - def forward(self, tensor_list: NestedTensor): - xs = self[0](tensor_list) - out: List[NestedTensor] = [] - pos = [] - for _, x in xs.items(): - out.append(x) - # position encoding - pos.append(self[1](x).to(x.dtype)) - - return out, pos - - -def build_backbone(args): - position_embedding = build_position_encoding(args) - train_backbone = args.lr_backbone > 0 - return_interm_layers = args.masks - backbone = Backbone(args.backbone, train_backbone, return_interm_layers, args.dilation) - model = Joiner(backbone, position_embedding) - model.num_channels = backbone.num_channels - return model diff --git a/lerobot/common/policies/act/detr_vae.py b/lerobot/common/policies/act/detr_vae.py deleted file mode 100644 index aaf4d098..00000000 --- a/lerobot/common/policies/act/detr_vae.py +++ /dev/null @@ -1,229 +0,0 @@ -import einops -import numpy as np -import torch -from torch import nn - -from .backbone import build_backbone -from .transformer import Transformer, TransformerEncoder - - -def get_sinusoid_encoding_table(n_position, d_hid): - def get_position_angle_vec(position): - return [position / np.power(10000, 2 * (hid_j // 2) / d_hid) for hid_j in range(d_hid)] - - sinusoid_table = np.array([get_position_angle_vec(pos_i) for pos_i in range(n_position)]) - sinusoid_table[:, 0::2] = np.sin(sinusoid_table[:, 0::2]) # dim 2i - sinusoid_table[:, 1::2] = np.cos(sinusoid_table[:, 1::2]) # dim 2i+1 - - return torch.FloatTensor(sinusoid_table).unsqueeze(0) - - -class ActionChunkingTransformer(nn.Module): - """ - Action Chunking Transformer as per Learning Fine-Grained Bimanual Manipulation with Low-Cost Hardware - (paper: https://arxiv.org/abs/2304.13705, code: https://github.com/tonyzhaozh/act) - - 
Note: In this code we use the symbols `vae_encoder`, 'encoder', `decoder`. The meanings are as follows. - - The `vae_encoder` is, as per the literature around conditional variational auto-encoders (cVAE), the - part of the model that encodes the target data (here, a sequence of actions), and the condition - (here, we include the robot joint-space state as an input to the encoder). - - The `transformer` is the cVAE's decoder. But since we have an option to train this model without the - variational objective (in which case we drop the `vae_encoder` altogether), we don't call it the - `vae_decoder`. - # TODO(now): remove the following - - The `encoder` is actually a component of the cVAE's "decoder". But we refer to it as an "encoder" - because, in terms of the transformer with cross-attention that forms the cVAE's decoder, it is the - "encoder" part. We drop the `vae_` prefix because we have an option to train this model without the - variational objective (in which case we drop the `vae_encoder` altogether), and nothing about this - model has anything to do with a VAE). - - The `decoder` is a building block of the VAE decoder, and is just the "decoder" part of a - transformer with cross-attention. For the same reasoning behind the naming of `encoder`, we make - this term agnostic to the option to use a variational objective for training. - - """ - - def __init__( - self, backbones, transformer, vae_encoder, state_dim, action_dim, horizon, camera_names, use_vae - ): - """Initializes the model. - Parameters: - backbones: torch module of the backbone to be used. See backbone.py - transformer: torch module of the transformer architecture. See transformer.py - state_dim: robot state dimension of the environment - horizon: number of object queries, ie detection slot. This is the maximal number of objects - DETR can detect in a single image. For COCO, we recommend 100 queries. - - Args: - state_dim: Robot positional state dimension. - action_dim: Action dimension. 
- horizon: The number of actions to generate in one forward pass. - use_vae: Whether to use the variational objective. TODO(now): Give more details. - """ - super().__init__() - - self.camera_names = camera_names - self.transformer = transformer - self.vae_encoder = vae_encoder - self.use_vae = use_vae - hidden_dim = transformer.d_model - - # BERT style VAE encoder with input [cls, *joint_space_configuration, *action_sequence]. - # The cls token forms parameters of the latent's distribution (like this [*means, *log_variances]). - if use_vae: - self.cls_embed = nn.Embedding(1, hidden_dim) - # Projection layer for joint-space configuration to hidden dimension. - self.vae_encoder_robot_state_input_proj = nn.Linear(state_dim, hidden_dim) - # Projection layer for action (joint-space target) to hidden dimension. - self.vae_encoder_action_input_proj = nn.Linear(state_dim, hidden_dim) - # Final size of latent z. TODO(now): Add to hyperparams. - self.latent_dim = 32 - # Projection layer from the VAE encoder's output to the latent distribution's parameter space. - self.vae_encoder_latent_output_proj = nn.Linear(hidden_dim, self.latent_dim * 2) - # Fixed sinusoidal positional embedding the whole input to the VAE encoder. - self.register_buffer( - "vae_encoder_pos_enc", get_sinusoid_encoding_table(1 + 1 + horizon, hidden_dim) - ) - - # Transformer encoder input projections. The tokens will be structured like - # [latent, robot_state, image_feature_map_pixels]. - self.backbones = nn.ModuleList(backbones) - self.encoder_img_feat_input_proj = nn.Conv2d(backbones[0].num_channels, hidden_dim, kernel_size=1) - self.encoder_robot_state_input_proj = nn.Linear(state_dim, hidden_dim) - self.encoder_latent_input_proj = nn.Linear(self.latent_dim, hidden_dim) - # TODO(now): Fix this nonsense. One positional embedding is needed. We should extract the image - # feature dimension with a dry run. 
- self.additional_pos_embed = nn.Embedding( - 2, hidden_dim - ) # learned position embedding for proprio and latent - - # Transformer decoder. - # Learnable positional embedding for the transformer's decoder (in the style of DETR object queries). - self.decoder_pos_embed = nn.Embedding(horizon, hidden_dim) - # Final action regression head on the output of the transformer's decoder. - self.action_head = nn.Linear(hidden_dim, action_dim) - - def forward(self, robot_state, image, actions=None): - """ - Args: - robot_state: (B, J) batch of robot joint configurations. - image: (B, N, C, H, W) batch of N camera frames. - actions: (B, S, A) batch of actions from the target dataset which must be provided if the - VAE is enabled and the model is in training mode. - """ - if self.use_vae and self.training: - assert ( - actions is not None - ), "actions must be provided when using the variational objective in training mode." - - batch_size, _ = robot_state.shape - - # Prepare the latent for input to the transformer. - if self.use_vae and actions is not None: - # Prepare the input to the VAE encoder: [cls, *joint_space_configuration, *action_sequence]. - cls_embed = einops.repeat(self.cls_embed.weight, "1 d -> b 1 d", b=batch_size) # (B, 1, D) - robot_state_embed = self.vae_encoder_robot_state_input_proj(robot_state).unsqueeze(1) # (B, 1, D) - action_embed = self.vae_encoder_action_input_proj(actions) # (B, S, D) - vae_encoder_input = torch.cat([cls_embed, robot_state_embed, action_embed], axis=1) # (B, S+2, D) - # Note: detach() shouldn't be necessary but leaving it the same as the original code just in case. - # Prepare fixed positional embedding. - pos_embed = self.vae_encoder_pos_enc.clone().detach() # (1, S+2, D) - # Forward pass through VAE encoder and sample the latent with the reparameterization trick. 
- cls_token_out = self.vae_encoder( - vae_encoder_input.permute(1, 0, 2), pos=pos_embed.permute(1, 0, 2) - )[0] # (B, D) - latent_pdf_params = self.vae_encoder_latent_output_proj(cls_token_out) - mu = latent_pdf_params[:, : self.latent_dim] - logvar = latent_pdf_params[:, self.latent_dim :] - # Use reparameterization trick to sample from the latent's PDF. - latent_sample = mu + logvar.div(2).exp() * torch.randn_like(mu) - else: - # When not using the VAE encoder, we set the latent to be all zeros. - mu = logvar = None - latent_sample = torch.zeros([batch_size, self.latent_dim], dtype=robot_state.dtype).to( - robot_state.device - ) - - # Prepare all other transformer inputs. - # Image observation features and position embeddings. - all_cam_features = [] - all_cam_pos = [] - for cam_id, _ in enumerate(self.camera_names): - # TODO(now): remove the positional embedding from the backbones. - cam_features, pos = self.backbones[0](image[:, cam_id]) # HARDCODED - cam_features = cam_features[0] # take the last layer feature - pos = pos[0] - cam_features = self.encoder_img_feat_input_proj(cam_features) # (B, C, h, w) - all_cam_features.append(cam_features) - all_cam_pos.append(pos) - # Concatenate image observation feature maps along the width dimension. - transformer_input = torch.cat(all_cam_features, axis=3) - # TODO(now): remove the positional embedding from the backbones. - pos = torch.cat(all_cam_pos, axis=3) - robot_state_embed = self.encoder_robot_state_input_proj(robot_state) - latent_embed = self.encoder_latent_input_proj(latent_sample) - - # TODO(now): Explain all of this madness. - transformer_input = torch.cat( - [ - torch.stack([latent_embed, robot_state_embed], axis=0), - transformer_input.flatten(2).permute(2, 0, 1), - ] - ) - pos_embed = torch.cat( - [self.additional_pos_embed.weight.unsqueeze(1), pos.flatten(2).permute(2, 0, 1)], axis=0 - ) - - # Run the transformer and project the outputs to the action space. 
- transformer_output = self.transformer( - transformer_input, - encoder_pos=pos_embed, - decoder_pos=self.decoder_pos_embed.weight.unsqueeze(1), - ).transpose(0, 1) # back to (B, S, C) - actions = self.action_head(transformer_output) - return actions, [mu, logvar] - - -def build(args): - # From state - # backbone = None # from state for now, no need for conv nets - # From image - backbones = [] - backbone = build_backbone(args) - backbones.append(backbone) - - transformer = Transformer( - d_model=args.hidden_dim, - dropout=args.dropout, - nhead=args.nheads, - dim_feedforward=args.dim_feedforward, - num_encoder_layers=args.enc_layers, - num_decoder_layers=args.dec_layers, - normalize_before=args.pre_norm, - ) - - # TODO(now): args.enc_layers shouldn't be shared with the transformer decoder - vae_encoder = TransformerEncoder( - num_layers=args.enc_layers, - d_model=args.hidden_dim, - nhead=args.nheads, - dim_feedforward=args.dim_feedforward, - dropout=args.dropout, - activation="relu", - normalize_before=args.pre_norm, - ) - - model = ActionChunkingTransformer( - backbones, - transformer, - vae_encoder, - state_dim=args.state_dim, - action_dim=args.action_dim, - horizon=args.num_queries, - camera_names=args.camera_names, - use_vae=args.vae, - ) - - n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad) - print("number of parameters: {:.2f}M".format(n_parameters / 1e6)) - - return model diff --git a/lerobot/common/policies/act/policy.py b/lerobot/common/policies/act/policy.py index 7d24620a..906ea0cd 100644 --- a/lerobot/common/policies/act/policy.py +++ b/lerobot/common/policies/act/policy.py @@ -1,50 +1,32 @@ -import logging -import time +"""Action Chunking Transformer Policy +As per Learning Fine-Grained Bimanual Manipulation with Low-Cost Hardware (https://arxiv.org/abs/2304.13705). 
+""" + +import logging +import math +import time +from itertools import chain +from typing import Callable, Optional + +import einops +import numpy as np import torch import torch.nn.functional as F # noqa: N812 +import torchvision import torchvision.transforms as transforms +from torch import Tensor, nn +from torchvision.models._utils import IntermediateLayerGetter +from torchvision.ops.misc import FrozenBatchNorm2d from lerobot.common.policies.abstract import AbstractPolicy -from lerobot.common.policies.act.detr_vae import build from lerobot.common.utils import get_safe_torch_device -def build_act_model_and_optimizer(cfg): - model = build(cfg) - - param_dicts = [ - {"params": [p for n, p in model.named_parameters() if "backbone" not in n and p.requires_grad]}, - { - "params": [p for n, p in model.named_parameters() if "backbone" in n and p.requires_grad], - "lr": cfg.lr_backbone, - }, - ] - optimizer = torch.optim.AdamW(param_dicts, lr=cfg.lr, weight_decay=cfg.weight_decay) - - return model, optimizer - - -def kl_divergence(mu, logvar): - batch_size = mu.size(0) - assert batch_size != 0 - if mu.data.ndimension() == 4: - mu = mu.view(mu.size(0), mu.size(1)) - if logvar.data.ndimension() == 4: - logvar = logvar.view(logvar.size(0), logvar.size(1)) - - klds = -0.5 * (1 + logvar - mu.pow(2) - logvar.exp()) - total_kld = klds.sum(1).mean(0, True) - dimension_wise_kld = klds.mean(0) - mean_kld = klds.mean(1).mean(0, True) - - return total_kld, dimension_wise_kld, mean_kld - - class ActionChunkingTransformerPolicy(AbstractPolicy): """ - Action Chunking Transformer as per Learning Fine-Grained Bimanual Manipulation with Low-Cost Hardware - (https://arxiv.org/abs/2304.13705). + Action Chunking Transformer Policy as per Learning Fine-Grained Bimanual Manipulation with Low-Cost + Hardware (https://arxiv.org/abs/2304.13705). 
""" name = "act" @@ -68,7 +50,35 @@ class ActionChunkingTransformerPolicy(AbstractPolicy): self.cfg = cfg self.n_action_steps = n_action_steps self.device = get_safe_torch_device(device) - self.model, self.optimizer = build_act_model_and_optimizer(cfg) + + self.model = ActionChunkingTransformer( + cfg, + state_dim=cfg.state_dim, + action_dim=cfg.action_dim, + horizon=cfg.horizon, + camera_names=cfg.camera_names, + use_vae=cfg.vae, + ) + + optimizer_params_dicts = [ + { + "params": [ + p + for n, p in self.model.named_parameters() + if not n.startswith("backbone") and p.requires_grad + ] + }, + { + "params": [ + p + for n, p in self.model.named_parameters() + if n.startswith("backbone") and p.requires_grad + ], + "lr": cfg.lr_backbone, + }, + ] + self.optimizer = torch.optim.AdamW(optimizer_params_dicts, lr=cfg.lr, weight_decay=cfg.weight_decay) + self.kl_weight = self.cfg.kl_weight logging.info(f"KL Weight {self.kl_weight}") self.to(self.device) @@ -140,12 +150,10 @@ class ActionChunkingTransformerPolicy(AbstractPolicy): self.optimizer.step() self.optimizer.zero_grad() - # self.lr_scheduler.step() info = { "loss": loss.item(), "grad_norm": float(grad_norm), - # "lr": self.lr_scheduler.get_last_lr()[0], "lr": self.cfg.lr, "data_s": data_s, "update_s": time.time() - start_time, @@ -213,31 +221,495 @@ class ActionChunkingTransformerPolicy(AbstractPolicy): action = action[: self.n_action_steps] return action - def _forward(self, qpos, image, actions=None, is_pad=None): - env_state = None + def _forward(self, qpos, image, actions=None): normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) image = normalize(image) is_training = actions is not None if is_training: # training time - actions = actions[:, : self.model.num_queries] - if is_pad is not None: - is_pad = is_pad[:, : self.model.num_queries] + actions = actions[:, : self.model.horizon] - a_hat, (mu, logvar) = self.model(qpos, image, env_state, actions, is_pad) + a_hat, (mu, 
log_sigma_x2) = self.model(qpos, image, actions) all_l1 = F.l1_loss(actions, a_hat, reduction="none") - l1 = all_l1.mean() if is_pad is None else (all_l1 * ~is_pad.unsqueeze(-1)).mean() + l1 = all_l1.mean() loss_dict = {} loss_dict["l1"] = l1 if self.cfg.vae: - total_kld, dim_wise_kld, mean_kld = kl_divergence(mu, logvar) - loss_dict["kl"] = total_kld[0] + # Calculate Dₖₗ(latent_pdf || standard_normal). Note: After computing the KL-divergence for + # each dimension independently, we sum over the latent dimension to get the total + # KL-divergence per batch element, then take the mean over the batch. + # (See App. B of https://arxiv.org/abs/1312.6114 for more details). + mean_kld = (-0.5 * (1 + log_sigma_x2 - mu.pow(2) - (log_sigma_x2).exp())).sum(-1).mean() + loss_dict["kl"] = mean_kld loss_dict["loss"] = loss_dict["l1"] + loss_dict["kl"] * self.kl_weight else: loss_dict["loss"] = loss_dict["l1"] return loss_dict else: - action, _ = self.model(qpos, image, env_state) # no action, sample from prior + action, _ = self.model(qpos, image) # no action, sample from prior return action + + +def create_sinusoidal_position_embedding(n_position, d_hid): + def get_position_angle_vec(position): + return [position / np.power(10000, 2 * (hid_j // 2) / d_hid) for hid_j in range(d_hid)] + + sinusoid_table = np.array([get_position_angle_vec(pos_i) for pos_i in range(n_position)]) + sinusoid_table[:, 0::2] = np.sin(sinusoid_table[:, 0::2]) # dim 2i + sinusoid_table[:, 1::2] = np.cos(sinusoid_table[:, 1::2]) # dim 2i+1 + + return torch.FloatTensor(sinusoid_table).unsqueeze(0) + + +# TODO(alexander-soare) move all this code into the policy when we have the policy API established. 
+class ActionChunkingTransformer(nn.Module): + """ + Action Chunking Transformer as per Learning Fine-Grained Bimanual Manipulation with Low-Cost Hardware + (paper: https://arxiv.org/abs/2304.13705, code: https://github.com/tonyzhaozh/act) + + Note: In this code we use the terms `vae_encoder`, 'encoder', `decoder`. The meanings are as follows. + - The `vae_encoder` is, as per the literature around variational auto-encoders (VAE), the part of the + model that encodes the target data (a sequence of actions), and the condition (the robot + joint-space). + - A transformer with an `encoder` (not the VAE encoder) and `decoder` (not the VAE decoder) with + cross-attention is used as the VAE decoder. For these terms, we drop the `vae_` prefix because we + have an option to train this model without the variational objective (in which case we drop the + `vae_encoder` altogether, and nothing about this model has anything to do with a VAE). + + Transformer + Used alone for inference + (acts as VAE decoder + during training) + ┌───────────────────────┐ + │ Outputs │ + │ ▲ │ + │ ┌─────►┌───────┐ │ + ┌──────┐ │ │ │Transf.│ │ + │ │ │ ├─────►│decoder│ │ + ┌────┴────┐ │ │ │ │ │ │ + │ │ │ │ ┌───┴───┬─►│ │ │ + │ VAE │ │ │ │ │ └───────┘ │ + │ encoder │ │ │ │Transf.│ │ + │ │ │ │ │encoder│ │ + └───▲─────┘ │ │ │ │ │ + │ │ │ └───▲───┘ │ + │ │ │ │ │ + inputs └─────┼─────┘ │ + │ │ + └───────────────────────┘ + """ + + def __init__(self, args, state_dim, action_dim, horizon, camera_names, use_vae): + """Initializes the model. + Parameters: + state_dim: robot state dimension of the environment + horizon: number of object queries, ie detection slot. This is the maximal number of objects + DETR can detect in a single image. For COCO, we recommend 100 queries. + + Args: + state_dim: Robot positional state dimension. + action_dim: Action dimension. + horizon: The number of actions to generate in one forward pass. + use_vae: Whether to use the variational objective. TODO(now): Give more details. 
+ """ + super().__init__() + + self.camera_names = camera_names + self.use_vae = use_vae + self.horizon = horizon + self.hidden_dim = args.hidden_dim + + transformer_common_kwargs = dict( # noqa: C408 + d_model=self.hidden_dim, + nhead=args.nheads, + dim_feedforward=args.dim_feedforward, + dropout=args.dropout, + activation=args.activation, + normalize_before=args.pre_norm, + ) + + # BERT style VAE encoder with input [cls, *joint_space_configuration, *action_sequence]. + # The cls token forms parameters of the latent's distribution (like this [*means, *log_variances]). + if use_vae: + # TODO(now): args.enc_layers shouldn't be shared with the transformer decoder + self.vae_encoder = TransformerEncoder(num_layers=args.enc_layers, **transformer_common_kwargs) + self.cls_embed = nn.Embedding(1, self.hidden_dim) + # Projection layer for joint-space configuration to hidden dimension. + self.vae_encoder_robot_state_input_proj = nn.Linear(state_dim, self.hidden_dim) + # Projection layer for action (joint-space target) to hidden dimension. + self.vae_encoder_action_input_proj = nn.Linear(state_dim, self.hidden_dim) + # Final size of latent z. TODO(now): Add to hyperparams. + self.latent_dim = 32 + # Projection layer from the VAE encoder's output to the latent distribution's parameter space. + self.vae_encoder_latent_output_proj = nn.Linear(self.hidden_dim, self.latent_dim * 2) + # Fixed sinusoidal positional embedding the whole input to the VAE encoder. + self.register_buffer( + "vae_encoder_pos_enc", create_sinusoidal_position_embedding(1 + 1 + horizon, self.hidden_dim) + ) + + # Backbone for image feature extraction. 
+ self.backbone_position_embedding = SinusoidalPositionEmbedding2D(self.hidden_dim // 2) + backbone_model = getattr(torchvision.models, args.backbone)( + replace_stride_with_dilation=[False, False, args.dilation], + pretrained=True, # TODO(now): Add pretrained option + norm_layer=FrozenBatchNorm2d, + ) + # Note: The forward method of this returns a dict: {"feature_map": output}. + self.backbone = IntermediateLayerGetter(backbone_model, return_layers={"layer4": "feature_map"}) + + # Transformer (acts as VAE decoder when training with the variational objective). + self.encoder = TransformerEncoder(num_layers=args.enc_layers, **transformer_common_kwargs) + self.decoder = TransformerDecoder(num_layers=args.dec_layers, **transformer_common_kwargs) + + # Transformer encoder input projections. The tokens will be structured like + # [latent, robot_state, image_feature_map_pixels]. + self.encoder_img_feat_input_proj = nn.Conv2d( + backbone_model.fc.in_features, self.hidden_dim, kernel_size=1 + ) + self.encoder_robot_state_input_proj = nn.Linear(state_dim, self.hidden_dim) + self.encoder_latent_input_proj = nn.Linear(self.latent_dim, self.hidden_dim) + # TODO(now): Fix this nonsense. One positional embedding is needed. We should extract the image + # feature dimension with a dry run. + self.additional_pos_embed = nn.Embedding( + 2, self.hidden_dim + ) # learned position embedding for proprio and latent + + # Transformer decoder. + # Learnable positional embedding for the transformer's decoder (in the style of DETR object queries). + self.decoder_pos_embed_embed = nn.Embedding(horizon, self.hidden_dim) + # Final action regression head on the output of the transformer's decoder. 
+ self.action_head = nn.Linear(self.hidden_dim, action_dim) + + self._reset_parameters() + + def _reset_parameters(self): + """Xavier-uniform initialization of the transformer parameters as in the original code.""" + for p in chain(self.encoder.parameters(), self.decoder.parameters()): + if p.dim() > 1: + nn.init.xavier_uniform_(p) + + def forward(self, robot_state, image, actions=None): + """ + Args: + robot_state: (B, J) batch of robot joint configurations. + image: (B, N, C, H, W) batch of N camera frames. + actions: (B, S, A) batch of actions from the target dataset which must be provided if the + VAE is enabled and the model is in training mode. + """ + if self.use_vae and self.training: + assert ( + actions is not None + ), "actions must be provided when using the variational objective in training mode." + + batch_size, _ = robot_state.shape + + # Prepare the latent for input to the transformer. + if self.use_vae and actions is not None: + # Prepare the input to the VAE encoder: [cls, *joint_space_configuration, *action_sequence]. + cls_embed = einops.repeat(self.cls_embed.weight, "1 d -> b 1 d", b=batch_size) # (B, 1, D) + robot_state_embed = self.vae_encoder_robot_state_input_proj(robot_state).unsqueeze(1) # (B, 1, D) + action_embed = self.vae_encoder_action_input_proj(actions) # (B, S, D) + vae_encoder_input = torch.cat([cls_embed, robot_state_embed, action_embed], axis=1) # (B, S+2, D) + # Note: detach() shouldn't be necessary but leaving it the same as the original code just in case. + # Prepare fixed positional embedding. + pos_embed = self.vae_encoder_pos_enc.clone().detach() # (1, S+2, D) + # Forward pass through VAE encoder and sample the latent with the reparameterization trick. + cls_token_out = self.vae_encoder( + vae_encoder_input.permute(1, 0, 2), pos=pos_embed.permute(1, 0, 2) + )[0] # (B, D) + latent_pdf_params = self.vae_encoder_latent_output_proj(cls_token_out) + mu = latent_pdf_params[:, : self.latent_dim] + # This is 2log(sigma). 
Done this way to match the original implementation. + log_sigma_x2 = latent_pdf_params[:, self.latent_dim :] + # Use reparameterization trick to sample from the latent's PDF. + latent_sample = mu + log_sigma_x2.div(2).exp() * torch.randn_like(mu) + else: + # When not using the VAE encoder, we set the latent to be all zeros. + mu = log_sigma_x2 = None + latent_sample = torch.zeros([batch_size, self.latent_dim], dtype=torch.float32).to( + robot_state.device + ) + + # Prepare all other transformer inputs. + # Image observation features and position embeddings. + all_cam_features = [] + all_cam_pos = [] + for cam_id, _ in enumerate(self.camera_names): + cam_features = self.backbone(image[:, cam_id])["feature_map"] + pos = self.backbone_position_embedding(cam_features).to(dtype=cam_features.dtype) + cam_features = self.encoder_img_feat_input_proj(cam_features) # (B, C, h, w) + all_cam_features.append(cam_features) + all_cam_pos.append(pos) + # Concatenate image observation feature maps along the width dimension. + encoder_in = torch.cat(all_cam_features, axis=3) + pos = torch.cat(all_cam_pos, axis=3) + robot_state_embed = self.encoder_robot_state_input_proj(robot_state) + latent_embed = self.encoder_latent_input_proj(latent_sample) + + # TODO(now): Explain all of this madness. 
+ encoder_in = torch.cat( + [ + torch.stack([latent_embed, robot_state_embed], axis=0), + encoder_in.flatten(2).permute(2, 0, 1), + ] + ) + pos_embed = torch.cat( + [self.additional_pos_embed.weight.unsqueeze(1), pos.flatten(2).permute(2, 0, 1)], axis=0 + ) + + encoder_out = self.encoder(encoder_in, pos=pos_embed) + decoder_in = torch.zeros( + (self.horizon, batch_size, self.hidden_dim), dtype=pos_embed.dtype, device=pos_embed.device + ) + decoder_out = self.decoder( + decoder_in, + encoder_out, + encoder_pos_embed=pos_embed, + decoder_pos_embed=self.decoder_pos_embed_embed.weight.unsqueeze(1), + ).transpose(0, 1) # back to (B, S, C) + + actions = self.action_head(decoder_out) + return actions, [mu, log_sigma_x2] + + +class TransformerEncoder(nn.Module): + def __init__( + self, + num_layers, + d_model, + nhead, + dim_feedforward=2048, + dropout=0.1, + activation="relu", + normalize_before=False, + ): + super().__init__() + self.layers = nn.ModuleList( + [ + TransformerEncoderLayer( + d_model, nhead, dim_feedforward, dropout, activation, normalize_before + ) + for _ in range(num_layers) + ] + ) + self.norm = nn.LayerNorm(d_model) if normalize_before else nn.Identity() + + def forward(self, x, pos: Optional[Tensor] = None): + for layer in self.layers: + x = layer(x, pos=pos) + x = self.norm(x) + return x + + +class TransformerEncoderLayer(nn.Module): + def __init__( + self, d_model, nhead, dim_feedforward=2048, dropout=0.1, activation="relu", normalize_before=False + ): + super().__init__() + self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout) + # Implementation of Feedforward model + self.linear1 = nn.Linear(d_model, dim_feedforward) + self.dropout = nn.Dropout(dropout) + self.linear2 = nn.Linear(dim_feedforward, d_model) + + self.norm1 = nn.LayerNorm(d_model) + self.norm2 = nn.LayerNorm(d_model) + self.dropout1 = nn.Dropout(dropout) + self.dropout2 = nn.Dropout(dropout) + + self.activation = _get_activation_fn(activation) + self.normalize_before 
= normalize_before + + def forward(self, x, pos_embed: Optional[Tensor] = None): + skip = x + if self.normalize_before: + x = self.norm1(x) + q = k = x if pos_embed is None else x + pos_embed + x = self.self_attn(q, k, value=x)[0] + x = skip + self.dropout1(x) + if self.normalize_before: + skip = x + x = self.norm2(x) + else: + x = self.norm1(x) + skip = x + x = self.linear2(self.dropout(self.activation(self.linear1(x)))) + x = skip + self.dropout2(x) + if not self.normalize_before: + x = self.norm2(x) + return x + + +class TransformerDecoder(nn.Module): + def __init__( + self, + num_layers, + d_model, + nhead, + dim_feedforward=2048, + dropout=0.1, + activation="relu", + normalize_before=False, + ): + super().__init__() + self.layers = nn.ModuleList( + [ + TransformerDecoderLayer( + d_model, nhead, dim_feedforward, dropout, activation, normalize_before + ) + for _ in range(num_layers) + ] + ) + self.num_layers = num_layers + self.norm = nn.LayerNorm(d_model) + + def forward( + self, x, encoder_out, decoder_pos_embed: Tensor | None = None, encoder_pos_embed: Tensor | None = None + ): + for layer in self.layers: + x = layer( + x, encoder_out, decoder_pos_embed=decoder_pos_embed, encoder_pos_embed=encoder_pos_embed + ) + if self.norm is not None: + x = self.norm(x) + return x + + +class TransformerDecoderLayer(nn.Module): + def __init__( + self, d_model, nhead, dim_feedforward=2048, dropout=0.1, activation="relu", normalize_before=False + ): + super().__init__() + self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout) + self.multihead_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout) + # Implementation of Feedforward model + self.linear1 = nn.Linear(d_model, dim_feedforward) + self.dropout = nn.Dropout(dropout) + self.linear2 = nn.Linear(dim_feedforward, d_model) + + self.norm1 = nn.LayerNorm(d_model) + self.norm2 = nn.LayerNorm(d_model) + self.norm3 = nn.LayerNorm(d_model) + self.dropout1 = nn.Dropout(dropout) + self.dropout2 = 
nn.Dropout(dropout)
+        self.dropout3 = nn.Dropout(dropout)
+
+        self.activation = _get_activation_fn(activation)
+        self.normalize_before = normalize_before
+
+    def maybe_add_pos_embed(self, tensor: Tensor, pos_embed: Tensor | None) -> Tensor:
+        return tensor if pos_embed is None else tensor + pos_embed
+
+    def forward(
+        self,
+        x: Tensor,
+        encoder_out: Tensor,
+        decoder_pos_embed: Tensor | None = None,
+        encoder_pos_embed: Tensor | None = None,
+    ) -> Tensor:
+        """
+        Args:
+            x: (Decoder Sequence, Batch, Channel) tensor of input tokens.
+            encoder_out: (Encoder Sequence, B, C) output features from the last layer of the encoder we are
+                cross-attending with.
+            decoder_pos_embed: (DS, 1, C) positional embedding for the queries (from the decoder).
+            encoder_pos_embed: (ES, 1, C) positional embedding for the keys (from the encoder).
+        Returns:
+            (DS, B, C) tensor of decoder output features.
+        """
+        skip = x
+        if self.normalize_before:
+            x = self.norm1(x)
+        q = k = self.maybe_add_pos_embed(x, decoder_pos_embed)
+        x = self.self_attn(q, k, value=x)[0]
+        x = skip + self.dropout1(x)
+        if self.normalize_before:
+            skip = x
+            x = self.norm2(x)
+        else:
+            x = self.norm1(x)
+            skip = x
+        x = self.multihead_attn(
+            query=self.maybe_add_pos_embed(x, decoder_pos_embed),
+            key=self.maybe_add_pos_embed(encoder_out, encoder_pos_embed),
+            value=encoder_out,
+        )[0]
+        x = skip + self.dropout2(x)
+        if self.normalize_before:
+            skip = x
+            x = self.norm3(x)
+        else:
+            x = self.norm2(x)
+            skip = x
+        x = self.linear2(self.dropout(self.activation(self.linear1(x))))
+        x = skip + self.dropout3(x)
+        if not self.normalize_before:
+            x = self.norm3(x)
+        return x
+
+
+class SinusoidalPositionEmbedding2D(nn.Module):
+    """Sinusoidal positional embeddings similar to what's presented in Attention Is All You Need.
+
+    The variation is that the position indices are normalized in [0, 2π] (not quite: the lower bound is 1/H
+    for the vertical direction, and 1/W for the horizontal direction).
+ """ + + def __init__(self, dimension: int): + """ + Args: + dimension: The desired dimension of the embeddings. + """ + super().__init__() + self.dimension = dimension + self._two_pi = 2 * math.pi + self._eps = 1e-6 + # Inverse "common ratio" for the geometric progression in sinusoid frequencies. + self._temperature = 10000 + + def forward(self, x: Tensor) -> Tensor: + """ + Args: + x: A (B, C, H, W) batch of 2D feature map to generate the embeddings for. + Returns: + A (1, C, H, W) batch of corresponding sinusoidal positional embeddings. + """ + not_mask = torch.ones_like(x[0, [0]]) # (1, H, W) + # Note: These are like range(1, H+1) and range(1, W+1) respectively, but in most implementations + # they would be range(0, H) and range(0, W). Keeping it at as to match the original code. + y_range = not_mask.cumsum(1, dtype=torch.float32) + x_range = not_mask.cumsum(2, dtype=torch.float32) + + # "Normalize" the position index such that it ranges in [0, 2π]. + # Note: Adding epsilon on the denominator should not be needed as all values of y_embed and x_range + # are non-zero by construction. This is an artifact of the original code. + y_range = y_range / (y_range[:, -1:, :] + self._eps) * self._two_pi + x_range = x_range / (x_range[:, :, -1:] + self._eps) * self._two_pi + + inverse_frequency = self._temperature ** ( + 2 * (torch.arange(self.dimension, dtype=torch.float32, device=x.device) // 2) / self.dimension + ) + + x_range = x_range.unsqueeze(-1) / inverse_frequency # (1, H, W, 1) + y_range = y_range.unsqueeze(-1) / inverse_frequency # (1, H, W, 1) + + # Note: this stack then flatten operation results in interleaved sine and cosine terms. + # pos_embed_x and pos_embed are (1, H, W, C // 2). 
+ pos_embed_x = torch.stack((x_range[..., 0::2].sin(), x_range[..., 1::2].cos()), dim=-1).flatten(3) + pos_embed_y = torch.stack((y_range[..., 0::2].sin(), y_range[..., 1::2].cos()), dim=-1).flatten(3) + pos_embed = torch.cat((pos_embed_y, pos_embed_x), dim=3).permute(0, 3, 1, 2) # (1, C, H, W) + + return pos_embed + + +def _get_activation_fn(activation: str) -> Callable: + """Return an activation function given a string""" + if activation == "relu": + return F.relu + if activation == "gelu": + return F.gelu + if activation == "glu": + return F.glu + raise RuntimeError(f"activation should be relu/gelu/glu, not {activation}.") diff --git a/lerobot/common/policies/act/position_encoding.py b/lerobot/common/policies/act/position_encoding.py deleted file mode 100644 index 63bb4840..00000000 --- a/lerobot/common/policies/act/position_encoding.py +++ /dev/null @@ -1,102 +0,0 @@ -""" -Various positional encodings for the transformer. -""" - -import math - -import torch -from torch import nn - -from .utils import NestedTensor - - -class PositionEmbeddingSine(nn.Module): - """ - This is a more standard version of the position embedding, very similar to the one - used by the Attention is all you need paper, generalized to work on images. 
- """ - - def __init__(self, num_pos_feats=64, temperature=10000, normalize=False, scale=None): - super().__init__() - self.num_pos_feats = num_pos_feats - self.temperature = temperature - self.normalize = normalize - if scale is not None and normalize is False: - raise ValueError("normalize should be True if scale is passed") - if scale is None: - scale = 2 * math.pi - self.scale = scale - - def forward(self, tensor): - x = tensor - # mask = tensor_list.mask - # assert mask is not None - # not_mask = ~mask - - not_mask = torch.ones_like(x[0, [0]]) - y_embed = not_mask.cumsum(1, dtype=torch.float32) - x_embed = not_mask.cumsum(2, dtype=torch.float32) - if self.normalize: - eps = 1e-6 - y_embed = y_embed / (y_embed[:, -1:, :] + eps) * self.scale - x_embed = x_embed / (x_embed[:, :, -1:] + eps) * self.scale - - dim_t = torch.arange(self.num_pos_feats, dtype=torch.float32, device=x.device) - dim_t = self.temperature ** (2 * (dim_t // 2) / self.num_pos_feats) - - pos_x = x_embed[:, :, :, None] / dim_t - pos_y = y_embed[:, :, :, None] / dim_t - pos_x = torch.stack((pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4).flatten(3) - pos_y = torch.stack((pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4).flatten(3) - pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2) - return pos - - -class PositionEmbeddingLearned(nn.Module): - """ - Absolute pos embedding, learned. 
- """ - - def __init__(self, num_pos_feats=256): - super().__init__() - self.row_embed = nn.Embedding(50, num_pos_feats) - self.col_embed = nn.Embedding(50, num_pos_feats) - self.reset_parameters() - - def reset_parameters(self): - nn.init.uniform_(self.row_embed.weight) - nn.init.uniform_(self.col_embed.weight) - - def forward(self, tensor_list: NestedTensor): - x = tensor_list.tensors - h, w = x.shape[-2:] - i = torch.arange(w, device=x.device) - j = torch.arange(h, device=x.device) - x_emb = self.col_embed(i) - y_emb = self.row_embed(j) - pos = ( - torch.cat( - [ - x_emb.unsqueeze(0).repeat(h, 1, 1), - y_emb.unsqueeze(1).repeat(1, w, 1), - ], - dim=-1, - ) - .permute(2, 0, 1) - .unsqueeze(0) - .repeat(x.shape[0], 1, 1, 1) - ) - return pos - - -def build_position_encoding(args): - n_steps = args.hidden_dim // 2 - if args.position_embedding in ("v2", "sine"): - # TODO find a better way of exposing other arguments - position_embedding = PositionEmbeddingSine(n_steps, normalize=True) - elif args.position_embedding in ("v3", "learned"): - position_embedding = PositionEmbeddingLearned(n_steps) - else: - raise ValueError(f"not supported {args.position_embedding}") - - return position_embedding diff --git a/lerobot/common/policies/act/transformer.py b/lerobot/common/policies/act/transformer.py deleted file mode 100644 index 7e71f3ea..00000000 --- a/lerobot/common/policies/act/transformer.py +++ /dev/null @@ -1,240 +0,0 @@ -""" -TODO(now) -""" - -from typing import Optional - -import torch -import torch.nn.functional as F # noqa: N812 -from torch import Tensor, nn - - -class Transformer(nn.Module): - def __init__( - self, - d_model=512, - nhead=8, - num_encoder_layers=6, - num_decoder_layers=6, - dim_feedforward=2048, - dropout=0.1, - activation="relu", - normalize_before=False, - ): - super().__init__() - self.encoder = TransformerEncoder( - num_encoder_layers, d_model, nhead, dim_feedforward, dropout, activation, normalize_before - ) - self.decoder = 
TransformerDecoder( - num_decoder_layers, d_model, nhead, dim_feedforward, dropout, activation, normalize_before - ) - self.d_model = d_model - self.nhead = nhead - self._init_params() # TODO(now): move to somewhere common - - def _init_params(self): - for p in self.parameters(): - if p.dim() > 1: - nn.init.xavier_uniform_(p) - - def forward(self, x, encoder_pos, decoder_pos): - """ - Args: - x: ((E)ncoder (S)equence, (B)atch, (C)hannels) - decoder_pos: (Decoder Sequence, C) tensor for the decoder's positional embedding. - encoder_pos: (ES, C) tenso - """ - # TODO flatten only when input has H and W - bs = x.shape[1] - - encoder_out = self.encoder(x, pos=encoder_pos) - decoder_in = torch.zeros( - (decoder_pos.shape[0], bs, decoder_pos.shape[2]), - dtype=decoder_pos.dtype, - device=decoder_pos.device, - ) - decoder_out = self.decoder(decoder_in, encoder_out, encoder_pos=encoder_pos, decoder_pos=decoder_pos) - return decoder_out - - -class TransformerEncoder(nn.Module): - def __init__( - self, - num_layers, - d_model, - nhead, - dim_feedforward=2048, - dropout=0.1, - activation="relu", - normalize_before=False, - ): - super().__init__() - self.layers = nn.ModuleList( - [ - TransformerEncoderLayer( - d_model, nhead, dim_feedforward, dropout, activation, normalize_before - ) - for _ in range(num_layers) - ] - ) - self.norm = nn.LayerNorm(d_model) if normalize_before else nn.Identity() - - def forward(self, x, pos: Optional[Tensor] = None): - for layer in self.layers: - x = layer(x, pos=pos) - x = self.norm(x) - return x - - -class TransformerEncoderLayer(nn.Module): - def __init__( - self, d_model, nhead, dim_feedforward=2048, dropout=0.1, activation="relu", normalize_before=False - ): - super().__init__() - self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout) - # Implementation of Feedforward model - self.linear1 = nn.Linear(d_model, dim_feedforward) - self.dropout = nn.Dropout(dropout) - self.linear2 = nn.Linear(dim_feedforward, d_model) - - 
self.norm1 = nn.LayerNorm(d_model) - self.norm2 = nn.LayerNorm(d_model) - self.dropout1 = nn.Dropout(dropout) - self.dropout2 = nn.Dropout(dropout) - - self.activation = _get_activation_fn(activation) - self.normalize_before = normalize_before - - def forward(self, x, pos: Optional[Tensor] = None): - skip = x - if self.normalize_before: - x = self.norm1(x) - q = k = x if pos is None else x + pos - x = self.self_attn(q, k, value=x)[0] - x = skip + self.dropout1(x) - if self.normalize_before: - skip = x - x = self.norm2(x) - else: - x = self.norm1(x) - skip = x - x = self.linear2(self.dropout(self.activation(self.linear1(x)))) - x = skip + self.dropout2(x) - if not self.normalize_before: - x = self.norm2(x) - return x - - -class TransformerDecoder(nn.Module): - def __init__( - self, - num_layers, - d_model, - nhead, - dim_feedforward=2048, - dropout=0.1, - activation="relu", - normalize_before=False, - ): - super().__init__() - self.layers = nn.ModuleList( - [ - TransformerDecoderLayer( - d_model, nhead, dim_feedforward, dropout, activation, normalize_before - ) - for _ in range(num_layers) - ] - ) - self.num_layers = num_layers - self.norm = nn.LayerNorm(d_model) - - def forward(self, x, encoder_out, decoder_pos: Tensor | None = None, encoder_pos: Tensor | None = None): - for layer in self.layers: - x = layer(x, encoder_out, decoder_pos=decoder_pos, encoder_pos=encoder_pos) - if self.norm is not None: - x = self.norm(x) - return x - - -class TransformerDecoderLayer(nn.Module): - def __init__( - self, d_model, nhead, dim_feedforward=2048, dropout=0.1, activation="relu", normalize_before=False - ): - super().__init__() - self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout) - self.multihead_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout) - # Implementation of Feedforward model - self.linear1 = nn.Linear(d_model, dim_feedforward) - self.dropout = nn.Dropout(dropout) - self.linear2 = nn.Linear(dim_feedforward, d_model) - - self.norm1 = 
nn.LayerNorm(d_model) - self.norm2 = nn.LayerNorm(d_model) - self.norm3 = nn.LayerNorm(d_model) - self.dropout1 = nn.Dropout(dropout) - self.dropout2 = nn.Dropout(dropout) - self.dropout3 = nn.Dropout(dropout) - - self.activation = _get_activation_fn(activation) - self.normalize_before = normalize_before - - def maybe_add_pos_embed(self, tensor: Tensor, pos: Tensor | None) -> Tensor: - return tensor if pos is None else tensor + pos - - def forward( - self, - x: Tensor, - encoder_out: Tensor, - decoder_pos: Tensor | None = None, - encoder_pos: Tensor | None = None, - ) -> Tensor: - """ - Args: - x: (Decoder Sequence, Batch, Channel) tensor of input tokens. - encoder_out: (Encoder Sequence, B, C) output features from the last layer of the encoder we are - cross-attending with. - decoder_pos: (ES, 1, C) positional embedding for keys (from the encoder). - encoder_pos: (DS, 1, C) Positional_embedding for the queries (from the decoder). - Returns: - (DS, B, C) tensor of decoder output features. 
- """ - skip = x - if self.normalize_before: - x = self.norm1(x) - q = k = self.maybe_add_pos_embed(x, decoder_pos) - x = self.self_attn(q, k, value=x)[0] - x = skip + self.dropout1(x) - if self.normalize_before: - skip = x - x = self.norm2(x) - else: - x = self.norm1(x) - skip = x - x = self.multihead_attn( - query=self.maybe_add_pos_embed(x, decoder_pos), - key=self.maybe_add_pos_embed(encoder_out, encoder_pos), - value=encoder_out, - )[0] - x = skip + self.dropout2(x) - if self.normalize_before: - skip = x - x = self.norm3(x) - else: - x = self.norm2(x) - skip = x - x = self.linear2(self.dropout(self.activation(self.linear1(x)))) - x = skip + self.dropout3(x) - if not self.normalize_before: - x = self.norm3(x) - return x - - -def _get_activation_fn(activation): - """Return an activation function given a string""" - if activation == "relu": - return F.relu - if activation == "gelu": - return F.gelu - if activation == "glu": - return F.glu - raise RuntimeError(f"activation should be relu/gelu/glu, not {activation}.") diff --git a/lerobot/common/policies/act/utils.py b/lerobot/common/policies/act/utils.py deleted file mode 100644 index 0d935839..00000000 --- a/lerobot/common/policies/act/utils.py +++ /dev/null @@ -1,478 +0,0 @@ -""" -Misc functions, including distributed helpers. - -Mostly copy-paste from torchvision references. 
-""" - -import datetime -import os -import pickle -import subprocess -import time -from collections import defaultdict, deque -from typing import List, Optional - -import torch -import torch.distributed as dist - -# needed due to empty tensor bug in pytorch and torchvision 0.5 -import torchvision -from packaging import version -from torch import Tensor - -if version.parse(torchvision.__version__) < version.parse("0.7"): - from torchvision.ops import _new_empty_tensor - from torchvision.ops.misc import _output_size - - -class SmoothedValue: - """Track a series of values and provide access to smoothed values over a - window or the global series average. - """ - - def __init__(self, window_size=20, fmt=None): - if fmt is None: - fmt = "{median:.4f} ({global_avg:.4f})" - self.deque = deque(maxlen=window_size) - self.total = 0.0 - self.count = 0 - self.fmt = fmt - - def update(self, value, n=1): - self.deque.append(value) - self.count += n - self.total += value * n - - def synchronize_between_processes(self): - """ - Warning: does not synchronize the deque! 
- """ - if not is_dist_avail_and_initialized(): - return - t = torch.tensor([self.count, self.total], dtype=torch.float64, device="cuda") - dist.barrier() - dist.all_reduce(t) - t = t.tolist() - self.count = int(t[0]) - self.total = t[1] - - @property - def median(self): - d = torch.tensor(list(self.deque)) - return d.median().item() - - @property - def avg(self): - d = torch.tensor(list(self.deque), dtype=torch.float32) - return d.mean().item() - - @property - def global_avg(self): - return self.total / self.count - - @property - def max(self): - return max(self.deque) - - @property - def value(self): - return self.deque[-1] - - def __str__(self): - return self.fmt.format( - median=self.median, avg=self.avg, global_avg=self.global_avg, max=self.max, value=self.value - ) - - -def all_gather(data): - """ - Run all_gather on arbitrary picklable data (not necessarily tensors) - Args: - data: any picklable object - Returns: - list[data]: list of data gathered from each rank - """ - world_size = get_world_size() - if world_size == 1: - return [data] - - # serialized to a Tensor - buffer = pickle.dumps(data) - storage = torch.ByteStorage.from_buffer(buffer) - tensor = torch.ByteTensor(storage).to("cuda") - - # obtain Tensor size of each rank - local_size = torch.tensor([tensor.numel()], device="cuda") - size_list = [torch.tensor([0], device="cuda") for _ in range(world_size)] - dist.all_gather(size_list, local_size) - size_list = [int(size.item()) for size in size_list] - max_size = max(size_list) - - # receiving Tensor from all ranks - # we pad the tensor because torch all_gather does not support - # gathering tensors of different shapes - tensor_list = [] - for _ in size_list: - tensor_list.append(torch.empty((max_size,), dtype=torch.uint8, device="cuda")) - if local_size != max_size: - padding = torch.empty(size=(max_size - local_size,), dtype=torch.uint8, device="cuda") - tensor = torch.cat((tensor, padding), dim=0) - dist.all_gather(tensor_list, tensor) - - 
data_list = [] - for size, tensor in zip(size_list, tensor_list, strict=False): - buffer = tensor.cpu().numpy().tobytes()[:size] - data_list.append(pickle.loads(buffer)) - - return data_list - - -def reduce_dict(input_dict, average=True): - """ - Args: - input_dict (dict): all the values will be reduced - average (bool): whether to do average or sum - Reduce the values in the dictionary from all processes so that all processes - have the averaged results. Returns a dict with the same fields as - input_dict, after reduction. - """ - world_size = get_world_size() - if world_size < 2: - return input_dict - with torch.no_grad(): - names = [] - values = [] - # sort the keys so that they are consistent across processes - for k in sorted(input_dict.keys()): - names.append(k) - values.append(input_dict[k]) - values = torch.stack(values, dim=0) - dist.all_reduce(values) - if average: - values /= world_size - reduced_dict = {k: v for k, v in zip(names, values, strict=False)} # noqa: C416 - return reduced_dict - - -class MetricLogger: - def __init__(self, delimiter="\t"): - self.meters = defaultdict(SmoothedValue) - self.delimiter = delimiter - - def update(self, **kwargs): - for k, v in kwargs.items(): - if isinstance(v, torch.Tensor): - v = v.item() - assert isinstance(v, (float, int)) - self.meters[k].update(v) - - def __getattr__(self, attr): - if attr in self.meters: - return self.meters[attr] - if attr in self.__dict__: - return self.__dict__[attr] - raise AttributeError("'{}' object has no attribute '{}'".format(type(self).__name__, attr)) - - def __str__(self): - loss_str = [] - for name, meter in self.meters.items(): - loss_str.append("{}: {}".format(name, str(meter))) - return self.delimiter.join(loss_str) - - def synchronize_between_processes(self): - for meter in self.meters.values(): - meter.synchronize_between_processes() - - def add_meter(self, name, meter): - self.meters[name] = meter - - def log_every(self, iterable, print_freq, header=None): - if not header: 
- header = "" - start_time = time.time() - end = time.time() - iter_time = SmoothedValue(fmt="{avg:.4f}") - data_time = SmoothedValue(fmt="{avg:.4f}") - space_fmt = ":" + str(len(str(len(iterable)))) + "d" - if torch.cuda.is_available(): - log_msg = self.delimiter.join( - [ - header, - "[{0" + space_fmt + "}/{1}]", - "eta: {eta}", - "{meters}", - "time: {time}", - "data: {data}", - "max mem: {memory:.0f}", - ] - ) - else: - log_msg = self.delimiter.join( - [ - header, - "[{0" + space_fmt + "}/{1}]", - "eta: {eta}", - "{meters}", - "time: {time}", - "data: {data}", - ] - ) - mega_b = 1024.0 * 1024.0 - for i, obj in enumerate(iterable): - data_time.update(time.time() - end) - yield obj - iter_time.update(time.time() - end) - if i % print_freq == 0 or i == len(iterable) - 1: - eta_seconds = iter_time.global_avg * (len(iterable) - i) - eta_string = str(datetime.timedelta(seconds=int(eta_seconds))) - if torch.cuda.is_available(): - print( - log_msg.format( - i, - len(iterable), - eta=eta_string, - meters=str(self), - time=str(iter_time), - data=str(data_time), - memory=torch.cuda.max_memory_allocated() / mega_b, - ) - ) - else: - print( - log_msg.format( - i, - len(iterable), - eta=eta_string, - meters=str(self), - time=str(iter_time), - data=str(data_time), - ) - ) - end = time.time() - total_time = time.time() - start_time - total_time_str = str(datetime.timedelta(seconds=int(total_time))) - print("{} Total time: {} ({:.4f} s / it)".format(header, total_time_str, total_time / len(iterable))) - - -def get_sha(): - cwd = os.path.dirname(os.path.abspath(__file__)) - - def _run(command): - return subprocess.check_output(command, cwd=cwd).decode("ascii").strip() - - sha = "N/A" - diff = "clean" - branch = "N/A" - try: - sha = _run(["git", "rev-parse", "HEAD"]) - subprocess.check_output(["git", "diff"], cwd=cwd) - diff = _run(["git", "diff-index", "HEAD"]) - diff = "has uncommited changes" if diff else "clean" - branch = _run(["git", "rev-parse", "--abbrev-ref", "HEAD"]) - 
except Exception: - pass - message = f"sha: {sha}, status: {diff}, branch: {branch}" - return message - - -def collate_fn(batch): - batch = list(zip(*batch, strict=False)) - batch[0] = nested_tensor_from_tensor_list(batch[0]) - return tuple(batch) - - -def _max_by_axis(the_list): - # type: (List[List[int]]) -> List[int] - maxes = the_list[0] - for sublist in the_list[1:]: - for index, item in enumerate(sublist): - maxes[index] = max(maxes[index], item) - return maxes - - -class NestedTensor: - def __init__(self, tensors, mask: Optional[Tensor]): - self.tensors = tensors - self.mask = mask - - def to(self, device): - # type: (Device) -> NestedTensor # noqa - cast_tensor = self.tensors.to(device) - mask = self.mask - if mask is not None: - assert mask is not None - cast_mask = mask.to(device) - else: - cast_mask = None - return NestedTensor(cast_tensor, cast_mask) - - def decompose(self): - return self.tensors, self.mask - - def __repr__(self): - return str(self.tensors) - - -def nested_tensor_from_tensor_list(tensor_list: List[Tensor]): - # TODO make this more general - if tensor_list[0].ndim == 3: - if torchvision._is_tracing(): - # nested_tensor_from_tensor_list() does not export well to ONNX - # call _onnx_nested_tensor_from_tensor_list() instead - return _onnx_nested_tensor_from_tensor_list(tensor_list) - - # TODO make it support different-sized images - max_size = _max_by_axis([list(img.shape) for img in tensor_list]) - # min_size = tuple(min(s) for s in zip(*[img.shape for img in tensor_list])) - batch_shape = [len(tensor_list)] + max_size - b, c, h, w = batch_shape - dtype = tensor_list[0].dtype - device = tensor_list[0].device - tensor = torch.zeros(batch_shape, dtype=dtype, device=device) - mask = torch.ones((b, h, w), dtype=torch.bool, device=device) - for img, pad_img, m in zip(tensor_list, tensor, mask, strict=False): - pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img) - m[: img.shape[1], : img.shape[2]] = False - else: - raise 
ValueError("not supported") - return NestedTensor(tensor, mask) - - -# _onnx_nested_tensor_from_tensor_list() is an implementation of -# nested_tensor_from_tensor_list() that is supported by ONNX tracing. -@torch.jit.unused -def _onnx_nested_tensor_from_tensor_list(tensor_list: List[Tensor]) -> NestedTensor: - max_size = [] - for i in range(tensor_list[0].dim()): - max_size_i = torch.max(torch.stack([img.shape[i] for img in tensor_list]).to(torch.float32)).to( - torch.int64 - ) - max_size.append(max_size_i) - max_size = tuple(max_size) - - # work around for - # pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img) - # m[: img.shape[1], :img.shape[2]] = False - # which is not yet supported in onnx - padded_imgs = [] - padded_masks = [] - for img in tensor_list: - padding = [(s1 - s2) for s1, s2 in zip(max_size, tuple(img.shape), strict=False)] - padded_img = torch.nn.functional.pad(img, (0, padding[2], 0, padding[1], 0, padding[0])) - padded_imgs.append(padded_img) - - m = torch.zeros_like(img[0], dtype=torch.int, device=img.device) - padded_mask = torch.nn.functional.pad(m, (0, padding[2], 0, padding[1]), "constant", 1) - padded_masks.append(padded_mask.to(torch.bool)) - - tensor = torch.stack(padded_imgs) - mask = torch.stack(padded_masks) - - return NestedTensor(tensor, mask=mask) - - -def setup_for_distributed(is_master): - """ - This function disables printing when not in master process - """ - import builtins as __builtin__ - - builtin_print = __builtin__.print - - def print(*args, **kwargs): - force = kwargs.pop("force", False) - if is_master or force: - builtin_print(*args, **kwargs) - - __builtin__.print = print - - -def is_dist_avail_and_initialized(): - if not dist.is_available(): - return False - if not dist.is_initialized(): - return False - return True - - -def get_world_size(): - if not is_dist_avail_and_initialized(): - return 1 - return dist.get_world_size() - - -def get_rank(): - if not is_dist_avail_and_initialized(): - return 0 - 
return dist.get_rank() - - -def is_main_process(): - return get_rank() == 0 - - -def save_on_master(*args, **kwargs): - if is_main_process(): - torch.save(*args, **kwargs) - - -def init_distributed_mode(args): - if "RANK" in os.environ and "WORLD_SIZE" in os.environ: - args.rank = int(os.environ["RANK"]) - args.world_size = int(os.environ["WORLD_SIZE"]) - args.gpu = int(os.environ["LOCAL_RANK"]) - elif "SLURM_PROCID" in os.environ: - args.rank = int(os.environ["SLURM_PROCID"]) - args.gpu = args.rank % torch.cuda.device_count() - else: - print("Not using distributed mode") - args.distributed = False - return - - args.distributed = True - - torch.cuda.set_device(args.gpu) - args.dist_backend = "nccl" - print("| distributed init (rank {}): {}".format(args.rank, args.dist_url), flush=True) - torch.distributed.init_process_group( - backend=args.dist_backend, init_method=args.dist_url, world_size=args.world_size, rank=args.rank - ) - torch.distributed.barrier() - setup_for_distributed(args.rank == 0) - - -@torch.no_grad() -def accuracy(output, target, topk=(1,)): - """Computes the precision@k for the specified values of k""" - if target.numel() == 0: - return [torch.zeros([], device=output.device)] - maxk = max(topk) - batch_size = target.size(0) - - _, pred = output.topk(maxk, 1, True, True) - pred = pred.t() - correct = pred.eq(target.view(1, -1).expand_as(pred)) - - res = [] - for k in topk: - correct_k = correct[:k].view(-1).float().sum(0) - res.append(correct_k.mul_(100.0 / batch_size)) - return res - - -def interpolate(input, size=None, scale_factor=None, mode="nearest", align_corners=None): - # type: (Tensor, Optional[List[int]], Optional[float], str, Optional[bool]) -> Tensor - """ - Equivalent to nn.functional.interpolate, but with support for empty batch sizes. - This will eventually be supported natively by PyTorch, and this - class can go away. 
- """ - if version.parse(torchvision.__version__) < version.parse("0.7"): - if input.numel() > 0: - return torch.nn.functional.interpolate(input, size, scale_factor, mode, align_corners) - - output_shape = _output_size(2, input, size, scale_factor) - output_shape = list(input.shape[:-2]) + list(output_shape) - return _new_empty_tensor(input, output_shape) - else: - return torchvision.ops.misc.interpolate(input, size, scale_factor, mode, align_corners) diff --git a/lerobot/configs/policy/act.yaml b/lerobot/configs/policy/act.yaml index 1086b595..22b6cd6f 100644 --- a/lerobot/configs/policy/act.yaml +++ b/lerobot/configs/policy/act.yaml @@ -33,11 +33,10 @@ policy: nheads: 8 #camera_names: [top, front_close, left_pillar, right_pillar] camera_names: [top] - position_embedding: sine - masks: false dilation: false dropout: 0.1 pre_norm: false + activation: relu vae: true diff --git a/scripts/convert_act_weights.py b/scripts/convert_act_weights.py index d0c0c3e7..c8f83422 100644 --- a/scripts/convert_act_weights.py +++ b/scripts/convert_act_weights.py @@ -11,6 +11,19 @@ policy = make_policy(cfg) state_dict = torch.load("/home/alexander/Projects/act/outputs/sim_transfer_cube_human_vae/policy_last.ckpt") +# Remove keys based on what they start with. + +start_removals = [ + # There is a bug that means the pretrained model doesn't even use the final decoder layers. + *[f"model.transformer.decoder.layers.{i}" for i in range(1, 7)], + "model.is_pad_head.", +] + +for to_remove in start_removals: + for k in list(state_dict.keys()): + if k.startswith(to_remove): + del state_dict[k] + # Replace keys based on what they start with. 
@@ -26,6 +39,9 @@ start_replacements = [ ("model.input_proj.", "model.encoder_img_feat_input_proj."), ("model.input_proj_robot_state", "model.encoder_robot_state_input_proj"), ("model.latent_out_proj.", "model.encoder_latent_input_proj."), + ("model.transformer.encoder.", "model.encoder."), + ("model.transformer.decoder.", "model.decoder."), + ("model.backbones.0.0.body.", "model.backbone."), ] for to_replace, replace_with in start_replacements: @@ -35,18 +51,6 @@ for to_replace, replace_with in start_replacements: state_dict[k_] = state_dict[k] del state_dict[k] -# Remove keys based on what they start with. - -start_removals = [ - # There is a bug that means the pretrained model doesn't even use the final decoder layers. - *[f"model.transformer.decoder.layers.{i}" for i in range(1, 7)], - "model.is_pad_head.", -] - -for to_remove in start_removals: - for k in list(state_dict.keys()): - if k.startswith(to_remove): - del state_dict[k] missing_keys, unexpected_keys = policy.load_state_dict(state_dict, strict=False) From 5af00d0c1ee0aa3d9a90e6afe646474073ff5065 Mon Sep 17 00:00:00 2001 From: Cadene Date: Fri, 5 Apr 2024 09:31:39 +0000 Subject: [PATCH 08/80] fix train.py, stats, eval.py (training is running) --- lerobot/common/datasets/aloha.py | 16 +++++++------ lerobot/common/datasets/pusht.py | 16 +++++++------ lerobot/common/datasets/simxarm.py | 16 +++++++------ lerobot/common/datasets/utils.py | 15 ++++++++---- .../diffusion/diffusion_unet_image_policy.py | 7 +++--- lerobot/common/policies/diffusion/policy.py | 3 ++- lerobot/common/transforms.py | 16 ++++++------- lerobot/scripts/eval.py | 20 +++++++--------- lerobot/scripts/train.py | 9 +++---- tests/scripts/mock_dataset.py | 24 +++++++++---------- tests/test_datasets.py | 6 +---- 11 files changed, 76 insertions(+), 72 deletions(-) diff --git a/lerobot/common/datasets/aloha.py b/lerobot/common/datasets/aloha.py index 2744f595..102de08e 100644 --- a/lerobot/common/datasets/aloha.py +++ 
b/lerobot/common/datasets/aloha.py @@ -91,15 +91,17 @@ class AlohaDataset(torch.utils.data.Dataset): self.transform = transform self.delta_timestamps = delta_timestamps - data_dir = self.root / f"{self.dataset_id}" - if (data_dir / "data_dict.pth").exists() and (data_dir / "data_ids_per_episode.pth").exists(): - self.data_dict = torch.load(data_dir / "data_dict.pth") - self.data_ids_per_episode = torch.load(data_dir / "data_ids_per_episode.pth") + self.data_dir = self.root / f"{self.dataset_id}" + if (self.data_dir / "data_dict.pth").exists() and ( + self.data_dir / "data_ids_per_episode.pth" + ).exists(): + self.data_dict = torch.load(self.data_dir / "data_dict.pth") + self.data_ids_per_episode = torch.load(self.data_dir / "data_ids_per_episode.pth") else: self._download_and_preproc_obsolete() - data_dir.mkdir(parents=True, exist_ok=True) - torch.save(self.data_dict, data_dir / "data_dict.pth") - torch.save(self.data_ids_per_episode, data_dir / "data_ids_per_episode.pth") + self.data_dir.mkdir(parents=True, exist_ok=True) + torch.save(self.data_dict, self.data_dir / "data_dict.pth") + torch.save(self.data_ids_per_episode, self.data_dir / "data_ids_per_episode.pth") @property def num_samples(self) -> int: diff --git a/lerobot/common/datasets/pusht.py b/lerobot/common/datasets/pusht.py index 3de70b1f..9b73b101 100644 --- a/lerobot/common/datasets/pusht.py +++ b/lerobot/common/datasets/pusht.py @@ -105,15 +105,17 @@ class PushtDataset(torch.utils.data.Dataset): self.transform = transform self.delta_timestamps = delta_timestamps - data_dir = self.root / f"{self.dataset_id}" - if (data_dir / "data_dict.pth").exists() and (data_dir / "data_ids_per_episode.pth").exists(): - self.data_dict = torch.load(data_dir / "data_dict.pth") - self.data_ids_per_episode = torch.load(data_dir / "data_ids_per_episode.pth") + self.data_dir = self.root / f"{self.dataset_id}" + if (self.data_dir / "data_dict.pth").exists() and ( + self.data_dir / "data_ids_per_episode.pth" + ).exists(): + 
self.data_dict = torch.load(self.data_dir / "data_dict.pth") + self.data_ids_per_episode = torch.load(self.data_dir / "data_ids_per_episode.pth") else: self._download_and_preproc_obsolete() - data_dir.mkdir(parents=True, exist_ok=True) - torch.save(self.data_dict, data_dir / "data_dict.pth") - torch.save(self.data_ids_per_episode, data_dir / "data_ids_per_episode.pth") + self.data_dir.mkdir(parents=True, exist_ok=True) + torch.save(self.data_dict, self.data_dir / "data_dict.pth") + torch.save(self.data_ids_per_episode, self.data_dir / "data_ids_per_episode.pth") @property def num_samples(self) -> int: diff --git a/lerobot/common/datasets/simxarm.py b/lerobot/common/datasets/simxarm.py index 4b2c68ad..7bddf608 100644 --- a/lerobot/common/datasets/simxarm.py +++ b/lerobot/common/datasets/simxarm.py @@ -46,15 +46,17 @@ class SimxarmDataset(torch.utils.data.Dataset): self.transform = transform self.delta_timestamps = delta_timestamps - data_dir = self.root / f"{self.dataset_id}" - if (data_dir / "data_dict.pth").exists() and (data_dir / "data_ids_per_episode.pth").exists(): - self.data_dict = torch.load(data_dir / "data_dict.pth") - self.data_ids_per_episode = torch.load(data_dir / "data_ids_per_episode.pth") + self.data_dir = self.root / f"{self.dataset_id}" + if (self.data_dir / "data_dict.pth").exists() and ( + self.data_dir / "data_ids_per_episode.pth" + ).exists(): + self.data_dict = torch.load(self.data_dir / "data_dict.pth") + self.data_ids_per_episode = torch.load(self.data_dir / "data_ids_per_episode.pth") else: self._download_and_preproc_obsolete() - data_dir.mkdir(parents=True, exist_ok=True) - torch.save(self.data_dict, data_dir / "data_dict.pth") - torch.save(self.data_ids_per_episode, data_dir / "data_ids_per_episode.pth") + self.data_dir.mkdir(parents=True, exist_ok=True) + torch.save(self.data_dict, self.data_dir / "data_dict.pth") + torch.save(self.data_ids_per_episode, self.data_dir / "data_ids_per_episode.pth") @property def num_samples(self) -> int: 
diff --git a/lerobot/common/datasets/utils.py b/lerobot/common/datasets/utils.py index 522227d7..6b207b4d 100644 --- a/lerobot/common/datasets/utils.py +++ b/lerobot/common/datasets/utils.py @@ -112,16 +112,19 @@ def compute_or_load_stats(dataset, batch_size=32, max_num_samples=None): if max_num_samples is None: max_num_samples = len(dataset) + else: + raise NotImplementedError("We need to set shuffle=True, but this violate an assert for now.") dataloader = torch.utils.data.DataLoader( dataset, num_workers=4, batch_size=batch_size, - shuffle=True, + shuffle=False, # pin_memory=cfg.device != "cpu", drop_last=False, ) + # these einops patterns will be used to aggregate batches and compute statistics stats_patterns = { "action": "b c -> c", "observation.state": "b c -> c", @@ -142,9 +145,9 @@ def compute_or_load_stats(dataset, batch_size=32, max_num_samples=None): first_batch = None running_item_count = 0 # for online mean computation for i, batch in enumerate( - tqdm(dataloader, total=ceil(max_num_samples / batch_size), desc="Compute mean, min, max") + tqdm.tqdm(dataloader, total=ceil(max_num_samples / batch_size), desc="Compute mean, min, max") ): - this_batch_size = batch.batch_size[0] + this_batch_size = len(batch["index"]) running_item_count += this_batch_size if first_batch is None: first_batch = deepcopy(batch) @@ -166,8 +169,10 @@ def compute_or_load_stats(dataset, batch_size=32, max_num_samples=None): first_batch_ = None running_item_count = 0 # for online std computation - for i, batch in enumerate(tqdm(dataloader, total=ceil(max_num_samples / batch_size), desc="Compute std")): - this_batch_size = batch.batch_size[0] + for i, batch in enumerate( + tqdm.tqdm(dataloader, total=ceil(max_num_samples / batch_size), desc="Compute std") + ): + this_batch_size = len(batch["index"]) running_item_count += this_batch_size # Sanity check to make sure the batches are still in the same order as before. 
if first_batch_ is None: diff --git a/lerobot/common/policies/diffusion/diffusion_unet_image_policy.py b/lerobot/common/policies/diffusion/diffusion_unet_image_policy.py index 7719fdde..373e4b6c 100644 --- a/lerobot/common/policies/diffusion/diffusion_unet_image_policy.py +++ b/lerobot/common/policies/diffusion/diffusion_unet_image_policy.py @@ -243,10 +243,9 @@ class DiffusionUnetImagePolicy(BaseImagePolicy): result = {"action": action, "action_pred": action_pred} return result - def compute_loss(self, batch): - assert "valid_mask" not in batch - nobs = batch["obs"] - nactions = batch["action"] + def compute_loss(self, obs_dict, action): + nobs = obs_dict + nactions = action batch_size = nactions.shape[0] horizon = nactions.shape[1] diff --git a/lerobot/common/policies/diffusion/policy.py b/lerobot/common/policies/diffusion/policy.py index a0fe0eba..de8796ab 100644 --- a/lerobot/common/policies/diffusion/policy.py +++ b/lerobot/common/policies/diffusion/policy.py @@ -157,7 +157,8 @@ class DiffusionPolicy(nn.Module): "image": batch["observation.image"], "agent_pos": batch["observation.state"], } - loss = self.diffusion.compute_loss(obs_dict) + action = batch["action"] + loss = self.diffusion.compute_loss(obs_dict, action) loss.backward() grad_norm = torch.nn.utils.clip_grad_norm_( diff --git a/lerobot/common/transforms.py b/lerobot/common/transforms.py index 4974c086..ec967614 100644 --- a/lerobot/common/transforms.py +++ b/lerobot/common/transforms.py @@ -72,12 +72,12 @@ class NormalizeTransform(Transform): if inkey not in item: continue if self.mode == "mean_std": - mean = self.stats[f"{inkey}.mean"] - std = self.stats[f"{inkey}.std"] + mean = self.stats[inkey]["mean"] + std = self.stats[inkey]["std"] item[outkey] = (item[inkey] - mean) / (std + 1e-8) else: - min = self.stats[f"{inkey}.min"] - max = self.stats[f"{inkey}.max"] + min = self.stats[inkey]["min"] + max = self.stats[inkey]["max"] # normalize to [0,1] item[outkey] = (item[inkey] - min) / (max - min) # 
normalize to [-1, 1] @@ -89,12 +89,12 @@ class NormalizeTransform(Transform): if inkey not in item: continue if self.mode == "mean_std": - mean = self.stats[f"{inkey}.mean"] - std = self.stats[f"{inkey}.std"] + mean = self.stats[inkey]["mean"] + std = self.stats[inkey]["std"] item[outkey] = item[inkey] * std + mean else: - min = self.stats[f"{inkey}.min"] - max = self.stats[f"{inkey}.max"] + min = self.stats[inkey]["min"] + max = self.stats[inkey]["max"] item[outkey] = (item[inkey] + 1) / 2 item[outkey] = item[outkey] * (max - min) + min return item diff --git a/lerobot/scripts/eval.py b/lerobot/scripts/eval.py index fe0f7bb2..09399878 100644 --- a/lerobot/scripts/eval.py +++ b/lerobot/scripts/eval.py @@ -37,7 +37,6 @@ from pathlib import Path import einops import gymnasium as gym -import hydra import imageio import numpy as np import torch @@ -47,8 +46,8 @@ from lerobot.common.datasets.factory import make_dataset from lerobot.common.envs.factory import make_env from lerobot.common.logger import log_output_dir from lerobot.common.policies.factory import make_policy -from lerobot.common.utils import get_safe_torch_device, init_hydra_config, init_logging, set_global_seed from lerobot.common.transforms import apply_inverse_transform +from lerobot.common.utils import get_safe_torch_device, init_hydra_config, init_logging, set_global_seed def write_video(video_path, stacked_frames, fps): @@ -92,9 +91,12 @@ def eval_policy( fps: int = 15, return_first_video: bool = False, transform: callable = None, + seed=None, ): if policy is not None: policy.eval() + device = "cpu" if policy is None else next(policy.parameters()).device + start = time.time() sum_rewards = [] max_rewards = [] @@ -125,11 +127,11 @@ def eval_policy( policy.reset() else: logging.warning( - f"Policy {policy} doesnt have a `reset` method. This find if the policy doesnt rely on an internal state during rollout." + f"Policy {policy} doesnt have a `reset` method. 
It is required if the policy relies on an internal state during rollout." ) # reset the environment - observation, info = env.reset(seed=cfg.seed) + observation, info = env.reset(seed=seed) maybe_render_frame(env) rewards = [] @@ -138,13 +140,12 @@ def eval_policy( done = torch.tensor([False for _ in env.envs]) step = 0 - do_rollout = True - while do_rollout: + while not done.all(): # apply transform to normalize the observations observation = preprocess_observation(observation, transform) # send observation to device/gpu - observation = {key: observation[key].to(cfg.device, non_blocking=True) for key in observation} + observation = {key: observation[key].to(device, non_blocking=True) for key in observation} # get the next action for the environment with torch.inference_mode(): @@ -180,10 +181,6 @@ def eval_policy( step += 1 - if done.all(): - do_rollout = False - break - rewards = torch.stack(rewards, dim=1) successes = torch.stack(successes, dim=1) dones = torch.stack(dones, dim=1) @@ -295,6 +292,7 @@ def eval(cfg: dict, out_dir=None, stats_path=None): fps=cfg.env.fps, # TODO(rcadene): what should we do with the transform? 
transform=dataset.transform, + seed=cfg.seed, ) print(info["aggregated"]) diff --git a/lerobot/scripts/train.py b/lerobot/scripts/train.py index 5e9cd361..602fa5ab 100644 --- a/lerobot/scripts/train.py +++ b/lerobot/scripts/train.py @@ -145,7 +145,7 @@ def train(cfg: dict, out_dir=None, job_name=None): # ) logging.info("make_env") - env = make_env(cfg) + env = make_env(cfg, num_parallel_envs=cfg.eval_episodes) logging.info("make_policy") policy = make_policy(cfg) @@ -173,12 +173,11 @@ def train(cfg: dict, out_dir=None, job_name=None): eval_info, first_video = eval_policy( env, policy, - num_episodes=cfg.eval_episodes, - max_steps=cfg.env.episode_length, return_first_video=True, video_dir=Path(out_dir) / "eval", save_video=True, transform=dataset.transform, + seed=cfg.seed, ) log_eval_info(logger, eval_info["aggregated"], step, cfg, dataset, is_offline) if cfg.wandb.enable: @@ -211,7 +210,7 @@ def train(cfg: dict, out_dir=None, job_name=None): for key in batch: batch[key] = batch[key].to(cfg.device, non_blocking=True) - train_info = policy.update(batch, step) + train_info = policy(batch, step) # TODO(rcadene): is it ok if step_t=0 = 0 and not 1 as previously done? 
if step % cfg.log_freq == 0: @@ -223,6 +222,8 @@ def train(cfg: dict, out_dir=None, job_name=None): step += 1 + raise NotImplementedError() + demo_buffer = dataset if cfg.policy.balanced_sampling else None online_step = 0 is_offline = False diff --git a/tests/scripts/mock_dataset.py b/tests/scripts/mock_dataset.py index d9c86464..044417aa 100644 --- a/tests/scripts/mock_dataset.py +++ b/tests/scripts/mock_dataset.py @@ -18,28 +18,26 @@ Example: import argparse import shutil -from tensordict import TensorDict from pathlib import Path +import torch + def mock_dataset(in_data_dir, out_data_dir, num_frames): in_data_dir = Path(in_data_dir) out_data_dir = Path(out_data_dir) - # load full dataset as a tensor dict - in_td_data = TensorDict.load_memmap(in_data_dir / "replay_buffer") + # copy the first `n` frames for each data key so that we have real data + in_data_dict = torch.load(in_data_dir / "data_dict.pth") + out_data_dict = {key: in_data_dict[key][:num_frames].clone() for key in in_data_dict} + torch.save(out_data_dict, out_data_dir / "data_dict.pth") - # use 1 frame to know the specification of the dataset - # and copy it over `n` frames in the test artifact directory - out_td_data = in_td_data[0].expand(num_frames).memmap_like(out_data_dir / "replay_buffer") + # copy the full mapping between data_id and episode since it's small + in_ids_per_ep_path = in_data_dir / "data_ids_per_episode.pth" + out_ids_per_ep_path = out_data_dir / "data_ids_per_episode.pth" + shutil.copy(in_ids_per_ep_path, out_ids_per_ep_path) - # copy the first `n` frames so that we have real data - out_td_data[:num_frames] = in_td_data[:num_frames].clone() - - # make sure everything has been properly written - out_td_data.lock_() - - # copy the full statistics of dataset since it's pretty small + # copy the full statistics of dataset since it's small in_stats_path = in_data_dir / "stats.pth" out_stats_path = out_data_dir / "stats.pth" shutil.copy(in_stats_path, out_stats_path) diff --git 
a/tests/test_datasets.py b/tests/test_datasets.py index 00008259..e5ca0099 100644 --- a/tests/test_datasets.py +++ b/tests/test_datasets.py @@ -59,11 +59,7 @@ def test_factory(env_name, dataset_id): # ) # dataset = make_dataset(cfg) # # Get all of the data. -# all_data = TensorDictReplayBuffer( -# storage=buffer._storage, -# batch_size=len(buffer), -# sampler=SamplerWithoutReplacement(), -# ).sample().float() +# all_data = dataset.data_dict # # Note: we set the batch size to be smaller than the whole dataset to make sure we are testing batched # # computation of the statistics. While doing this, we also make sure it works when we don't divide the # # dataset into even batches. From edb125b35116a044574f7d406de19ee368d63583 Mon Sep 17 00:00:00 2001 From: Alexander Soare Date: Fri, 5 Apr 2024 11:03:28 +0100 Subject: [PATCH 09/80] backup wip --- lerobot/common/policies/act/policy.py | 390 ++++++++++++-------------- lerobot/configs/policy/act.yaml | 9 +- scripts/convert_act_weights.py | 2 + 3 files changed, 188 insertions(+), 213 deletions(-) diff --git a/lerobot/common/policies/act/policy.py b/lerobot/common/policies/act/policy.py index 906ea0cd..5071c09a 100644 --- a/lerobot/common/policies/act/policy.py +++ b/lerobot/common/policies/act/policy.py @@ -1,13 +1,13 @@ """Action Chunking Transformer Policy As per Learning Fine-Grained Bimanual Manipulation with Low-Cost Hardware (https://arxiv.org/abs/2304.13705). +The majority of changes here involve removing unused code, unifying naming, and adding helpful comments. """ -import logging import math import time from itertools import chain -from typing import Callable, Optional +from typing import Callable import einops import numpy as np @@ -26,40 +26,56 @@ from lerobot.common.utils import get_safe_torch_device class ActionChunkingTransformerPolicy(AbstractPolicy): """ Action Chunking Transformer Policy as per Learning Fine-Grained Bimanual Manipulation with Low-Cost - Hardware (https://arxiv.org/abs/2304.13705). 
+ Hardware (paper: https://arxiv.org/abs/2304.13705, code: https://github.com/tonyzhaozh/act) + + Note: In this code we use the terms `vae_encoder`, 'encoder', `decoder`. The meanings are as follows. + - The `vae_encoder` is, as per the literature around variational auto-encoders (VAE), the part of the + model that encodes the target data (a sequence of actions), and the condition (the robot + joint-space). + - A transformer with an `encoder` (not the VAE encoder) and `decoder` (not the VAE decoder) with + cross-attention is used as the VAE decoder. For these terms, we drop the `vae_` prefix because we + have an option to train this model without the variational objective (in which case we drop the + `vae_encoder` altogether, and nothing about this model has anything to do with a VAE). + + Transformer + Used alone for inference + (acts as VAE decoder + during training) + ┌───────────────────────┐ + │ Outputs │ + │ ▲ │ + │ ┌─────►┌───────┐ │ + ┌──────┐ │ │ │Transf.│ │ + │ │ │ ├─────►│decoder│ │ + ┌────┴────┐ │ │ │ │ │ │ + │ │ │ │ ┌───┴───┬─►│ │ │ + │ VAE │ │ │ │ │ └───────┘ │ + │ encoder │ │ │ │Transf.│ │ + │ │ │ │ │encoder│ │ + └───▲─────┘ │ │ │ │ │ + │ │ │ └───▲───┘ │ + │ │ │ │ │ + inputs └─────┼─────┘ │ + │ │ + └───────────────────────┘ """ name = "act" def __init__(self, cfg, device, n_action_steps=1): """ - Args: - vae: Whether to use the variational objective. TODO(now): Give more details. - temporal_agg: Whether to do temporal aggregation. For each timestep during rollout, the action - returned as an exponential moving average of previously generated actions for that timestep. - n_obs_steps: Number of time steps worth of observation to use as input. - horizon: The number of actions to generate in one forward pass. - kl_weight: Weight for KL divergence. Defaults to None. Only applicable when using the variational - objective. - batch_size: Training batch size. - grad_clip_norm: Optionally clip the gradients to have this value as the norm at most. 
Defaults to - None meaning gradient clipping is not applied. - lr: Learning rate. + TODO(alexander-soare): Add documentation for all parameters. """ super().__init__(n_action_steps) self.cfg = cfg self.n_action_steps = n_action_steps self.device = get_safe_torch_device(device) - self.model = ActionChunkingTransformer( - cfg, - state_dim=cfg.state_dim, - action_dim=cfg.action_dim, - horizon=cfg.horizon, - camera_names=cfg.camera_names, - use_vae=cfg.vae, - ) + self.model = _ActionChunkingTransformer(cfg) + self._create_optimizer() + self.to(self.device) + def _create_optimizer(self): optimizer_params_dicts = [ { "params": [ @@ -74,14 +90,12 @@ class ActionChunkingTransformerPolicy(AbstractPolicy): for n, p in self.model.named_parameters() if n.startswith("backbone") and p.requires_grad ], - "lr": cfg.lr_backbone, + "lr": self.cfg.lr_backbone, }, ] - self.optimizer = torch.optim.AdamW(optimizer_params_dicts, lr=cfg.lr, weight_decay=cfg.weight_decay) - - self.kl_weight = self.cfg.kl_weight - logging.info(f"KL Weight {self.kl_weight}") - self.to(self.device) + self.optimizer = torch.optim.AdamW( + optimizer_params_dicts, lr=self.cfg.lr, weight_decay=self.cfg.weight_decay + ) def update(self, replay_buffer, step): del step @@ -137,7 +151,6 @@ class ActionChunkingTransformerPolicy(AbstractPolicy): batch = process_batch(batch, self.cfg.horizon, num_slices) data_s = time.time() - start_time - print(data_s) loss = self.compute_loss(batch) loss.backward() @@ -192,16 +205,6 @@ class ActionChunkingTransformerPolicy(AbstractPolicy): "image": observation["image", "top"], "agent_pos": observation["state"], } - # qpos = obs_dict["agent_pos"] - # img = obs_dict["image"] - # qpos_ = torch.load('/tmp/qpos.pth') - # img_ = torch.load('/tmp/curr_image.pth') - # out_ = torch.load('/tmp/out.pth') - # import cv2, numpy as np - # cv2.imwrite("ours.png", (obs_dict["image"][0, 0].permute(1, 2, 0).cpu().numpy() * 255).astype(np.uint8)) - # cv2.imwrite("theirs.png", (img_[0, 0].permute(1, 2, 
0).cpu().numpy() * 255).astype(np.uint8)) - # out = self._forward(qpos_, img_) - # breakpoint() action = self._forward(qpos=obs_dict["agent_pos"] * 0.182, image=obs_dict["image"]) if self.cfg.temporal_agg: @@ -236,14 +239,14 @@ class ActionChunkingTransformerPolicy(AbstractPolicy): loss_dict = {} loss_dict["l1"] = l1 - if self.cfg.vae: + if self.cfg.use_vae: # Calculate Dₖₗ(latent_pdf || standard_normal). Note: After computing the KL-divergence for # each dimension independently, we sum over the latent dimension to get the total # KL-divergence per batch element, then take the mean over the batch. # (See App. B of https://arxiv.org/abs/1312.6114 for more details). mean_kld = (-0.5 * (1 + log_sigma_x2 - mu.pow(2) - (log_sigma_x2).exp())).sum(-1).mean() loss_dict["kl"] = mean_kld - loss_dict["loss"] = loss_dict["l1"] + loss_dict["kl"] * self.kl_weight + loss_dict["loss"] = loss_dict["l1"] + loss_dict["kl"] * self.cfg.kl_weight else: loss_dict["loss"] = loss_dict["l1"] return loss_dict @@ -252,135 +255,74 @@ class ActionChunkingTransformerPolicy(AbstractPolicy): return action -def create_sinusoidal_position_embedding(n_position, d_hid): - def get_position_angle_vec(position): - return [position / np.power(10000, 2 * (hid_j // 2) / d_hid) for hid_j in range(d_hid)] - - sinusoid_table = np.array([get_position_angle_vec(pos_i) for pos_i in range(n_position)]) - sinusoid_table[:, 0::2] = np.sin(sinusoid_table[:, 0::2]) # dim 2i - sinusoid_table[:, 1::2] = np.cos(sinusoid_table[:, 1::2]) # dim 2i+1 - - return torch.FloatTensor(sinusoid_table).unsqueeze(0) - - # TODO(alexander-soare) move all this code into the policy when we have the policy API established. -class ActionChunkingTransformer(nn.Module): - """ - Action Chunking Transformer as per Learning Fine-Grained Bimanual Manipulation with Low-Cost Hardware - (paper: https://arxiv.org/abs/2304.13705, code: https://github.com/tonyzhaozh/act) - - Note: In this code we use the terms `vae_encoder`, 'encoder', `decoder`. 
The meanings are as follows. - - The `vae_encoder` is, as per the literature around variational auto-encoders (VAE), the part of the - model that encodes the target data (a sequence of actions), and the condition (the robot - joint-space). - - A transformer with an `encoder` (not the VAE encoder) and `decoder` (not the VAE decoder) with - cross-attention is used as the VAE decoder. For these terms, we drop the `vae_` prefix because we - have an option to train this model without the variational objective (in which case we drop the - `vae_encoder` altogether, and nothing about this model has anything to do with a VAE). - - Transformer - Used alone for inference - (acts as VAE decoder - during training) - ┌───────────────────────┐ - │ Outputs │ - │ ▲ │ - │ ┌─────►┌───────┐ │ - ┌──────┐ │ │ │Transf.│ │ - │ │ │ ├─────►│decoder│ │ - ┌────┴────┐ │ │ │ │ │ │ - │ │ │ │ ┌───┴───┬─►│ │ │ - │ VAE │ │ │ │ │ └───────┘ │ - │ encoder │ │ │ │Transf.│ │ - │ │ │ │ │encoder│ │ - └───▲─────┘ │ │ │ │ │ - │ │ │ └───▲───┘ │ - │ │ │ │ │ - inputs └─────┼─────┘ │ - │ │ - └───────────────────────┘ - """ - - def __init__(self, args, state_dim, action_dim, horizon, camera_names, use_vae): - """Initializes the model. - Parameters: - state_dim: robot state dimension of the environment - horizon: number of object queries, ie detection slot. This is the maximal number of objects - DETR can detect in a single image. For COCO, we recommend 100 queries. - - Args: - state_dim: Robot positional state dimension. - action_dim: Action dimension. - horizon: The number of actions to generate in one forward pass. - use_vae: Whether to use the variational objective. TODO(now): Give more details. 
- """ +class _ActionChunkingTransformer(nn.Module): + def __init__(self, cfg): super().__init__() - self.camera_names = camera_names - self.use_vae = use_vae - self.horizon = horizon - self.hidden_dim = args.hidden_dim + self.camera_names = cfg.camera_names + self.use_vae = cfg.use_vae + self.horizon = cfg.horizon + self.d_model = cfg.d_model transformer_common_kwargs = dict( # noqa: C408 - d_model=self.hidden_dim, - nhead=args.nheads, - dim_feedforward=args.dim_feedforward, - dropout=args.dropout, - activation=args.activation, - normalize_before=args.pre_norm, + d_model=self.d_model, + num_heads=cfg.num_heads, + dim_feedforward=cfg.dim_feedforward, + dropout=cfg.dropout, + activation=cfg.activation, + normalize_before=cfg.pre_norm, ) # BERT style VAE encoder with input [cls, *joint_space_configuration, *action_sequence]. # The cls token forms parameters of the latent's distribution (like this [*means, *log_variances]). - if use_vae: - # TODO(now): args.enc_layers shouldn't be shared with the transformer decoder - self.vae_encoder = TransformerEncoder(num_layers=args.enc_layers, **transformer_common_kwargs) - self.cls_embed = nn.Embedding(1, self.hidden_dim) + if self.use_vae: + self.vae_encoder = _TransformerEncoder(num_layers=cfg.vae_enc_layers, **transformer_common_kwargs) + self.vae_encoder_cls_embed = nn.Embedding(1, self.d_model) # Projection layer for joint-space configuration to hidden dimension. - self.vae_encoder_robot_state_input_proj = nn.Linear(state_dim, self.hidden_dim) + self.vae_encoder_robot_state_input_proj = nn.Linear(cfg.state_dim, self.d_model) # Projection layer for action (joint-space target) to hidden dimension. - self.vae_encoder_action_input_proj = nn.Linear(state_dim, self.hidden_dim) - # Final size of latent z. TODO(now): Add to hyperparams. 
- self.latent_dim = 32 + self.vae_encoder_action_input_proj = nn.Linear(cfg.state_dim, self.d_model) + self.latent_dim = cfg.latent_dim # Projection layer from the VAE encoder's output to the latent distribution's parameter space. - self.vae_encoder_latent_output_proj = nn.Linear(self.hidden_dim, self.latent_dim * 2) - # Fixed sinusoidal positional embedding the whole input to the VAE encoder. + self.vae_encoder_latent_output_proj = nn.Linear(self.d_model, self.latent_dim * 2) + # Fixed sinusoidal positional embedding the whole input to the VAE encoder. Unsqueeze for batch + # dimension. self.register_buffer( - "vae_encoder_pos_enc", create_sinusoidal_position_embedding(1 + 1 + horizon, self.hidden_dim) + "vae_encoder_pos_enc", + _create_sinusoidal_position_embedding(1 + 1 + self.horizon, self.d_model).unsqueeze(0), ) # Backbone for image feature extraction. - self.backbone_position_embedding = SinusoidalPositionEmbedding2D(self.hidden_dim // 2) - backbone_model = getattr(torchvision.models, args.backbone)( - replace_stride_with_dilation=[False, False, args.dilation], - pretrained=True, # TODO(now): Add pretrained option + backbone_model = getattr(torchvision.models, cfg.backbone)( + replace_stride_with_dilation=[False, False, cfg.dilation], + pretrained=cfg.pretrained_backbone, norm_layer=FrozenBatchNorm2d, ) # Note: The forward method of this returns a dict: {"feature_map": output}. self.backbone = IntermediateLayerGetter(backbone_model, return_layers={"layer4": "feature_map"}) # Transformer (acts as VAE decoder when training with the variational objective). - self.encoder = TransformerEncoder(num_layers=args.enc_layers, **transformer_common_kwargs) - self.decoder = TransformerDecoder(num_layers=args.dec_layers, **transformer_common_kwargs) + self.encoder = _TransformerEncoder(num_layers=cfg.enc_layers, **transformer_common_kwargs) + self.decoder = _TransformerDecoder(num_layers=cfg.dec_layers, **transformer_common_kwargs) # Transformer encoder input projections. 
The tokens will be structured like # [latent, robot_state, image_feature_map_pixels]. + self.encoder_robot_state_input_proj = nn.Linear(cfg.state_dim, self.d_model) + self.encoder_latent_input_proj = nn.Linear(self.latent_dim, self.d_model) self.encoder_img_feat_input_proj = nn.Conv2d( - backbone_model.fc.in_features, self.hidden_dim, kernel_size=1 + backbone_model.fc.in_features, self.d_model, kernel_size=1 ) - self.encoder_robot_state_input_proj = nn.Linear(state_dim, self.hidden_dim) - self.encoder_latent_input_proj = nn.Linear(self.latent_dim, self.hidden_dim) - # TODO(now): Fix this nonsense. One positional embedding is needed. We should extract the image - # feature dimension with a dry run. - self.additional_pos_embed = nn.Embedding( - 2, self.hidden_dim - ) # learned position embedding for proprio and latent + # Transformer encoder positional embeddings. + self.encoder_robot_and_latent_pos_embed = nn.Embedding(2, self.d_model) + self.encoder_cam_feat_pos_embed = _SinusoidalPositionEmbedding2D(self.d_model // 2) # Transformer decoder. # Learnable positional embedding for the transformer's decoder (in the style of DETR object queries). - self.decoder_pos_embed_embed = nn.Embedding(horizon, self.hidden_dim) + self.decoder_pos_embed = nn.Embedding(self.horizon, self.d_model) + # Final action regression head on the output of the transformer's decoder. - self.action_head = nn.Linear(self.hidden_dim, action_dim) + self.action_head = nn.Linear(self.d_model, cfg.action_dim) self._reset_parameters() @@ -390,7 +332,7 @@ class ActionChunkingTransformer(nn.Module): if p.dim() > 1: nn.init.xavier_uniform_(p) - def forward(self, robot_state, image, actions=None): + def forward(self, robot_state: Tensor, image: Tensor, actions: Tensor | None = None): """ Args: robot_state: (B, J) batch of robot joint configurations. @@ -405,10 +347,12 @@ class ActionChunkingTransformer(nn.Module): batch_size, _ = robot_state.shape - # Prepare the latent for input to the transformer. 
+ # Prepare the latent for input to the transformer encoder. if self.use_vae and actions is not None: # Prepare the input to the VAE encoder: [cls, *joint_space_configuration, *action_sequence]. - cls_embed = einops.repeat(self.cls_embed.weight, "1 d -> b 1 d", b=batch_size) # (B, 1, D) + cls_embed = einops.repeat( + self.vae_encoder_cls_embed.weight, "1 d -> b 1 d", b=batch_size + ) # (B, 1, D) robot_state_embed = self.vae_encoder_robot_state_input_proj(robot_state).unsqueeze(1) # (B, 1, D) action_embed = self.vae_encoder_action_input_proj(actions) # (B, S, D) vae_encoder_input = torch.cat([cls_embed, robot_state_embed, action_embed], axis=1) # (B, S+2, D) @@ -417,7 +361,7 @@ class ActionChunkingTransformer(nn.Module): pos_embed = self.vae_encoder_pos_enc.clone().detach() # (1, S+2, D) # Forward pass through VAE encoder and sample the latent with the reparameterization trick. cls_token_out = self.vae_encoder( - vae_encoder_input.permute(1, 0, 2), pos=pos_embed.permute(1, 0, 2) + vae_encoder_input.permute(1, 0, 2), pos_embed=pos_embed.permute(1, 0, 2) )[0] # (B, D) latent_pdf_params = self.vae_encoder_latent_output_proj(cls_token_out) mu = latent_pdf_params[:, : self.latent_dim] @@ -432,23 +376,25 @@ class ActionChunkingTransformer(nn.Module): robot_state.device ) - # Prepare all other transformer inputs. - # Image observation features and position embeddings. + # Prepare all other transformer encoder inputs. + # Camera observation features and positional embeddings. 
all_cam_features = [] - all_cam_pos = [] + all_cam_pos_embeds = [] for cam_id, _ in enumerate(self.camera_names): cam_features = self.backbone(image[:, cam_id])["feature_map"] - pos = self.backbone_position_embedding(cam_features).to(dtype=cam_features.dtype) + cam_pos_embed = self.encoder_cam_feat_pos_embed(cam_features).to(dtype=cam_features.dtype) cam_features = self.encoder_img_feat_input_proj(cam_features) # (B, C, h, w) all_cam_features.append(cam_features) - all_cam_pos.append(pos) - # Concatenate image observation feature maps along the width dimension. + all_cam_pos_embeds.append(cam_pos_embed) + # Concatenate camera observation feature maps and positional embeddings along the width dimension. encoder_in = torch.cat(all_cam_features, axis=3) - pos = torch.cat(all_cam_pos, axis=3) + cam_pos_embed = torch.cat(all_cam_pos_embeds, axis=3) + + # Get positional embeddings for robot state and latent. robot_state_embed = self.encoder_robot_state_input_proj(robot_state) latent_embed = self.encoder_latent_input_proj(latent_sample) - # TODO(now): Explain all of this madness. + # Stack encoder input and positional embeddings moving to (S, B, C). encoder_in = torch.cat( [ torch.stack([latent_embed, robot_state_embed], axis=0), @@ -456,60 +402,68 @@ class ActionChunkingTransformer(nn.Module): ] ) pos_embed = torch.cat( - [self.additional_pos_embed.weight.unsqueeze(1), pos.flatten(2).permute(2, 0, 1)], axis=0 + [ + self.encoder_robot_and_latent_pos_embed.weight.unsqueeze(1), + cam_pos_embed.flatten(2).permute(2, 0, 1), + ], + axis=0, ) - encoder_out = self.encoder(encoder_in, pos=pos_embed) + # Forward pass through the transformer modules. 
+ encoder_out = self.encoder(encoder_in, pos_embed=pos_embed) decoder_in = torch.zeros( - (self.horizon, batch_size, self.hidden_dim), dtype=pos_embed.dtype, device=pos_embed.device + (self.horizon, batch_size, self.d_model), dtype=pos_embed.dtype, device=pos_embed.device ) decoder_out = self.decoder( decoder_in, encoder_out, encoder_pos_embed=pos_embed, - decoder_pos_embed=self.decoder_pos_embed_embed.weight.unsqueeze(1), - ).transpose(0, 1) # back to (B, S, C) + decoder_pos_embed=self.decoder_pos_embed.weight.unsqueeze(1), + ) + + # Move back to (B, S, C). + decoder_out = decoder_out.transpose(0, 1) actions = self.action_head(decoder_out) + return actions, [mu, log_sigma_x2] -class TransformerEncoder(nn.Module): - def __init__( - self, - num_layers, - d_model, - nhead, - dim_feedforward=2048, - dropout=0.1, - activation="relu", - normalize_before=False, - ): +class _TransformerEncoder(nn.Module): + """Convenience module for running multiple encoder layers, maybe followed by normalization.""" + + def __init__(self, num_layers: int, **encoder_layer_kwargs: dict): super().__init__() self.layers = nn.ModuleList( - [ - TransformerEncoderLayer( - d_model, nhead, dim_feedforward, dropout, activation, normalize_before - ) - for _ in range(num_layers) - ] + [_TransformerEncoderLayer(**encoder_layer_kwargs) for _ in range(num_layers)] + ) + self.norm = ( + nn.LayerNorm(encoder_layer_kwargs["d_model"]) + if encoder_layer_kwargs["normalize_before"] + else nn.Identity() ) - self.norm = nn.LayerNorm(d_model) if normalize_before else nn.Identity() - def forward(self, x, pos: Optional[Tensor] = None): + def forward(self, x: Tensor, pos_embed: Tensor | None = None) -> Tensor: for layer in self.layers: - x = layer(x, pos=pos) + x = layer(x, pos_embed=pos_embed) x = self.norm(x) return x -class TransformerEncoderLayer(nn.Module): +class _TransformerEncoderLayer(nn.Module): def __init__( - self, d_model, nhead, dim_feedforward=2048, dropout=0.1, activation="relu", 
normalize_before=False + self, + d_model: int, + num_heads: int, + dim_feedforward: int, + dropout: float, + activation: str, + normalize_before: bool, ): super().__init__() - self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout) - # Implementation of Feedforward model + self.self_attn = nn.MultiheadAttention(d_model, num_heads, dropout=dropout) + + # Feed forward layers. self.linear1 = nn.Linear(d_model, dim_feedforward) self.dropout = nn.Dropout(dropout) self.linear2 = nn.Linear(dim_feedforward, d_model) @@ -522,7 +476,7 @@ class TransformerEncoderLayer(nn.Module): self.activation = _get_activation_fn(activation) self.normalize_before = normalize_before - def forward(self, x, pos_embed: Optional[Tensor] = None): + def forward(self, x, pos_embed: Tensor | None = None) -> Tensor: skip = x if self.normalize_before: x = self.norm1(x) @@ -542,32 +496,23 @@ class TransformerEncoderLayer(nn.Module): return x -class TransformerDecoder(nn.Module): - def __init__( - self, - num_layers, - d_model, - nhead, - dim_feedforward=2048, - dropout=0.1, - activation="relu", - normalize_before=False, - ): +class _TransformerDecoder(nn.Module): + def __init__(self, num_layers: int, **decoder_layer_kwargs): + """Convenience module for running multiple decoder layers followed by normalization.""" super().__init__() self.layers = nn.ModuleList( - [ - TransformerDecoderLayer( - d_model, nhead, dim_feedforward, dropout, activation, normalize_before - ) - for _ in range(num_layers) - ] + [_TransformerDecoderLayer(**decoder_layer_kwargs) for _ in range(num_layers)] ) self.num_layers = num_layers - self.norm = nn.LayerNorm(d_model) + self.norm = nn.LayerNorm(decoder_layer_kwargs["d_model"]) def forward( - self, x, encoder_out, decoder_pos_embed: Tensor | None = None, encoder_pos_embed: Tensor | None = None - ): + self, + x: Tensor, + encoder_out: Tensor, + decoder_pos_embed: Tensor | None = None, + encoder_pos_embed: Tensor | None = None, + ) -> Tensor: for layer in 
self.layers: x = layer( x, encoder_out, decoder_pos_embed=decoder_pos_embed, encoder_pos_embed=encoder_pos_embed @@ -577,14 +522,21 @@ class TransformerDecoder(nn.Module): return x -class TransformerDecoderLayer(nn.Module): +class _TransformerDecoderLayer(nn.Module): def __init__( - self, d_model, nhead, dim_feedforward=2048, dropout=0.1, activation="relu", normalize_before=False + self, + d_model: int, + num_heads: int, + dim_feedforward: int, + dropout: float, + activation: str, + normalize_before: bool, ): super().__init__() - self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout) - self.multihead_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout) - # Implementation of Feedforward model + self.self_attn = nn.MultiheadAttention(d_model, num_heads, dropout=dropout) + self.multihead_attn = nn.MultiheadAttention(d_model, num_heads, dropout=dropout) + + # Feed forward layers. self.linear1 = nn.Linear(d_model, dim_feedforward) self.dropout = nn.Dropout(dropout) self.linear2 = nn.Linear(dim_feedforward, d_model) @@ -650,8 +602,26 @@ class TransformerDecoderLayer(nn.Module): return x -class SinusoidalPositionEmbedding2D(nn.Module): - """Sinusoidal positional embeddings similar to what's presented in Attention Is All You Need. +def _create_sinusoidal_position_embedding(num_positions: int, dimension: int) -> Tensor: + """1D sinusoidal positional embeddings as in Attention is All You Need. + + Args: + num_positions: Number of token positions required. + Returns: (num_positions, dimension) position embeddings (the first dimension is the batch dimension). 
+ + """ + + def get_position_angle_vec(position): + return [position / np.power(10000, 2 * (hid_j // 2) / dimension) for hid_j in range(dimension)] + + sinusoid_table = np.array([get_position_angle_vec(pos_i) for pos_i in range(num_positions)]) + sinusoid_table[:, 0::2] = np.sin(sinusoid_table[:, 0::2]) # dim 2i + sinusoid_table[:, 1::2] = np.cos(sinusoid_table[:, 1::2]) # dim 2i+1 + return torch.from_numpy(sinusoid_table).float() + + +class _SinusoidalPositionEmbedding2D(nn.Module): + """2D sinusoidal positional embeddings similar to what's presented in Attention Is All You Need. The variation is that the position indices are normalized in [0, 2π] (not quite: the lower bound is 1/H for the vertical direction, and 1/W for the horizontal direction. @@ -705,7 +675,7 @@ class SinusoidalPositionEmbedding2D(nn.Module): def _get_activation_fn(activation: str) -> Callable: - """Return an activation function given a string""" + """Return an activation function given a string.""" if activation == "relu": return F.relu if activation == "gelu": diff --git a/lerobot/configs/policy/act.yaml b/lerobot/configs/policy/act.yaml index 22b6cd6f..3551768c 100644 --- a/lerobot/configs/policy/act.yaml +++ b/lerobot/configs/policy/act.yaml @@ -21,24 +21,27 @@ policy: lr: 1e-5 lr_backbone: 1e-5 + pretrained_backbone: true weight_decay: 1e-4 grad_clip_norm: 10 backbone: resnet18 horizon: ${horizon} # chunk_size kl_weight: 10 - hidden_dim: 512 + d_model: 512 dim_feedforward: 3200 + vae_enc_layers: 4 enc_layers: 4 dec_layers: 1 - nheads: 8 + num_heads: 8 #camera_names: [top, front_close, left_pillar, right_pillar] camera_names: [top] dilation: false dropout: 0.1 pre_norm: false activation: relu + latent_dim: 32 - vae: true + use_vae: true batch_size: 8 diff --git a/scripts/convert_act_weights.py b/scripts/convert_act_weights.py index c8f83422..b1492009 100644 --- a/scripts/convert_act_weights.py +++ b/scripts/convert_act_weights.py @@ -42,6 +42,8 @@ start_replacements = [ 
("model.transformer.encoder.", "model.encoder."), ("model.transformer.decoder.", "model.decoder."), ("model.backbones.0.0.body.", "model.backbone."), + ("model.additional_pos_embed.weight", "model.encoder_robot_and_latent_pos_embed.weight"), + ("model.cls_embed.weight", "model.vae_encoder_cls_embed.weight"), ] for to_replace, replace_with in start_replacements: From ad3379a73ab88616434889644afcdf58a4d3302f Mon Sep 17 00:00:00 2001 From: Cadene Date: Fri, 5 Apr 2024 10:59:32 +0000 Subject: [PATCH 10/80] fix memory leak due to itertools.cycle --- lerobot/common/datasets/utils.py | 9 +++++++++ lerobot/common/utils.py | 12 ++++++++++++ lerobot/scripts/train.py | 9 +++++++-- 3 files changed, 28 insertions(+), 2 deletions(-) diff --git a/lerobot/common/datasets/utils.py b/lerobot/common/datasets/utils.py index 6b207b4d..18b091cd 100644 --- a/lerobot/common/datasets/utils.py +++ b/lerobot/common/datasets/utils.py @@ -203,3 +203,12 @@ def compute_or_load_stats(dataset, batch_size=32, max_num_samples=None): torch.save(stats, stats_path) return stats + + +def cycle(iterable): + iterator = iter(iterable) + while True: + try: + yield next(iterator) + except StopIteration: + iterator = iter(iterable) diff --git a/lerobot/common/utils.py b/lerobot/common/utils.py index 7ed29334..e3e22832 100644 --- a/lerobot/common/utils.py +++ b/lerobot/common/utils.py @@ -95,3 +95,15 @@ def init_hydra_config(config_path: str, overrides: list[str] | None = None) -> D ) cfg = hydra.compose(Path(config_path).stem, overrides) return cfg + + +def print_cuda_memory_usage(): + import gc + + gc.collect() + # Also clear the cache if you want to fully release the memory + torch.cuda.empty_cache() + print("Current GPU Memory Allocated: {:.2f} MB".format(torch.cuda.memory_allocated(0) / 1024**2)) + print("Maximum GPU Memory Allocated: {:.2f} MB".format(torch.cuda.max_memory_allocated(0) / 1024**2)) + print("Current GPU Memory Reserved: {:.2f} MB".format(torch.cuda.memory_reserved(0) / 1024**2)) + 
print("Maximum GPU Memory Reserved: {:.2f} MB".format(torch.cuda.max_memory_reserved(0) / 1024**2)) diff --git a/lerobot/scripts/train.py b/lerobot/scripts/train.py index 602fa5ab..cca26902 100644 --- a/lerobot/scripts/train.py +++ b/lerobot/scripts/train.py @@ -1,5 +1,4 @@ import logging -from itertools import cycle from pathlib import Path import hydra @@ -7,10 +6,16 @@ import numpy as np import torch from lerobot.common.datasets.factory import make_dataset +from lerobot.common.datasets.utils import cycle from lerobot.common.envs.factory import make_env from lerobot.common.logger import Logger, log_output_dir from lerobot.common.policies.factory import make_policy -from lerobot.common.utils import format_big_number, get_safe_torch_device, init_logging, set_global_seed +from lerobot.common.utils import ( + format_big_number, + get_safe_torch_device, + init_logging, + set_global_seed, +) from lerobot.scripts.eval import eval_policy From a420714ee4e8c4d5afbc8cc65fe98ef78965a8cf Mon Sep 17 00:00:00 2001 From: Cadene Date: Fri, 5 Apr 2024 11:33:39 +0000 Subject: [PATCH 11/80] fix: action_is_pad was missing in compute_loss --- .../diffusion/diffusion_unet_image_policy.py | 16 ++++++++++++---- lerobot/common/policies/diffusion/policy.py | 7 +------ 2 files changed, 13 insertions(+), 10 deletions(-) diff --git a/lerobot/common/policies/diffusion/diffusion_unet_image_policy.py b/lerobot/common/policies/diffusion/diffusion_unet_image_policy.py index 373e4b6c..f7432db3 100644 --- a/lerobot/common/policies/diffusion/diffusion_unet_image_policy.py +++ b/lerobot/common/policies/diffusion/diffusion_unet_image_policy.py @@ -243,9 +243,12 @@ class DiffusionUnetImagePolicy(BaseImagePolicy): result = {"action": action, "action_pred": action_pred} return result - def compute_loss(self, obs_dict, action): - nobs = obs_dict - nactions = action + def compute_loss(self, batch): + nobs = { + "image": batch["observation.image"], + "agent_pos": batch["observation.state"], + } + nactions = 
batch["action"] batch_size = nactions.shape[0] horizon = nactions.shape[1] @@ -302,6 +305,11 @@ class DiffusionUnetImagePolicy(BaseImagePolicy): loss = F.mse_loss(pred, target, reduction="none") loss = loss * loss_mask.type(loss.dtype) - loss = reduce(loss, "b ... -> b (...)", "mean") + + if "action_is_pad" in batch: + in_episode_bound = ~batch["action_is_pad"] + loss = loss * in_episode_bound[:, :, None].type(loss.dtype) + + loss = reduce(loss, "b t c -> b", "mean", b=batch_size) loss = loss.mean() return loss diff --git a/lerobot/common/policies/diffusion/policy.py b/lerobot/common/policies/diffusion/policy.py index de8796ab..a4f4a450 100644 --- a/lerobot/common/policies/diffusion/policy.py +++ b/lerobot/common/policies/diffusion/policy.py @@ -153,12 +153,7 @@ class DiffusionPolicy(nn.Module): data_s = time.time() - start_time - obs_dict = { - "image": batch["observation.image"], - "agent_pos": batch["observation.state"], - } - action = batch["action"] - loss = self.diffusion.compute_loss(obs_dict, action) + loss = self.diffusion.compute_loss(batch) loss.backward() grad_norm = torch.nn.utils.clip_grad_norm_( From c17dffe944dac4ea33b1d1b941bafa6e91e0c352 Mon Sep 17 00:00:00 2001 From: Cadene Date: Fri, 5 Apr 2024 11:47:15 +0000 Subject: [PATCH 12/80] policies/utils.py --- lerobot/common/policies/utils.py | 10 ++++++++++ 1 file changed, 10 insertions(+) create mode 100644 lerobot/common/policies/utils.py diff --git a/lerobot/common/policies/utils.py b/lerobot/common/policies/utils.py new file mode 100644 index 00000000..b0503fe4 --- /dev/null +++ b/lerobot/common/policies/utils.py @@ -0,0 +1,10 @@ +def populate_queues(queues, batch): + for key in batch: + if len(queues[key]) != queues[key].maxlen: + # initialize by copying the first observation several times until the queue is full + while len(queues[key]) != queues[key].maxlen: + queues[key].append(batch[key]) + else: + # add latest observation to the queue + queues[key].append(batch[key]) + return queues From 
ab3cd3a7ba318e5eefea67a47b63b2f9688923dd Mon Sep 17 00:00:00 2001 From: Simon Alibert Date: Fri, 5 Apr 2024 15:35:20 +0200 Subject: [PATCH 13/80] (WIP) Add gym-xarm --- lerobot/common/envs/factory.py | 12 ++++++++++-- poetry.lock | 25 ++++++++++++++++++++++-- pyproject.toml | 4 ++++ tests/test_envs.py | 35 +++++++++++++++++----------------- 4 files changed, 54 insertions(+), 22 deletions(-) diff --git a/lerobot/common/envs/factory.py b/lerobot/common/envs/factory.py index 788af3cb..ab3e9294 100644 --- a/lerobot/common/envs/factory.py +++ b/lerobot/common/envs/factory.py @@ -9,9 +9,17 @@ def make_env(cfg, num_parallel_envs=0) -> gym.Env | gym.vector.SyncVectorEnv: kwargs = {} if cfg.env.name == "simxarm": - kwargs["task"] = cfg.env.task + import gym_xarm # noqa: F401 + + assert cfg.env.task == "lift" + env_fn = lambda: gym.make( + "gym_xarm/XarmLift-v0", + render_mode="rgb_array", + max_episode_steps=cfg.env.episode_length, + **kwargs, + ) elif cfg.env.name == "pusht": - import gym_pusht # noqa + import gym_pusht # noqa: F401 # assert kwargs["seed"] > 200, "Seed 0-200 are used for the demonstration dataset, so we don't want to seed the eval env with this range." 
kwargs.update( diff --git a/poetry.lock b/poetry.lock index 8fb6b7a7..b9e31930 100644 --- a/poetry.lock +++ b/poetry.lock @@ -900,7 +900,27 @@ shapely = "^2.0.3" type = "git" url = "git@github.com:huggingface/gym-pusht.git" reference = "HEAD" -resolved_reference = "d7e1a39a31b1368741e9674791007d7cccf046a3" +resolved_reference = "0fe4449cca5a2b08f529f7a07fbf5b9df24962ec" + +[[package]] +name = "gym-xarm" +version = "0.1.0" +description = "A gym environment for xArm" +optional = true +python-versions = "^3.10" +files = [] +develop = false + +[package.dependencies] +gymnasium = "^0.29.1" +gymnasium-robotics = "^1.2.4" +mujoco = "^2.3.7" + +[package.source] +type = "git" +url = "git@github.com:huggingface/gym-xarm.git" +reference = "HEAD" +resolved_reference = "2eb83fc4fc871b9d271c946d169e42f226ac3a7c" [[package]] name = "gymnasium" @@ -3611,8 +3631,9 @@ testing = ["big-O", "jaraco.functools", "jaraco.itertools", "more-itertools", "p [extras] pusht = ["gym_pusht"] +xarm = ["gym_xarm"] [metadata] lock-version = "2.0" python-versions = "^3.10" -content-hash = "3eee17e4bf2b7a570f41ef9c400ec5a24a3113f62a13162229cf43504ca0d005" +content-hash = "c9524cdf000eaa755a2ab3be669118222b4f8b1c262013f103f6874cbd54eeb6" diff --git a/pyproject.toml b/pyproject.toml index d0fc7c0d..b7e1b9fb 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -53,9 +53,13 @@ gymnasium-robotics = "^1.2.4" gymnasium = "^0.29.1" cmake = "^3.29.0.1" gym_pusht = { git = "git@github.com:huggingface/gym-pusht.git", optional = true} +gym_xarm = { git = "git@github.com:huggingface/gym-xarm.git", optional = true} +# gym_pusht = { path = "../gym-pusht", develop = true, optional = true} +# gym_xarm = { path = "../gym-xarm", develop = true, optional = true} [tool.poetry.extras] pusht = ["gym_pusht"] +xarm = ["gym_xarm"] [tool.poetry.group.dev.dependencies] pre-commit = "^3.6.2" diff --git a/tests/test_envs.py b/tests/test_envs.py index 0c56f4fc..665c1fba 100644 --- a/tests/test_envs.py +++ b/tests/test_envs.py @@ 
-3,6 +3,8 @@ from tensordict import TensorDict import torch from torchrl.envs.utils import check_env_specs, step_mdp from lerobot.common.datasets.factory import make_dataset +import gymnasium as gym +from gymnasium.utils.env_checker import check_env from lerobot.common.envs.aloha.env import AlohaEnv from lerobot.common.envs.factory import make_env @@ -61,29 +63,26 @@ def test_aloha(task, from_pixels, pixels_only): @pytest.mark.parametrize( - "task,from_pixels,pixels_only", + "env_task, obs_type", [ - ("lift", False, False), - ("lift", True, False), - ("lift", True, True), + ("XarmLift-v0", "state"), + ("XarmLift-v0", "pixels"), + ("XarmLift-v0", "pixels_agent_pos"), # TODO(aliberts): Add simxarm other tasks - # ("reach", False, False), - # ("reach", True, False), - # ("push", False, False), - # ("push", True, False), - # ("peg_in_box", False, False), - # ("peg_in_box", True, False), ], ) -def test_simxarm(task, from_pixels, pixels_only): - env = SimxarmEnv( - task, - from_pixels=from_pixels, - pixels_only=pixels_only, - image_size=84 if from_pixels else None, - ) +def test_xarm(env_task, obs_type): + import gym_xarm + env = gym.make(f"gym_xarm/{env_task}", obs_type=obs_type) + # env = SimxarmEnv( + # task, + # from_pixels=from_pixels, + # pixels_only=pixels_only, + # image_size=84 if from_pixels else None, + # ) # print_spec_rollout(env) - check_env_specs(env) + # check_env_specs(env) + check_env(env) @pytest.mark.parametrize( From f56b1a0e1630867691fd07329cd52c257ee3d44c Mon Sep 17 00:00:00 2001 From: Cadene Date: Fri, 5 Apr 2024 13:40:31 +0000 Subject: [PATCH 14/80] WIP tdmpc --- lerobot/common/policies/tdmpc/policy.py | 207 +++++++++++++----------- lerobot/configs/env/simxarm.yaml | 2 +- lerobot/configs/policy/tdmpc.yaml | 2 +- sbatch.sh | 4 +- 4 files changed, 121 insertions(+), 94 deletions(-) diff --git a/lerobot/common/policies/tdmpc/policy.py b/lerobot/common/policies/tdmpc/policy.py index 64dcc94d..85700913 100644 --- a/lerobot/common/policies/tdmpc/policy.py
+++ b/lerobot/common/policies/tdmpc/policy.py @@ -1,6 +1,7 @@ # ruff: noqa: N806 import time +from collections import deque from copy import deepcopy import einops @@ -9,7 +10,7 @@ import torch import torch.nn as nn import lerobot.common.policies.tdmpc.helper as h -from lerobot.common.policies.abstract import AbstractPolicy +from lerobot.common.policies.utils import populate_queues from lerobot.common.utils import get_safe_torch_device FIRST_FRAME = 0 @@ -87,16 +88,18 @@ class TOLD(nn.Module): return torch.min(Q1, Q2) if return_type == "min" else (Q1 + Q2) / 2 -class TDMPCPolicy(AbstractPolicy): +class TDMPCPolicy(nn.Module): """Implementation of TD-MPC learning + inference.""" name = "tdmpc" - def __init__(self, cfg, device): - super().__init__(None) + def __init__(self, cfg, n_obs_steps, n_action_steps, device): + super().__init__() self.action_dim = cfg.action_dim self.cfg = cfg + self.n_obs_steps = n_obs_steps + self.n_action_steps = n_action_steps self.device = get_safe_torch_device(device) self.std = h.linear_schedule(cfg.std_schedule, 0) self.model = TOLD(cfg) @@ -128,20 +131,42 @@ class TDMPCPolicy(AbstractPolicy): self.model.load_state_dict(d["model"]) self.model_target.load_state_dict(d["model_target"]) - @torch.no_grad() - def select_actions(self, observation, step_count): - if observation["image"].shape[0] != 1: - raise NotImplementedError("Batch size > 1 not handled") - - t0 = step_count.item() == 0 - - obs = { - # TODO(rcadene): remove contiguous hack... - "rgb": observation["image"].contiguous(), - "state": observation["state"].contiguous(), + def reset(self): + """ + Clear observation and action queues. Should be called on `env.reset()` + """ + self._queues = { + "observation.image": deque(maxlen=self.n_obs_steps), + "observation.state": deque(maxlen=self.n_obs_steps), + "action": deque(maxlen=self.n_action_steps), } - # Note: unsqueeze needed because `act` still uses non-batch logic. 
- action = self.act(obs, t0=t0, step=self.step.item()).unsqueeze(0) + + @torch.no_grad() + def select_action(self, batch, step): + assert "observation.image" in batch + assert "observation.state" in batch + assert len(batch) == 2 + + self._queues = populate_queues(self._queues, batch) + + t0 = step == 0 + + if len(self._queues["action"]) == 0: + batch = {key: torch.stack(list(self._queues[key]), dim=1) for key in batch} + + actions = [] + batch_size = batch["observation.image"].shape[0] + for i in range(batch_size): + obs = { + "rgb": batch["observation.image"][[i]], + "state": batch["observation.state"][[i]], + } + # Note: unsqueeze needed because `act` still uses non-batch logic. + action = self.act(obs, t0=t0, step=self.step) + actions.append(action) + action = torch.stack(actions) + + action = self._queues["action"].popleft() return action @torch.no_grad() @@ -293,97 +318,97 @@ class TDMPCPolicy(AbstractPolicy): td_target = reward + self.cfg.discount * mask * next_v return td_target - def update(self, replay_buffer, step, demo_buffer=None): + def forward(self, batch, step): """Main update function.
Corresponds to one iteration of the model learning.""" start_time = time.time() - num_slices = self.cfg.batch_size - batch_size = self.cfg.horizon * num_slices + # num_slices = self.cfg.batch_size + # batch_size = self.cfg.horizon * num_slices - if demo_buffer is None: - demo_batch_size = 0 - else: - # Update oversampling ratio - demo_pc_batch = h.linear_schedule(self.cfg.demo_schedule, step) - demo_num_slices = int(demo_pc_batch * self.batch_size) - demo_batch_size = self.cfg.horizon * demo_num_slices - batch_size -= demo_batch_size - num_slices -= demo_num_slices - replay_buffer._sampler.num_slices = num_slices - demo_buffer._sampler.num_slices = demo_num_slices + # if demo_buffer is None: + # demo_batch_size = 0 + # else: + # # Update oversampling ratio + # demo_pc_batch = h.linear_schedule(self.cfg.demo_schedule, step) + # demo_num_slices = int(demo_pc_batch * self.batch_size) + # demo_batch_size = self.cfg.horizon * demo_num_slices + # batch_size -= demo_batch_size + # num_slices -= demo_num_slices + # replay_buffer._sampler.num_slices = num_slices + # demo_buffer._sampler.num_slices = demo_num_slices - assert demo_batch_size % self.cfg.horizon == 0 - assert demo_batch_size % demo_num_slices == 0 + # assert demo_batch_size % self.cfg.horizon == 0 + # assert demo_batch_size % demo_num_slices == 0 - assert batch_size % self.cfg.horizon == 0 - assert batch_size % num_slices == 0 + # assert batch_size % self.cfg.horizon == 0 + # assert batch_size % num_slices == 0 - # Sample from interaction dataset + # # Sample from interaction dataset - def process_batch(batch, horizon, num_slices): - # trajectory t = 256, horizon h = 5 - # (t h) ... -> h t ... - batch = batch.reshape(num_slices, horizon).transpose(1, 0).contiguous() + # def process_batch(batch, horizon, num_slices): + # # trajectory t = 256, horizon h = 5 + # # (t h) ... -> h t ... 
+ # batch = batch.reshape(num_slices, horizon).transpose(1, 0).contiguous() - obs = { - "rgb": batch["observation", "image"][FIRST_FRAME].to(self.device, non_blocking=True), - "state": batch["observation", "state"][FIRST_FRAME].to(self.device, non_blocking=True), - } - action = batch["action"].to(self.device, non_blocking=True) - next_obses = { - "rgb": batch["next", "observation", "image"].to(self.device, non_blocking=True), - "state": batch["next", "observation", "state"].to(self.device, non_blocking=True), - } - reward = batch["next", "reward"].to(self.device, non_blocking=True) + # obs = { + # "rgb": batch["observation", "image"][FIRST_FRAME].to(self.device, non_blocking=True), + # "state": batch["observation", "state"][FIRST_FRAME].to(self.device, non_blocking=True), + # } + # action = batch["action"].to(self.device, non_blocking=True) + # next_obses = { + # "rgb": batch["next", "observation", "image"].to(self.device, non_blocking=True), + # "state": batch["next", "observation", "state"].to(self.device, non_blocking=True), + # } + # reward = batch["next", "reward"].to(self.device, non_blocking=True) - idxs = batch["index"][FIRST_FRAME].to(self.device, non_blocking=True) - weights = batch["_weight"][FIRST_FRAME, :, None].to(self.device, non_blocking=True) + # idxs = batch["index"][FIRST_FRAME].to(self.device, non_blocking=True) + # weights = batch["_weight"][FIRST_FRAME, :, None].to(self.device, non_blocking=True) - # TODO(rcadene): rearrange directly in offline dataset - if reward.ndim == 2: - reward = einops.rearrange(reward, "h t -> h t 1") + # # TODO(rcadene): rearrange directly in offline dataset + # if reward.ndim == 2: + # reward = einops.rearrange(reward, "h t -> h t 1") - assert reward.ndim == 3 - assert reward.shape == (horizon, num_slices, 1) - # We dont use `batch["next", "done"]` since it only indicates the end of an - # episode, but not the end of the trajectory of an episode. 
- # Neither does `batch["next", "terminated"]` - done = torch.zeros_like(reward, dtype=torch.bool, device=reward.device) - mask = torch.ones_like(reward, dtype=torch.bool, device=reward.device) - return obs, action, next_obses, reward, mask, done, idxs, weights + # assert reward.ndim == 3 + # assert reward.shape == (horizon, num_slices, 1) + # # We dont use `batch["next", "done"]` since it only indicates the end of an + # # episode, but not the end of the trajectory of an episode. + # # Neither does `batch["next", "terminated"]` + # done = torch.zeros_like(reward, dtype=torch.bool, device=reward.device) + # mask = torch.ones_like(reward, dtype=torch.bool, device=reward.device) + # return obs, action, next_obses, reward, mask, done, idxs, weights - batch = replay_buffer.sample(batch_size) if self.cfg.balanced_sampling else replay_buffer.sample() + # batch = replay_buffer.sample(batch_size) if self.cfg.balanced_sampling else replay_buffer.sample() - obs, action, next_obses, reward, mask, done, idxs, weights = process_batch( - batch, self.cfg.horizon, num_slices - ) + # obs, action, next_obses, reward, mask, done, idxs, weights = process_batch( + # batch, self.cfg.horizon, num_slices + # ) # Sample from demonstration dataset - if demo_batch_size > 0: - demo_batch = demo_buffer.sample(demo_batch_size) - ( - demo_obs, - demo_action, - demo_next_obses, - demo_reward, - demo_mask, - demo_done, - demo_idxs, - demo_weights, - ) = process_batch(demo_batch, self.cfg.horizon, demo_num_slices) + # if demo_batch_size > 0: + # demo_batch = demo_buffer.sample(demo_batch_size) + # ( + # demo_obs, + # demo_action, + # demo_next_obses, + # demo_reward, + # demo_mask, + # demo_done, + # demo_idxs, + # demo_weights, + # ) = process_batch(demo_batch, self.cfg.horizon, demo_num_slices) - if isinstance(obs, dict): - obs = {k: torch.cat([obs[k], demo_obs[k]]) for k in obs} - next_obses = {k: torch.cat([next_obses[k], demo_next_obses[k]], dim=1) for k in next_obses} - else: - obs = 
torch.cat([obs, demo_obs]) - next_obses = torch.cat([next_obses, demo_next_obses], dim=1) - action = torch.cat([action, demo_action], dim=1) - reward = torch.cat([reward, demo_reward], dim=1) - mask = torch.cat([mask, demo_mask], dim=1) - done = torch.cat([done, demo_done], dim=1) - idxs = torch.cat([idxs, demo_idxs]) - weights = torch.cat([weights, demo_weights]) + # if isinstance(obs, dict): + # obs = {k: torch.cat([obs[k], demo_obs[k]]) for k in obs} + # next_obses = {k: torch.cat([next_obses[k], demo_next_obses[k]], dim=1) for k in next_obses} + # else: + # obs = torch.cat([obs, demo_obs]) + # next_obses = torch.cat([next_obses, demo_next_obses], dim=1) + # action = torch.cat([action, demo_action], dim=1) + # reward = torch.cat([reward, demo_reward], dim=1) + # mask = torch.cat([mask, demo_mask], dim=1) + # done = torch.cat([done, demo_done], dim=1) + # idxs = torch.cat([idxs, demo_idxs]) + # weights = torch.cat([weights, demo_weights]) # Apply augmentations aug_tf = h.aug(self.cfg) diff --git a/lerobot/configs/env/simxarm.yaml b/lerobot/configs/env/simxarm.yaml index f79db8f7..843f80c6 100644 --- a/lerobot/configs/env/simxarm.yaml +++ b/lerobot/configs/env/simxarm.yaml @@ -17,7 +17,7 @@ env: from_pixels: True pixels_only: False image_size: 84 - action_repeat: 2 + # action_repeat: 2 # we can remove if policy has n_action_steps=2 episode_length: 25 fps: ${fps} diff --git a/lerobot/configs/policy/tdmpc.yaml b/lerobot/configs/policy/tdmpc.yaml index ff0e6b04..5d5d8b62 100644 --- a/lerobot/configs/policy/tdmpc.yaml +++ b/lerobot/configs/policy/tdmpc.yaml @@ -1,6 +1,6 @@ # @package _global_ -n_action_steps: 1 +n_action_steps: 2 n_obs_steps: 1 policy: diff --git a/sbatch.sh b/sbatch.sh index cb5b285a..c08f7055 100644 --- a/sbatch.sh +++ b/sbatch.sh @@ -6,7 +6,7 @@ #SBATCH --time=2-00:00:00 #SBATCH --output=/home/rcadene/slurm/%j.out #SBATCH --error=/home/rcadene/slurm/%j.err -#SBATCH --qos=medium +#SBATCH --qos=low #SBATCH --mail-user=re.cadene@gmail.com #SBATCH 
--mail-type=ALL @@ -20,4 +20,6 @@ source ~/.bashrc #conda activate fowm conda activate lerobot +export DATA_DIR="data" + srun $CMD From 9c28ac8aa424d5e3b51004883e88cd35954329f4 Mon Sep 17 00:00:00 2001 From: Alexander Soare Date: Fri, 5 Apr 2024 15:25:11 +0100 Subject: [PATCH 15/80] re-add pre-commit check --- .pre-commit-config.yaml | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index da78b677..765b678a 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -23,3 +23,11 @@ repos: - id: ruff args: [--fix] - id: ruff-format + - repo: https://github.com/python-poetry/poetry + rev: 1.8.0 + hooks: + - id: poetry-check + - id: poetry-lock + args: + - "--check" + - "--no-update" From 26602269cd7760e8eb9d54eafd01d7828f17c126 Mon Sep 17 00:00:00 2001 From: Cadene Date: Fri, 5 Apr 2024 16:21:07 +0000 Subject: [PATCH 16/80] test_envs.py are passing, remove simxarm and pusht directories --- lerobot/common/envs/abstract.py | 92 ----- lerobot/common/envs/factory.py | 19 +- lerobot/common/envs/pusht/env.py | 245 ------------ lerobot/common/envs/pusht/pusht_env.py | 378 ------------------ lerobot/common/envs/pusht/pusht_image_env.py | 41 -- lerobot/common/envs/pusht/pymunk_override.py | 244 ----------- lerobot/common/envs/simxarm/env.py | 237 ----------- .../common/envs/simxarm/simxarm/__init__.py | 166 -------- .../envs/simxarm/simxarm/tasks/__init__.py | 0 .../simxarm/simxarm/tasks/assets/lift.xml | 53 --- .../simxarm/tasks/assets/mesh/base_link.stl | 3 - .../simxarm/tasks/assets/mesh/block_inner.stl | 3 - .../tasks/assets/mesh/block_inner2.stl | 3 - .../simxarm/tasks/assets/mesh/block_outer.stl | 3 - .../simxarm/tasks/assets/mesh/left_finger.stl | 3 - .../tasks/assets/mesh/left_inner_knuckle.stl | 3 - .../tasks/assets/mesh/left_outer_knuckle.stl | 3 - .../simxarm/tasks/assets/mesh/link1.stl | 3 - .../simxarm/tasks/assets/mesh/link2.stl | 3 - .../simxarm/tasks/assets/mesh/link3.stl | 3 - 
.../simxarm/tasks/assets/mesh/link4.stl | 3 - .../simxarm/tasks/assets/mesh/link5.stl | 3 - .../simxarm/tasks/assets/mesh/link6.stl | 3 - .../simxarm/tasks/assets/mesh/link7.stl | 3 - .../simxarm/tasks/assets/mesh/link_base.stl | 3 - .../tasks/assets/mesh/right_finger.stl | 3 - .../tasks/assets/mesh/right_inner_knuckle.stl | 3 - .../tasks/assets/mesh/right_outer_knuckle.stl | 3 - .../simxarm/tasks/assets/peg_in_box.xml | 74 ---- .../simxarm/simxarm/tasks/assets/push.xml | 54 --- .../simxarm/simxarm/tasks/assets/reach.xml | 48 --- .../simxarm/simxarm/tasks/assets/shared.xml | 51 --- .../simxarm/simxarm/tasks/assets/xarm.xml | 88 ---- .../common/envs/simxarm/simxarm/tasks/base.py | 145 ------- .../common/envs/simxarm/simxarm/tasks/lift.py | 100 ----- .../envs/simxarm/simxarm/tasks/mocap.py | 67 ---- .../envs/simxarm/simxarm/tasks/peg_in_box.py | 86 ---- .../common/envs/simxarm/simxarm/tasks/push.py | 78 ---- .../envs/simxarm/simxarm/tasks/reach.py | 44 -- lerobot/common/envs/utils.py | 32 ++ lerobot/scripts/eval.py | 34 +- tests/test_envs.py | 100 ++--- 42 files changed, 86 insertions(+), 2444 deletions(-) delete mode 100644 lerobot/common/envs/abstract.py delete mode 100644 lerobot/common/envs/pusht/env.py delete mode 100644 lerobot/common/envs/pusht/pusht_env.py delete mode 100644 lerobot/common/envs/pusht/pusht_image_env.py delete mode 100644 lerobot/common/envs/pusht/pymunk_override.py delete mode 100644 lerobot/common/envs/simxarm/env.py delete mode 100644 lerobot/common/envs/simxarm/simxarm/__init__.py delete mode 100644 lerobot/common/envs/simxarm/simxarm/tasks/__init__.py delete mode 100644 lerobot/common/envs/simxarm/simxarm/tasks/assets/lift.xml delete mode 100644 lerobot/common/envs/simxarm/simxarm/tasks/assets/mesh/base_link.stl delete mode 100644 lerobot/common/envs/simxarm/simxarm/tasks/assets/mesh/block_inner.stl delete mode 100644 lerobot/common/envs/simxarm/simxarm/tasks/assets/mesh/block_inner2.stl delete mode 100644 
lerobot/common/envs/simxarm/simxarm/tasks/assets/mesh/block_outer.stl delete mode 100644 lerobot/common/envs/simxarm/simxarm/tasks/assets/mesh/left_finger.stl delete mode 100644 lerobot/common/envs/simxarm/simxarm/tasks/assets/mesh/left_inner_knuckle.stl delete mode 100644 lerobot/common/envs/simxarm/simxarm/tasks/assets/mesh/left_outer_knuckle.stl delete mode 100644 lerobot/common/envs/simxarm/simxarm/tasks/assets/mesh/link1.stl delete mode 100644 lerobot/common/envs/simxarm/simxarm/tasks/assets/mesh/link2.stl delete mode 100644 lerobot/common/envs/simxarm/simxarm/tasks/assets/mesh/link3.stl delete mode 100644 lerobot/common/envs/simxarm/simxarm/tasks/assets/mesh/link4.stl delete mode 100644 lerobot/common/envs/simxarm/simxarm/tasks/assets/mesh/link5.stl delete mode 100644 lerobot/common/envs/simxarm/simxarm/tasks/assets/mesh/link6.stl delete mode 100644 lerobot/common/envs/simxarm/simxarm/tasks/assets/mesh/link7.stl delete mode 100644 lerobot/common/envs/simxarm/simxarm/tasks/assets/mesh/link_base.stl delete mode 100644 lerobot/common/envs/simxarm/simxarm/tasks/assets/mesh/right_finger.stl delete mode 100644 lerobot/common/envs/simxarm/simxarm/tasks/assets/mesh/right_inner_knuckle.stl delete mode 100644 lerobot/common/envs/simxarm/simxarm/tasks/assets/mesh/right_outer_knuckle.stl delete mode 100644 lerobot/common/envs/simxarm/simxarm/tasks/assets/peg_in_box.xml delete mode 100644 lerobot/common/envs/simxarm/simxarm/tasks/assets/push.xml delete mode 100644 lerobot/common/envs/simxarm/simxarm/tasks/assets/reach.xml delete mode 100644 lerobot/common/envs/simxarm/simxarm/tasks/assets/shared.xml delete mode 100644 lerobot/common/envs/simxarm/simxarm/tasks/assets/xarm.xml delete mode 100644 lerobot/common/envs/simxarm/simxarm/tasks/base.py delete mode 100644 lerobot/common/envs/simxarm/simxarm/tasks/lift.py delete mode 100644 lerobot/common/envs/simxarm/simxarm/tasks/mocap.py delete mode 100644 lerobot/common/envs/simxarm/simxarm/tasks/peg_in_box.py delete mode 100644 
lerobot/common/envs/simxarm/simxarm/tasks/push.py delete mode 100644 lerobot/common/envs/simxarm/simxarm/tasks/reach.py create mode 100644 lerobot/common/envs/utils.py diff --git a/lerobot/common/envs/abstract.py b/lerobot/common/envs/abstract.py deleted file mode 100644 index ea5ce3da..00000000 --- a/lerobot/common/envs/abstract.py +++ /dev/null @@ -1,92 +0,0 @@ -from collections import deque -from typing import Optional - -from tensordict import TensorDict -from torchrl.envs import EnvBase - -from lerobot.common.utils import set_global_seed - - -class AbstractEnv(EnvBase): - """ - Note: - When implementing a concrete class (e.g. `AlohaDataset`, `PushtEnv`, `DiffusionPolicy`), you need to: - 1. set the required class attributes: - - for classes inheriting from `AbstractDataset`: `available_datasets` - - for classes inheriting from `AbstractEnv`: `name`, `available_tasks` - - for classes inheriting from `AbstractPolicy`: `name` - 2. update variables in `lerobot/__init__.py` (e.g. `available_envs`, `available_datasets_per_envs`, `available_policies`) - 3. update variables in `tests/test_available.py` by importing your new class - """ - - name: str | None = None # same name should be used to instantiate the environment in factory.py - available_tasks: list[str] | None = None # for instance: sim_insertion, sim_transfer_cube, pusht, lift - - def __init__( - self, - task, - frame_skip: int = 1, - from_pixels: bool = False, - pixels_only: bool = False, - image_size=None, - seed=1337, - device="cpu", - num_prev_obs=1, - num_prev_action=0, - ): - super().__init__(device=device, batch_size=[]) - assert self.name is not None, "Subclasses of `AbstractEnv` should set the `name` class attribute." - assert ( - self.available_tasks is not None - ), "Subclasses of `AbstractEnv` should set the `available_tasks` class attribute." - assert ( - task in self.available_tasks - ), f"The provided task ({task}) is not on the list of available tasks {self.available_tasks}." 
- - self.task = task - self.frame_skip = frame_skip - self.from_pixels = from_pixels - self.pixels_only = pixels_only - self.image_size = image_size - self.num_prev_obs = num_prev_obs - self.num_prev_action = num_prev_action - - if pixels_only: - assert from_pixels - if from_pixels: - assert image_size - - self._make_env() - self._make_spec() - - # self._next_seed will be used for the next reset. It is recommended that when self.set_seed is called - # you store the return value in self._next_seed (it will be a new randomly generated seed). - self._next_seed = seed - # Don't store the result of this in self._next_seed, as we want to make sure that the first time - # self._reset is called, we use seed. - self.set_seed(seed) - - if self.num_prev_obs > 0: - self._prev_obs_image_queue = deque(maxlen=self.num_prev_obs) - self._prev_obs_state_queue = deque(maxlen=self.num_prev_obs) - if self.num_prev_action > 0: - raise NotImplementedError() - # self._prev_action_queue = deque(maxlen=self.num_prev_action) - - def render(self, mode="rgb_array", width=640, height=480): - raise NotImplementedError("Abstract method") - - def _reset(self, tensordict: Optional[TensorDict] = None): - raise NotImplementedError("Abstract method") - - def _step(self, tensordict: TensorDict): - raise NotImplementedError("Abstract method") - - def _make_env(self): - raise NotImplementedError("Abstract method") - - def _make_spec(self): - raise NotImplementedError("Abstract method") - - def _set_seed(self, seed: Optional[int]): - set_global_seed(seed) diff --git a/lerobot/common/envs/factory.py b/lerobot/common/envs/factory.py index ab3e9294..4ddb81a2 100644 --- a/lerobot/common/envs/factory.py +++ b/lerobot/common/envs/factory.py @@ -6,32 +6,27 @@ def make_env(cfg, num_parallel_envs=0) -> gym.Env | gym.vector.SyncVectorEnv: Note: When `num_parallel_envs > 0`, this function returns a `SyncVectorEnv` which takes batched action as input and returns batched observation, reward, terminated, truncated of 
`num_parallel_envs` items. """ - kwargs = {} + kwargs = { + "obs_type": "pixels_agent_pos", + "max_episode_steps": cfg.env.episode_length, + "visualization_width": 384, + "visualization_height": 384, + } if cfg.env.name == "simxarm": import gym_xarm # noqa: F401 assert cfg.env.task == "lift" - env_fn = lambda: gym.make( + env_fn = lambda: gym.make( # noqa: E731 "gym_xarm/XarmLift-v0", - render_mode="rgb_array", - max_episode_steps=cfg.env.episode_length, **kwargs, ) elif cfg.env.name == "pusht": import gym_pusht # noqa: F401 # assert kwargs["seed"] > 200, "Seed 0-200 are used for the demonstration dataset, so we don't want to seed the eval env with this range." - kwargs.update( - { - "obs_type": "pixels_agent_pos", - "render_action": False, - } - ) env_fn = lambda: gym.make( # noqa: E731 "gym_pusht/PushTPixels-v0", - render_mode="rgb_array", - max_episode_steps=cfg.env.episode_length, **kwargs, ) elif cfg.env.name == "aloha": diff --git a/lerobot/common/envs/pusht/env.py b/lerobot/common/envs/pusht/env.py deleted file mode 100644 index 5f7fb2c3..00000000 --- a/lerobot/common/envs/pusht/env.py +++ /dev/null @@ -1,245 +0,0 @@ -import importlib -import logging -from collections import deque -from typing import Optional - -import cv2 -import numpy as np -import torch -from tensordict import TensorDict -from torchrl.data.tensor_specs import ( - BoundedTensorSpec, - CompositeSpec, - DiscreteTensorSpec, - UnboundedContinuousTensorSpec, -) -from torchrl.envs.libs.gym import _gym_to_torchrl_spec_transform - -from lerobot.common.envs.abstract import AbstractEnv -from lerobot.common.utils import set_global_seed - -_has_gym = importlib.util.find_spec("gymnasium") is not None - - -class PushtEnv(AbstractEnv): - name = "pusht" - available_tasks = ["pusht"] - _reset_warning_issued = False - - def __init__( - self, - task="pusht", - frame_skip: int = 1, - from_pixels: bool = False, - pixels_only: bool = False, - image_size=None, - seed=1337, - device="cpu", - num_prev_obs=1, - 
num_prev_action=0, - ): - super().__init__( - task=task, - frame_skip=frame_skip, - from_pixels=from_pixels, - pixels_only=pixels_only, - image_size=image_size, - seed=seed, - device=device, - num_prev_obs=num_prev_obs, - num_prev_action=num_prev_action, - ) - - def _make_env(self): - if not _has_gym: - raise ImportError("Cannot import gymnasium.") - - # TODO(rcadene) (PushTEnv is similar to PushTImageEnv, but without the image rendering, it's faster to iterate on) - # from lerobot.common.envs.pusht.pusht_env import PushTEnv - - if not self.from_pixels: - raise NotImplementedError("Use PushTEnv, instead of PushTImageEnv") - from lerobot.common.envs.pusht.pusht_image_env import PushTImageEnv - - self._env = PushTImageEnv(render_size=self.image_size) - - def render(self, mode="rgb_array", width=96, height=96, with_marker=True): - """ - with_marker adds a cursor showing the targeted action for the controller. - """ - if width != height: - raise NotImplementedError() - tmp = self._env.render_size - if width != self._env.render_size: - self._env.render_cache = None - self._env.render_size = width - out = self._env.render(mode).copy() - if with_marker and self._env.latest_action is not None: - action = np.array(self._env.latest_action) - coord = (action / 512 * self._env.render_size).astype(np.int32) - marker_size = int(8 / 96 * self._env.render_size) - thickness = int(1 / 96 * self._env.render_size) - cv2.drawMarker( - out, - coord, - color=(255, 0, 0), - markerType=cv2.MARKER_CROSS, - markerSize=marker_size, - thickness=thickness, - ) - self._env.render_size = tmp - return out - - def _format_raw_obs(self, raw_obs): - if self.from_pixels: - image = torch.from_numpy(raw_obs["image"]) - obs = {"image": image} - - if not self.pixels_only: - obs["state"] = torch.from_numpy(raw_obs["agent_pos"]).type(torch.float32) - else: - # TODO: - obs = {"state": torch.from_numpy(raw_obs["observation"]).type(torch.float32)} - - return obs - - def _reset(self, tensordict: 
Optional[TensorDict] = None): - if tensordict is not None and not PushtEnv._reset_warning_issued: - logging.warning(f"{self.__class__.__name__}._reset ignores the provided tensordict.") - PushtEnv._reset_warning_issued = True - - # Seed the environment and update the seed to be used for the next reset. - self._next_seed = self.set_seed(self._next_seed) - raw_obs = self._env.reset() - - obs = self._format_raw_obs(raw_obs) - - if self.num_prev_obs > 0: - stacked_obs = {} - if "image" in obs: - self._prev_obs_image_queue = deque( - [obs["image"]] * (self.num_prev_obs + 1), maxlen=(self.num_prev_obs + 1) - ) - stacked_obs["image"] = torch.stack(list(self._prev_obs_image_queue)) - if "state" in obs: - self._prev_obs_state_queue = deque( - [obs["state"]] * (self.num_prev_obs + 1), maxlen=(self.num_prev_obs + 1) - ) - stacked_obs["state"] = torch.stack(list(self._prev_obs_state_queue)) - obs = stacked_obs - - td = TensorDict( - { - "observation": TensorDict(obs, batch_size=[]), - "done": torch.tensor([False], dtype=torch.bool), - }, - batch_size=[], - ) - - return td - - def _step(self, tensordict: TensorDict): - td = tensordict - action = td["action"].numpy() - assert action.ndim == 1 - # TODO(rcadene): add info["is_success"] and info["success"] ? 
- - raw_obs, reward, done, info = self._env.step(action) - - obs = self._format_raw_obs(raw_obs) - - if self.num_prev_obs > 0: - stacked_obs = {} - if "image" in obs: - self._prev_obs_image_queue.append(obs["image"]) - stacked_obs["image"] = torch.stack(list(self._prev_obs_image_queue)) - if "state" in obs: - self._prev_obs_state_queue.append(obs["state"]) - stacked_obs["state"] = torch.stack(list(self._prev_obs_state_queue)) - obs = stacked_obs - - td = TensorDict( - { - "observation": TensorDict(obs, batch_size=[]), - "reward": torch.tensor([reward], dtype=torch.float32), - # success and done are true when coverage > self.success_threshold in env - "done": torch.tensor([done], dtype=torch.bool), - "success": torch.tensor([done], dtype=torch.bool), - }, - batch_size=[], - ) - return td - - def _make_spec(self): - obs = {} - if self.from_pixels: - image_shape = (3, self.image_size, self.image_size) - if self.num_prev_obs > 0: - image_shape = (self.num_prev_obs + 1, *image_shape) - - obs["image"] = BoundedTensorSpec( - low=0, - high=255, - shape=image_shape, - dtype=torch.uint8, - device=self.device, - ) - if not self.pixels_only: - state_shape = self._env.observation_space["agent_pos"].shape - if self.num_prev_obs > 0: - state_shape = (self.num_prev_obs + 1, *state_shape) - - obs["state"] = BoundedTensorSpec( - low=0, - high=512, - shape=state_shape, - dtype=torch.float32, - device=self.device, - ) - else: - # TODO(rcadene): add observation_space achieved_goal and desired_goal? 
- state_shape = self._env.observation_space["observation"].shape - if self.num_prev_obs > 0: - state_shape = (self.num_prev_obs + 1, *state_shape) - - obs["state"] = UnboundedContinuousTensorSpec( - # TODO: - shape=state_shape, - dtype=torch.float32, - device=self.device, - ) - self.observation_spec = CompositeSpec({"observation": obs}) - - self.action_spec = _gym_to_torchrl_spec_transform( - self._env.action_space, - device=self.device, - ) - - self.reward_spec = UnboundedContinuousTensorSpec( - shape=(1,), - dtype=torch.float32, - device=self.device, - ) - - self.done_spec = CompositeSpec( - { - "done": DiscreteTensorSpec( - 2, - shape=(1,), - dtype=torch.bool, - device=self.device, - ), - "success": DiscreteTensorSpec( - 2, - shape=(1,), - dtype=torch.bool, - device=self.device, - ), - } - ) - - def _set_seed(self, seed: Optional[int]): - # Set global seed. - set_global_seed(seed) - # Set PushTImageEnv seed as it relies on it's own internal _seed attribute. - self._env.seed(seed) diff --git a/lerobot/common/envs/pusht/pusht_env.py b/lerobot/common/envs/pusht/pusht_env.py deleted file mode 100644 index 6ef70aec..00000000 --- a/lerobot/common/envs/pusht/pusht_env.py +++ /dev/null @@ -1,378 +0,0 @@ -import collections - -import cv2 -import gymnasium as gym -import numpy as np -import pygame -import pymunk -import pymunk.pygame_util -import shapely.geometry as sg -import skimage.transform as st -from gymnasium import spaces -from pymunk.vec2d import Vec2d - -from lerobot.common.envs.pusht.pymunk_override import DrawOptions - - -def pymunk_to_shapely(body, shapes): - geoms = [] - for shape in shapes: - if isinstance(shape, pymunk.shapes.Poly): - verts = [body.local_to_world(v) for v in shape.get_vertices()] - verts += [verts[0]] - geoms.append(sg.Polygon(verts)) - else: - raise RuntimeError(f"Unsupported shape type {type(shape)}") - geom = sg.MultiPolygon(geoms) - return geom - - -class PushTEnv(gym.Env): - metadata = {"render.modes": ["human", "rgb_array"], 
"video.frames_per_second": 10} - reward_range = (0.0, 1.0) - - def __init__( - self, - legacy=True, # compatibility with original - block_cog=None, - damping=None, - render_action=True, - render_size=96, - reset_to_state=None, - ): - self._seed = None - self.seed() - self.window_size = ws = 512 # The size of the PyGame window - self.render_size = render_size - self.sim_hz = 100 - # Local controller params. - self.k_p, self.k_v = 100, 20 # PD control.z - self.control_hz = self.metadata["video.frames_per_second"] - # legcay set_state for data compatibility - self.legacy = legacy - - # agent_pos, block_pos, block_angle - self.observation_space = spaces.Box( - low=np.array([0, 0, 0, 0, 0], dtype=np.float64), - high=np.array([ws, ws, ws, ws, np.pi * 2], dtype=np.float64), - shape=(5,), - dtype=np.float64, - ) - - # positional goal for agent - self.action_space = spaces.Box( - low=np.array([0, 0], dtype=np.float64), - high=np.array([ws, ws], dtype=np.float64), - shape=(2,), - dtype=np.float64, - ) - - self.block_cog = block_cog - self.damping = damping - self.render_action = render_action - - """ - If human-rendering is used, `self.window` will be a reference - to the window that we draw to. `self.clock` will be a clock that is used - to ensure that the environment is rendered at the correct framerate in - human-mode. They will remain `None` until human-mode is used for the - first time. 
- """ - self.window = None - self.clock = None - self.screen = None - - self.space = None - self.teleop = None - self.render_buffer = None - self.latest_action = None - self.reset_to_state = reset_to_state - - def reset(self): - seed = self._seed - self._setup() - if self.block_cog is not None: - self.block.center_of_gravity = self.block_cog - if self.damping is not None: - self.space.damping = self.damping - - # use legacy RandomState for compatibility - state = self.reset_to_state - if state is None: - rs = np.random.RandomState(seed=seed) - state = np.array( - [ - rs.randint(50, 450), - rs.randint(50, 450), - rs.randint(100, 400), - rs.randint(100, 400), - rs.randn() * 2 * np.pi - np.pi, - ] - ) - self._set_state(state) - - observation = self._get_obs() - return observation - - def step(self, action): - dt = 1.0 / self.sim_hz - self.n_contact_points = 0 - n_steps = self.sim_hz // self.control_hz - if action is not None: - self.latest_action = action - for _ in range(n_steps): - # Step PD control. - # self.agent.velocity = self.k_p * (act - self.agent.position) # P control works too. - acceleration = self.k_p * (action - self.agent.position) + self.k_v * ( - Vec2d(0, 0) - self.agent.velocity - ) - self.agent.velocity += acceleration * dt - - # Step physics. 
- self.space.step(dt) - - # compute reward - goal_body = self._get_goal_pose_body(self.goal_pose) - goal_geom = pymunk_to_shapely(goal_body, self.block.shapes) - block_geom = pymunk_to_shapely(self.block, self.block.shapes) - - intersection_area = goal_geom.intersection(block_geom).area - goal_area = goal_geom.area - coverage = intersection_area / goal_area - reward = np.clip(coverage / self.success_threshold, 0, 1) - done = coverage > self.success_threshold - - observation = self._get_obs() - info = self._get_info() - - return observation, reward, done, info - - def render(self, mode): - return self._render_frame(mode) - - def teleop_agent(self): - TeleopAgent = collections.namedtuple("TeleopAgent", ["act"]) - - def act(obs): - act = None - mouse_position = pymunk.pygame_util.from_pygame(Vec2d(*pygame.mouse.get_pos()), self.screen) - if self.teleop or (mouse_position - self.agent.position).length < 30: - self.teleop = True - act = mouse_position - return act - - return TeleopAgent(act) - - def _get_obs(self): - obs = np.array( - tuple(self.agent.position) + tuple(self.block.position) + (self.block.angle % (2 * np.pi),) - ) - return obs - - def _get_goal_pose_body(self, pose): - mass = 1 - inertia = pymunk.moment_for_box(mass, (50, 100)) - body = pymunk.Body(mass, inertia) - # preserving the legacy assignment order for compatibility - # the order here doesn't matter somehow, maybe because CoM is aligned with body origin - body.position = pose[:2].tolist() - body.angle = pose[2] - return body - - def _get_info(self): - n_steps = self.sim_hz // self.control_hz - n_contact_points_per_step = int(np.ceil(self.n_contact_points / n_steps)) - info = { - "pos_agent": np.array(self.agent.position), - "vel_agent": np.array(self.agent.velocity), - "block_pose": np.array(list(self.block.position) + [self.block.angle]), - "goal_pose": self.goal_pose, - "n_contacts": n_contact_points_per_step, - } - return info - - def _render_frame(self, mode): - if self.window is None and mode 
== "human": - pygame.init() - pygame.display.init() - self.window = pygame.display.set_mode((self.window_size, self.window_size)) - if self.clock is None and mode == "human": - self.clock = pygame.time.Clock() - - canvas = pygame.Surface((self.window_size, self.window_size)) - canvas.fill((255, 255, 255)) - self.screen = canvas - - draw_options = DrawOptions(canvas) - - # Draw goal pose. - goal_body = self._get_goal_pose_body(self.goal_pose) - for shape in self.block.shapes: - goal_points = [ - pymunk.pygame_util.to_pygame(goal_body.local_to_world(v), draw_options.surface) - for v in shape.get_vertices() - ] - goal_points += [goal_points[0]] - pygame.draw.polygon(canvas, self.goal_color, goal_points) - - # Draw agent and block. - self.space.debug_draw(draw_options) - - if mode == "human": - # The following line copies our drawings from `canvas` to the visible window - self.window.blit(canvas, canvas.get_rect()) - pygame.event.pump() - pygame.display.update() - - # the clock is already ticked during in step for "human" - - img = np.transpose(np.array(pygame.surfarray.pixels3d(canvas)), axes=(1, 0, 2)) - img = cv2.resize(img, (self.render_size, self.render_size)) - if self.render_action and self.latest_action is not None: - action = np.array(self.latest_action) - coord = (action / 512 * 96).astype(np.int32) - marker_size = int(8 / 96 * self.render_size) - thickness = int(1 / 96 * self.render_size) - cv2.drawMarker( - img, - coord, - color=(255, 0, 0), - markerType=cv2.MARKER_CROSS, - markerSize=marker_size, - thickness=thickness, - ) - return img - - def close(self): - if self.window is not None: - pygame.display.quit() - pygame.quit() - - def seed(self, seed=None): - if seed is None: - seed = np.random.randint(0, 25536) - self._seed = seed - self.np_random = np.random.default_rng(seed) - - def _handle_collision(self, arbiter, space, data): - self.n_contact_points += len(arbiter.contact_point_set.points) - - def _set_state(self, state): - if isinstance(state, 
np.ndarray): - state = state.tolist() - pos_agent = state[:2] - pos_block = state[2:4] - rot_block = state[4] - self.agent.position = pos_agent - # setting angle rotates with respect to center of mass - # therefore will modify the geometric position - # if not the same as CoM - # therefore should be modified first. - if self.legacy: - # for compatibility with legacy data - self.block.position = pos_block - self.block.angle = rot_block - else: - self.block.angle = rot_block - self.block.position = pos_block - - # Run physics to take effect - self.space.step(1.0 / self.sim_hz) - - def _set_state_local(self, state_local): - agent_pos_local = state_local[:2] - block_pose_local = state_local[2:] - tf_img_obj = st.AffineTransform(translation=self.goal_pose[:2], rotation=self.goal_pose[2]) - tf_obj_new = st.AffineTransform(translation=block_pose_local[:2], rotation=block_pose_local[2]) - tf_img_new = st.AffineTransform(matrix=tf_img_obj.params @ tf_obj_new.params) - agent_pos_new = tf_img_new(agent_pos_local) - new_state = np.array(list(agent_pos_new[0]) + list(tf_img_new.translation) + [tf_img_new.rotation]) - self._set_state(new_state) - return new_state - - def _setup(self): - self.space = pymunk.Space() - self.space.gravity = 0, 0 - self.space.damping = 0 - self.teleop = False - self.render_buffer = [] - - # Add walls. - walls = [ - self._add_segment((5, 506), (5, 5), 2), - self._add_segment((5, 5), (506, 5), 2), - self._add_segment((506, 5), (506, 506), 2), - self._add_segment((5, 506), (506, 506), 2), - ] - self.space.add(*walls) - - # Add agent, block, and goal zone. 
- self.agent = self.add_circle((256, 400), 15) - self.block = self.add_tee((256, 300), 0) - self.goal_color = pygame.Color("LightGreen") - self.goal_pose = np.array([256, 256, np.pi / 4]) # x, y, theta (in radians) - - # Add collision handling - self.collision_handeler = self.space.add_collision_handler(0, 0) - self.collision_handeler.post_solve = self._handle_collision - self.n_contact_points = 0 - - self.max_score = 50 * 100 - self.success_threshold = 0.95 # 95% coverage. - - def _add_segment(self, a, b, radius): - shape = pymunk.Segment(self.space.static_body, a, b, radius) - shape.color = pygame.Color("LightGray") # https://htmlcolorcodes.com/color-names - return shape - - def add_circle(self, position, radius): - body = pymunk.Body(body_type=pymunk.Body.KINEMATIC) - body.position = position - body.friction = 1 - shape = pymunk.Circle(body, radius) - shape.color = pygame.Color("RoyalBlue") - self.space.add(body, shape) - return body - - def add_box(self, position, height, width): - mass = 1 - inertia = pymunk.moment_for_box(mass, (height, width)) - body = pymunk.Body(mass, inertia) - body.position = position - shape = pymunk.Poly.create_box(body, (height, width)) - shape.color = pygame.Color("LightSlateGray") - self.space.add(body, shape) - return body - - def add_tee(self, position, angle, scale=30, color="LightSlateGray", mask=None): - if mask is None: - mask = pymunk.ShapeFilter.ALL_MASKS() - mass = 1 - length = 4 - vertices1 = [ - (-length * scale / 2, scale), - (length * scale / 2, scale), - (length * scale / 2, 0), - (-length * scale / 2, 0), - ] - inertia1 = pymunk.moment_for_poly(mass, vertices=vertices1) - vertices2 = [ - (-scale / 2, scale), - (-scale / 2, length * scale), - (scale / 2, length * scale), - (scale / 2, scale), - ] - inertia2 = pymunk.moment_for_poly(mass, vertices=vertices1) - body = pymunk.Body(mass, inertia1 + inertia2) - shape1 = pymunk.Poly(body, vertices1) - shape2 = pymunk.Poly(body, vertices2) - shape1.color = pygame.Color(color) 
- shape2.color = pygame.Color(color) - shape1.filter = pymunk.ShapeFilter(mask=mask) - shape2.filter = pymunk.ShapeFilter(mask=mask) - body.center_of_gravity = (shape1.center_of_gravity + shape2.center_of_gravity) / 2 - body.position = position - body.angle = angle - body.friction = 1 - self.space.add(body, shape1, shape2) - return body diff --git a/lerobot/common/envs/pusht/pusht_image_env.py b/lerobot/common/envs/pusht/pusht_image_env.py deleted file mode 100644 index 6547835a..00000000 --- a/lerobot/common/envs/pusht/pusht_image_env.py +++ /dev/null @@ -1,41 +0,0 @@ -import numpy as np -from gymnasium import spaces - -from lerobot.common.envs.pusht.pusht_env import PushTEnv - - -class PushTImageEnv(PushTEnv): - metadata = {"render.modes": ["rgb_array"], "video.frames_per_second": 10} - - # Note: legacy defaults to True for compatibility with original - def __init__(self, legacy=True, block_cog=None, damping=None, render_size=96): - super().__init__( - legacy=legacy, block_cog=block_cog, damping=damping, render_size=render_size, render_action=False - ) - ws = self.window_size - self.observation_space = spaces.Dict( - { - "image": spaces.Box(low=0, high=1, shape=(3, render_size, render_size), dtype=np.float32), - "agent_pos": spaces.Box(low=0, high=ws, shape=(2,), dtype=np.float32), - } - ) - self.render_cache = None - - def _get_obs(self): - img = super()._render_frame(mode="rgb_array") - - agent_pos = np.array(self.agent.position) - img_obs = np.moveaxis(img, -1, 0) - obs = {"image": img_obs, "agent_pos": agent_pos} - - self.render_cache = img - - return obs - - def render(self, mode): - assert mode == "rgb_array" - - if self.render_cache is None: - self._get_obs() - - return self.render_cache diff --git a/lerobot/common/envs/pusht/pymunk_override.py b/lerobot/common/envs/pusht/pymunk_override.py deleted file mode 100644 index 7ad76237..00000000 --- a/lerobot/common/envs/pusht/pymunk_override.py +++ /dev/null @@ -1,244 +0,0 @@ -# 
---------------------------------------------------------------------------- -# pymunk -# Copyright (c) 2007-2016 Victor Blomqvist -# -# Permission is hereby granted, free of charge, to any person obtaining a copy -# of this software and associated documentation files (the "Software"), to deal -# in the Software without restriction, including without limitation the rights -# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -# copies of the Software, and to permit persons to whom the Software is -# furnished to do so, subject to the following conditions: -# -# The above copyright notice and this permission notice shall be included in -# all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -# SOFTWARE. -# ---------------------------------------------------------------------------- - -"""This submodule contains helper functions to help with quick prototyping -using pymunk together with pygame. - -Intended to help with debugging and prototyping, not for actual production use -in a full application. The methods contained in this module is opinionated -about your coordinate system and not in any way optimized. 
-""" - -__docformat__ = "reStructuredText" - -__all__ = [ - "DrawOptions", - "get_mouse_pos", - "to_pygame", - "from_pygame", - # "lighten", - "positive_y_is_up", -] - -from typing import Sequence, Tuple - -import numpy as np -import pygame -import pymunk -from pymunk.space_debug_draw_options import SpaceDebugColor -from pymunk.vec2d import Vec2d - -positive_y_is_up: bool = False -"""Make increasing values of y point upwards. - -When True:: - - y - ^ - | . (3, 3) - | - | . (2, 2) - | - +------ > x - -When False:: - - +------ > x - | - | . (2, 2) - | - | . (3, 3) - v - y - -""" - - -class DrawOptions(pymunk.SpaceDebugDrawOptions): - def __init__(self, surface: pygame.Surface) -> None: - """Draw a pymunk.Space on a pygame.Surface object. - - Typical usage:: - - >>> import pymunk - >>> surface = pygame.Surface((10,10)) - >>> space = pymunk.Space() - >>> options = pymunk.pygame_util.DrawOptions(surface) - >>> space.debug_draw(options) - - You can control the color of a shape by setting shape.color to the color - you want it drawn in:: - - >>> c = pymunk.Circle(None, 10) - >>> c.color = pygame.Color("pink") - - See pygame_util.demo.py for a full example - - Since pygame uses a coordinate system where y points down (in contrast - to many other cases), you either have to make the physics simulation - with Pymunk also behave in that way, or flip everything when you draw. - - The easiest is probably to just make the simulation behave the same - way as Pygame does. In that way all coordinates used are in the same - orientation and easy to reason about:: - - >>> space = pymunk.Space() - >>> space.gravity = (0, -1000) - >>> body = pymunk.Body() - >>> body.position = (0, 0) # will be positioned in the top left corner - >>> space.debug_draw(options) - - To flip the drawing its possible to set the module property - :py:data:`positive_y_is_up` to True. 
Then the pygame drawing will flip - the simulation upside down before drawing:: - - >>> positive_y_is_up = True - >>> body = pymunk.Body() - >>> body.position = (0, 0) - >>> # Body will be position in bottom left corner - - :Parameters: - surface : pygame.Surface - Surface that the objects will be drawn on - """ - self.surface = surface - super().__init__() - - def draw_circle( - self, - pos: Vec2d, - angle: float, - radius: float, - outline_color: SpaceDebugColor, - fill_color: SpaceDebugColor, - ) -> None: - p = to_pygame(pos, self.surface) - - pygame.draw.circle(self.surface, fill_color.as_int(), p, round(radius), 0) - pygame.draw.circle(self.surface, light_color(fill_color).as_int(), p, round(radius - 4), 0) - - # circle_edge = pos + Vec2d(radius, 0).rotated(angle) - # p2 = to_pygame(circle_edge, self.surface) - # line_r = 2 if radius > 20 else 1 - # pygame.draw.lines(self.surface, outline_color.as_int(), False, [p, p2], line_r) - - def draw_segment(self, a: Vec2d, b: Vec2d, color: SpaceDebugColor) -> None: - p1 = to_pygame(a, self.surface) - p2 = to_pygame(b, self.surface) - - pygame.draw.aalines(self.surface, color.as_int(), False, [p1, p2]) - - def draw_fat_segment( - self, - a: Tuple[float, float], - b: Tuple[float, float], - radius: float, - outline_color: SpaceDebugColor, - fill_color: SpaceDebugColor, - ) -> None: - p1 = to_pygame(a, self.surface) - p2 = to_pygame(b, self.surface) - - r = round(max(1, radius * 2)) - pygame.draw.lines(self.surface, fill_color.as_int(), False, [p1, p2], r) - if r > 2: - orthog = [abs(p2[1] - p1[1]), abs(p2[0] - p1[0])] - if orthog[0] == 0 and orthog[1] == 0: - return - scale = radius / (orthog[0] * orthog[0] + orthog[1] * orthog[1]) ** 0.5 - orthog[0] = round(orthog[0] * scale) - orthog[1] = round(orthog[1] * scale) - points = [ - (p1[0] - orthog[0], p1[1] - orthog[1]), - (p1[0] + orthog[0], p1[1] + orthog[1]), - (p2[0] + orthog[0], p2[1] + orthog[1]), - (p2[0] - orthog[0], p2[1] - orthog[1]), - ] - 
pygame.draw.polygon(self.surface, fill_color.as_int(), points) - pygame.draw.circle( - self.surface, - fill_color.as_int(), - (round(p1[0]), round(p1[1])), - round(radius), - ) - pygame.draw.circle( - self.surface, - fill_color.as_int(), - (round(p2[0]), round(p2[1])), - round(radius), - ) - - def draw_polygon( - self, - verts: Sequence[Tuple[float, float]], - radius: float, - outline_color: SpaceDebugColor, - fill_color: SpaceDebugColor, - ) -> None: - ps = [to_pygame(v, self.surface) for v in verts] - ps += [ps[0]] - - radius = 2 - pygame.draw.polygon(self.surface, light_color(fill_color).as_int(), ps) - - if radius > 0: - for i in range(len(verts)): - a = verts[i] - b = verts[(i + 1) % len(verts)] - self.draw_fat_segment(a, b, radius, fill_color, fill_color) - - def draw_dot(self, size: float, pos: Tuple[float, float], color: SpaceDebugColor) -> None: - p = to_pygame(pos, self.surface) - pygame.draw.circle(self.surface, color.as_int(), p, round(size), 0) - - -def get_mouse_pos(surface: pygame.Surface) -> Tuple[int, int]: - """Get position of the mouse pointer in pymunk coordinates.""" - p = pygame.mouse.get_pos() - return from_pygame(p, surface) - - -def to_pygame(p: Tuple[float, float], surface: pygame.Surface) -> Tuple[int, int]: - """Convenience method to convert pymunk coordinates to pygame surface - local coordinates. - - Note that in case positive_y_is_up is False, this function won't actually do - anything except converting the point to integers. 
- """ - if positive_y_is_up: - return round(p[0]), surface.get_height() - round(p[1]) - else: - return round(p[0]), round(p[1]) - - -def from_pygame(p: Tuple[float, float], surface: pygame.Surface) -> Tuple[int, int]: - """Convenience method to convert pygame surface local coordinates to - pymunk coordinates - """ - return to_pygame(p, surface) - - -def light_color(color: SpaceDebugColor): - color = np.minimum(1.2 * np.float32([color.r, color.g, color.b, color.a]), np.float32([255])) - color = SpaceDebugColor(r=color[0], g=color[1], b=color[2], a=color[3]) - return color diff --git a/lerobot/common/envs/simxarm/env.py b/lerobot/common/envs/simxarm/env.py deleted file mode 100644 index 8ce6b24c..00000000 --- a/lerobot/common/envs/simxarm/env.py +++ /dev/null @@ -1,237 +0,0 @@ -import importlib -import logging -from collections import deque -from typing import Optional - -import einops -import numpy as np -import torch -from tensordict import TensorDict -from torchrl.data.tensor_specs import ( - BoundedTensorSpec, - CompositeSpec, - DiscreteTensorSpec, - UnboundedContinuousTensorSpec, -) -from torchrl.envs.libs.gym import _gym_to_torchrl_spec_transform - -from lerobot.common.envs.abstract import AbstractEnv -from lerobot.common.utils import set_global_seed - -MAX_NUM_ACTIONS = 4 - -_has_gym = importlib.util.find_spec("gymnasium") is not None - - -class SimxarmEnv(AbstractEnv): - name = "simxarm" - available_tasks = ["lift"] - - def __init__( - self, - task, - frame_skip: int = 1, - from_pixels: bool = False, - pixels_only: bool = False, - image_size=None, - seed=1337, - device="cpu", - num_prev_obs=0, - num_prev_action=0, - ): - super().__init__( - task=task, - frame_skip=frame_skip, - from_pixels=from_pixels, - pixels_only=pixels_only, - image_size=image_size, - seed=seed, - device=device, - num_prev_obs=num_prev_obs, - num_prev_action=num_prev_action, - ) - - def _make_env(self): - if not _has_gym: - raise ImportError("Cannot import gymnasium.") - - import 
gymnasium as gym - - from lerobot.common.envs.simxarm.simxarm import TASKS - - if self.task not in TASKS: - raise ValueError(f"Unknown task {self.task}. Must be one of {list(TASKS.keys())}") - - self._env = TASKS[self.task]["env"]() - - num_actions = len(TASKS[self.task]["action_space"]) - self._action_space = gym.spaces.Box(low=-1.0, high=1.0, shape=(num_actions,)) - self._action_padding = np.zeros((MAX_NUM_ACTIONS - num_actions), dtype=np.float32) - if "w" not in TASKS[self.task]["action_space"]: - self._action_padding[-1] = 1.0 - - def render(self, mode="rgb_array", width=384, height=384): - return self._env.render(mode, width=width, height=height) - - def _format_raw_obs(self, raw_obs): - if self.from_pixels: - image = self.render(mode="rgb_array", width=self.image_size, height=self.image_size) - image = image.transpose(2, 0, 1) # (H, W, C) -> (C, H, W) - image = torch.tensor(image.copy(), dtype=torch.uint8) - - obs = {"image": image} - - if not self.pixels_only: - obs["state"] = torch.tensor(self._env.robot_state, dtype=torch.float32) - else: - obs = {"state": torch.tensor(raw_obs["observation"], dtype=torch.float32)} - - # obs = TensorDict(obs, batch_size=[]) - return obs - - def _reset(self, tensordict: Optional[TensorDict] = None): - td = tensordict - if td is None or td.is_empty(): - raw_obs = self._env.reset() - - obs = self._format_raw_obs(raw_obs) - - if self.num_prev_obs > 0: - stacked_obs = {} - if "image" in obs: - self._prev_obs_image_queue = deque( - [obs["image"]] * (self.num_prev_obs + 1), maxlen=(self.num_prev_obs + 1) - ) - stacked_obs["image"] = torch.stack(list(self._prev_obs_image_queue)) - if "state" in obs: - self._prev_obs_state_queue = deque( - [obs["state"]] * (self.num_prev_obs + 1), maxlen=(self.num_prev_obs + 1) - ) - stacked_obs["state"] = torch.stack(list(self._prev_obs_state_queue)) - obs = stacked_obs - - td = TensorDict( - { - "observation": TensorDict(obs, batch_size=[]), - "done": torch.tensor([False], dtype=torch.bool), - }, 
- batch_size=[], - ) - else: - raise NotImplementedError() - - return td - - def _step(self, tensordict: TensorDict): - td = tensordict - action = td["action"].numpy() - # step expects shape=(4,) so we pad if necessary - action = np.concatenate([action, self._action_padding]) - # TODO(rcadene): add info["is_success"] and info["success"] ? - sum_reward = 0 - - if action.ndim == 1: - action = einops.repeat(action, "c -> t c", t=self.frame_skip) - else: - if self.frame_skip > 1: - raise NotImplementedError() - - num_action_steps = action.shape[0] - for i in range(num_action_steps): - raw_obs, reward, done, info = self._env.step(action[i]) - sum_reward += reward - - obs = self._format_raw_obs(raw_obs) - - if self.num_prev_obs > 0: - stacked_obs = {} - if "image" in obs: - self._prev_obs_image_queue.append(obs["image"]) - stacked_obs["image"] = torch.stack(list(self._prev_obs_image_queue)) - if "state" in obs: - self._prev_obs_state_queue.append(obs["state"]) - stacked_obs["state"] = torch.stack(list(self._prev_obs_state_queue)) - obs = stacked_obs - - td = TensorDict( - { - "observation": self._format_raw_obs(raw_obs), - "reward": torch.tensor([sum_reward], dtype=torch.float32), - "done": torch.tensor([done], dtype=torch.bool), - "success": torch.tensor([info["success"]], dtype=torch.bool), - }, - batch_size=[], - ) - return td - - def _make_spec(self): - obs = {} - if self.from_pixels: - image_shape = (3, self.image_size, self.image_size) - if self.num_prev_obs > 0: - image_shape = (self.num_prev_obs + 1, *image_shape) - - obs["image"] = BoundedTensorSpec( - low=0, - high=255, - shape=image_shape, - dtype=torch.uint8, - device=self.device, - ) - if not self.pixels_only: - state_shape = (len(self._env.robot_state),) - if self.num_prev_obs > 0: - state_shape = (self.num_prev_obs + 1, *state_shape) - - obs["state"] = UnboundedContinuousTensorSpec( - shape=state_shape, - dtype=torch.float32, - device=self.device, - ) - else: - # TODO(rcadene): add observation_space 
achieved_goal and desired_goal? - state_shape = self._env.observation_space["observation"].shape - if self.num_prev_obs > 0: - state_shape = (self.num_prev_obs + 1, *state_shape) - - obs["state"] = UnboundedContinuousTensorSpec( - # TODO: - shape=state_shape, - dtype=torch.float32, - device=self.device, - ) - self.observation_spec = CompositeSpec({"observation": obs}) - - self.action_spec = _gym_to_torchrl_spec_transform( - self._action_space, - device=self.device, - ) - - self.reward_spec = UnboundedContinuousTensorSpec( - shape=(1,), - dtype=torch.float32, - device=self.device, - ) - - self.done_spec = CompositeSpec( - { - "done": DiscreteTensorSpec( - 2, - shape=(1,), - dtype=torch.bool, - device=self.device, - ), - "success": DiscreteTensorSpec( - 2, - shape=(1,), - dtype=torch.bool, - device=self.device, - ), - } - ) - - def _set_seed(self, seed: Optional[int]): - set_global_seed(seed) - self._seed = seed - # TODO(aliberts): change self._reset so that it takes in a seed value - logging.warning("simxarm env is not properly seeded") diff --git a/lerobot/common/envs/simxarm/simxarm/__init__.py b/lerobot/common/envs/simxarm/simxarm/__init__.py deleted file mode 100644 index 903d6042..00000000 --- a/lerobot/common/envs/simxarm/simxarm/__init__.py +++ /dev/null @@ -1,166 +0,0 @@ -from collections import OrderedDict, deque - -import gymnasium as gym -import numpy as np -from gymnasium.wrappers import TimeLimit - -from lerobot.common.envs.simxarm.simxarm.tasks.base import Base as Base -from lerobot.common.envs.simxarm.simxarm.tasks.lift import Lift -from lerobot.common.envs.simxarm.simxarm.tasks.peg_in_box import PegInBox -from lerobot.common.envs.simxarm.simxarm.tasks.push import Push -from lerobot.common.envs.simxarm.simxarm.tasks.reach import Reach - -TASKS = OrderedDict( - ( - ( - "reach", - { - "env": Reach, - "action_space": "xyz", - "episode_length": 50, - "description": "Reach a target location with the end effector", - }, - ), - ( - "push", - { - "env": Push, 
- "action_space": "xyz", - "episode_length": 50, - "description": "Push a cube to a target location", - }, - ), - ( - "peg_in_box", - { - "env": PegInBox, - "action_space": "xyz", - "episode_length": 50, - "description": "Insert a peg into a box", - }, - ), - ( - "lift", - { - "env": Lift, - "action_space": "xyzw", - "episode_length": 50, - "description": "Lift a cube above a height threshold", - }, - ), - ) -) - - -class SimXarmWrapper(gym.Wrapper): - """ - A wrapper for the SimXarm environments. This wrapper is used to - convert the action and observation spaces to the correct format. - """ - - def __init__(self, env, task, obs_mode, image_size, action_repeat, frame_stack=1, channel_last=False): - super().__init__(env) - self._env = env - self.obs_mode = obs_mode - self.image_size = image_size - self.action_repeat = action_repeat - self.frame_stack = frame_stack - self._frames = deque([], maxlen=frame_stack) - self.channel_last = channel_last - self._max_episode_steps = task["episode_length"] // action_repeat - - image_shape = ( - (image_size, image_size, 3 * frame_stack) - if channel_last - else (3 * frame_stack, image_size, image_size) - ) - if obs_mode == "state": - self.observation_space = env.observation_space["observation"] - elif obs_mode == "rgb": - self.observation_space = gym.spaces.Box(low=0, high=255, shape=image_shape, dtype=np.uint8) - elif obs_mode == "all": - self.observation_space = gym.spaces.Dict( - state=gym.spaces.Box(low=-np.inf, high=np.inf, shape=(4,), dtype=np.float32), - rgb=gym.spaces.Box(low=0, high=255, shape=image_shape, dtype=np.uint8), - ) - else: - raise ValueError(f"Unknown obs_mode {obs_mode}. 
Must be one of [rgb, all, state]") - self.action_space = gym.spaces.Box(low=-1.0, high=1.0, shape=(len(task["action_space"]),)) - self.action_padding = np.zeros(4 - len(task["action_space"]), dtype=np.float32) - if "w" not in task["action_space"]: - self.action_padding[-1] = 1.0 - - def _render_obs(self): - obs = self.render(mode="rgb_array", width=self.image_size, height=self.image_size) - if not self.channel_last: - obs = obs.transpose(2, 0, 1) - return obs.copy() - - def _update_frames(self, reset=False): - pixels = self._render_obs() - self._frames.append(pixels) - if reset: - for _ in range(1, self.frame_stack): - self._frames.append(pixels) - assert len(self._frames) == self.frame_stack - - def transform_obs(self, obs, reset=False): - if self.obs_mode == "state": - return obs["observation"] - elif self.obs_mode == "rgb": - self._update_frames(reset=reset) - rgb_obs = np.concatenate(list(self._frames), axis=-1 if self.channel_last else 0) - return rgb_obs - elif self.obs_mode == "all": - self._update_frames(reset=reset) - rgb_obs = np.concatenate(list(self._frames), axis=-1 if self.channel_last else 0) - return OrderedDict((("rgb", rgb_obs), ("state", self.robot_state))) - else: - raise ValueError(f"Unknown obs_mode {self.obs_mode}. Must be one of [rgb, all, state]") - - def reset(self): - return self.transform_obs(self._env.reset(), reset=True) - - def step(self, action): - action = np.concatenate([action, self.action_padding]) - reward = 0.0 - for _ in range(self.action_repeat): - obs, r, done, info = self._env.step(action) - reward += r - return self.transform_obs(obs), reward, done, info - - def render(self, mode="rgb_array", width=384, height=384, **kwargs): - return self._env.render(mode, width=width, height=height) - - @property - def state(self): - return self._env.robot_state - - -def make(task, obs_mode="state", image_size=84, action_repeat=1, frame_stack=1, channel_last=False, seed=0): - """ - Create a new environment. 
- Args: - task (str): The task to create an environment for. Must be one of: - - 'reach' - - 'push' - - 'peg-in-box' - - 'lift' - obs_mode (str): The observation mode to use. Must be one of: - - 'state': Only state observations - - 'rgb': RGB images - - 'all': RGB images and state observations - image_size (int): The size of the image observations - action_repeat (int): The number of times to repeat the action - seed (int): The random seed to use - Returns: - gym.Env: The environment - """ - if task not in TASKS: - raise ValueError(f"Unknown task {task}. Must be one of {list(TASKS.keys())}") - env = TASKS[task]["env"]() - env = TimeLimit(env, TASKS[task]["episode_length"]) - env = SimXarmWrapper(env, TASKS[task], obs_mode, image_size, action_repeat, frame_stack, channel_last) - env.seed(seed) - - return env diff --git a/lerobot/common/envs/simxarm/simxarm/tasks/__init__.py b/lerobot/common/envs/simxarm/simxarm/tasks/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/lerobot/common/envs/simxarm/simxarm/tasks/assets/lift.xml b/lerobot/common/envs/simxarm/simxarm/tasks/assets/lift.xml deleted file mode 100644 index 92231f92..00000000 --- a/lerobot/common/envs/simxarm/simxarm/tasks/assets/lift.xml +++ /dev/null @@ -1,53 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/lerobot/common/envs/simxarm/simxarm/tasks/assets/mesh/base_link.stl b/lerobot/common/envs/simxarm/simxarm/tasks/assets/mesh/base_link.stl deleted file mode 100644 index f1f52955..00000000 --- a/lerobot/common/envs/simxarm/simxarm/tasks/assets/mesh/base_link.stl +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:21fb81ae7fba19e3c6b2d2ca60c8051712ba273357287eb5a397d92d61c7a736 -size 1211434 diff --git a/lerobot/common/envs/simxarm/simxarm/tasks/assets/mesh/block_inner.stl b/lerobot/common/envs/simxarm/simxarm/tasks/assets/mesh/block_inner.stl deleted file mode 100644 index 
6cb88945..00000000 --- a/lerobot/common/envs/simxarm/simxarm/tasks/assets/mesh/block_inner.stl +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:be68ce180d11630a667a5f37f4dffcc3feebe4217d4bb3912c813b6d9ca3ec66 -size 3284 diff --git a/lerobot/common/envs/simxarm/simxarm/tasks/assets/mesh/block_inner2.stl b/lerobot/common/envs/simxarm/simxarm/tasks/assets/mesh/block_inner2.stl deleted file mode 100644 index dab55ef5..00000000 --- a/lerobot/common/envs/simxarm/simxarm/tasks/assets/mesh/block_inner2.stl +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:2c6448552bf6b1c4f17334d686a5320ce051bcdfe31431edf69303d8a570d1de -size 3284 diff --git a/lerobot/common/envs/simxarm/simxarm/tasks/assets/mesh/block_outer.stl b/lerobot/common/envs/simxarm/simxarm/tasks/assets/mesh/block_outer.stl deleted file mode 100644 index 21cf11fa..00000000 --- a/lerobot/common/envs/simxarm/simxarm/tasks/assets/mesh/block_outer.stl +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:748b9e197e6521914f18d1f6383a36f211136b3f33f2ad2a8c11b9f921c2cf86 -size 6284 diff --git a/lerobot/common/envs/simxarm/simxarm/tasks/assets/mesh/left_finger.stl b/lerobot/common/envs/simxarm/simxarm/tasks/assets/mesh/left_finger.stl deleted file mode 100644 index 6bf4e502..00000000 --- a/lerobot/common/envs/simxarm/simxarm/tasks/assets/mesh/left_finger.stl +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:a44756eb72f9c214cb37e61dc209cd7073fdff3e4271a7423476ef6fd090d2d4 -size 242684 diff --git a/lerobot/common/envs/simxarm/simxarm/tasks/assets/mesh/left_inner_knuckle.stl b/lerobot/common/envs/simxarm/simxarm/tasks/assets/mesh/left_inner_knuckle.stl deleted file mode 100644 index 817c7e1d..00000000 --- a/lerobot/common/envs/simxarm/simxarm/tasks/assets/mesh/left_inner_knuckle.stl +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid 
sha256:e8e48692ad26837bb3d6a97582c89784d09948fc09bfe4e5a59017859ff04dac -size 366284 diff --git a/lerobot/common/envs/simxarm/simxarm/tasks/assets/mesh/left_outer_knuckle.stl b/lerobot/common/envs/simxarm/simxarm/tasks/assets/mesh/left_outer_knuckle.stl deleted file mode 100644 index 010c0f3b..00000000 --- a/lerobot/common/envs/simxarm/simxarm/tasks/assets/mesh/left_outer_knuckle.stl +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:501665812b08d67e764390db781e839adc6896a9540301d60adf606f57648921 -size 22284 diff --git a/lerobot/common/envs/simxarm/simxarm/tasks/assets/mesh/link1.stl b/lerobot/common/envs/simxarm/simxarm/tasks/assets/mesh/link1.stl deleted file mode 100644 index f2b676f2..00000000 --- a/lerobot/common/envs/simxarm/simxarm/tasks/assets/mesh/link1.stl +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:34b541122df84d2ef5fcb91b715eb19659dc15ad8d44a191dde481f780265636 -size 184184 diff --git a/lerobot/common/envs/simxarm/simxarm/tasks/assets/mesh/link2.stl b/lerobot/common/envs/simxarm/simxarm/tasks/assets/mesh/link2.stl deleted file mode 100644 index bf93580c..00000000 --- a/lerobot/common/envs/simxarm/simxarm/tasks/assets/mesh/link2.stl +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:61e641cd47c169ecef779683332e00e4914db729bf02dfb61bfbe69351827455 -size 225584 diff --git a/lerobot/common/envs/simxarm/simxarm/tasks/assets/mesh/link3.stl b/lerobot/common/envs/simxarm/simxarm/tasks/assets/mesh/link3.stl deleted file mode 100644 index d316d233..00000000 --- a/lerobot/common/envs/simxarm/simxarm/tasks/assets/mesh/link3.stl +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:9e2798e7946dd70046c95455d5ba96392d0b54a6069caba91dc4ca66e1379b42 -size 237084 diff --git a/lerobot/common/envs/simxarm/simxarm/tasks/assets/mesh/link4.stl b/lerobot/common/envs/simxarm/simxarm/tasks/assets/mesh/link4.stl deleted file mode 100644 
index f6d5fe94..00000000 --- a/lerobot/common/envs/simxarm/simxarm/tasks/assets/mesh/link4.stl +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:c757fee95f873191a0633c355c07a360032960771cabbd7593a6cdb0f1ffb089 -size 243684 diff --git a/lerobot/common/envs/simxarm/simxarm/tasks/assets/mesh/link5.stl b/lerobot/common/envs/simxarm/simxarm/tasks/assets/mesh/link5.stl deleted file mode 100644 index e037b8b9..00000000 --- a/lerobot/common/envs/simxarm/simxarm/tasks/assets/mesh/link5.stl +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:715ad5787c5dab57589937fd47289882707b5e1eb997e340d567785b02f4ec90 -size 229084 diff --git a/lerobot/common/envs/simxarm/simxarm/tasks/assets/mesh/link6.stl b/lerobot/common/envs/simxarm/simxarm/tasks/assets/mesh/link6.stl deleted file mode 100644 index 198c5300..00000000 --- a/lerobot/common/envs/simxarm/simxarm/tasks/assets/mesh/link6.stl +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:85b320aa420497827223d16d492bba8de091173374e361396fc7a5dad7bdb0cb -size 399384 diff --git a/lerobot/common/envs/simxarm/simxarm/tasks/assets/mesh/link7.stl b/lerobot/common/envs/simxarm/simxarm/tasks/assets/mesh/link7.stl deleted file mode 100644 index ce9a39ac..00000000 --- a/lerobot/common/envs/simxarm/simxarm/tasks/assets/mesh/link7.stl +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:97115d848fbf802cb770cd9be639ae2af993103b9d9bbb0c50c943c738a36f18 -size 231684 diff --git a/lerobot/common/envs/simxarm/simxarm/tasks/assets/mesh/link_base.stl b/lerobot/common/envs/simxarm/simxarm/tasks/assets/mesh/link_base.stl deleted file mode 100644 index 110b9531..00000000 --- a/lerobot/common/envs/simxarm/simxarm/tasks/assets/mesh/link_base.stl +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:f6fcbc18258090eb56c21cfb17baa5ae43abc98b1958cd366f3a73b9898fc7f0 -size 2106184 diff --git 
a/lerobot/common/envs/simxarm/simxarm/tasks/assets/mesh/right_finger.stl b/lerobot/common/envs/simxarm/simxarm/tasks/assets/mesh/right_finger.stl deleted file mode 100644 index 03f26e9a..00000000 --- a/lerobot/common/envs/simxarm/simxarm/tasks/assets/mesh/right_finger.stl +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:c5dee87c7f37baf554b8456ebfe0b3e8ed0b22b8938bd1add6505c2ad6d32c7d -size 242684 diff --git a/lerobot/common/envs/simxarm/simxarm/tasks/assets/mesh/right_inner_knuckle.stl b/lerobot/common/envs/simxarm/simxarm/tasks/assets/mesh/right_inner_knuckle.stl deleted file mode 100644 index 8586f344..00000000 --- a/lerobot/common/envs/simxarm/simxarm/tasks/assets/mesh/right_inner_knuckle.stl +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:b41dd2c2c550281bf78d7cc6fa117b14786700e5c453560a0cb5fd6dfa0ffb3e -size 366284 diff --git a/lerobot/common/envs/simxarm/simxarm/tasks/assets/mesh/right_outer_knuckle.stl b/lerobot/common/envs/simxarm/simxarm/tasks/assets/mesh/right_outer_knuckle.stl deleted file mode 100644 index ae7afc25..00000000 --- a/lerobot/common/envs/simxarm/simxarm/tasks/assets/mesh/right_outer_knuckle.stl +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:75ca1107d0a42a0f03802a9a49cab48419b31851ee8935f8f1ca06be1c1c91e8 -size 22284 diff --git a/lerobot/common/envs/simxarm/simxarm/tasks/assets/peg_in_box.xml b/lerobot/common/envs/simxarm/simxarm/tasks/assets/peg_in_box.xml deleted file mode 100644 index 0f85459f..00000000 --- a/lerobot/common/envs/simxarm/simxarm/tasks/assets/peg_in_box.xml +++ /dev/null @@ -1,74 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/lerobot/common/envs/simxarm/simxarm/tasks/assets/push.xml b/lerobot/common/envs/simxarm/simxarm/tasks/assets/push.xml deleted file mode 100644 index 42a78c8a..00000000 --- 
a/lerobot/common/envs/simxarm/simxarm/tasks/assets/push.xml +++ /dev/null @@ -1,54 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/lerobot/common/envs/simxarm/simxarm/tasks/assets/reach.xml b/lerobot/common/envs/simxarm/simxarm/tasks/assets/reach.xml deleted file mode 100644 index ded6d209..00000000 --- a/lerobot/common/envs/simxarm/simxarm/tasks/assets/reach.xml +++ /dev/null @@ -1,48 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/lerobot/common/envs/simxarm/simxarm/tasks/assets/shared.xml b/lerobot/common/envs/simxarm/simxarm/tasks/assets/shared.xml deleted file mode 100644 index ee56f8f0..00000000 --- a/lerobot/common/envs/simxarm/simxarm/tasks/assets/shared.xml +++ /dev/null @@ -1,51 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/lerobot/common/envs/simxarm/simxarm/tasks/assets/xarm.xml b/lerobot/common/envs/simxarm/simxarm/tasks/assets/xarm.xml deleted file mode 100644 index 023474d6..00000000 --- a/lerobot/common/envs/simxarm/simxarm/tasks/assets/xarm.xml +++ /dev/null @@ -1,88 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/lerobot/common/envs/simxarm/simxarm/tasks/base.py b/lerobot/common/envs/simxarm/simxarm/tasks/base.py deleted file mode 100644 index b937b290..00000000 --- a/lerobot/common/envs/simxarm/simxarm/tasks/base.py +++ /dev/null @@ -1,145 +0,0 @@ -import os - -import mujoco -import numpy as np -from gymnasium_robotics.envs import robot_env - -from lerobot.common.envs.simxarm.simxarm.tasks import mocap - - -class Base(robot_env.MujocoRobotEnv): - """ - Superclass for all simxarm environments. 
- Args: - xml_name (str): name of the xml environment file - gripper_rotation (list): initial rotation of the gripper (given as a quaternion) - """ - - def __init__(self, xml_name, gripper_rotation=None): - if gripper_rotation is None: - gripper_rotation = [0, 1, 0, 0] - self.gripper_rotation = np.array(gripper_rotation, dtype=np.float32) - self.center_of_table = np.array([1.655, 0.3, 0.63625]) - self.max_z = 1.2 - self.min_z = 0.2 - super().__init__( - model_path=os.path.join(os.path.dirname(__file__), "assets", xml_name + ".xml"), - n_substeps=20, - n_actions=4, - initial_qpos={}, - ) - - @property - def dt(self): - return self.n_substeps * self.model.opt.timestep - - @property - def eef(self): - return self._utils.get_site_xpos(self.model, self.data, "grasp") - - @property - def obj(self): - return self._utils.get_site_xpos(self.model, self.data, "object_site") - - @property - def robot_state(self): - gripper_angle = self._utils.get_joint_qpos(self.model, self.data, "right_outer_knuckle_joint") - return np.concatenate([self.eef, gripper_angle]) - - def is_success(self): - return NotImplementedError() - - def get_reward(self): - raise NotImplementedError() - - def _sample_goal(self): - raise NotImplementedError() - - def get_obs(self): - return self._get_obs() - - def _step_callback(self): - self._mujoco.mj_forward(self.model, self.data) - - def _limit_gripper(self, gripper_pos, pos_ctrl): - if gripper_pos[0] > self.center_of_table[0] - 0.105 + 0.15: - pos_ctrl[0] = min(pos_ctrl[0], 0) - if gripper_pos[0] < self.center_of_table[0] - 0.105 - 0.3: - pos_ctrl[0] = max(pos_ctrl[0], 0) - if gripper_pos[1] > self.center_of_table[1] + 0.3: - pos_ctrl[1] = min(pos_ctrl[1], 0) - if gripper_pos[1] < self.center_of_table[1] - 0.3: - pos_ctrl[1] = max(pos_ctrl[1], 0) - if gripper_pos[2] > self.max_z: - pos_ctrl[2] = min(pos_ctrl[2], 0) - if gripper_pos[2] < self.min_z: - pos_ctrl[2] = max(pos_ctrl[2], 0) - return pos_ctrl - - def _apply_action(self, action): - assert 
action.shape == (4,) - action = action.copy() - pos_ctrl, gripper_ctrl = action[:3], action[3] - pos_ctrl = self._limit_gripper( - self._utils.get_site_xpos(self.model, self.data, "grasp"), pos_ctrl - ) * (1 / self.n_substeps) - gripper_ctrl = np.array([gripper_ctrl, gripper_ctrl]) - mocap.apply_action( - self.model, - self._model_names, - self.data, - np.concatenate([pos_ctrl, self.gripper_rotation, gripper_ctrl]), - ) - - def _render_callback(self): - self._mujoco.mj_forward(self.model, self.data) - - def _reset_sim(self): - self.data.time = self.initial_time - self.data.qpos[:] = np.copy(self.initial_qpos) - self.data.qvel[:] = np.copy(self.initial_qvel) - self._sample_goal() - self._mujoco.mj_step(self.model, self.data, nstep=10) - return True - - def _set_gripper(self, gripper_pos, gripper_rotation): - self._utils.set_mocap_pos(self.model, self.data, "robot0:mocap", gripper_pos) - self._utils.set_mocap_quat(self.model, self.data, "robot0:mocap", gripper_rotation) - self._utils.set_joint_qpos(self.model, self.data, "right_outer_knuckle_joint", 0) - self.data.qpos[10] = 0.0 - self.data.qpos[12] = 0.0 - - def _env_setup(self, initial_qpos): - for name, value in initial_qpos.items(): - self.data.set_joint_qpos(name, value) - mocap.reset(self.model, self.data) - mujoco.mj_forward(self.model, self.data) - self._sample_goal() - mujoco.mj_forward(self.model, self.data) - - def reset(self): - self._reset_sim() - return self._get_obs() - - def step(self, action): - assert action.shape == (4,) - assert self.action_space.contains(action), "{!r} ({}) invalid".format(action, type(action)) - self._apply_action(action) - self._mujoco.mj_step(self.model, self.data, nstep=2) - self._step_callback() - obs = self._get_obs() - reward = self.get_reward() - done = False - info = {"is_success": self.is_success(), "success": self.is_success()} - return obs, reward, done, info - - def render(self, mode="rgb_array", width=384, height=384): - self._render_callback() - # HACK - 
self.model.vis.global_.offwidth = width - self.model.vis.global_.offheight = height - return self.mujoco_renderer.render(mode) - - def close(self): - if self.mujoco_renderer is not None: - self.mujoco_renderer.close() diff --git a/lerobot/common/envs/simxarm/simxarm/tasks/lift.py b/lerobot/common/envs/simxarm/simxarm/tasks/lift.py deleted file mode 100644 index 0b11196c..00000000 --- a/lerobot/common/envs/simxarm/simxarm/tasks/lift.py +++ /dev/null @@ -1,100 +0,0 @@ -import numpy as np - -from lerobot.common.envs.simxarm.simxarm import Base - - -class Lift(Base): - def __init__(self): - self._z_threshold = 0.15 - super().__init__("lift") - - @property - def z_target(self): - return self._init_z + self._z_threshold - - def is_success(self): - return self.obj[2] >= self.z_target - - def get_reward(self): - reach_dist = np.linalg.norm(self.obj - self.eef) - reach_dist_xy = np.linalg.norm(self.obj[:-1] - self.eef[:-1]) - pick_completed = self.obj[2] >= (self.z_target - 0.01) - obj_dropped = (self.obj[2] < (self._init_z + 0.005)) and (reach_dist > 0.02) - - # Reach - if reach_dist < 0.05: - reach_reward = -reach_dist + max(self._action[-1], 0) / 50 - elif reach_dist_xy < 0.05: - reach_reward = -reach_dist - else: - z_bonus = np.linalg.norm(np.linalg.norm(self.obj[-1] - self.eef[-1])) - reach_reward = -reach_dist - 2 * z_bonus - - # Pick - if pick_completed and not obj_dropped: - pick_reward = self.z_target - elif (reach_dist < 0.1) and (self.obj[2] > (self._init_z + 0.005)): - pick_reward = min(self.z_target, self.obj[2]) - else: - pick_reward = 0 - - return reach_reward / 100 + pick_reward - - def _get_obs(self): - eef_velp = self._utils.get_site_xvelp(self.model, self.data, "grasp") * self.dt - gripper_angle = self._utils.get_joint_qpos(self.model, self.data, "right_outer_knuckle_joint") - eef = self.eef - self.center_of_table - - obj = self.obj - self.center_of_table - obj_rot = self._utils.get_joint_qpos(self.model, self.data, "object_joint0")[-4:] - obj_velp = 
self._utils.get_site_xvelp(self.model, self.data, "object_site") * self.dt - obj_velr = self._utils.get_site_xvelr(self.model, self.data, "object_site") * self.dt - - obs = np.concatenate( - [ - eef, - eef_velp, - obj, - obj_rot, - obj_velp, - obj_velr, - eef - obj, - np.array( - [ - np.linalg.norm(eef - obj), - np.linalg.norm(eef[:-1] - obj[:-1]), - self.z_target, - self.z_target - obj[-1], - self.z_target - eef[-1], - ] - ), - gripper_angle, - ], - axis=0, - ) - return {"observation": obs, "state": eef, "achieved_goal": eef, "desired_goal": eef} - - def _sample_goal(self): - # Gripper - gripper_pos = np.array([1.280, 0.295, 0.735]) + self.np_random.uniform(-0.05, 0.05, size=3) - super()._set_gripper(gripper_pos, self.gripper_rotation) - - # Object - object_pos = self.center_of_table - np.array([0.15, 0.10, 0.07]) - object_pos[0] += self.np_random.uniform(-0.05, 0.05, size=1) - object_pos[1] += self.np_random.uniform(-0.05, 0.05, size=1) - object_qpos = self._utils.get_joint_qpos(self.model, self.data, "object_joint0") - object_qpos[:3] = object_pos - self._utils.set_joint_qpos(self.model, self.data, "object_joint0", object_qpos) - self._init_z = object_pos[2] - - # Goal - return object_pos + np.array([0, 0, self._z_threshold]) - - def reset(self): - self._action = np.zeros(4) - return super().reset() - - def step(self, action): - self._action = action.copy() - return super().step(action) diff --git a/lerobot/common/envs/simxarm/simxarm/tasks/mocap.py b/lerobot/common/envs/simxarm/simxarm/tasks/mocap.py deleted file mode 100644 index 4295bf19..00000000 --- a/lerobot/common/envs/simxarm/simxarm/tasks/mocap.py +++ /dev/null @@ -1,67 +0,0 @@ -# import mujoco_py -import mujoco -import numpy as np - - -def apply_action(model, model_names, data, action): - if model.nmocap > 0: - pos_action, gripper_action = np.split(action, (model.nmocap * 7,)) - if data.ctrl is not None: - for i in range(gripper_action.shape[0]): - data.ctrl[i] = gripper_action[i] - pos_action = 
pos_action.reshape(model.nmocap, 7) - pos_delta, quat_delta = pos_action[:, :3], pos_action[:, 3:] - reset_mocap2body_xpos(model, model_names, data) - data.mocap_pos[:] = data.mocap_pos + pos_delta - data.mocap_quat[:] = data.mocap_quat + quat_delta - - -def reset(model, data): - if model.nmocap > 0 and model.eq_data is not None: - for i in range(model.eq_data.shape[0]): - # if sim.model.eq_type[i] == mujoco_py.const.EQ_WELD: - if model.eq_type[i] == mujoco.mjtEq.mjEQ_WELD: - # model.eq_data[i, :] = np.array([0., 0., 0., 1., 0., 0., 0.]) - model.eq_data[i, :] = np.array( - [ - 0.0, - 0.0, - 0.0, - 1.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - ] - ) - # sim.forward() - mujoco.mj_forward(model, data) - - -def reset_mocap2body_xpos(model, model_names, data): - if model.eq_type is None or model.eq_obj1id is None or model.eq_obj2id is None: - return - - # For all weld constraints - for eq_type, obj1_id, obj2_id in zip(model.eq_type, model.eq_obj1id, model.eq_obj2id, strict=False): - # if eq_type != mujoco_py.const.EQ_WELD: - if eq_type != mujoco.mjtEq.mjEQ_WELD: - continue - # body2 = model.body_id2name(obj2_id) - body2 = model_names.body_id2name[obj2_id] - if body2 == "B0" or body2 == "B9" or body2 == "B1": - continue - mocap_id = model.body_mocapid[obj1_id] - if mocap_id != -1: - # obj1 is the mocap, obj2 is the welded body - body_idx = obj2_id - else: - # obj2 is the mocap, obj1 is the welded body - mocap_id = model.body_mocapid[obj2_id] - body_idx = obj1_id - assert mocap_id != -1 - data.mocap_pos[mocap_id][:] = data.xpos[body_idx] - data.mocap_quat[mocap_id][:] = data.xquat[body_idx] diff --git a/lerobot/common/envs/simxarm/simxarm/tasks/peg_in_box.py b/lerobot/common/envs/simxarm/simxarm/tasks/peg_in_box.py deleted file mode 100644 index 42e41520..00000000 --- a/lerobot/common/envs/simxarm/simxarm/tasks/peg_in_box.py +++ /dev/null @@ -1,86 +0,0 @@ -import numpy as np - -from lerobot.common.envs.simxarm.simxarm import Base - - -class PegInBox(Base): - def 
__init__(self): - super().__init__("peg_in_box") - - def _reset_sim(self): - self._act_magnitude = 0 - super()._reset_sim() - for _ in range(10): - self._apply_action(np.array([0, 0, 0, 1], dtype=np.float32)) - self.sim.step() - - @property - def box(self): - return self.sim.data.get_site_xpos("box_site") - - def is_success(self): - return np.linalg.norm(self.obj - self.box) <= 0.05 - - def get_reward(self): - dist_xy = np.linalg.norm(self.obj[:2] - self.box[:2]) - dist_xyz = np.linalg.norm(self.obj - self.box) - return float(dist_xy <= 0.045) * (2 - 6 * dist_xyz) - 0.2 * np.square(self._act_magnitude) - dist_xy - - def _get_obs(self): - eef_velp = self.sim.data.get_site_xvelp("grasp") * self.dt - gripper_angle = self.sim.data.get_joint_qpos("right_outer_knuckle_joint") - eef, box = self.eef - self.center_of_table, self.box - self.center_of_table - - obj = self.obj - self.center_of_table - obj_rot = self.sim.data.get_joint_qpos("object_joint0")[-4:] - obj_velp = self.sim.data.get_site_xvelp("object_site") * self.dt - obj_velr = self.sim.data.get_site_xvelr("object_site") * self.dt - - obs = np.concatenate( - [ - eef, - eef_velp, - box, - obj, - obj_rot, - obj_velp, - obj_velr, - eef - box, - eef - obj, - obj - box, - np.array( - [ - np.linalg.norm(eef - box), - np.linalg.norm(eef - obj), - np.linalg.norm(obj - box), - gripper_angle, - ] - ), - ], - axis=0, - ) - return {"observation": obs, "state": eef, "achieved_goal": eef, "desired_goal": box} - - def _sample_goal(self): - # Gripper - gripper_pos = np.array([1.280, 0.295, 0.9]) + self.np_random.uniform(-0.05, 0.05, size=3) - super()._set_gripper(gripper_pos, self.gripper_rotation) - - # Object - object_pos = gripper_pos - np.array([0, 0, 0.06]) + self.np_random.uniform(-0.005, 0.005, size=3) - object_qpos = self.sim.data.get_joint_qpos("object_joint0") - object_qpos[:3] = object_pos - self.sim.data.set_joint_qpos("object_joint0", object_qpos) - - # Box - box_pos = np.array([1.61, 0.18, 0.58]) - box_pos[:2] += 
self.np_random.uniform(-0.11, 0.11, size=2) - box_qpos = self.sim.data.get_joint_qpos("box_joint0") - box_qpos[:3] = box_pos - self.sim.data.set_joint_qpos("box_joint0", box_qpos) - - return self.box - - def step(self, action): - self._act_magnitude = np.linalg.norm(action[:3]) - return super().step(action) diff --git a/lerobot/common/envs/simxarm/simxarm/tasks/push.py b/lerobot/common/envs/simxarm/simxarm/tasks/push.py deleted file mode 100644 index 36c4a550..00000000 --- a/lerobot/common/envs/simxarm/simxarm/tasks/push.py +++ /dev/null @@ -1,78 +0,0 @@ -import numpy as np - -from lerobot.common.envs.simxarm.simxarm import Base - - -class Push(Base): - def __init__(self): - super().__init__("push") - - def _reset_sim(self): - self._act_magnitude = 0 - super()._reset_sim() - - def is_success(self): - return np.linalg.norm(self.obj - self.goal) <= 0.05 - - def get_reward(self): - dist = np.linalg.norm(self.obj - self.goal) - penalty = self._act_magnitude**2 - return -(dist + 0.15 * penalty) - - def _get_obs(self): - eef_velp = self.sim.data.get_site_xvelp("grasp") * self.dt - gripper_angle = self.sim.data.get_joint_qpos("right_outer_knuckle_joint") - eef, goal = self.eef - self.center_of_table, self.goal - self.center_of_table - - obj = self.obj - self.center_of_table - obj_rot = self.sim.data.get_joint_qpos("object_joint0")[-4:] - obj_velp = self.sim.data.get_site_xvelp("object_site") * self.dt - obj_velr = self.sim.data.get_site_xvelr("object_site") * self.dt - - obs = np.concatenate( - [ - eef, - eef_velp, - goal, - obj, - obj_rot, - obj_velp, - obj_velr, - eef - goal, - eef - obj, - obj - goal, - np.array( - [ - np.linalg.norm(eef - goal), - np.linalg.norm(eef - obj), - np.linalg.norm(obj - goal), - gripper_angle, - ] - ), - ], - axis=0, - ) - return {"observation": obs, "state": eef, "achieved_goal": eef, "desired_goal": goal} - - def _sample_goal(self): - # Gripper - gripper_pos = np.array([1.280, 0.295, 0.735]) + self.np_random.uniform(-0.05, 0.05, size=3) - 
super()._set_gripper(gripper_pos, self.gripper_rotation) - - # Object - object_pos = self.center_of_table - np.array([0.25, 0, 0.07]) - object_pos[0] += self.np_random.uniform(-0.08, 0.08, size=1) - object_pos[1] += self.np_random.uniform(-0.08, 0.08, size=1) - object_qpos = self.sim.data.get_joint_qpos("object_joint0") - object_qpos[:3] = object_pos - self.sim.data.set_joint_qpos("object_joint0", object_qpos) - - # Goal - self.goal = np.array([1.600, 0.200, 0.545]) - self.goal[:2] += self.np_random.uniform(-0.1, 0.1, size=2) - self.sim.model.site_pos[self.sim.model.site_name2id("target0")] = self.goal - return self.goal - - def step(self, action): - self._act_magnitude = np.linalg.norm(action[:3]) - return super().step(action) diff --git a/lerobot/common/envs/simxarm/simxarm/tasks/reach.py b/lerobot/common/envs/simxarm/simxarm/tasks/reach.py deleted file mode 100644 index 941a586f..00000000 --- a/lerobot/common/envs/simxarm/simxarm/tasks/reach.py +++ /dev/null @@ -1,44 +0,0 @@ -import numpy as np - -from lerobot.common.envs.simxarm.simxarm import Base - - -class Reach(Base): - def __init__(self): - super().__init__("reach") - - def _reset_sim(self): - self._act_magnitude = 0 - super()._reset_sim() - - def is_success(self): - return np.linalg.norm(self.eef - self.goal) <= 0.05 - - def get_reward(self): - dist = np.linalg.norm(self.eef - self.goal) - penalty = self._act_magnitude**2 - return -(dist + 0.15 * penalty) - - def _get_obs(self): - eef_velp = self.sim.data.get_site_xvelp("grasp") * self.dt - gripper_angle = self.sim.data.get_joint_qpos("right_outer_knuckle_joint") - eef, goal = self.eef - self.center_of_table, self.goal - self.center_of_table - obs = np.concatenate( - [eef, eef_velp, goal, eef - goal, np.array([np.linalg.norm(eef - goal), gripper_angle])], axis=0 - ) - return {"observation": obs, "state": eef, "achieved_goal": eef, "desired_goal": goal} - - def _sample_goal(self): - # Gripper - gripper_pos = np.array([1.280, 0.295, 0.735]) + 
self.np_random.uniform(-0.05, 0.05, size=3) - super()._set_gripper(gripper_pos, self.gripper_rotation) - - # Goal - self.goal = np.array([1.550, 0.287, 0.580]) - self.goal[:2] += self.np_random.uniform(-0.125, 0.125, size=2) - self.sim.model.site_pos[self.sim.model.site_name2id("target0")] = self.goal - return self.goal - - def step(self, action): - self._act_magnitude = np.linalg.norm(action[:3]) - return super().step(action) diff --git a/lerobot/common/envs/utils.py b/lerobot/common/envs/utils.py new file mode 100644 index 00000000..1696ddbe --- /dev/null +++ b/lerobot/common/envs/utils.py @@ -0,0 +1,32 @@ +import einops +import torch + +from lerobot.common.transforms import apply_inverse_transform + + +def preprocess_observation(observation, transform=None): + # map to expected inputs for the policy + obs = { + "observation.image": torch.from_numpy(observation["pixels"]).float(), + "observation.state": torch.from_numpy(observation["agent_pos"]).float(), + } + # convert to (b c h w) torch format + obs["observation.image"] = einops.rearrange(obs["observation.image"], "b h w c -> b c h w") + + # apply same transforms as in training + if transform is not None: + for key in obs: + obs[key] = torch.stack([transform({key: item})[key] for item in obs[key]]) + + return obs + + +def postprocess_action(action, transform=None): + action = action.to("cpu") + # action is a batch (num_env,action_dim) instead of an item (action_dim), + # we assume applying inverse transform on a batch works the same + action = apply_inverse_transform({"action": action}, transform)["action"].numpy() + assert ( + action.ndim == 2 + ), "we assume dimensions are respectively the number of parallel envs, action dimensions" + return action diff --git a/lerobot/scripts/eval.py b/lerobot/scripts/eval.py index 09399878..e7ba53fc 100644 --- a/lerobot/scripts/eval.py +++ b/lerobot/scripts/eval.py @@ -44,9 +44,9 @@ from huggingface_hub import snapshot_download from lerobot.common.datasets.factory import 
make_dataset from lerobot.common.envs.factory import make_env +from lerobot.common.envs.utils import postprocess_action, preprocess_observation from lerobot.common.logger import log_output_dir from lerobot.common.policies.factory import make_policy -from lerobot.common.transforms import apply_inverse_transform from lerobot.common.utils import get_safe_torch_device, init_hydra_config, init_logging, set_global_seed @@ -54,34 +54,6 @@ def write_video(video_path, stacked_frames, fps): imageio.mimsave(video_path, stacked_frames, fps=fps) -def preprocess_observation(observation, transform=None): - # map to expected inputs for the policy - obs = { - "observation.image": torch.from_numpy(observation["pixels"]).float(), - "observation.state": torch.from_numpy(observation["agent_pos"]).float(), - } - # convert to (b c h w) torch format - obs["observation.image"] = einops.rearrange(obs["observation.image"], "b h w c -> b c h w") - - # apply same transforms as in training - if transform is not None: - for key in obs: - obs[key] = torch.stack([transform({key: item})[key] for item in obs[key]]) - - return obs - - -def postprocess_action(action, transform=None): - action = action.to("cpu") - # action is a batch (num_env,action_dim) instead of an item (action_dim), - # we assume applying inverse transform on a batch works the same - action = apply_inverse_transform({"action": action}, transform)["action"].numpy() - assert ( - action.ndim == 2 - ), "we assume dimensions are respectively the number of parallel envs, action dimensions" - return action - - def eval_policy( env: gym.vector.VectorEnv, policy, @@ -114,10 +86,10 @@ def eval_policy( def maybe_render_frame(env): if save_video: # noqa: B023 if return_first_video: - visu = env.envs[0].render() + visu = env.envs[0].render(mode="visualization") visu = visu[None, ...] 
# add batch dim else: - visu = np.stack([env.render() for env in env.envs]) + visu = np.stack([env.render(mode="visualization") for env in env.envs]) ep_frames.append(visu) # noqa: B023 for _ in range(num_episodes): diff --git a/tests/test_envs.py b/tests/test_envs.py index 665c1fba..a94d76f2 100644 --- a/tests/test_envs.py +++ b/tests/test_envs.py @@ -1,47 +1,47 @@ import pytest -from tensordict import TensorDict import torch -from torchrl.envs.utils import check_env_specs, step_mdp from lerobot.common.datasets.factory import make_dataset import gymnasium as gym from gymnasium.utils.env_checker import check_env -from lerobot.common.envs.aloha.env import AlohaEnv from lerobot.common.envs.factory import make_env -from lerobot.common.envs.pusht.env import PushtEnv -from lerobot.common.envs.simxarm.env import SimxarmEnv from lerobot.common.utils import init_hydra_config +from lerobot.common.envs.utils import preprocess_observation + +# import dmc_aloha # noqa: F401 + from .utils import DEVICE, DEFAULT_CONFIG_PATH -def print_spec_rollout(env): - print("observation_spec:", env.observation_spec) - print("action_spec:", env.action_spec) - print("reward_spec:", env.reward_spec) - print("done_spec:", env.done_spec) +# def print_spec_rollout(env): +# print("observation_spec:", env.observation_spec) +# print("action_spec:", env.action_spec) +# print("reward_spec:", env.reward_spec) +# print("done_spec:", env.done_spec) - td = env.reset() - print("reset tensordict", td) +# td = env.reset() +# print("reset tensordict", td) - td = env.rand_step(td) - print("random step tensordict", td) +# td = env.rand_step(td) +# print("random step tensordict", td) - def simple_rollout(steps=100): - # preallocate: - data = TensorDict({}, [steps]) - # reset - _data = env.reset() - for i in range(steps): - _data["action"] = env.action_spec.rand() - _data = env.step(_data) - data[i] = _data - _data = step_mdp(_data, keep_other=True) - return data +# def simple_rollout(steps=100): +# # preallocate: 
+# data = TensorDict({}, [steps]) +# # reset +# _data = env.reset() +# for i in range(steps): +# _data["action"] = env.action_spec.rand() +# _data = env.step(_data) +# data[i] = _data +# _data = step_mdp(_data, keep_other=True) +# return data - print("data from rollout:", simple_rollout(100)) +# print("data from rollout:", simple_rollout(100)) +@pytest.mark.skip("TODO") @pytest.mark.parametrize( "task,from_pixels,pixels_only", [ @@ -63,50 +63,41 @@ def test_aloha(task, from_pixels, pixels_only): @pytest.mark.parametrize( - "task, obs_type", + "env_task, obs_type", [ ("XarmLift-v0", "state"), ("XarmLift-v0", "pixels"), ("XarmLift-v0", "pixels_agent_pos"), - # TODO(aliberts): Add simxarm other tasks + # TODO(aliberts): Add gym_xarm other tasks ], ) def test_xarm(env_task, obs_type): - import gym_xarm + import gym_xarm # noqa: F401 env = gym.make(f"gym_xarm/{env_task}", obs_type=obs_type) - # env = SimxarmEnv( - # task, - # from_pixels=from_pixels, - # pixels_only=pixels_only, - # image_size=84 if from_pixels else None, - # ) - # print_spec_rollout(env) - # check_env_specs(env) check_env(env) + @pytest.mark.parametrize( - "from_pixels,pixels_only", + "env_task, obs_type", [ - (True, False), + ("PushTPixels-v0", "state"), + ("PushTPixels-v0", "pixels"), + ("PushTPixels-v0", "pixels_agent_pos"), ], ) -def test_pusht(from_pixels, pixels_only): - env = PushtEnv( - from_pixels=from_pixels, - pixels_only=pixels_only, - image_size=96 if from_pixels else None, - ) - # print_spec_rollout(env) - check_env_specs(env) +def test_pusht(env_task, obs_type): + import gym_pusht # noqa: F401 + env = gym.make(f"gym_pusht/{env_task}", obs_type=obs_type) + check_env(env) @pytest.mark.parametrize( "env_name", [ - "simxarm", "pusht", - "aloha", + "simxarm", + # "aloha", ], ) def test_factory(env_name): @@ -118,15 +109,12 @@ def test_factory(env_name): dataset = make_dataset(cfg) env = make_env(cfg) + obs, info = env.reset() + obs = {key: obs[key][None, ...] 
for key in obs} + obs = preprocess_observation(obs, transform=dataset.transform) for key in dataset.image_keys: - assert env.reset().get(key).dtype == torch.uint8 - check_env_specs(env) - - env = make_env(cfg, transform=dataset.transform) - for key in dataset.image_keys: - img = env.reset().get(key) + img = obs[key] assert img.dtype == torch.float32 # TODO(rcadene): we assume for now that image normalization takes place in the model assert img.max() <= 1.0 assert img.min() >= 0.0 - check_env_specs(env) From 1e71196fe3d45ab973d0f612f6b9aa3800af40fb Mon Sep 17 00:00:00 2001 From: Alexander Soare Date: Fri, 5 Apr 2024 17:38:29 +0100 Subject: [PATCH 17/80] backup wip --- lerobot/common/datasets/aloha.py | 8 +- lerobot/common/datasets/factory.py | 179 +++++----- lerobot/common/policies/act/policy.py | 362 +++++++++----------- lerobot/common/policies/diffusion/policy.py | 1 - lerobot/scripts/train.py | 4 +- poetry.lock | 49 ++- pyproject.toml | 1 + 7 files changed, 306 insertions(+), 298 deletions(-) diff --git a/lerobot/common/datasets/aloha.py b/lerobot/common/datasets/aloha.py index 102de08e..4c0795dd 100644 --- a/lerobot/common/datasets/aloha.py +++ b/lerobot/common/datasets/aloha.py @@ -158,7 +158,7 @@ class AlohaDataset(torch.utils.data.Dataset): self.data_ids_per_episode = {} ep_dicts = [] - logging.info("Initialize and feed offline buffer") + frame_idx = 0 for ep_id in tqdm.tqdm(range(NUM_EPISODES[self.dataset_id])): ep_path = raw_dir / f"episode_{ep_id}.hdf5" with h5py.File(ep_path, "r") as ep: @@ -190,8 +190,14 @@ class AlohaDataset(torch.utils.data.Dataset): ep_dict[f"observation.images.{cam}"] = image[:-1] # ep_dict[f"next.observation.images.{cam}"] = image[1:] + assert isinstance(ep_id, int) + self.data_ids_per_episode[ep_id] = torch.arange(frame_idx, frame_idx + num_frames, 1) + assert len(self.data_ids_per_episode[ep_id]) == num_frames + ep_dicts.append(ep_dict) + frame_idx += num_frames + self.data_dict = {} keys = ep_dicts[0].keys() diff --git 
a/lerobot/common/datasets/factory.py b/lerobot/common/datasets/factory.py index 49170098..0217583a 100644 --- a/lerobot/common/datasets/factory.py +++ b/lerobot/common/datasets/factory.py @@ -59,96 +59,95 @@ def make_dataset( transform=Prod(in_keys=clsfunc.image_keys, prod=1 / 255.0), ) stats = compute_or_load_stats(stats_dataset) - # TODO(rcadene): remove this and put it in config. Ideally we want to reproduce SOTA results just with mean_std normalization_mode = "mean_std" if cfg.env.name == "aloha" else "min_max" - # TODO(now): These stats are needed to use their pretrained model for sim_transfer_cube_human. - # (Pdb) stats['observation']['state']['mean'] - # tensor([-0.0071, -0.6293, 1.0351, -0.0517, -0.4642, -0.0754, 0.4751, -0.0373, - # -0.3324, 0.9034, -0.2258, -0.3127, -0.2412, 0.6866]) - stats["observation", "state", "mean"] = torch.tensor( - [ - -0.00740268, - -0.63187766, - 1.0356655, - -0.05027218, - -0.46199223, - -0.07467502, - 0.47467607, - -0.03615446, - -0.33203387, - 0.9038929, - -0.22060776, - -0.31011587, - -0.23484458, - 0.6842416, - ] - ) - # (Pdb) stats['observation']['state']['std'] - # tensor([0.0022, 0.0520, 0.0291, 0.0092, 0.0267, 0.0145, 0.0563, 0.0179, 0.0494, - # 0.0326, 0.0476, 0.0535, 0.0956, 0.0513]) - stats["observation", "state", "std"] = torch.tensor( - [ - 0.01219023, - 0.2975381, - 0.16728032, - 0.04733803, - 0.1486037, - 0.08788499, - 0.31752336, - 0.1049916, - 0.27933604, - 0.18094037, - 0.26604933, - 0.30466506, - 0.5298686, - 0.25505227, - ] - ) - # (Pdb) stats['action']['mean'] - # tensor([-0.0075, -0.6346, 1.0353, -0.0465, -0.4686, -0.0738, 0.3723, -0.0396, - # -0.3184, 0.8991, -0.2065, -0.3182, -0.2338, 0.5593]) - stats["action"]["mean"] = torch.tensor( - [ - -0.00756444, - -0.6281845, - 1.0312834, - -0.04664314, - -0.47211358, - -0.074527, - 0.37389806, - -0.03718753, - -0.3261143, - 0.8997205, - -0.21371077, - -0.31840396, - -0.23360962, - 0.551947, - ] - ) - # (Pdb) stats['action']['std'] - # tensor([0.0023, 0.0514, 
0.0290, 0.0086, 0.0263, 0.0143, 0.0593, 0.0185, 0.0510, - # 0.0328, 0.0478, 0.0531, 0.0945, 0.0794]) - stats["action"]["std"] = torch.tensor( - [ - 0.01252818, - 0.2957442, - 0.16701928, - 0.04584508, - 0.14833844, - 0.08763024, - 0.30665937, - 0.10600077, - 0.27572668, - 0.1805853, - 0.26304692, - 0.30708534, - 0.5305411, - 0.38381037, - ] - ) - transforms.append(NormalizeTransform(stats, in_keys, mode=normalization_mode)) # noqa: F821 + # # TODO(now): These stats are needed to use their pretrained model for sim_transfer_cube_human. + # # (Pdb) stats['observation']['state']['mean'] + # # tensor([-0.0071, -0.6293, 1.0351, -0.0517, -0.4642, -0.0754, 0.4751, -0.0373, + # # -0.3324, 0.9034, -0.2258, -0.3127, -0.2412, 0.6866]) + # stats["observation", "state", "mean"] = torch.tensor( + # [ + # -0.00740268, + # -0.63187766, + # 1.0356655, + # -0.05027218, + # -0.46199223, + # -0.07467502, + # 0.47467607, + # -0.03615446, + # -0.33203387, + # 0.9038929, + # -0.22060776, + # -0.31011587, + # -0.23484458, + # 0.6842416, + # ] + # ) + # # (Pdb) stats['observation']['state']['std'] + # # tensor([0.0022, 0.0520, 0.0291, 0.0092, 0.0267, 0.0145, 0.0563, 0.0179, 0.0494, + # # 0.0326, 0.0476, 0.0535, 0.0956, 0.0513]) + # stats["observation", "state", "std"] = torch.tensor( + # [ + # 0.01219023, + # 0.2975381, + # 0.16728032, + # 0.04733803, + # 0.1486037, + # 0.08788499, + # 0.31752336, + # 0.1049916, + # 0.27933604, + # 0.18094037, + # 0.26604933, + # 0.30466506, + # 0.5298686, + # 0.25505227, + # ] + # ) + # # (Pdb) stats['action']['mean'] + # # tensor([-0.0075, -0.6346, 1.0353, -0.0465, -0.4686, -0.0738, 0.3723, -0.0396, + # # -0.3184, 0.8991, -0.2065, -0.3182, -0.2338, 0.5593]) + # stats["action"]["mean"] = torch.tensor( + # [ + # -0.00756444, + # -0.6281845, + # 1.0312834, + # -0.04664314, + # -0.47211358, + # -0.074527, + # 0.37389806, + # -0.03718753, + # -0.3261143, + # 0.8997205, + # -0.21371077, + # -0.31840396, + # -0.23360962, + # 0.551947, + # ] + # ) + # # (Pdb) 
stats['action']['std'] + # # tensor([0.0023, 0.0514, 0.0290, 0.0086, 0.0263, 0.0143, 0.0593, 0.0185, 0.0510, + # # 0.0328, 0.0478, 0.0531, 0.0945, 0.0794]) + # stats["action"]["std"] = torch.tensor( + # [ + # 0.01252818, + # 0.2957442, + # 0.16701928, + # 0.04584508, + # 0.14833844, + # 0.08763024, + # 0.30665937, + # 0.10600077, + # 0.27572668, + # 0.1805853, + # 0.26304692, + # 0.30708534, + # 0.5305411, + # 0.38381037, + # ] + # ) + # transforms.append(NormalizeTransform(stats, in_keys, mode=normalization_mode)) # noqa: F821 transforms = v2.Compose( [ @@ -173,7 +172,11 @@ def make_dataset( "action": [-0.1] + [i / clsfunc.fps for i in range(15)], } else: - delta_timestamps = None + delta_timestamps = { + "observation.images.top": [0], + "observation.state": [0], + "action": [i / clsfunc.fps for i in range(cfg.policy.horizon)], + } dataset = clsfunc( dataset_id=cfg.dataset_id, diff --git a/lerobot/common/policies/act/policy.py b/lerobot/common/policies/act/policy.py index 5071c09a..1aacc41d 100644 --- a/lerobot/common/policies/act/policy.py +++ b/lerobot/common/policies/act/policy.py @@ -19,11 +19,10 @@ from torch import Tensor, nn from torchvision.models._utils import IntermediateLayerGetter from torchvision.ops.misc import FrozenBatchNorm2d -from lerobot.common.policies.abstract import AbstractPolicy from lerobot.common.utils import get_safe_torch_device -class ActionChunkingTransformerPolicy(AbstractPolicy): +class ActionChunkingTransformerPolicy(nn.Module): """ Action Chunking Transformer Policy as per Learning Fine-Grained Bimanual Manipulation with Low-Cost Hardware (paper: https://arxiv.org/abs/2304.13705, code: https://github.com/tonyzhaozh/act) @@ -61,205 +60,20 @@ class ActionChunkingTransformerPolicy(AbstractPolicy): """ name = "act" + _multiple_obs_steps_not_handled_msg = ( + "ActionChunkingTransformerPolicy does not handle multiple observation steps." 
+ ) def __init__(self, cfg, device, n_action_steps=1): """ TODO(alexander-soare): Add documentation for all parameters. """ - super().__init__(n_action_steps) + super().__init__() + if getattr(cfg, "n_obs_steps", 1) != 1: + raise ValueError(self._multiple_obs_steps_not_handled_msg) self.cfg = cfg self.n_action_steps = n_action_steps self.device = get_safe_torch_device(device) - - self.model = _ActionChunkingTransformer(cfg) - self._create_optimizer() - self.to(self.device) - - def _create_optimizer(self): - optimizer_params_dicts = [ - { - "params": [ - p - for n, p in self.model.named_parameters() - if not n.startswith("backbone") and p.requires_grad - ] - }, - { - "params": [ - p - for n, p in self.model.named_parameters() - if n.startswith("backbone") and p.requires_grad - ], - "lr": self.cfg.lr_backbone, - }, - ] - self.optimizer = torch.optim.AdamW( - optimizer_params_dicts, lr=self.cfg.lr, weight_decay=self.cfg.weight_decay - ) - - def update(self, replay_buffer, step): - del step - - self.train() - - num_slices = self.cfg.batch_size - batch_size = self.cfg.horizon * num_slices - - assert batch_size % self.cfg.horizon == 0 - assert batch_size % num_slices == 0 - - def process_batch(batch, horizon, num_slices): - # trajectory t = 64, horizon h = 16 - # (t h) ... -> t h ... 
- batch = batch.reshape(num_slices, horizon) - - image = batch["observation", "image", "top"] - image = image[:, 0] # first observation t=0 - # batch, num_cam, channel, height, width - image = image.unsqueeze(1) - assert image.ndim == 5 - image = image.float() - - state = batch["observation", "state"] - state = state[:, 0] # first observation t=0 - # batch, qpos_dim - assert state.ndim == 2 - - action = batch["action"] - # batch, seq, action_dim - assert action.ndim == 3 - assert action.shape[1] == horizon - - if self.cfg.n_obs_steps > 1: - raise NotImplementedError() - # # keep first n observations of the slice corresponding to t=[-1,0] - # image = image[:, : self.cfg.n_obs_steps] - # state = state[:, : self.cfg.n_obs_steps] - - out = { - "obs": { - "image": image.to(self.device, non_blocking=True), - "agent_pos": state.to(self.device, non_blocking=True), - }, - "action": action.to(self.device, non_blocking=True), - } - return out - - start_time = time.time() - - batch = replay_buffer.sample(batch_size) - batch = process_batch(batch, self.cfg.horizon, num_slices) - - data_s = time.time() - start_time - - loss = self.compute_loss(batch) - loss.backward() - - grad_norm = torch.nn.utils.clip_grad_norm_( - self.model.parameters(), - self.cfg.grad_clip_norm, - error_if_nonfinite=False, - ) - - self.optimizer.step() - self.optimizer.zero_grad() - - info = { - "loss": loss.item(), - "grad_norm": float(grad_norm), - "lr": self.cfg.lr, - "data_s": data_s, - "update_s": time.time() - start_time, - } - - return info - - def save(self, fp): - torch.save(self.state_dict(), fp) - - def load(self, fp): - d = torch.load(fp) - self.load_state_dict(d) - - def compute_loss(self, batch): - loss_dict = self._forward( - qpos=batch["obs"]["agent_pos"], - image=batch["obs"]["image"], - actions=batch["action"], - ) - loss = loss_dict["loss"] - return loss - - @torch.no_grad() - def select_actions(self, observation, step_count): - # TODO(rcadene): remove unused step_count - del step_count 
- - self.eval() - - # TODO(rcadene): remove hack - # add 1 camera dimension - observation["image", "top"] = observation["image", "top"].unsqueeze(1) - - obs_dict = { - "image": observation["image", "top"], - "agent_pos": observation["state"], - } - action = self._forward(qpos=obs_dict["agent_pos"] * 0.182, image=obs_dict["image"]) - - if self.cfg.temporal_agg: - # TODO(rcadene): implement temporal aggregation - raise NotImplementedError() - # all_time_actions[[t], t:t+num_queries] = action - # actions_for_curr_step = all_time_actions[:, t] - # actions_populated = torch.all(actions_for_curr_step != 0, axis=1) - # actions_for_curr_step = actions_for_curr_step[actions_populated] - # k = 0.01 - # exp_weights = np.exp(-k * np.arange(len(actions_for_curr_step))) - # exp_weights = exp_weights / exp_weights.sum() - # exp_weights = torch.from_numpy(exp_weights).cuda().unsqueeze(dim=1) - # raw_action = (actions_for_curr_step * exp_weights).sum(dim=0, keepdim=True) - - # take first predicted action or n first actions - action = action[: self.n_action_steps] - return action - - def _forward(self, qpos, image, actions=None): - normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) - image = normalize(image) - - is_training = actions is not None - if is_training: # training time - actions = actions[:, : self.model.horizon] - - a_hat, (mu, log_sigma_x2) = self.model(qpos, image, actions) - - all_l1 = F.l1_loss(actions, a_hat, reduction="none") - l1 = all_l1.mean() - - loss_dict = {} - loss_dict["l1"] = l1 - if self.cfg.use_vae: - # Calculate Dₖₗ(latent_pdf || standard_normal). Note: After computing the KL-divergence for - # each dimension independently, we sum over the latent dimension to get the total - # KL-divergence per batch element, then take the mean over the batch. - # (See App. B of https://arxiv.org/abs/1312.6114 for more details). 
- mean_kld = (-0.5 * (1 + log_sigma_x2 - mu.pow(2) - (log_sigma_x2).exp())).sum(-1).mean() - loss_dict["kl"] = mean_kld - loss_dict["loss"] = loss_dict["l1"] + loss_dict["kl"] * self.cfg.kl_weight - else: - loss_dict["loss"] = loss_dict["l1"] - return loss_dict - else: - action, _ = self.model(qpos, image) # no action, sample from prior - return action - - -# TODO(alexander-soare) move all this code into the policy when we have the policy API established. -class _ActionChunkingTransformer(nn.Module): - def __init__(self, cfg): - super().__init__() - self.camera_names = cfg.camera_names self.use_vae = cfg.use_vae self.horizon = cfg.horizon @@ -326,26 +140,179 @@ class _ActionChunkingTransformer(nn.Module): self._reset_parameters() + self._create_optimizer() + self.to(self.device) + + def _create_optimizer(self): + optimizer_params_dicts = [ + { + "params": [ + p for n, p in self.named_parameters() if not n.startswith("backbone") and p.requires_grad + ] + }, + { + "params": [ + p for n, p in self.named_parameters() if n.startswith("backbone") and p.requires_grad + ], + "lr": self.cfg.lr_backbone, + }, + ] + self.optimizer = torch.optim.AdamW( + optimizer_params_dicts, lr=self.cfg.lr, weight_decay=self.cfg.weight_decay + ) + def _reset_parameters(self): """Xavier-uniform initialization of the transformer parameters as in the original code.""" for p in chain(self.encoder.parameters(), self.decoder.parameters()): if p.dim() > 1: nn.init.xavier_uniform_(p) + @torch.no_grad() + def select_actions(self, observation, step_count): + # TODO(rcadene): remove unused step_count + del step_count + + self.eval() + + # TODO(rcadene): remove hack + # add 1 camera dimension + observation["image", "top"] = observation["image", "top"].unsqueeze(1) + + obs_dict = { + "image": observation["image", "top"], + "agent_pos": observation["state"], + } + action = self._forward(qpos=obs_dict["agent_pos"] * 0.182, image=obs_dict["image"]) + + if self.cfg.temporal_agg: + # TODO(rcadene): implement 
temporal aggregation + raise NotImplementedError() + # all_time_actions[[t], t:t+num_queries] = action + # actions_for_curr_step = all_time_actions[:, t] + # actions_populated = torch.all(actions_for_curr_step != 0, axis=1) + # actions_for_curr_step = actions_for_curr_step[actions_populated] + # k = 0.01 + # exp_weights = np.exp(-k * np.arange(len(actions_for_curr_step))) + # exp_weights = exp_weights / exp_weights.sum() + # exp_weights = torch.from_numpy(exp_weights).cuda().unsqueeze(dim=1) + # raw_action = (actions_for_curr_step * exp_weights).sum(dim=0, keepdim=True) + + # take first predicted action or n first actions + action = action[: self.n_action_steps] + return action + + def __call__(self, *args, **kwargs): + # TODO(now): Temporary bridge. + return self.update(*args, **kwargs) + + def _preprocess_batch(self, batch: dict[str, Tensor]) -> dict[str, Tensor]: + """ + Expects batch to have (at least): + { + "observation.state": (B, 1, J) tensor of robot states (joint configuration) + + "observation.images.top": (B, 1, C, H, W) tensor of images. + "action": (B, H, J) tensor of actions (positional target for robot joint configuration) + "action_is_pad": (B, H) mask for whether the actions are padding outside of the episode bounds. + } + """ + if batch["observation.state"].shape[1] != 1: + raise ValueError(self._multiple_obs_steps_not_handled_msg) + batch["observation.state"] = batch["observation.state"].squeeze(1) + # TODO(alexander-soare): generalize this to multiple images. Note: no squeeze is required for + # "observation.images.top" because then we'd have to unsqueeze to get get the image index dimension. 
+ + def update(self, batch, *_): + start_time = time.time() + self._preprocess_batch(batch) + + self.train() + + num_slices = self.cfg.batch_size + batch_size = self.cfg.horizon * num_slices + + assert batch_size % self.cfg.horizon == 0 + assert batch_size % num_slices == 0 + + loss = self.compute_loss(batch) + loss.backward() + + grad_norm = torch.nn.utils.clip_grad_norm_( + self.parameters(), + self.cfg.grad_clip_norm, + error_if_nonfinite=False, + ) + + self.optimizer.step() + self.optimizer.zero_grad() + + info = { + "loss": loss.item(), + "grad_norm": float(grad_norm), + "lr": self.cfg.lr, + "update_s": time.time() - start_time, + } + + return info + + def compute_loss(self, batch): + loss_dict = self.forward( + robot_state=batch["observation.state"], + image=batch["observation.images.top"], + actions=batch["action"], + ) + loss = loss_dict["loss"] + return loss + def forward(self, robot_state: Tensor, image: Tensor, actions: Tensor | None = None): + # TODO(now): Maybe this shouldn't be here? + normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) + image = normalize(image) + + is_training = actions is not None + if is_training: # training time + actions = actions[:, : self.horizon] + + a_hat, (mu, log_sigma_x2) = self._forward(robot_state, image, actions) + + all_l1 = F.l1_loss(actions, a_hat, reduction="none") + l1 = all_l1.mean() + + loss_dict = {} + loss_dict["l1"] = l1 + if self.cfg.use_vae: + # Calculate Dₖₗ(latent_pdf || standard_normal). Note: After computing the KL-divergence for + # each dimension independently, we sum over the latent dimension to get the total + # KL-divergence per batch element, then take the mean over the batch. + # (See App. B of https://arxiv.org/abs/1312.6114 for more details). 
+ mean_kld = (-0.5 * (1 + log_sigma_x2 - mu.pow(2) - (log_sigma_x2).exp())).sum(-1).mean() + loss_dict["kl"] = mean_kld + loss_dict["loss"] = loss_dict["l1"] + loss_dict["kl"] * self.cfg.kl_weight + else: + loss_dict["loss"] = loss_dict["l1"] + return loss_dict + else: + action, _ = self._forward(robot_state, image) # no action, sample from prior + return action + + def _forward(self, robot_state: Tensor, image: Tensor, actions: Tensor | None = None): """ Args: robot_state: (B, J) batch of robot joint configurations. image: (B, N, C, H, W) batch of N camera frames. actions: (B, S, A) batch of actions from the target dataset which must be provided if the VAE is enabled and the model is in training mode. + Returns: + (B, S, A) batch of action sequences + Tuple containing the latent PDF's parameters (mean, log(σ²)) both as (B, L) tensors where L is the + latent dimension. """ if self.use_vae and self.training: assert ( actions is not None ), "actions must be provided when using the variational objective in training mode." - batch_size, _ = robot_state.shape + batch_size = robot_state.shape[0] # Prepare the latent for input to the transformer encoder. 
if self.use_vae and actions is not None: @@ -428,6 +395,13 @@ class _ActionChunkingTransformer(nn.Module): return actions, [mu, log_sigma_x2] + def save(self, fp): + torch.save(self.state_dict(), fp) + + def load(self, fp): + d = torch.load(fp) + self.load_state_dict(d) + class _TransformerEncoder(nn.Module): """Convenience module for running multiple encoder layers, maybe followed by normalization.""" diff --git a/lerobot/common/policies/diffusion/policy.py b/lerobot/common/policies/diffusion/policy.py index a4f4a450..93e5ba5d 100644 --- a/lerobot/common/policies/diffusion/policy.py +++ b/lerobot/common/policies/diffusion/policy.py @@ -152,7 +152,6 @@ class DiffusionPolicy(nn.Module): self.diffusion.train() data_s = time.time() - start_time - loss = self.diffusion.compute_loss(batch) loss.backward() diff --git a/lerobot/scripts/train.py b/lerobot/scripts/train.py index 631ecc93..d49dfff8 100644 --- a/lerobot/scripts/train.py +++ b/lerobot/scripts/train.py @@ -41,7 +41,6 @@ def log_train_info(logger, info, step, cfg, dataset, is_offline): loss = info["loss"] grad_norm = info["grad_norm"] lr = info["lr"] - data_s = info["data_s"] update_s = info["update_s"] # A sample is an (observation,action) pair, where observation and action @@ -62,7 +61,6 @@ def log_train_info(logger, info, step, cfg, dataset, is_offline): f"grdn:{grad_norm:.3f}", f"lr:{lr:0.1e}", # in seconds - f"data_s:{data_s:.3f}", f"updt_s:{update_s:.3f}", ] logging.info(" ".join(log_items)) @@ -200,7 +198,7 @@ def train(cfg: dict, out_dir=None, job_name=None): is_offline = True dataloader = torch.utils.data.DataLoader( dataset, - num_workers=4, + num_workers=0, batch_size=cfg.policy.batch_size, shuffle=True, pin_memory=cfg.device != "cpu", diff --git a/poetry.lock b/poetry.lock index 0cbf9318..b8c6c638 100644 --- a/poetry.lock +++ b/poetry.lock @@ -880,6 +880,29 @@ files = [ [package.extras] protobuf = ["grpcio-tools (>=1.62.1)"] +[[package]] +name = "gym-pusht" +version = "0.1.0" +description = "PushT 
environment for LeRobot" +optional = true +python-versions = "^3.10" +files = [] +develop = false + +[package.dependencies] +gymnasium = "^0.29.1" +opencv-python = "^4.9.0.80" +pygame = "^2.5.2" +pymunk = "^6.6.0" +scikit-image = "^0.22.0" +shapely = "^2.0.3" + +[package.source] +type = "git" +url = "git@github.com:huggingface/gym-pusht.git" +reference = "HEAD" +resolved_reference = "0fe4449cca5a2b08f529f7a07fbf5b9df24962ec" + [[package]] name = "gymnasium" version = "0.29.1" @@ -1261,17 +1284,21 @@ setuptools = "!=50.0.0" [[package]] name = "lazy-loader" -version = "0.3" -description = "lazy_loader" +version = "0.4" +description = "Makes it easy to load subpackages and functions on demand." optional = false python-versions = ">=3.7" files = [ - {file = "lazy_loader-0.3-py3-none-any.whl", hash = "sha256:1e9e76ee8631e264c62ce10006718e80b2cfc74340d17d1031e0f84af7478554"}, - {file = "lazy_loader-0.3.tar.gz", hash = "sha256:3b68898e34f5b2a29daaaac172c6555512d0f32074f147e2254e4a6d9d838f37"}, + {file = "lazy_loader-0.4-py3-none-any.whl", hash = "sha256:342aa8e14d543a154047afb4ba8ef17f5563baad3fc610d7b15b213b0f119efc"}, + {file = "lazy_loader-0.4.tar.gz", hash = "sha256:47c75182589b91a4e1a85a136c074285a5ad4d9f39c63e0d7fb76391c4574cd1"}, ] +[package.dependencies] +packaging = "*" + [package.extras] -lint = ["pre-commit (>=3.3)"] +dev = ["changelist (==0.5)"] +lint = ["pre-commit (==3.7.0)"] test = ["pytest (>=7.4)", "pytest-cov (>=4.1)"] [[package]] @@ -3274,7 +3301,7 @@ protobuf = ">=3.20" [[package]] name = "tensordict" -version = "0.4.0+b4c91e8" +version = "0.4.0+f622b2f" description = "" optional = false python-versions = "*" @@ -3518,13 +3545,13 @@ tutorials = ["matplotlib", "pandas", "tabulate", "torch"] [[package]] name = "typing-extensions" -version = "4.10.0" +version = "4.11.0" description = "Backported and Experimental Type Hints for Python 3.8+" optional = false python-versions = ">=3.8" files = [ - {file = "typing_extensions-4.10.0-py3-none-any.whl", hash = 
"sha256:69b1a937c3a517342112fb4c6df7e72fc39a38e7891a5730ed4985b5214b5475"}, - {file = "typing_extensions-4.10.0.tar.gz", hash = "sha256:b0abd7c89e8fb96f98db18d86106ff1d90ab692004eb746cf6eda2682f91b3cb"}, + {file = "typing_extensions-4.11.0-py3-none-any.whl", hash = "sha256:c1f94d72897edaf4ce775bb7558d5b79d8126906a14ea5ed1635921406c0387a"}, + {file = "typing_extensions-4.11.0.tar.gz", hash = "sha256:83f085bd5ca59c80295fc2a82ab5dac679cbe02b9f33f7d83af68e241bea51b0"}, ] [[package]] @@ -3667,9 +3694,9 @@ docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.link testing = ["big-O", "jaraco.functools", "jaraco.itertools", "more-itertools", "pytest (>=6)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-ignore-flaky", "pytest-mypy", "pytest-ruff (>=0.2.1)"] [extras] -pusht = [] +pusht = ["gym_pusht"] [metadata] lock-version = "2.0" python-versions = "^3.10" -content-hash = "04b17fa57f189ad63181611d2e724d7fbdfb3485bc1a587b259d0a3751db918d" +content-hash = "3eee17e4bf2b7a570f41ef9c400ec5a24a3113f62a13162229cf43504ca0d005" diff --git a/pyproject.toml b/pyproject.toml index f0869158..a7d2dd65 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -52,6 +52,7 @@ robomimic = "0.2.0" gymnasium-robotics = "^1.2.4" gymnasium = "^0.29.1" cmake = "^3.29.0.1" +gym_pusht = { git = "git@github.com:huggingface/gym-pusht.git", optional = true} [tool.poetry.extras] pusht = ["gym_pusht"] From 29032fbcd3309395a2a81289a2b23b900ca93d1c Mon Sep 17 00:00:00 2001 From: Cadene Date: Fri, 5 Apr 2024 17:17:14 +0000 Subject: [PATCH 18/80] wrap dm_control aloha into gymnasium (TODO: properly seeding the env) --- lerobot/common/envs/aloha/env.py | 303 +++++++++---------------------- lerobot/common/envs/factory.py | 7 + tests/test_envs.py | 26 ++- 3 files changed, 104 insertions(+), 232 deletions(-) diff --git a/lerobot/common/envs/aloha/env.py b/lerobot/common/envs/aloha/env.py index 8f907650..719c2d19 100644 --- a/lerobot/common/envs/aloha/env.py +++ 
b/lerobot/common/envs/aloha/env.py @@ -1,22 +1,9 @@ -import importlib -import logging -from collections import deque -from typing import Optional - -import einops +import gymnasium as gym import numpy as np -import torch from dm_control import mujoco from dm_control.rl import control -from tensordict import TensorDict -from torchrl.data.tensor_specs import ( - BoundedTensorSpec, - CompositeSpec, - DiscreteTensorSpec, - UnboundedContinuousTensorSpec, -) +from gymnasium import spaces -from lerobot.common.envs.abstract import AbstractEnv from lerobot.common.envs.aloha.constants import ( ACTIONS, ASSETS_DIR, @@ -31,49 +18,67 @@ from lerobot.common.envs.aloha.tasks.sim_end_effector import ( from lerobot.common.envs.aloha.utils import sample_box_pose, sample_insertion_pose from lerobot.common.utils import set_global_seed -_has_gym = importlib.util.find_spec("gymnasium") is not None - -class AlohaEnv(AbstractEnv): - name = "aloha" - available_tasks = ["sim_insertion", "sim_transfer_cube"] - _reset_warning_issued = False +class AlohaEnv(gym.Env): + metadata = {"render_modes": [], "render_fps": 50} def __init__( self, task, - frame_skip: int = 1, - from_pixels: bool = False, - pixels_only: bool = False, - image_size=None, - seed=1337, - device="cpu", - num_prev_obs=1, - num_prev_action=0, + obs_type="state", + observation_width=640, + observation_height=480, + visualization_width=640, + visualization_height=480, ): - super().__init__( - task=task, - frame_skip=frame_skip, - from_pixels=from_pixels, - pixels_only=pixels_only, - image_size=image_size, - seed=seed, - device=device, - num_prev_obs=num_prev_obs, - num_prev_action=num_prev_action, - ) - - def _make_env(self): - if not _has_gym: - raise ImportError("Cannot import gymnasium.") - - if not self.from_pixels: - raise NotImplementedError() + super().__init__() + self.task = task + self.obs_type = obs_type + self.observation_width = observation_width + self.observation_height = observation_height + 
self.visualization_width = visualization_width + self.visualization_height = visualization_height self._env = self._make_env_task(self.task) - def render(self, mode="rgb_array", width=640, height=480): + if self.obs_type == "state": + raise NotImplementedError() + self.observation_space = spaces.Box( + low=np.array([0] * len(JOINTS)), # ??? + high=np.array([255] * len(JOINTS)), # ??? + dtype=np.float64, + ) + elif self.obs_type == "pixels": + self.observation_space = spaces.Box( + low=0, high=255, shape=(self.observation_height, self.observation_width, 3), dtype=np.uint8 + ) + elif self.obs_type == "pixels_agent_pos": + self.observation_space = spaces.Dict( + { + "pixels": spaces.Box( + low=0, + high=255, + shape=(self.observation_height, self.observation_width, 3), + dtype=np.uint8, + ), + "agent_pos": spaces.Box( + low=np.array([-1] * len(JOINTS)), # ??? + high=np.array([1] * len(JOINTS)), # ??? + dtype=np.float64, + ), + } + ) + + self.action_space = spaces.Box(low=-1, high=1, shape=(len(ACTIONS),), dtype=np.float32) + + def render(self, mode="rgb_array"): # TODO(rcadene): render and visualizer several cameras (e.g. 
angle, front_close) + if mode in ["visualize", "human"]: + height, width = self.visualize_height, self.visualize_width + elif mode == "rgb_array": + height, width = self.observation_height, self.observation_width + else: + raise ValueError(mode) image = self._env.physics.render(height=height, width=width, camera_id="top") return image @@ -81,20 +86,20 @@ class AlohaEnv(AbstractEnv): # time limit is controlled by StepCounter in env factory time_limit = float("inf") - if "sim_transfer_cube" in task_name: + if "transfer_cube" in task_name: xml_path = ASSETS_DIR / "bimanual_viperx_transfer_cube.xml" physics = mujoco.Physics.from_xml_path(str(xml_path)) task = TransferCubeTask(random=False) - elif "sim_insertion" in task_name: + elif "insertion" in task_name: xml_path = ASSETS_DIR / "bimanual_viperx_insertion.xml" physics = mujoco.Physics.from_xml_path(str(xml_path)) task = InsertionTask(random=False) - elif "sim_end_effector_transfer_cube" in task_name: + elif "end_effector_transfer_cube" in task_name: raise NotImplementedError() xml_path = ASSETS_DIR / "bimanual_viperx_end_effector_transfer_cube.xml" physics = mujoco.Physics.from_xml_path(str(xml_path)) task = TransferCubeEndEffectorTask(random=False) - elif "sim_end_effector_insertion" in task_name: + elif "end_effector_insertion" in task_name: raise NotImplementedError() xml_path = ASSETS_DIR / "bimanual_viperx_end_effector_insertion.xml" physics = mujoco.Physics.from_xml_path(str(xml_path)) @@ -108,191 +113,55 @@ class AlohaEnv(AbstractEnv): return env def _format_raw_obs(self, raw_obs): - if self.from_pixels: - image = torch.from_numpy(raw_obs["images"]["top"].copy()) - image = einops.rearrange(image, "h w c -> c h w") - assert image.dtype == torch.uint8 - obs = {"image": {"top": image}} - - if not self.pixels_only: - obs["state"] = torch.from_numpy(raw_obs["qpos"]).type(torch.float32) - else: - # TODO(rcadene): + if self.obs_type == "state": raise NotImplementedError() - # obs = {"state": 
torch.from_numpy(raw_obs["observation"]).type(torch.float32)} - + elif self.obs_type == "pixels": + obs = raw_obs["images"]["top"].copy() + elif self.obs_type == "pixels_agent_pos": + obs = { + "pixels": raw_obs["images"]["top"].copy(), + "agent_pos": raw_obs["qpos"], + } return obs - def _reset(self, tensordict: Optional[TensorDict] = None): - if tensordict is not None and not AlohaEnv._reset_warning_issued: - logging.warning(f"{self.__class__.__name__}._reset ignores the provided tensordict.") - AlohaEnv._reset_warning_issued = True + def reset(self, seed=None, options=None): + super().reset(seed=seed) - # Seed the environment and update the seed to be used for the next reset. - self._next_seed = self.set_seed(self._next_seed) + # TODO(rcadene): how to seed the env? + if seed is not None: + set_global_seed(seed) + self._env.task.random.seed(seed) # TODO(rcadene): do not use global variable for this - if "sim_transfer_cube" in self.task: + if "transfer_cube" in self.task: BOX_POSE[0] = sample_box_pose() # used in sim reset - elif "sim_insertion" in self.task: + elif "insertion" in self.task: BOX_POSE[0] = np.concatenate(sample_insertion_pose()) # used in sim reset + else: + raise ValueError(self.task) raw_obs = self._env.reset() - obs = self._format_raw_obs(raw_obs.observation) + observation = self._format_raw_obs(raw_obs.observation) - if self.num_prev_obs > 0: - stacked_obs = {} - if "image" in obs: - self._prev_obs_image_queue = deque( - [obs["image"]["top"]] * (self.num_prev_obs + 1), maxlen=(self.num_prev_obs + 1) - ) - stacked_obs["image"] = {"top": torch.stack(list(self._prev_obs_image_queue))} - if "state" in obs: - self._prev_obs_state_queue = deque( - [obs["state"]] * (self.num_prev_obs + 1), maxlen=(self.num_prev_obs + 1) - ) - stacked_obs["state"] = torch.stack(list(self._prev_obs_state_queue)) - obs = stacked_obs + info = {"is_success": False} + return observation, info - td = TensorDict( - { - "observation": TensorDict(obs, batch_size=[]), - "done": 
torch.tensor([False], dtype=torch.bool), - }, - batch_size=[], - ) - - return td - - def _step(self, tensordict: TensorDict): - td = tensordict - action = td["action"].numpy() + def step(self, action): assert action.ndim == 1 # TODO(rcadene): add info["is_success"] and info["success"] ? _, reward, _, raw_obs = self._env.step(action) # TODO(rcadene): add an enum - success = done = reward == 4 - obs = self._format_raw_obs(raw_obs) + terminated = is_success = reward == 4 - if self.num_prev_obs > 0: - stacked_obs = {} - if "image" in obs: - self._prev_obs_image_queue.append(obs["image"]["top"]) - stacked_obs["image"] = {"top": torch.stack(list(self._prev_obs_image_queue))} - if "state" in obs: - self._prev_obs_state_queue.append(obs["state"]) - stacked_obs["state"] = torch.stack(list(self._prev_obs_state_queue)) - obs = stacked_obs + info = {"is_success": is_success} - td = TensorDict( - { - "observation": TensorDict(obs, batch_size=[]), - "reward": torch.tensor([reward], dtype=torch.float32), - # success and done are true when coverage > self.success_threshold in env - "done": torch.tensor([done], dtype=torch.bool), - "success": torch.tensor([success], dtype=torch.bool), - }, - batch_size=[], - ) - return td + observation = self._format_raw_obs(raw_obs) - def _make_spec(self): - obs = {} - from omegaconf import OmegaConf + truncated = False + return observation, reward, terminated, truncated, info - if self.from_pixels: - if isinstance(self.image_size, int): - image_shape = (3, self.image_size, self.image_size) - elif OmegaConf.is_list(self.image_size) or isinstance(self.image_size, list): - assert len(self.image_size) == 3 # c h w - assert self.image_size[0] == 3 # c is RGB - image_shape = tuple(self.image_size) - else: - raise ValueError(self.image_size) - if self.num_prev_obs > 0: - image_shape = (self.num_prev_obs + 1, *image_shape) - - obs["image"] = { - "top": BoundedTensorSpec( - low=0, - high=255, - shape=image_shape, - dtype=torch.uint8, - device=self.device, 
- ) - } - if not self.pixels_only: - state_shape = (len(JOINTS),) - if self.num_prev_obs > 0: - state_shape = (self.num_prev_obs + 1, *state_shape) - - obs["state"] = UnboundedContinuousTensorSpec( - # TODO: add low and high bounds - shape=state_shape, - dtype=torch.float32, - device=self.device, - ) - else: - # TODO(rcadene): add observation_space achieved_goal and desired_goal? - state_shape = (len(JOINTS),) - if self.num_prev_obs > 0: - state_shape = (self.num_prev_obs + 1, *state_shape) - - obs["state"] = UnboundedContinuousTensorSpec( - # TODO: add low and high bounds - shape=state_shape, - dtype=torch.float32, - device=self.device, - ) - self.observation_spec = CompositeSpec({"observation": obs}) - - # TODO(rcadene): valid when controling end effector? - # action_space = self._env.action_spec() - # self.action_spec = BoundedTensorSpec( - # low=action_space.minimum, - # high=action_space.maximum, - # shape=action_space.shape, - # dtype=torch.float32, - # device=self.device, - # ) - - # TODO(rcaene): add bounds (where are they????) 
- self.action_spec = BoundedTensorSpec( - shape=(len(ACTIONS)), - low=-1, - high=1, - dtype=torch.float32, - device=self.device, - ) - - self.reward_spec = UnboundedContinuousTensorSpec( - shape=(1,), - dtype=torch.float32, - device=self.device, - ) - - self.done_spec = CompositeSpec( - { - "done": DiscreteTensorSpec( - 2, - shape=(1,), - dtype=torch.bool, - device=self.device, - ), - "success": DiscreteTensorSpec( - 2, - shape=(1,), - dtype=torch.bool, - device=self.device, - ), - } - ) - - def _set_seed(self, seed: Optional[int]): - set_global_seed(seed) - # TODO(rcadene): seed the env - # self._env.seed(seed) - logging.warning("Aloha env is not seeded") + def close(self): + pass diff --git a/lerobot/common/envs/factory.py b/lerobot/common/envs/factory.py index 4ddb81a2..9225cbc5 100644 --- a/lerobot/common/envs/factory.py +++ b/lerobot/common/envs/factory.py @@ -30,7 +30,14 @@ def make_env(cfg, num_parallel_envs=0) -> gym.Env | gym.vector.SyncVectorEnv: **kwargs, ) elif cfg.env.name == "aloha": + from lerobot.common.envs import aloha as gym_aloha # noqa: F401 + kwargs["task"] = cfg.env.task + + env_fn = lambda: gym.make( # noqa: E731 + "gym_aloha/AlohaInsertion-v0", + **kwargs, + ) else: raise ValueError(cfg.env.name) diff --git a/tests/test_envs.py b/tests/test_envs.py index a94d76f2..495453e2 100644 --- a/tests/test_envs.py +++ b/tests/test_envs.py @@ -41,25 +41,21 @@ from .utils import DEVICE, DEFAULT_CONFIG_PATH # print("data from rollout:", simple_rollout(100)) -@pytest.mark.skip("TODO") @pytest.mark.parametrize( - "task,from_pixels,pixels_only", + "env_task, obs_type", [ - ("sim_insertion", True, False), - ("sim_insertion", True, True), - ("sim_transfer_cube", True, False), - ("sim_transfer_cube", True, True), + # ("AlohaInsertion-v0", "state"), + ("AlohaInsertion-v0", "pixels"), + ("AlohaInsertion-v0", "pixels_agent_pos"), + ("AlohaTransferCube-v0", "pixels"), + ("AlohaTransferCube-v0", "pixels_agent_pos"), ], ) -def test_aloha(task, from_pixels, 
pixels_only): - env = AlohaEnv( - task, - from_pixels=from_pixels, - pixels_only=pixels_only, - image_size=[3, 480, 640] if from_pixels else None, - ) - # print_spec_rollout(env) - check_env_specs(env) +def test_aloha(env_task, obs_type): + from lerobot.common.envs import aloha as gym_aloha # noqa: F401 + env = gym.make(f"gym_aloha/{env_task}", obs_type=obs_type) + check_env(env) + @pytest.mark.parametrize( From a2d3588fca71aaa4b742f87301b8996adb6c6ac5 Mon Sep 17 00:00:00 2001 From: Cadene Date: Fri, 5 Apr 2024 17:17:31 +0000 Subject: [PATCH 19/80] wrap dm_control aloha into gymnasium (TODO: properly seeding the env) --- lerobot/common/envs/aloha/__init__.py | 15 +++++++++++++++ 1 file changed, 15 insertions(+) create mode 100644 lerobot/common/envs/aloha/__init__.py diff --git a/lerobot/common/envs/aloha/__init__.py b/lerobot/common/envs/aloha/__init__.py new file mode 100644 index 00000000..16fe3c43 --- /dev/null +++ b/lerobot/common/envs/aloha/__init__.py @@ -0,0 +1,15 @@ +from gymnasium.envs.registration import register + +register( + id="gym_aloha/AlohaInsertion-v0", + entry_point="lerobot.common.envs.aloha.env:AlohaEnv", + max_episode_steps=300, + kwargs={"obs_type": "state", "task": "insertion"}, +) + +register( + id="gym_aloha/AlohaTransferCube-v0", + entry_point="lerobot.common.envs.aloha.env:AlohaEnv", + max_episode_steps=300, + kwargs={"obs_type": "state", "task": "transfer_cube"}, +) From 5eff40b3d6fb5342927998ed7ca3f404c6de9280 Mon Sep 17 00:00:00 2001 From: Cadene Date: Fri, 5 Apr 2024 17:18:37 +0000 Subject: [PATCH 20/80] rename task, sim_transfer -> transfer --- lerobot/configs/env/aloha.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lerobot/configs/env/aloha.yaml b/lerobot/configs/env/aloha.yaml index 51569fea..2bfbbaa8 100644 --- a/lerobot/configs/env/aloha.yaml +++ b/lerobot/configs/env/aloha.yaml @@ -14,7 +14,7 @@ dataset_id: aloha_sim_insertion_human env: name: aloha - task: sim_insertion + task: insertion from_pixels: 
True pixels_only: False image_size: [3, 480, 640] From 8d2463f45b4cd22f5ce6e38b7beade9231e52f37 Mon Sep 17 00:00:00 2001 From: Alexander Soare Date: Fri, 5 Apr 2024 18:46:30 +0100 Subject: [PATCH 21/80] backup wip --- lerobot/common/policies/abstract.py | 76 ------------------ lerobot/common/policies/act/policy.py | 111 ++++++++++++++++++++------ lerobot/scripts/eval.py | 2 +- lerobot/scripts/train.py | 3 +- scripts/convert_act_weights.py | 33 ++++---- 5 files changed, 105 insertions(+), 120 deletions(-) diff --git a/lerobot/common/policies/abstract.py b/lerobot/common/policies/abstract.py index 6dc72bef..beebd8ac 100644 --- a/lerobot/common/policies/abstract.py +++ b/lerobot/common/policies/abstract.py @@ -4,79 +4,3 @@ import torch from torch import Tensor, nn -class AbstractPolicy(nn.Module): - """Base policy which all policies should be derived from. - - The forward method should generally not be overriden as it plays the role of handling multi-step policies. See its - documentation for more information. - - Note: - When implementing a concrete class (e.g. `AlohaDataset`, `PushtEnv`, `DiffusionPolicy`), you need to: - 1. set the required class attributes: - - for classes inheriting from `AbstractDataset`: `available_datasets` - - for classes inheriting from `AbstractEnv`: `name`, `available_tasks` - - for classes inheriting from `AbstractPolicy`: `name` - 2. update variables in `lerobot/__init__.py` (e.g. `available_envs`, `available_datasets_per_envs`, `available_policies`) - 3. update variables in `tests/test_available.py` by importing your new class - """ - - name: str | None = None # same name should be used to instantiate the policy in factory.py - - def __init__(self, n_action_steps: int | None): - """ - n_action_steps: Sets the cache size for storing action trajectories. If None, it is assumed that a single - action is returned by `select_actions` and that doesn't have a horizon dimension. The `forward` method then - adds that dimension. 
- """ - super().__init__() - assert self.name is not None, "Subclasses of `AbstractPolicy` should set the `name` class attribute." - self.n_action_steps = n_action_steps - self.clear_action_queue() - - def update(self, replay_buffer, step): - """One step of the policy's learning algorithm.""" - raise NotImplementedError("Abstract method") - - def save(self, fp): - torch.save(self.state_dict(), fp) - - def load(self, fp): - d = torch.load(fp) - self.load_state_dict(d) - - def select_actions(self, observation) -> Tensor: - """Select an action (or trajectory of actions) based on an observation during rollout. - - If n_action_steps was provided at initialization, this should return a (batch_size, n_action_steps, *) tensor of - actions. Otherwise if n_actions_steps is None, this should return a (batch_size, *) tensor of actions. - """ - raise NotImplementedError("Abstract method") - - def clear_action_queue(self): - """This should be called whenever the environment is reset.""" - if self.n_action_steps is not None: - self._action_queue = deque([], maxlen=self.n_action_steps) - - def forward(self, *args, **kwargs) -> Tensor: - """Inference step that makes multi-step policies compatible with their single-step environments. - - WARNING: In general, this should not be overriden. - - Consider a "policy" that observes the environment then charts a course of N actions to take. To make this fit - into the formalism of a TorchRL environment, we view it as being effectively a policy that (1) makes an - observation and prepares a queue of actions, (2) consumes that queue when queried, regardless of the environment - observation, (3) repopulates the action queue when empty. This method handles the aforementioned logic so that - the subclass doesn't have to. - - This method effectively wraps the `select_actions` method of the subclass. The following assumptions are made: - 1. 
The `select_actions` method returns a Tensor of actions with shape (B, H, *) where B is the batch size, H is - the action trajectory horizon and * is the action dimensions. - 2. Prior to the `select_actions` method being called, theres is an `n_action_steps` instance attribute defined. - """ - if self.n_action_steps is None: - return self.select_actions(*args, **kwargs) - if len(self._action_queue) == 0: - # `select_actions` returns a (batch_size, n_action_steps, *) tensor, but the queue effectively has shape - # (n_action_steps, batch_size, *), hence the transpose. - self._action_queue.extend(self.select_actions(*args, **kwargs).transpose(0, 1)) - return self._action_queue.popleft() diff --git a/lerobot/common/policies/act/policy.py b/lerobot/common/policies/act/policy.py index f42c6a3c..a9a5ac06 100644 --- a/lerobot/common/policies/act/policy.py +++ b/lerobot/common/policies/act/policy.py @@ -3,7 +3,7 @@ As per Learning Fine-Grained Bimanual Manipulation with Low-Cost Hardware (https://arxiv.org/abs/2304.13705). The majority of changes here involve removing unused code, unifying naming, and adding helpful comments. """ - +from collections import deque import math import time from itertools import chain @@ -22,6 +22,67 @@ from torchvision.ops.misc import FrozenBatchNorm2d from lerobot.common.utils import get_safe_torch_device +# class AbstractPolicy(nn.Module): +# """Base policy which all policies should be derived from. + +# The forward method should generally not be overriden as it plays the role of handling multi-step policies. See its +# documentation for more information. + +# Note: +# When implementing a concrete class (e.g. `AlohaDataset`, `PushtEnv`, `DiffusionPolicy`), you need to: +# 1. set the required class attributes: +# - for classes inheriting from `AbstractDataset`: `available_datasets` +# - for classes inheriting from `AbstractEnv`: `name`, `available_tasks` +# - for classes inheriting from `AbstractPolicy`: `name` +# 2. 
update variables in `lerobot/__init__.py` (e.g. `available_envs`, `available_datasets_per_envs`, `available_policies`) +# 3. update variables in `tests/test_available.py` by importing your new class +# """ + +# name: str | None = None # same name should be used to instantiate the policy in factory.py + +# def __init__(self, n_action_steps: int | None): +# """ +# n_action_steps: Sets the cache size for storing action trajectories. If None, it is assumed that a single +# action is returned by `select_actions` and that doesn't have a horizon dimension. The `forward` method then +# adds that dimension. +# """ +# super().__init__() +# assert self.name is not None, "Subclasses of `AbstractPolicy` should set the `name` class attribute." +# self.n_action_steps = n_action_steps +# self.clear_action_queue() + +# def clear_action_queue(self): +# """This should be called whenever the environment is reset.""" +# if self.n_action_steps is not None: +# self._action_queue = deque([], maxlen=self.n_action_steps) + +# def forward(self, fn) -> Tensor: +# """Inference step that makes multi-step policies compatible with their single-step environments. + +# WARNING: In general, this should not be overriden. + +# Consider a "policy" that observes the environment then charts a course of N actions to take. To make this fit +# into the formalism of a TorchRL environment, we view it as being effectively a policy that (1) makes an +# observation and prepares a queue of actions, (2) consumes that queue when queried, regardless of the environment +# observation, (3) repopulates the action queue when empty. This method handles the aforementioned logic so that +# the subclass doesn't have to. + +# This method effectively wraps the `select_actions` method of the subclass. The following assumptions are made: +# 1. The `select_actions` method returns a Tensor of actions with shape (B, H, *) where B is the batch size, H is +# the action trajectory horizon and * is the action dimensions. +# 2. 
Prior to the `select_actions` method being called, theres is an `n_action_steps` instance attribute defined. +# """ +# if self.n_action_steps is None: +# return self.select_actions(*args, **kwargs) +# if len(self._action_queue) == 0: +# # `select_actions` returns a (batch_size, n_action_steps, *) tensor, but the queue effectively has shape +# # (n_action_steps, batch_size, *), hence the transpose. +# self._action_queue.extend(self.select_actions(*args, **kwargs).transpose(0, 1)) +# return self._action_queue.popleft() + + + + class ActionChunkingTransformerPolicy(nn.Module): """ Action Chunking Transformer Policy as per Learning Fine-Grained Bimanual Manipulation with Low-Cost @@ -168,14 +229,16 @@ class ActionChunkingTransformerPolicy(nn.Module): nn.init.xavier_uniform_(p) @torch.no_grad() - def select_actions(self, batch, *_): + def select_action(self, batch, *_): # TODO(now): Implement queueing mechanism. self.eval() self._preprocess_batch(batch) # TODO(now): What's up with this 0.182? 
action = self.forward( - robot_state=batch["observation.state"] * 0.182, image=batch["observation.images.top"] + robot_state=batch["observation.state"] * 0.182, + image=batch["observation.images.top"], + return_loss=False, ) if self.cfg.temporal_agg: @@ -226,7 +289,7 @@ class ActionChunkingTransformerPolicy(nn.Module): assert batch_size % self.cfg.horizon == 0 assert batch_size % num_slices == 0 - loss = self.compute_loss(batch) + loss = self.forward(batch, return_loss=True)["loss"] loss.backward() grad_norm = torch.nn.utils.clip_grad_norm_( @@ -247,44 +310,38 @@ class ActionChunkingTransformerPolicy(nn.Module): return info - def compute_loss(self, batch): - loss_dict = self.forward( - robot_state=batch["observation.state"], - image=batch["observation.images.top"], - actions=batch["action"], - ) - loss = loss_dict["loss"] - return loss - - def forward(self, robot_state: Tensor, image: Tensor, actions: Tensor | None = None): + def forward(self, batch: dict[str, Tensor], return_loss: bool = False): # TODO(now): Maybe this shouldn't be here? normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) - image = normalize(image) + images = normalize(batch["observation.images.top"]) - is_training = actions is not None - if is_training: # training time - actions = actions[:, : self.horizon] + if return_loss: # training time + actions_hat, (mu_hat, log_sigma_x2_hat) = self._forward( + batch["observation.state"], images, batch["action"] + ) - a_hat, (mu, log_sigma_x2) = self._forward(robot_state, image, actions) - - all_l1 = F.l1_loss(actions, a_hat, reduction="none") - l1 = all_l1.mean() + l1_loss = ( + F.l1_loss(batch["action"], actions_hat, reduction="none") + * ~batch["action_is_pad"].unsqueeze(-1) + ).mean() loss_dict = {} - loss_dict["l1"] = l1 + loss_dict["l1"] = l1_loss if self.cfg.use_vae: # Calculate Dₖₗ(latent_pdf || standard_normal). 
Note: After computing the KL-divergence for # each dimension independently, we sum over the latent dimension to get the total # KL-divergence per batch element, then take the mean over the batch. # (See App. B of https://arxiv.org/abs/1312.6114 for more details). - mean_kld = (-0.5 * (1 + log_sigma_x2 - mu.pow(2) - (log_sigma_x2).exp())).sum(-1).mean() + mean_kld = ( + (-0.5 * (1 + log_sigma_x2_hat - mu_hat.pow(2) - (log_sigma_x2_hat).exp())).sum(-1).mean() + ) loss_dict["kl"] = mean_kld loss_dict["loss"] = loss_dict["l1"] + loss_dict["kl"] * self.cfg.kl_weight else: loss_dict["loss"] = loss_dict["l1"] return loss_dict else: - action, _ = self._forward(robot_state, image) # no action, sample from prior + action, _ = self._forward(batch["observation.state"], images) return action def _forward(self, robot_state: Tensor, image: Tensor, actions: Tensor | None = None): @@ -321,7 +378,9 @@ class ActionChunkingTransformerPolicy(nn.Module): # Forward pass through VAE encoder and sample the latent with the reparameterization trick. cls_token_out = self.vae_encoder( vae_encoder_input.permute(1, 0, 2), pos_embed=pos_embed.permute(1, 0, 2) - )[0] # (B, D) + )[ + 0 + ] # (B, D) latent_pdf_params = self.vae_encoder_latent_output_proj(cls_token_out) mu = latent_pdf_params[:, : self.latent_dim] # This is 2log(sigma). Done this way to match the original implementation. 
diff --git a/lerobot/scripts/eval.py b/lerobot/scripts/eval.py index e7ba53fc..b05f9704 100644 --- a/lerobot/scripts/eval.py +++ b/lerobot/scripts/eval.py @@ -251,7 +251,7 @@ def eval(cfg: dict, out_dir=None, stats_path=None): dataset = make_dataset(cfg, stats_path=stats_path) logging.info("Making environment.") - env = make_env(cfg, num_parallel_envs=cfg.eval_episodes) + env = make_env(cfg, num_parallel_envs=cfg.rollout_batch_size) # when policy is None, rollout a random policy policy = make_policy(cfg) if cfg.policy.pretrained_model_path else None diff --git a/lerobot/scripts/train.py b/lerobot/scripts/train.py index d49dfff8..81f3cdbc 100644 --- a/lerobot/scripts/train.py +++ b/lerobot/scripts/train.py @@ -148,7 +148,8 @@ def train(cfg: dict, out_dir=None, job_name=None): # ) logging.info("make_env") - env = make_env(cfg, num_parallel_envs=cfg.eval_episodes) + # TODO(now): uncomment + #env = make_env(cfg, num_parallel_envs=cfg.eval_episodes) logging.info("make_policy") policy = make_policy(cfg) diff --git a/scripts/convert_act_weights.py b/scripts/convert_act_weights.py index b1492009..d5e38796 100644 --- a/scripts/convert_act_weights.py +++ b/scripts/convert_act_weights.py @@ -28,22 +28,23 @@ for to_remove in start_removals: # Replace keys based on what they start with. 
start_replacements = [ - ("model.query_embed.weight", "model.pos_embed.weight"), - ("model.pos_table", "model.vae_encoder_pos_enc"), - ("model.pos_embed.weight", "model.decoder_pos_embed.weight"), - ("model.encoder.", "model.vae_encoder."), - ("model.encoder_action_proj.", "model.vae_encoder_action_input_proj."), - ("model.encoder_joint_proj.", "model.vae_encoder_robot_state_input_proj."), - ("model.latent_proj.", "model.vae_encoder_latent_output_proj."), - ("model.latent_proj.", "model.vae_encoder_latent_output_proj."), - ("model.input_proj.", "model.encoder_img_feat_input_proj."), - ("model.input_proj_robot_state", "model.encoder_robot_state_input_proj"), - ("model.latent_out_proj.", "model.encoder_latent_input_proj."), - ("model.transformer.encoder.", "model.encoder."), - ("model.transformer.decoder.", "model.decoder."), - ("model.backbones.0.0.body.", "model.backbone."), - ("model.additional_pos_embed.weight", "model.encoder_robot_and_latent_pos_embed.weight"), - ("model.cls_embed.weight", "model.vae_encoder_cls_embed.weight"), + ("model.", ""), + ("query_embed.weight", "pos_embed.weight"), + ("pos_table", "vae_encoder_pos_enc"), + ("pos_embed.weight", "decoder_pos_embed.weight"), + ("encoder.", "vae_encoder."), + ("encoder_action_proj.", "vae_encoder_action_input_proj."), + ("encoder_joint_proj.", "vae_encoder_robot_state_input_proj."), + ("latent_proj.", "vae_encoder_latent_output_proj."), + ("latent_proj.", "vae_encoder_latent_output_proj."), + ("input_proj.", "encoder_img_feat_input_proj."), + ("input_proj_robot_state", "encoder_robot_state_input_proj"), + ("latent_out_proj.", "encoder_latent_input_proj."), + ("transformer.encoder.", "encoder."), + ("transformer.decoder.", "decoder."), + ("backbones.0.0.body.", "backbone."), + ("additional_pos_embed.weight", "encoder_robot_and_latent_pos_embed.weight"), + ("cls_embed.weight", "vae_encoder_cls_embed.weight"), ] for to_replace, replace_with in start_replacements: From 44656d2706e9675d677c80405b64e5397ae08bb8 
Mon Sep 17 00:00:00 2001 From: Cadene Date: Fri, 5 Apr 2024 23:27:12 +0000 Subject: [PATCH 22/80] test_envs are passing --- lerobot/common/datasets/pusht.py | 2 +- lerobot/common/envs/aloha/__init__.py | 6 +++ lerobot/common/envs/aloha/env.py | 38 ++++++++------- lerobot/common/envs/aloha/utils.py | 14 ++++-- lerobot/common/envs/utils.py | 20 +++++--- tests/test_available.py | 70 +++++++++++++-------------- tests/test_envs.py | 40 ++------------- 7 files changed, 91 insertions(+), 99 deletions(-) diff --git a/lerobot/common/datasets/pusht.py b/lerobot/common/datasets/pusht.py index 9b73b101..9088fdf4 100644 --- a/lerobot/common/datasets/pusht.py +++ b/lerobot/common/datasets/pusht.py @@ -6,9 +6,9 @@ import pygame import pymunk import torch import tqdm +from gym_pusht.envs.pusht import pymunk_to_shapely from lerobot.common.datasets.utils import download_and_extract_zip, load_data_with_delta_timestamps -from lerobot.common.envs.pusht.pusht_env import pymunk_to_shapely from lerobot.common.policies.diffusion.replay_buffer import ReplayBuffer as DiffusionPolicyReplayBuffer # as define in env diff --git a/lerobot/common/envs/aloha/__init__.py b/lerobot/common/envs/aloha/__init__.py index 16fe3c43..48907a4c 100644 --- a/lerobot/common/envs/aloha/__init__.py +++ b/lerobot/common/envs/aloha/__init__.py @@ -4,6 +4,9 @@ register( id="gym_aloha/AlohaInsertion-v0", entry_point="lerobot.common.envs.aloha.env:AlohaEnv", max_episode_steps=300, + # Even after seeding, the rendered observations are slightly different, + # so we set `nondeterministic=True` to pass `check_env` tests + nondeterministic=True, kwargs={"obs_type": "state", "task": "insertion"}, ) @@ -11,5 +14,8 @@ register( id="gym_aloha/AlohaTransferCube-v0", entry_point="lerobot.common.envs.aloha.env:AlohaEnv", max_episode_steps=300, + # Even after seeding, the rendered observations are slightly different, + # so we set `nondeterministic=True` to pass `check_env` tests + nondeterministic=True, kwargs={"obs_type": 
"state", "task": "transfer_cube"}, ) diff --git a/lerobot/common/envs/aloha/env.py b/lerobot/common/envs/aloha/env.py index 719c2d19..22cd0116 100644 --- a/lerobot/common/envs/aloha/env.py +++ b/lerobot/common/envs/aloha/env.py @@ -16,7 +16,6 @@ from lerobot.common.envs.aloha.tasks.sim_end_effector import ( TransferCubeEndEffectorTask, ) from lerobot.common.envs.aloha.utils import sample_box_pose, sample_insertion_pose -from lerobot.common.utils import set_global_seed class AlohaEnv(gym.Env): @@ -55,15 +54,20 @@ class AlohaEnv(gym.Env): elif self.obs_type == "pixels_agent_pos": self.observation_space = spaces.Dict( { - "pixels": spaces.Box( - low=0, - high=255, - shape=(self.observation_height, self.observation_width, 3), - dtype=np.uint8, + "pixels": spaces.Dict( + { + "top": spaces.Box( + low=0, + high=255, + shape=(self.observation_height, self.observation_width, 3), + dtype=np.uint8, + ) + } ), "agent_pos": spaces.Box( - low=np.array([-1] * len(JOINTS)), # ??? - high=np.array([1] * len(JOINTS)), # ??? 
+ low=-np.inf, + high=np.inf, + shape=(len(JOINTS),), dtype=np.float64, ), } @@ -89,21 +93,21 @@ class AlohaEnv(gym.Env): if "transfer_cube" in task_name: xml_path = ASSETS_DIR / "bimanual_viperx_transfer_cube.xml" physics = mujoco.Physics.from_xml_path(str(xml_path)) - task = TransferCubeTask(random=False) + task = TransferCubeTask() elif "insertion" in task_name: xml_path = ASSETS_DIR / "bimanual_viperx_insertion.xml" physics = mujoco.Physics.from_xml_path(str(xml_path)) - task = InsertionTask(random=False) + task = InsertionTask() elif "end_effector_transfer_cube" in task_name: raise NotImplementedError() xml_path = ASSETS_DIR / "bimanual_viperx_end_effector_transfer_cube.xml" physics = mujoco.Physics.from_xml_path(str(xml_path)) - task = TransferCubeEndEffectorTask(random=False) + task = TransferCubeEndEffectorTask() elif "end_effector_insertion" in task_name: raise NotImplementedError() xml_path = ASSETS_DIR / "bimanual_viperx_end_effector_insertion.xml" physics = mujoco.Physics.from_xml_path(str(xml_path)) - task = InsertionEndEffectorTask(random=False) + task = InsertionEndEffectorTask() else: raise NotImplementedError(task_name) @@ -116,10 +120,10 @@ class AlohaEnv(gym.Env): if self.obs_type == "state": raise NotImplementedError() elif self.obs_type == "pixels": - obs = raw_obs["images"]["top"].copy() + obs = {"top": raw_obs["images"]["top"].copy()} elif self.obs_type == "pixels_agent_pos": obs = { - "pixels": raw_obs["images"]["top"].copy(), + "pixels": {"top": raw_obs["images"]["top"].copy()}, "agent_pos": raw_obs["qpos"], } return obs @@ -129,14 +133,14 @@ class AlohaEnv(gym.Env): # TODO(rcadene): how to seed the env? 
if seed is not None: - set_global_seed(seed) self._env.task.random.seed(seed) + self._env.task._random = np.random.RandomState(seed) # TODO(rcadene): do not use global variable for this if "transfer_cube" in self.task: - BOX_POSE[0] = sample_box_pose() # used in sim reset + BOX_POSE[0] = sample_box_pose(seed) # used in sim reset elif "insertion" in self.task: - BOX_POSE[0] = np.concatenate(sample_insertion_pose()) # used in sim reset + BOX_POSE[0] = np.concatenate(sample_insertion_pose(seed)) # used in sim reset else: raise ValueError(self.task) diff --git a/lerobot/common/envs/aloha/utils.py b/lerobot/common/envs/aloha/utils.py index 5ac8b955..5b7d8cfe 100644 --- a/lerobot/common/envs/aloha/utils.py +++ b/lerobot/common/envs/aloha/utils.py @@ -1,26 +1,30 @@ import numpy as np -def sample_box_pose(): +def sample_box_pose(seed=None): x_range = [0.0, 0.2] y_range = [0.4, 0.6] z_range = [0.05, 0.05] + rng = np.random.RandomState(seed) + ranges = np.vstack([x_range, y_range, z_range]) - cube_position = np.random.uniform(ranges[:, 0], ranges[:, 1]) + cube_position = rng.uniform(ranges[:, 0], ranges[:, 1]) cube_quat = np.array([1, 0, 0, 0]) return np.concatenate([cube_position, cube_quat]) -def sample_insertion_pose(): +def sample_insertion_pose(seed=None): # Peg x_range = [0.1, 0.2] y_range = [0.4, 0.6] z_range = [0.05, 0.05] + rng = np.random.RandomState(seed) + ranges = np.vstack([x_range, y_range, z_range]) - peg_position = np.random.uniform(ranges[:, 0], ranges[:, 1]) + peg_position = rng.uniform(ranges[:, 0], ranges[:, 1]) peg_quat = np.array([1, 0, 0, 0]) peg_pose = np.concatenate([peg_position, peg_quat]) @@ -31,7 +35,7 @@ def sample_insertion_pose(): z_range = [0.05, 0.05] ranges = np.vstack([x_range, y_range, z_range]) - socket_position = np.random.uniform(ranges[:, 0], ranges[:, 1]) + socket_position = rng.uniform(ranges[:, 0], ranges[:, 1]) socket_quat = np.array([1, 0, 0, 0]) socket_pose = np.concatenate([socket_position, socket_quat]) diff --git 
a/lerobot/common/envs/utils.py b/lerobot/common/envs/utils.py index 1696ddbe..9d0fb853 100644 --- a/lerobot/common/envs/utils.py +++ b/lerobot/common/envs/utils.py @@ -6,12 +6,20 @@ from lerobot.common.transforms import apply_inverse_transform def preprocess_observation(observation, transform=None): # map to expected inputs for the policy - obs = { - "observation.image": torch.from_numpy(observation["pixels"]).float(), - "observation.state": torch.from_numpy(observation["agent_pos"]).float(), - } - # convert to (b c h w) torch format - obs["observation.image"] = einops.rearrange(obs["observation.image"], "b h w c -> b c h w") + obs = {} + + if isinstance(observation["pixels"], dict): + imgs = {f"observation.images.{key}": img for key, img in observation["pixels"].items()} + else: + imgs = {"observation.image": observation["pixels"]} + + for imgkey, img in imgs.items(): + img = torch.from_numpy(img).float() + # convert to (b c h w) torch format + img = einops.rearrange(img, "b h w c -> b c h w") + obs[imgkey] = img + + obs["observation.state"] = torch.from_numpy(observation["agent_pos"]).float() # apply same transforms as in training if transform is not None: diff --git a/tests/test_available.py b/tests/test_available.py index 9cc91efa..8a2ece38 100644 --- a/tests/test_available.py +++ b/tests/test_available.py @@ -15,50 +15,50 @@ Note: import pytest import lerobot -from lerobot.common.envs.aloha.env import AlohaEnv -from lerobot.common.envs.pusht.env import PushtEnv -from lerobot.common.envs.simxarm.env import SimxarmEnv +# from lerobot.common.envs.aloha.env import AlohaEnv +# from gym_pusht.envs import PushtEnv +# from gym_xarm.envs import SimxarmEnv -from lerobot.common.datasets.simxarm import SimxarmDataset -from lerobot.common.datasets.aloha import AlohaDataset -from lerobot.common.datasets.pusht import PushtDataset +# from lerobot.common.datasets.simxarm import SimxarmDataset +# from lerobot.common.datasets.aloha import AlohaDataset +# from 
lerobot.common.datasets.pusht import PushtDataset -from lerobot.common.policies.act.policy import ActionChunkingTransformerPolicy -from lerobot.common.policies.diffusion.policy import DiffusionPolicy -from lerobot.common.policies.tdmpc.policy import TDMPCPolicy +# from lerobot.common.policies.act.policy import ActionChunkingTransformerPolicy +# from lerobot.common.policies.diffusion.policy import DiffusionPolicy +# from lerobot.common.policies.tdmpc.policy import TDMPCPolicy -def test_available(): - pol_classes = [ - ActionChunkingTransformerPolicy, - DiffusionPolicy, - TDMPCPolicy, - ] +# def test_available(): +# pol_classes = [ +# ActionChunkingTransformerPolicy, +# DiffusionPolicy, +# TDMPCPolicy, +# ] - env_classes = [ - AlohaEnv, - PushtEnv, - SimxarmEnv, - ] +# env_classes = [ +# AlohaEnv, +# PushtEnv, +# SimxarmEnv, +# ] - dat_classes = [ - AlohaDataset, - PushtDataset, - SimxarmDataset, - ] +# dat_classes = [ +# AlohaDataset, +# PushtDataset, +# SimxarmDataset, +# ] - policies = [pol_cls.name for pol_cls in pol_classes] - assert set(policies) == set(lerobot.available_policies) +# policies = [pol_cls.name for pol_cls in pol_classes] +# assert set(policies) == set(lerobot.available_policies) - envs = [env_cls.name for env_cls in env_classes] - assert set(envs) == set(lerobot.available_envs) +# envs = [env_cls.name for env_cls in env_classes] +# assert set(envs) == set(lerobot.available_envs) - tasks_per_env = {env_cls.name: env_cls.available_tasks for env_cls in env_classes} - for env in envs: - assert set(tasks_per_env[env]) == set(lerobot.available_tasks_per_env[env]) +# tasks_per_env = {env_cls.name: env_cls.available_tasks for env_cls in env_classes} +# for env in envs: +# assert set(tasks_per_env[env]) == set(lerobot.available_tasks_per_env[env]) - datasets_per_env = {env_cls.name: dat_cls.available_datasets for env_cls, dat_cls in zip(env_classes, dat_classes)} - for env in envs: - assert set(datasets_per_env[env]) == 
set(lerobot.available_datasets_per_env[env]) +# datasets_per_env = {env_cls.name: dat_cls.available_datasets for env_cls, dat_cls in zip(env_classes, dat_classes)} +# for env in envs: +# assert set(datasets_per_env[env]) == set(lerobot.available_datasets_per_env[env]) diff --git a/tests/test_envs.py b/tests/test_envs.py index 495453e2..effe4032 100644 --- a/tests/test_envs.py +++ b/tests/test_envs.py @@ -9,38 +9,9 @@ from lerobot.common.utils import init_hydra_config from lerobot.common.envs.utils import preprocess_observation -# import dmc_aloha # noqa: F401 - from .utils import DEVICE, DEFAULT_CONFIG_PATH -# def print_spec_rollout(env): -# print("observation_spec:", env.observation_spec) -# print("action_spec:", env.action_spec) -# print("reward_spec:", env.reward_spec) -# print("done_spec:", env.done_spec) - -# td = env.reset() -# print("reset tensordict", td) - -# td = env.rand_step(td) -# print("random step tensordict", td) - -# def simple_rollout(steps=100): -# # preallocate: -# data = TensorDict({}, [steps]) -# # reset -# _data = env.reset() -# for i in range(steps): -# _data["action"] = env.action_spec.rand() -# _data = env.step(_data) -# data[i] = _data -# _data = step_mdp(_data, keep_other=True) -# return data - -# print("data from rollout:", simple_rollout(100)) - - @pytest.mark.parametrize( "env_task, obs_type", [ @@ -54,7 +25,7 @@ from .utils import DEVICE, DEFAULT_CONFIG_PATH def test_aloha(env_task, obs_type): from lerobot.common.envs import aloha as gym_aloha # noqa: F401 env = gym.make(f"gym_aloha/{env_task}", obs_type=obs_type) - check_env(env) + check_env(env.unwrapped) @@ -70,7 +41,7 @@ def test_aloha(env_task, obs_type): def test_xarm(env_task, obs_type): import gym_xarm # noqa: F401 env = gym.make(f"gym_xarm/{env_task}", obs_type=obs_type) - check_env(env) + check_env(env.unwrapped) @@ -85,7 +56,7 @@ def test_xarm(env_task, obs_type): def test_pusht(env_task, obs_type): import gym_pusht # noqa: F401 env = gym.make(f"gym_pusht/{env_task}", 
obs_type=obs_type) - check_env(env) + check_env(env.unwrapped) @pytest.mark.parametrize( @@ -93,7 +64,7 @@ def test_pusht(env_task, obs_type): [ "pusht", "simxarm", - # "aloha", + "aloha", ], ) def test_factory(env_name): @@ -104,9 +75,8 @@ def test_factory(env_name): dataset = make_dataset(cfg) - env = make_env(cfg) + env = make_env(cfg, num_parallel_envs=1) obs, info = env.reset() - obs = {key: obs[key][None, ...] for key in obs} obs = preprocess_observation(obs, transform=dataset.transform) for key in dataset.image_keys: img = obs[key] From 4371a5570dc9064f094726817daa8b6d011d6891 Mon Sep 17 00:00:00 2001 From: Cadene Date: Sun, 7 Apr 2024 16:01:22 +0000 Subject: [PATCH 23/80] Remove latency, tdmpc policy passes tests (TODO: make it work with online RL) --- examples/3_train_policy.py | 2 +- lerobot/common/datasets/factory.py | 14 +-- lerobot/common/policies/factory.py | 15 +-- lerobot/common/policies/tdmpc/policy.py | 87 +++++++++++------ lerobot/configs/policy/act.yaml | 1 - lerobot/configs/policy/diffusion.yaml | 7 +- lerobot/configs/policy/tdmpc.yaml | 6 ++ tests/test_policies.py | 124 +++++++----------------- 8 files changed, 123 insertions(+), 133 deletions(-) diff --git a/examples/3_train_policy.py b/examples/3_train_policy.py index 01a4cf76..6e01a5d5 100644 --- a/examples/3_train_policy.py +++ b/examples/3_train_policy.py @@ -37,7 +37,7 @@ policy = DiffusionPolicy( cfg_obs_encoder=cfg.obs_encoder, cfg_optimizer=cfg.optimizer, cfg_ema=cfg.ema, - n_action_steps=cfg.n_action_steps + cfg.n_latency_steps, + n_action_steps=cfg.n_action_steps, **cfg.policy, ) policy.train() diff --git a/lerobot/common/datasets/factory.py b/lerobot/common/datasets/factory.py index 32d76a50..c22ae698 100644 --- a/lerobot/common/datasets/factory.py +++ b/lerobot/common/datasets/factory.py @@ -78,15 +78,11 @@ def make_dataset( ] ) - if cfg.policy.name == "diffusion" and cfg.env.name == "pusht": - # TODO(rcadene): implement delta_timestamps in config - delta_timestamps = { - 
"observation.image": [-0.1, 0], - "observation.state": [-0.1, 0], - "action": [-0.1] + [i / clsfunc.fps for i in range(15)], - } - else: - delta_timestamps = None + delta_timestamps = cfg.policy.get("delta_timestamps") + if delta_timestamps is not None: + for key in delta_timestamps: + if isinstance(delta_timestamps[key], str): + delta_timestamps[key] = eval(delta_timestamps[key]) dataset = clsfunc( dataset_id=cfg.dataset_id, diff --git a/lerobot/common/policies/factory.py b/lerobot/common/policies/factory.py index 934f0962..90e7ecc1 100644 --- a/lerobot/common/policies/factory.py +++ b/lerobot/common/policies/factory.py @@ -1,11 +1,10 @@ def make_policy(cfg): - if cfg.policy.name != "diffusion" and cfg.rollout_batch_size > 1: - raise NotImplementedError("Only diffusion policy supports rollout_batch_size > 1 for the time being.") - if cfg.policy.name == "tdmpc": from lerobot.common.policies.tdmpc.policy import TDMPCPolicy - policy = TDMPCPolicy(cfg.policy, cfg.device) + policy = TDMPCPolicy( + cfg.policy, n_obs_steps=cfg.n_obs_steps, n_action_steps=cfg.n_action_steps, device=cfg.device + ) elif cfg.policy.name == "diffusion": from lerobot.common.policies.diffusion.policy import DiffusionPolicy @@ -17,14 +16,18 @@ def make_policy(cfg): cfg_obs_encoder=cfg.obs_encoder, cfg_optimizer=cfg.optimizer, cfg_ema=cfg.ema, - n_action_steps=cfg.n_action_steps + cfg.n_latency_steps, + n_obs_steps=cfg.n_obs_steps, + n_action_steps=cfg.n_action_steps, **cfg.policy, ) elif cfg.policy.name == "act": from lerobot.common.policies.act.policy import ActionChunkingTransformerPolicy policy = ActionChunkingTransformerPolicy( - cfg.policy, cfg.device, n_action_steps=cfg.n_action_steps + cfg.n_latency_steps + cfg.policy, + cfg.device, + n_obs_steps=cfg.n_obs_steps, + n_action_steps=cfg.n_action_steps, ) else: raise ValueError(cfg.policy.name) diff --git a/lerobot/common/policies/tdmpc/policy.py b/lerobot/common/policies/tdmpc/policy.py index 85700913..f763dbc6 100644 --- 
a/lerobot/common/policies/tdmpc/policy.py +++ b/lerobot/common/policies/tdmpc/policy.py @@ -154,8 +154,14 @@ class TDMPCPolicy(nn.Module): if len(self._queues["action"]) == 0: batch = {key: torch.stack(list(self._queues[key]), dim=1) for key in batch} + if self.n_obs_steps == 1: + # hack to remove the time dimension + for key in batch: + assert batch[key].shape[1] == 1 + batch[key] = batch[key][:, 0] + actions = [] - batch_size = batch["observation.image."].shape[0] + batch_size = batch["observation.image"].shape[0] for i in range(batch_size): obs = { "rgb": batch["observation.image"][[i]], @@ -166,6 +172,10 @@ class TDMPCPolicy(nn.Module): actions.append(action) action = torch.stack(actions) + # self.act returns an action for 1 timestep only, so we copy it over `n_action_steps` time + if i in range(self.n_action_steps): + self._queues["action"].append(action) + action = self._queues["action"].popleft() return action @@ -410,22 +420,45 @@ class TDMPCPolicy(nn.Module): # idxs = torch.cat([idxs, demo_idxs]) # weights = torch.cat([weights, demo_weights]) + # TODO(rcadene): convert tdmpc with (batch size, time/horizon, channels) + # instead of currently (time/horizon, batch size, channels) which is not the pytorch convention + # batch size b = 256, time/horizon t = 5 + # b t ... -> t b ... 
+ for key in batch: + if batch[key].ndim > 1: + batch[key] = batch[key].transpose(1, 0) + + action = batch["action"] + reward = batch["next.reward"][:, :, None] # add extra channel dimension + # idxs = batch["index"] # TODO(rcadene): use idxs to update sampling weights + done = torch.zeros_like(reward, dtype=torch.bool, device=reward.device) + mask = torch.ones_like(reward, dtype=torch.bool, device=reward.device) + weights = torch.ones_like(reward, dtype=torch.bool, device=reward.device) + + obses = { + "rgb": batch["observation.image"], + "state": batch["observation.state"], + } + + shapes = {} + for k in obses: + shapes[k] = obses[k].shape + obses[k] = einops.rearrange(obses[k], "t b ... -> (t b) ... ") + # Apply augmentations aug_tf = h.aug(self.cfg) - obs = aug_tf(obs) + obses = aug_tf(obses) - for k in next_obses: - next_obses[k] = einops.rearrange(next_obses[k], "h t ... -> (h t) ...") - next_obses = aug_tf(next_obses) - for k in next_obses: - next_obses[k] = einops.rearrange( - next_obses[k], - "(h t) ... -> h t ...", - h=self.cfg.horizon, - t=self.cfg.batch_size, - ) + for k in obses: + t, b = shapes[k][:2] + obses[k] = einops.rearrange(obses[k], "(t b) ... -> t b ... 
", b=b, t=t) - horizon = self.cfg.horizon + obs, next_obses = {}, {} + for k in obses: + obs[k] = obses[k][0] + next_obses[k] = obses[k][1:].clone() + + horizon = next_obses["rgb"].shape[0] loss_mask = torch.ones_like(mask, device=self.device) for t in range(1, horizon): loss_mask[t] = loss_mask[t - 1] * (~done[t - 1]) @@ -497,19 +530,19 @@ class TDMPCPolicy(nn.Module): ) self.optim.step() - if self.cfg.per: - # Update priorities - priorities = priority_loss.clamp(max=1e4).detach() - has_nan = torch.isnan(priorities).any().item() - if has_nan: - print(f"priorities has nan: {priorities=}") - else: - replay_buffer.update_priority( - idxs[:num_slices], - priorities[:num_slices], - ) - if demo_batch_size > 0: - demo_buffer.update_priority(demo_idxs, priorities[num_slices:]) + # if self.cfg.per: + # # Update priorities + # priorities = priority_loss.clamp(max=1e4).detach() + # has_nan = torch.isnan(priorities).any().item() + # if has_nan: + # print(f"priorities has nan: {priorities=}") + # else: + # replay_buffer.update_priority( + # idxs[:num_slices], + # priorities[:num_slices], + # ) + # if demo_batch_size > 0: + # demo_buffer.update_priority(demo_idxs, priorities[num_slices:]) # Update policy + target network _, pi_update_info = self.update_pi(zs[:-1].detach(), acts=action) @@ -532,7 +565,7 @@ class TDMPCPolicy(nn.Module): "data_s": data_s, "update_s": time.time() - start_time, } - info["demo_batch_size"] = demo_batch_size + # info["demo_batch_size"] = demo_batch_size info["expectile"] = expectile info.update(value_info) info.update(pi_update_info) diff --git a/lerobot/configs/policy/act.yaml b/lerobot/configs/policy/act.yaml index a52c3f54..9dca436f 100644 --- a/lerobot/configs/policy/act.yaml +++ b/lerobot/configs/policy/act.yaml @@ -10,7 +10,6 @@ log_freq: 250 horizon: 100 n_obs_steps: 1 -n_latency_steps: 0 # when temporal_agg=False, n_action_steps=horizon n_action_steps: ${horizon} diff --git a/lerobot/configs/policy/diffusion.yaml 
b/lerobot/configs/policy/diffusion.yaml index 4d6eedca..c3bebe2d 100644 --- a/lerobot/configs/policy/diffusion.yaml +++ b/lerobot/configs/policy/diffusion.yaml @@ -16,7 +16,6 @@ seed: 100000 horizon: 16 n_obs_steps: 2 n_action_steps: 8 -n_latency_steps: 0 dataset_obs_steps: ${n_obs_steps} past_action_visible: False keypoint_visible_rate: 1.0 @@ -38,7 +37,6 @@ policy: shape_meta: ${shape_meta} horizon: ${horizon} - # n_action_steps: ${eval:'${n_action_steps}+${n_latency_steps}'} n_obs_steps: ${n_obs_steps} num_inference_steps: 100 obs_as_global_cond: ${obs_as_global_cond} @@ -64,6 +62,11 @@ policy: lr_warmup_steps: 500 grad_clip_norm: 10 + delta_timestamps: + observation.image: [-.1, 0] + observation.state: [-.1, 0] + action: [-.1, 0, .1, .2, .3, .4, .5, .6, .7, .8, .9, 1.0, 1.1, 1.2, 1.3, 1.4] + noise_scheduler: _target_: diffusers.schedulers.scheduling_ddpm.DDPMScheduler num_train_timesteps: 100 diff --git a/lerobot/configs/policy/tdmpc.yaml b/lerobot/configs/policy/tdmpc.yaml index 5d5d8b62..4fd2b6bb 100644 --- a/lerobot/configs/policy/tdmpc.yaml +++ b/lerobot/configs/policy/tdmpc.yaml @@ -77,3 +77,9 @@ policy: num_q: 5 mlp_dim: 512 latent_dim: 50 + + delta_timestamps: + observation.image: "[i / ${fps} for i in range(6)]" + observation.state: "[i / ${fps} for i in range(6)]" + action: "[i / ${fps} for i in range(5)]" + next.reward: "[i / ${fps} for i in range(5)]" diff --git a/tests/test_policies.py b/tests/test_policies.py index a46c6025..e1b3a4b6 100644 --- a/tests/test_policies.py +++ b/tests/test_policies.py @@ -1,14 +1,11 @@ import pytest -from tensordict import TensorDict -from tensordict.nn import TensorDictModule import torch -from torchrl.data import UnboundedContinuousTensorSpec -from torchrl.envs import EnvBase +from lerobot.common.datasets.utils import cycle +from lerobot.common.envs.utils import postprocess_action, preprocess_observation from lerobot.common.policies.factory import make_policy from lerobot.common.envs.factory import make_env from 
lerobot.common.datasets.factory import make_dataset -from lerobot.common.policies.abstract import AbstractPolicy from lerobot.common.utils import init_hydra_config from .utils import DEVICE, DEFAULT_CONFIG_PATH @@ -16,22 +13,23 @@ from .utils import DEVICE, DEFAULT_CONFIG_PATH "env_name,policy_name,extra_overrides", [ ("simxarm", "tdmpc", ["policy.mpc=true"]), - ("pusht", "tdmpc", ["policy.mpc=false"]), + #("pusht", "tdmpc", ["policy.mpc=false"]), ("pusht", "diffusion", []), - ("aloha", "act", ["env.task=sim_insertion", "dataset_id=aloha_sim_insertion_human"]), - ("aloha", "act", ["env.task=sim_insertion", "dataset_id=aloha_sim_insertion_scripted"]), - ("aloha", "act", ["env.task=sim_transfer_cube", "dataset_id=aloha_sim_transfer_cube_human"]), - ("aloha", "act", ["env.task=sim_transfer_cube", "dataset_id=aloha_sim_transfer_cube_scripted"]), + # ("aloha", "act", ["env.task=sim_insertion", "dataset_id=aloha_sim_insertion_human"]), + #("aloha", "act", ["env.task=sim_insertion", "dataset_id=aloha_sim_insertion_scripted"]), + #("aloha", "act", ["env.task=sim_transfer_cube", "dataset_id=aloha_sim_transfer_cube_human"]), + #("aloha", "act", ["env.task=sim_transfer_cube", "dataset_id=aloha_sim_transfer_cube_scripted"]), # TODO(aliberts): simxarm not working with diffusion # ("simxarm", "diffusion", []), ], ) -def test_concrete_policy(env_name, policy_name, extra_overrides): +def test_policy(env_name, policy_name, extra_overrides): """ Tests: - Making the policy object. - Updating the policy. - Using the policy to select actions at inference time. + - Test the action can be applied to the policy """ cfg = init_hydra_config( DEFAULT_CONFIG_PATH, @@ -46,91 +44,43 @@ def test_concrete_policy(env_name, policy_name, extra_overrides): policy = make_policy(cfg) # Check that we run select_actions and get the appropriate output. 
dataset = make_dataset(cfg) - env = make_env(cfg, transform=dataset.transform) + env = make_env(cfg, num_parallel_envs=2) - if env_name != "aloha": - # TODO(alexander-soare): Fix this part of the test. PrioritizedSliceSampler raises NotImplementedError: - # seq_length as a list is not supported for now. - policy.update(dataset, torch.tensor(0, device=DEVICE)) - - action = policy( - env.observation_spec.rand()["observation"].to(DEVICE), - torch.tensor(0, device=DEVICE), + dataloader = torch.utils.data.DataLoader( + dataset, + num_workers=4, + batch_size=cfg.policy.batch_size, + shuffle=True, + pin_memory=DEVICE != "cpu", + drop_last=True, ) - assert action.shape == env.action_spec.shape + dl_iter = cycle(dataloader) + batch = next(dl_iter) -def test_abstract_policy_forward(): - """ - Given an underlying policy that produces an action trajectory with n_action_steps actions, checks that: - - The policy is invoked the expected number of times during a rollout. - - The environment's termination condition is respected even when part way through an action trajectory. - - The observations are returned correctly. - """ + for key in batch: + batch[key] = batch[key].to(DEVICE, non_blocking=True) - n_action_steps = 8 # our test policy will output 8 action step horizons - terminate_at = 10 # some number that is more than n_action_steps but not a multiple - rollout_max_steps = terminate_at + 1 # some number greater than terminate_at + # Test updating the policy + policy(batch, step=0) - # A minimal environment for testing. 
- class StubEnv(EnvBase): + # reset the policy and environment + policy.reset() + observation, _ = env.reset(seed=cfg.seed) - def __init__(self): - super().__init__() - self.action_spec = UnboundedContinuousTensorSpec(shape=(1,)) - self.reward_spec = UnboundedContinuousTensorSpec(shape=(1,)) + # apply transform to normalize the observations + observation = preprocess_observation(observation, dataset.transform) - def _step(self, tensordict: TensorDict) -> TensorDict: - self.invocation_count += 1 - return TensorDict( - { - "observation": torch.tensor([self.invocation_count]), - "reward": torch.tensor([self.invocation_count]), - "terminated": torch.tensor( - tensordict["action"].item() == terminate_at - ), - } - ) + # send observation to device/gpu + observation = {key: observation[key].to(DEVICE, non_blocking=True) for key in observation} - def _reset(self, tensordict: TensorDict) -> TensorDict: - self.invocation_count = 0 - return TensorDict( - { - "observation": torch.tensor([self.invocation_count]), - "reward": torch.tensor([self.invocation_count]), - } - ) + # get the next action for the environment + with torch.inference_mode(): + action = policy.select_action(observation, step=0) - def _set_seed(self, seed: int | None): - return + # apply inverse transform to unnormalize the action + action = postprocess_action(action, dataset.transform) - class StubPolicy(AbstractPolicy): - name = "stub" + # Test step through policy + env.step(action) - def __init__(self): - super().__init__(n_action_steps) - self.n_policy_invocations = 0 - - def update(self): - pass - - def select_actions(self): - self.n_policy_invocations += 1 - return torch.stack( - [torch.tensor([i]) for i in range(self.n_action_steps)] - ).unsqueeze(0) - - env = StubEnv() - policy = StubPolicy() - policy = TensorDictModule( - policy, - in_keys=[], - out_keys=["action"], - ) - - # Keep track to make sure the policy is called the expected number of times - rollout = env.rollout(rollout_max_steps, policy) - 
- assert len(rollout) == terminate_at + 1 # +1 for the reset observation - assert policy.n_policy_invocations == (terminate_at // n_action_steps) + 1 - assert torch.equal(rollout["observation"].flatten(), torch.arange(terminate_at + 1)) From e1ac5dc62fac32b6d452a45019870e602fbd884c Mon Sep 17 00:00:00 2001 From: Cadene Date: Sun, 7 Apr 2024 17:20:54 +0000 Subject: [PATCH 24/80] fix aloha pixels env test --- lerobot/common/envs/aloha/env.py | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/lerobot/common/envs/aloha/env.py b/lerobot/common/envs/aloha/env.py index 22cd0116..bd14e6d8 100644 --- a/lerobot/common/envs/aloha/env.py +++ b/lerobot/common/envs/aloha/env.py @@ -48,8 +48,15 @@ class AlohaEnv(gym.Env): dtype=np.float64, ) elif self.obs_type == "pixels": - self.observation_space = spaces.Box( - low=0, high=255, shape=(self.observation_height, self.observation_width, 3), dtype=np.uint8 + self.observation_space = spaces.Dict( + { + "top": spaces.Box( + low=0, + high=255, + shape=(self.observation_height, self.observation_width, 3), + dtype=np.uint8, + ) + } ) elif self.obs_type == "pixels_agent_pos": self.observation_space = spaces.Dict( From 1bab4a1dd5fab56f18306496077e7a9db9c9b2fc Mon Sep 17 00:00:00 2001 From: Alexander Soare Date: Mon, 8 Apr 2024 10:23:26 +0100 Subject: [PATCH 25/80] Eval reproduction works with gym_aloha --- lerobot/common/envs/factory.py | 2 +- lerobot/common/policies/act/policy.py | 130 +++++++++----------------- lerobot/common/policies/factory.py | 1 - lerobot/configs/policy/act.yaml | 2 +- lerobot/scripts/eval.py | 8 +- poetry.lock | 26 +++--- 6 files changed, 66 insertions(+), 103 deletions(-) diff --git a/lerobot/common/envs/factory.py b/lerobot/common/envs/factory.py index 971f4b63..749bb533 100644 --- a/lerobot/common/envs/factory.py +++ b/lerobot/common/envs/factory.py @@ -35,7 +35,7 @@ def make_env(cfg, num_parallel_envs=0) -> gym.Env | gym.vector.SyncVectorEnv: kwargs["task"] = cfg.env.task env_fn = 
lambda: gym.make( # noqa: E731 - "gym_aloha/AlohaInsertion-v0", + "gym_aloha/AlohaTransferCube-v0", **kwargs, ) else: diff --git a/lerobot/common/policies/act/policy.py b/lerobot/common/policies/act/policy.py index a9a5ac06..75d5ca0e 100644 --- a/lerobot/common/policies/act/policy.py +++ b/lerobot/common/policies/act/policy.py @@ -3,9 +3,10 @@ As per Learning Fine-Grained Bimanual Manipulation with Low-Cost Hardware (https://arxiv.org/abs/2304.13705). The majority of changes here involve removing unused code, unifying naming, and adding helpful comments. """ -from collections import deque + import math import time +from collections import deque from itertools import chain from typing import Callable @@ -22,67 +23,6 @@ from torchvision.ops.misc import FrozenBatchNorm2d from lerobot.common.utils import get_safe_torch_device -# class AbstractPolicy(nn.Module): -# """Base policy which all policies should be derived from. - -# The forward method should generally not be overriden as it plays the role of handling multi-step policies. See its -# documentation for more information. - -# Note: -# When implementing a concrete class (e.g. `AlohaDataset`, `PushtEnv`, `DiffusionPolicy`), you need to: -# 1. set the required class attributes: -# - for classes inheriting from `AbstractDataset`: `available_datasets` -# - for classes inheriting from `AbstractEnv`: `name`, `available_tasks` -# - for classes inheriting from `AbstractPolicy`: `name` -# 2. update variables in `lerobot/__init__.py` (e.g. `available_envs`, `available_datasets_per_envs`, `available_policies`) -# 3. update variables in `tests/test_available.py` by importing your new class -# """ - -# name: str | None = None # same name should be used to instantiate the policy in factory.py - -# def __init__(self, n_action_steps: int | None): -# """ -# n_action_steps: Sets the cache size for storing action trajectories. 
If None, it is assumed that a single -# action is returned by `select_actions` and that doesn't have a horizon dimension. The `forward` method then -# adds that dimension. -# """ -# super().__init__() -# assert self.name is not None, "Subclasses of `AbstractPolicy` should set the `name` class attribute." -# self.n_action_steps = n_action_steps -# self.clear_action_queue() - -# def clear_action_queue(self): -# """This should be called whenever the environment is reset.""" -# if self.n_action_steps is not None: -# self._action_queue = deque([], maxlen=self.n_action_steps) - -# def forward(self, fn) -> Tensor: -# """Inference step that makes multi-step policies compatible with their single-step environments. - -# WARNING: In general, this should not be overriden. - -# Consider a "policy" that observes the environment then charts a course of N actions to take. To make this fit -# into the formalism of a TorchRL environment, we view it as being effectively a policy that (1) makes an -# observation and prepares a queue of actions, (2) consumes that queue when queried, regardless of the environment -# observation, (3) repopulates the action queue when empty. This method handles the aforementioned logic so that -# the subclass doesn't have to. - -# This method effectively wraps the `select_actions` method of the subclass. The following assumptions are made: -# 1. The `select_actions` method returns a Tensor of actions with shape (B, H, *) where B is the batch size, H is -# the action trajectory horizon and * is the action dimensions. -# 2. Prior to the `select_actions` method being called, theres is an `n_action_steps` instance attribute defined. -# """ -# if self.n_action_steps is None: -# return self.select_actions(*args, **kwargs) -# if len(self._action_queue) == 0: -# # `select_actions` returns a (batch_size, n_action_steps, *) tensor, but the queue effectively has shape -# # (n_action_steps, batch_size, *), hence the transpose. 
-# self._action_queue.extend(self.select_actions(*args, **kwargs).transpose(0, 1)) -# return self._action_queue.popleft() - - - - class ActionChunkingTransformerPolicy(nn.Module): """ Action Chunking Transformer Policy as per Learning Fine-Grained Bimanual Manipulation with Low-Cost @@ -228,18 +168,30 @@ class ActionChunkingTransformerPolicy(nn.Module): if p.dim() > 1: nn.init.xavier_uniform_(p) - @torch.no_grad() - def select_action(self, batch, *_): - # TODO(now): Implement queueing mechanism. - self.eval() - self._preprocess_batch(batch) + def reset(self): + """This should be called whenever the environment is reset.""" + if self.n_action_steps is not None: + self._action_queue = deque([], maxlen=self.n_action_steps) - # TODO(now): What's up with this 0.182? - action = self.forward( - robot_state=batch["observation.state"] * 0.182, - image=batch["observation.images.top"], - return_loss=False, - ) + def select_action(self, batch: dict[str, Tensor], *_): + """ + This method wraps `select_actions` in order to return one action at a time for execution in the + environment. It works by managing the actions in a queue and only calling `select_actions` when the + queue is empty. + """ + if len(self._action_queue) == 0: + # `select_actions` returns a (batch_size, n_action_steps, *) tensor, but the queue effectively has shape + # (n_action_steps, batch_size, *), hence the transpose. 
+ self._action_queue.extend(self.select_actions(batch).transpose(0, 1)) + return self._action_queue.popleft() + + @torch.no_grad() + def select_actions(self, batch: dict[str, Tensor]): + """Use the action chunking transformer to generate a sequence of actions.""" + self.eval() + self._preprocess_batch(batch, add_obs_steps_dim=True) + + action = self.forward(batch, return_loss=False) if self.cfg.temporal_agg: # TODO(rcadene): implement temporal aggregation @@ -257,25 +209,37 @@ class ActionChunkingTransformerPolicy(nn.Module): return action[: self.n_action_steps] def __call__(self, *args, **kwargs): - # TODO(now): Temporary bridge. + # TODO(now): Temporary bridge until we know what to do about the `update` method. return self.update(*args, **kwargs) - def _preprocess_batch(self, batch: dict[str, Tensor]) -> dict[str, Tensor]: + def _preprocess_batch( + self, batch: dict[str, Tensor], add_obs_steps_dim: bool = False + ) -> dict[str, Tensor]: """ - Expects batch to have (at least): + This function expects `batch` to have (at least): { - "observation.state": (B, 1, J) tensor of robot states (joint configuration) - - "observation.images.top": (B, 1, C, H, W) tensor of images. + "observation.state": (B, 1, J) OR (B, J) tensor of robot states (joint configuration). + "observation.images.top": (B, 1, C, H, W) OR (B, C, H, W) tensor of images. "action": (B, H, J) tensor of actions (positional target for robot joint configuration) "action_is_pad": (B, H) mask for whether the actions are padding outside of the episode bounds. } """ + if add_obs_steps_dim: + # Add a dimension for the observations steps. Since n_obs_steps > 1 is not supported right now, + # this just amounts to an unsqueeze. 
+ for k in batch: + if k.startswith("observation."): + batch[k] = batch[k].unsqueeze(1) + if batch["observation.state"].shape[1] != 1: raise ValueError(self._multiple_obs_steps_not_handled_msg) batch["observation.state"] = batch["observation.state"].squeeze(1) - # TODO(alexander-soare): generalize this to multiple images. Note: no squeeze is required for - # "observation.images.top" because then we'd have to unsqueeze to get get the image index dimension. + # TODO(alexander-soare): generalize this to multiple images. + assert ( + sum(k.startswith("observation.images.") and not k.endswith("is_pad") for k in batch) == 1 + ), "ACT only handles one image for now." + # Note: no squeeze is required for "observation.images.top" because then we'd have to unsqueeze to get + # the image index dimension. def update(self, batch, *_): start_time = time.time() @@ -378,9 +342,7 @@ class ActionChunkingTransformerPolicy(nn.Module): # Forward pass through VAE encoder and sample the latent with the reparameterization trick. cls_token_out = self.vae_encoder( vae_encoder_input.permute(1, 0, 2), pos_embed=pos_embed.permute(1, 0, 2) - )[ - 0 - ] # (B, D) + )[0] # (B, D) latent_pdf_params = self.vae_encoder_latent_output_proj(cls_token_out) mu = latent_pdf_params[:, : self.latent_dim] # This is 2log(sigma). Done this way to match the original implementation. diff --git a/lerobot/common/policies/factory.py b/lerobot/common/policies/factory.py index 90e7ecc1..cc956014 100644 --- a/lerobot/common/policies/factory.py +++ b/lerobot/common/policies/factory.py @@ -26,7 +26,6 @@ def make_policy(cfg): policy = ActionChunkingTransformerPolicy( cfg.policy, cfg.device, - n_obs_steps=cfg.n_obs_steps, n_action_steps=cfg.n_action_steps, ) else: diff --git a/lerobot/configs/policy/act.yaml b/lerobot/configs/policy/act.yaml index c1d1801f..80f50003 100644 --- a/lerobot/configs/policy/act.yaml +++ b/lerobot/configs/policy/act.yaml @@ -58,6 +58,6 @@ policy: action_dim: ??? 
delta_timestamps: - observation.image: [0.0] + observation.images.top: [0.0] observation.state: [0.0] action: [0.0, 0.02, 0.04, 0.06, 0.08, 0.1, 0.12, 0.14, 0.16, 0.18, 0.2, 0.22, 0.24, 0.26, 0.28, 0.3, 0.32, 0.34, 0.36, 0.38, 0.4, 0.42, 0.44, 0.46, 0.48, 0.5, 0.52, 0.54, 0.56, 0.58, 0.6, 0.62, 0.64, 0.66, 0.68, 0.70, 0.72, 0.74, 0.76, 0.78, 0.8, 0.82, 0.84, 0.86, 0.88, 0.9, 0.92, 0.94, 0.96, 0.98, 1.0, 1.02, 1.04, 1.06, 1.08, 1.1, 1.12, 1.14, 1.16, 1.18, 1.2, 1.22, 1.24, 1.26, 1.28, 1.3, 1.32, 1.34, 1.36, 1.38, 1.40, 1.42, 1.44, 1.46, 1.48, 1.5, 1.52, 1.54, 1.56, 1.58, 1.6, 1.62, 1.64, 1.66, 1.68, 1.7, 1.72, 1.74, 1.76, 1.78, 1.8, 1.82, 1.84, 1.86, 1.88, 1.90, 1.92, 1.94, 1.96, 1.98] diff --git a/lerobot/scripts/eval.py b/lerobot/scripts/eval.py index b05f9704..b43f4ed1 100644 --- a/lerobot/scripts/eval.py +++ b/lerobot/scripts/eval.py @@ -89,7 +89,9 @@ def eval_policy( visu = env.envs[0].render(mode="visualization") visu = visu[None, ...] # add batch dim else: - visu = np.stack([env.render(mode="visualization") for env in env.envs]) + # TODO(now): Put mode back in. + visu = np.stack([env.render() for env in env.envs]) + # visu = np.stack([env.render(mode="visualization") for env in env.envs]) ep_frames.append(visu) # noqa: B023 for _ in range(num_episodes): @@ -248,7 +250,7 @@ def eval(cfg: dict, out_dir=None, stats_path=None): logging.info("Making transforms.") # TODO(alexander-soare): Completely decouple datasets from evaluation. - dataset = make_dataset(cfg, stats_path=stats_path) + transform = make_dataset(cfg, stats_path=stats_path).transform logging.info("Making environment.") env = make_env(cfg, num_parallel_envs=cfg.rollout_batch_size) @@ -263,7 +265,7 @@ def eval(cfg: dict, out_dir=None, stats_path=None): video_dir=Path(out_dir) / "eval", fps=cfg.env.fps, # TODO(rcadene): what should we do with the transform? 
- transform=dataset.transform, + transform=transform, seed=cfg.seed, ) print(info["aggregated"]) diff --git a/poetry.lock b/poetry.lock index f96f66bc..60354b8a 100644 --- a/poetry.lock +++ b/poetry.lock @@ -941,7 +941,7 @@ mujoco = "^2.3.7" type = "git" url = "git@github.com:huggingface/gym-xarm.git" reference = "HEAD" -resolved_reference = "2eb83fc4fc871b9d271c946d169e42f226ac3a7c" +resolved_reference = "08ddd5a9400783a6898bbf3c3014fc5da3961b9d" [[package]] name = "gymnasium" @@ -1709,20 +1709,20 @@ pyopengl = "*" [[package]] name = "networkx" -version = "3.2.1" +version = "3.3" description = "Python package for creating and manipulating graphs and networks" optional = false -python-versions = ">=3.9" +python-versions = ">=3.10" files = [ - {file = "networkx-3.2.1-py3-none-any.whl", hash = "sha256:f18c69adc97877c42332c170849c96cefa91881c99a7cb3e95b7c659ebdc1ec2"}, - {file = "networkx-3.2.1.tar.gz", hash = "sha256:9f1bb5cf3409bf324e0a722c20bdb4c20ee39bf1c30ce8ae499c8502b0b5e0c6"}, + {file = "networkx-3.3-py3-none-any.whl", hash = "sha256:28575580c6ebdaf4505b22c6256a2b9de86b316dc63ba9e93abde3d78dfdbcf2"}, + {file = "networkx-3.3.tar.gz", hash = "sha256:0c127d8b2f4865f59ae9cb8aafcd60b5c70f3241ebd66f7defad7c4ab90126c9"}, ] [package.extras] -default = ["matplotlib (>=3.5)", "numpy (>=1.22)", "pandas (>=1.4)", "scipy (>=1.9,!=1.11.0,!=1.11.1)"] -developer = ["changelist (==0.4)", "mypy (>=1.1)", "pre-commit (>=3.2)", "rtoml"] -doc = ["nb2plots (>=0.7)", "nbconvert (<7.9)", "numpydoc (>=1.6)", "pillow (>=9.4)", "pydata-sphinx-theme (>=0.14)", "sphinx (>=7)", "sphinx-gallery (>=0.14)", "texext (>=0.6.7)"] -extra = ["lxml (>=4.6)", "pydot (>=1.4.2)", "pygraphviz (>=1.11)", "sympy (>=1.10)"] +default = ["matplotlib (>=3.6)", "numpy (>=1.23)", "pandas (>=1.4)", "scipy (>=1.9,!=1.11.0,!=1.11.1)"] +developer = ["changelist (==0.5)", "mypy (>=1.1)", "pre-commit (>=3.2)", "rtoml"] +doc = ["myst-nb (>=1.0)", "numpydoc (>=1.7)", "pillow (>=9.4)", "pydata-sphinx-theme (>=0.14)", 
"sphinx (>=7)", "sphinx-gallery (>=0.14)", "texext (>=0.6.7)"] +extra = ["lxml (>=4.6)", "pydot (>=2.0)", "pygraphviz (>=1.12)", "sympy (>=1.10)"] test = ["pytest (>=7.2)", "pytest-cov (>=4.0)"] [[package]] @@ -3699,20 +3699,20 @@ watchdog = ["watchdog (>=2.3)"] [[package]] name = "zarr" -version = "2.17.1" +version = "2.17.2" description = "An implementation of chunked, compressed, N-dimensional arrays for Python" optional = false python-versions = ">=3.9" files = [ - {file = "zarr-2.17.1-py3-none-any.whl", hash = "sha256:e25df2741a6e92645f3890f30f3136d5b57a0f8f831094b024bbcab5f2797bc7"}, - {file = "zarr-2.17.1.tar.gz", hash = "sha256:564b3aa072122546fe69a0fa21736f466b20fad41754334b62619f088ce46261"}, + {file = "zarr-2.17.2-py3-none-any.whl", hash = "sha256:70d7cc07c24280c380ef80644151d136b7503b0d83c9f214e8000ddc0f57f69b"}, + {file = "zarr-2.17.2.tar.gz", hash = "sha256:2cbaa6cb4e342d45152d4a7a4b2013c337fcd3a8e7bc98253560180de60552ce"}, ] [package.dependencies] asciitree = "*" fasteners = {version = "*", markers = "sys_platform != \"emscripten\""} numcodecs = ">=0.10.0" -numpy = ">=1.21.1" +numpy = ">=1.23" [package.extras] docs = ["numcodecs[msgpack]", "numpydoc", "pydata-sphinx-theme", "sphinx", "sphinx-automodapi", "sphinx-copybutton", "sphinx-design", "sphinx-issues"] From 863f28ffd8883cf0b21ebc4bd4f57c327ecb0cd2 Mon Sep 17 00:00:00 2001 From: Alexander Soare Date: Mon, 8 Apr 2024 13:10:19 +0100 Subject: [PATCH 26/80] ready for review --- lerobot/common/datasets/factory.py | 88 +-------------------- lerobot/common/policies/abstract.py | 76 ++++++++++++++++++ lerobot/common/policies/act/policy.py | 9 ++- lerobot/common/policies/diffusion/policy.py | 2 - lerobot/configs/policy/act.yaml | 10 ++- lerobot/scripts/eval.py | 4 +- scripts/convert_act_weights.py | 71 ----------------- 7 files changed, 92 insertions(+), 168 deletions(-) delete mode 100644 scripts/convert_act_weights.py diff --git a/lerobot/common/datasets/factory.py b/lerobot/common/datasets/factory.py 
index ed7854ff..c22ae698 100644 --- a/lerobot/common/datasets/factory.py +++ b/lerobot/common/datasets/factory.py @@ -59,96 +59,10 @@ def make_dataset( transform=Prod(in_keys=clsfunc.image_keys, prod=1 / 255.0), ) stats = compute_or_load_stats(stats_dataset) + # TODO(rcadene): remove this and put it in config. Ideally we want to reproduce SOTA results just with mean_std normalization_mode = "mean_std" if cfg.env.name == "aloha" else "min_max" - # # TODO(now): These stats are needed to use their pretrained model for sim_transfer_cube_human. - # # (Pdb) stats['observation']['state']['mean'] - # # tensor([-0.0071, -0.6293, 1.0351, -0.0517, -0.4642, -0.0754, 0.4751, -0.0373, - # # -0.3324, 0.9034, -0.2258, -0.3127, -0.2412, 0.6866]) - # stats["observation", "state", "mean"] = torch.tensor( - # [ - # -0.00740268, - # -0.63187766, - # 1.0356655, - # -0.05027218, - # -0.46199223, - # -0.07467502, - # 0.47467607, - # -0.03615446, - # -0.33203387, - # 0.9038929, - # -0.22060776, - # -0.31011587, - # -0.23484458, - # 0.6842416, - # ] - # ) - # # (Pdb) stats['observation']['state']['std'] - # # tensor([0.0022, 0.0520, 0.0291, 0.0092, 0.0267, 0.0145, 0.0563, 0.0179, 0.0494, - # # 0.0326, 0.0476, 0.0535, 0.0956, 0.0513]) - # stats["observation", "state", "std"] = torch.tensor( - # [ - # 0.01219023, - # 0.2975381, - # 0.16728032, - # 0.04733803, - # 0.1486037, - # 0.08788499, - # 0.31752336, - # 0.1049916, - # 0.27933604, - # 0.18094037, - # 0.26604933, - # 0.30466506, - # 0.5298686, - # 0.25505227, - # ] - # ) - # # (Pdb) stats['action']['mean'] - # # tensor([-0.0075, -0.6346, 1.0353, -0.0465, -0.4686, -0.0738, 0.3723, -0.0396, - # # -0.3184, 0.8991, -0.2065, -0.3182, -0.2338, 0.5593]) - # stats["action"]["mean"] = torch.tensor( - # [ - # -0.00756444, - # -0.6281845, - # 1.0312834, - # -0.04664314, - # -0.47211358, - # -0.074527, - # 0.37389806, - # -0.03718753, - # -0.3261143, - # 0.8997205, - # -0.21371077, - # -0.31840396, - # -0.23360962, - # 0.551947, - # ] - # ) - # # 
(Pdb) stats['action']['std'] - # # tensor([0.0023, 0.0514, 0.0290, 0.0086, 0.0263, 0.0143, 0.0593, 0.0185, 0.0510, - # # 0.0328, 0.0478, 0.0531, 0.0945, 0.0794]) - # stats["action"]["std"] = torch.tensor( - # [ - # 0.01252818, - # 0.2957442, - # 0.16701928, - # 0.04584508, - # 0.14833844, - # 0.08763024, - # 0.30665937, - # 0.10600077, - # 0.27572668, - # 0.1805853, - # 0.26304692, - # 0.30708534, - # 0.5305411, - # 0.38381037, - # ] - # ) - # transforms.append(NormalizeTransform(stats, in_keys, mode=normalization_mode)) # noqa: F821 - transforms = v2.Compose( [ # TODO(rcadene): we need to do something about image_keys diff --git a/lerobot/common/policies/abstract.py b/lerobot/common/policies/abstract.py index beebd8ac..6dc72bef 100644 --- a/lerobot/common/policies/abstract.py +++ b/lerobot/common/policies/abstract.py @@ -4,3 +4,79 @@ import torch from torch import Tensor, nn +class AbstractPolicy(nn.Module): + """Base policy which all policies should be derived from. + + The forward method should generally not be overriden as it plays the role of handling multi-step policies. See its + documentation for more information. + + Note: + When implementing a concrete class (e.g. `AlohaDataset`, `PushtEnv`, `DiffusionPolicy`), you need to: + 1. set the required class attributes: + - for classes inheriting from `AbstractDataset`: `available_datasets` + - for classes inheriting from `AbstractEnv`: `name`, `available_tasks` + - for classes inheriting from `AbstractPolicy`: `name` + 2. update variables in `lerobot/__init__.py` (e.g. `available_envs`, `available_datasets_per_envs`, `available_policies`) + 3. update variables in `tests/test_available.py` by importing your new class + """ + + name: str | None = None # same name should be used to instantiate the policy in factory.py + + def __init__(self, n_action_steps: int | None): + """ + n_action_steps: Sets the cache size for storing action trajectories. 
If None, it is assumed that a single + action is returned by `select_actions` and that doesn't have a horizon dimension. The `forward` method then + adds that dimension. + """ + super().__init__() + assert self.name is not None, "Subclasses of `AbstractPolicy` should set the `name` class attribute." + self.n_action_steps = n_action_steps + self.clear_action_queue() + + def update(self, replay_buffer, step): + """One step of the policy's learning algorithm.""" + raise NotImplementedError("Abstract method") + + def save(self, fp): + torch.save(self.state_dict(), fp) + + def load(self, fp): + d = torch.load(fp) + self.load_state_dict(d) + + def select_actions(self, observation) -> Tensor: + """Select an action (or trajectory of actions) based on an observation during rollout. + + If n_action_steps was provided at initialization, this should return a (batch_size, n_action_steps, *) tensor of + actions. Otherwise if n_actions_steps is None, this should return a (batch_size, *) tensor of actions. + """ + raise NotImplementedError("Abstract method") + + def clear_action_queue(self): + """This should be called whenever the environment is reset.""" + if self.n_action_steps is not None: + self._action_queue = deque([], maxlen=self.n_action_steps) + + def forward(self, *args, **kwargs) -> Tensor: + """Inference step that makes multi-step policies compatible with their single-step environments. + + WARNING: In general, this should not be overriden. + + Consider a "policy" that observes the environment then charts a course of N actions to take. To make this fit + into the formalism of a TorchRL environment, we view it as being effectively a policy that (1) makes an + observation and prepares a queue of actions, (2) consumes that queue when queried, regardless of the environment + observation, (3) repopulates the action queue when empty. This method handles the aforementioned logic so that + the subclass doesn't have to. 
+ + This method effectively wraps the `select_actions` method of the subclass. The following assumptions are made: + 1. The `select_actions` method returns a Tensor of actions with shape (B, H, *) where B is the batch size, H is + the action trajectory horizon and * is the action dimensions. + 2. Prior to the `select_actions` method being called, theres is an `n_action_steps` instance attribute defined. + """ + if self.n_action_steps is None: + return self.select_actions(*args, **kwargs) + if len(self._action_queue) == 0: + # `select_actions` returns a (batch_size, n_action_steps, *) tensor, but the queue effectively has shape + # (n_action_steps, batch_size, *), hence the transpose. + self._action_queue.extend(self.select_actions(*args, **kwargs).transpose(0, 1)) + return self._action_queue.popleft() diff --git a/lerobot/common/policies/act/policy.py b/lerobot/common/policies/act/policy.py index 75d5ca0e..834dd9b2 100644 --- a/lerobot/common/policies/act/policy.py +++ b/lerobot/common/policies/act/policy.py @@ -67,7 +67,7 @@ class ActionChunkingTransformerPolicy(nn.Module): def __init__(self, cfg, device, n_action_steps=1): """ - TODO(alexander-soare): Add documentation for all parameters. + TODO(alexander-soare): Add documentation for all parameters once we have model configs established. """ super().__init__() if getattr(cfg, "n_obs_steps", 1) != 1: @@ -109,6 +109,9 @@ class ActionChunkingTransformerPolicy(nn.Module): ) # Backbone for image feature extraction. + self.image_normalizer = transforms.Normalize( + mean=cfg.image_normalization.mean, std=cfg.image_normalization.std + ) backbone_model = getattr(torchvision.models, cfg.backbone)( replace_stride_with_dilation=[False, False, cfg.dilation], pretrained=cfg.pretrained_backbone, @@ -275,9 +278,7 @@ class ActionChunkingTransformerPolicy(nn.Module): return info def forward(self, batch: dict[str, Tensor], return_loss: bool = False): - # TODO(now): Maybe this shouldn't be here? 
- normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) - images = normalize(batch["observation.images.top"]) + images = self.image_normalizer(batch["observation.images.top"]) if return_loss: # training time actions_hat, (mu_hat, log_sigma_x2_hat) = self._forward( diff --git a/lerobot/common/policies/diffusion/policy.py b/lerobot/common/policies/diffusion/policy.py index 93e5ba5d..9785358b 100644 --- a/lerobot/common/policies/diffusion/policy.py +++ b/lerobot/common/policies/diffusion/policy.py @@ -151,7 +151,6 @@ class DiffusionPolicy(nn.Module): self.diffusion.train() - data_s = time.time() - start_time loss = self.diffusion.compute_loss(batch) loss.backward() @@ -172,7 +171,6 @@ class DiffusionPolicy(nn.Module): "loss": loss.item(), "grad_norm": float(grad_norm), "lr": self.lr_scheduler.get_last_lr()[0], - "data_s": data_s, "update_s": time.time() - start_time, } diff --git a/lerobot/configs/policy/act.yaml b/lerobot/configs/policy/act.yaml index 80f50003..cd34d115 100644 --- a/lerobot/configs/policy/act.yaml +++ b/lerobot/configs/policy/act.yaml @@ -1,6 +1,6 @@ # @package _global_ -offline_steps: 2000 +offline_steps: 80000 online_steps: 0 eval_episodes: 1 @@ -54,8 +54,12 @@ policy: temporal_agg: false - state_dim: ??? - action_dim: ??? + state_dim: 14 + action_dim: 14 + + image_normalization: + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] delta_timestamps: observation.images.top: [0.0] diff --git a/lerobot/scripts/eval.py b/lerobot/scripts/eval.py index b43f4ed1..72966211 100644 --- a/lerobot/scripts/eval.py +++ b/lerobot/scripts/eval.py @@ -86,7 +86,9 @@ def eval_policy( def maybe_render_frame(env): if save_video: # noqa: B023 if return_first_video: - visu = env.envs[0].render(mode="visualization") + # TODO(now): Put mode back in. + visu = env.envs[0].render() + # visu = env.envs[0].render(mode="visualization") visu = visu[None, ...] # add batch dim else: # TODO(now): Put mode back in. 
diff --git a/scripts/convert_act_weights.py b/scripts/convert_act_weights.py deleted file mode 100644 index d5e38796..00000000 --- a/scripts/convert_act_weights.py +++ /dev/null @@ -1,71 +0,0 @@ -import torch - -from lerobot.common.policies.factory import make_policy -from lerobot.common.utils import init_hydra_config - -cfg = init_hydra_config( - "/home/alexander/Projects/lerobot/outputs/train/act_aloha_sim_transfer_cube_human/.hydra/config.yaml" -) - -policy = make_policy(cfg) - -state_dict = torch.load("/home/alexander/Projects/act/outputs/sim_transfer_cube_human_vae/policy_last.ckpt") - -# Remove keys based on what they start with. - -start_removals = [ - # There is a bug that means the pretrained model doesn't even use the final decoder layers. - *[f"model.transformer.decoder.layers.{i}" for i in range(1, 7)], - "model.is_pad_head.", -] - -for to_remove in start_removals: - for k in list(state_dict.keys()): - if k.startswith(to_remove): - del state_dict[k] - - -# Replace keys based on what they start with. 
- -start_replacements = [ - ("model.", ""), - ("query_embed.weight", "pos_embed.weight"), - ("pos_table", "vae_encoder_pos_enc"), - ("pos_embed.weight", "decoder_pos_embed.weight"), - ("encoder.", "vae_encoder."), - ("encoder_action_proj.", "vae_encoder_action_input_proj."), - ("encoder_joint_proj.", "vae_encoder_robot_state_input_proj."), - ("latent_proj.", "vae_encoder_latent_output_proj."), - ("latent_proj.", "vae_encoder_latent_output_proj."), - ("input_proj.", "encoder_img_feat_input_proj."), - ("input_proj_robot_state", "encoder_robot_state_input_proj"), - ("latent_out_proj.", "encoder_latent_input_proj."), - ("transformer.encoder.", "encoder."), - ("transformer.decoder.", "decoder."), - ("backbones.0.0.body.", "backbone."), - ("additional_pos_embed.weight", "encoder_robot_and_latent_pos_embed.weight"), - ("cls_embed.weight", "vae_encoder_cls_embed.weight"), -] - -for to_replace, replace_with in start_replacements: - for k in list(state_dict.keys()): - if k.startswith(to_replace): - k_ = replace_with + k.removeprefix(to_replace) - state_dict[k_] = state_dict[k] - del state_dict[k] - - -missing_keys, unexpected_keys = policy.load_state_dict(state_dict, strict=False) - -if len(missing_keys) != 0: - print("MISSING KEYS") - print(missing_keys) -if len(unexpected_keys) != 0: - print("UNEXPECTED KEYS") - print(unexpected_keys) - -# if len(missing_keys) != 0 or len(unexpected_keys) != 0: -# print("Failed due to mismatch in state dicts.") -# exit() - -policy.save("/tmp/weights.pth") From 86365adf9fd909c5037f0a3a00a0e1d706a44c61 Mon Sep 17 00:00:00 2001 From: Alexander Soare Date: Mon, 8 Apr 2024 14:44:10 +0100 Subject: [PATCH 28/80] revision --- lerobot/common/envs/factory.py | 12 +++++++----- lerobot/common/policies/act/policy.py | 19 +++++++++++-------- lerobot/configs/policy/act.yaml | 2 +- lerobot/scripts/train.py | 3 +-- 4 files changed, 20 insertions(+), 16 deletions(-) diff --git a/lerobot/common/envs/factory.py b/lerobot/common/envs/factory.py index 
749bb533..bcbdb95d 100644 --- a/lerobot/common/envs/factory.py +++ b/lerobot/common/envs/factory.py @@ -32,12 +32,14 @@ def make_env(cfg, num_parallel_envs=0) -> gym.Env | gym.vector.SyncVectorEnv: elif cfg.env.name == "aloha": import gym_aloha # noqa: F401 - kwargs["task"] = cfg.env.task + if cfg.env.task == "sim_transfer_cube": + env_name = "gym_aloha/AlohaTransferCube-v0" + elif cfg.env.task == "sim_insertion": + env_name = "gym_aloha/AlohaInsertion-v0" + else: + raise ValueError(f"`{cfg.env.task}` has no environment implementation.") - env_fn = lambda: gym.make( # noqa: E731 - "gym_aloha/AlohaTransferCube-v0", - **kwargs, - ) + env_fn = lambda: gym.make(env_name, **kwargs) # noqa: E731 else: raise ValueError(cfg.env.name) diff --git a/lerobot/common/policies/act/policy.py b/lerobot/common/policies/act/policy.py index 834dd9b2..7fb03576 100644 --- a/lerobot/common/policies/act/policy.py +++ b/lerobot/common/policies/act/policy.py @@ -337,18 +337,21 @@ class ActionChunkingTransformerPolicy(nn.Module): robot_state_embed = self.vae_encoder_robot_state_input_proj(robot_state).unsqueeze(1) # (B, 1, D) action_embed = self.vae_encoder_action_input_proj(actions) # (B, S, D) vae_encoder_input = torch.cat([cls_embed, robot_state_embed, action_embed], axis=1) # (B, S+2, D) - # Note: detach() shouldn't be necessary but leaving it the same as the original code just in case. + # Prepare fixed positional embedding. + # Note: detach() shouldn't be necessary but leaving it the same as the original code just in case. pos_embed = self.vae_encoder_pos_enc.clone().detach() # (1, S+2, D) - # Forward pass through VAE encoder and sample the latent with the reparameterization trick. + + # Forward pass through VAE encoder. 
cls_token_out = self.vae_encoder( vae_encoder_input.permute(1, 0, 2), pos_embed=pos_embed.permute(1, 0, 2) - )[0] # (B, D) + )[0] # select the class token, with shape (B, D) latent_pdf_params = self.vae_encoder_latent_output_proj(cls_token_out) + + # Sample the latent with the reparameterization trick. mu = latent_pdf_params[:, : self.latent_dim] # This is 2log(sigma). Done this way to match the original implementation. log_sigma_x2 = latent_pdf_params[:, self.latent_dim :] - # Use reparameterization trick to sample from the latent's PDF. latent_sample = mu + log_sigma_x2.div(2).exp() * torch.randn_like(mu) else: # When not using the VAE encoder, we set the latent to be all zeros. @@ -469,7 +472,7 @@ class _TransformerEncoderLayer(nn.Module): if self.normalize_before: x = self.norm1(x) q = k = x if pos_embed is None else x + pos_embed - x = self.self_attn(q, k, value=x)[0] + x = self.self_attn(q, k, value=x)[0] # select just the output, not the attention weights x = skip + self.dropout1(x) if self.normalize_before: skip = x @@ -563,7 +566,7 @@ class _TransformerDecoderLayer(nn.Module): if self.normalize_before: x = self.norm1(x) q = k = self.maybe_add_pos_embed(x, decoder_pos_embed) - x = self.self_attn(q, k, value=x)[0] + x = self.self_attn(q, k, value=x)[0] # select just the output, not the attention weights x = skip + self.dropout1(x) if self.normalize_before: skip = x @@ -575,7 +578,7 @@ class _TransformerDecoderLayer(nn.Module): query=self.maybe_add_pos_embed(x, decoder_pos_embed), key=self.maybe_add_pos_embed(encoder_out, encoder_pos_embed), value=encoder_out, - )[0] + )[0] # select just the output, not the attention weights x = skip + self.dropout2(x) if self.normalize_before: skip = x @@ -634,7 +637,7 @@ class _SinusoidalPositionEmbedding2D(nn.Module): Returns: A (1, C, H, W) batch of corresponding sinusoidal positional embeddings. 
""" - not_mask = torch.ones_like(x[0, [0]]) # (1, H, W) + not_mask = torch.ones_like(x[0, :1]) # (1, H, W) # Note: These are like range(1, H+1) and range(1, W+1) respectively, but in most implementations # they would be range(0, H) and range(0, W). Keeping it at as to match the original code. y_range = not_mask.cumsum(1, dtype=torch.float32) diff --git a/lerobot/configs/policy/act.yaml b/lerobot/configs/policy/act.yaml index cd34d115..79729a02 100644 --- a/lerobot/configs/policy/act.yaml +++ b/lerobot/configs/policy/act.yaml @@ -64,4 +64,4 @@ policy: delta_timestamps: observation.images.top: [0.0] observation.state: [0.0] - action: [0.0, 0.02, 0.04, 0.06, 0.08, 0.1, 0.12, 0.14, 0.16, 0.18, 0.2, 0.22, 0.24, 0.26, 0.28, 0.3, 0.32, 0.34, 0.36, 0.38, 0.4, 0.42, 0.44, 0.46, 0.48, 0.5, 0.52, 0.54, 0.56, 0.58, 0.6, 0.62, 0.64, 0.66, 0.68, 0.70, 0.72, 0.74, 0.76, 0.78, 0.8, 0.82, 0.84, 0.86, 0.88, 0.9, 0.92, 0.94, 0.96, 0.98, 1.0, 1.02, 1.04, 1.06, 1.08, 1.1, 1.12, 1.14, 1.16, 1.18, 1.2, 1.22, 1.24, 1.26, 1.28, 1.3, 1.32, 1.34, 1.36, 1.38, 1.40, 1.42, 1.44, 1.46, 1.48, 1.5, 1.52, 1.54, 1.56, 1.58, 1.6, 1.62, 1.64, 1.66, 1.68, 1.7, 1.72, 1.74, 1.76, 1.78, 1.8, 1.82, 1.84, 1.86, 1.88, 1.90, 1.92, 1.94, 1.96, 1.98] + action: "[i / ${fps} for i in range(${horizon})]" diff --git a/lerobot/scripts/train.py b/lerobot/scripts/train.py index d49dfff8..caaf5182 100644 --- a/lerobot/scripts/train.py +++ b/lerobot/scripts/train.py @@ -152,7 +152,6 @@ def train(cfg: dict, out_dir=None, job_name=None): logging.info("make_policy") policy = make_policy(cfg) - policy.save("act.pt") num_learnable_params = sum(p.numel() for p in policy.parameters() if p.requires_grad) num_total_params = sum(p.numel() for p in policy.parameters()) @@ -198,7 +197,7 @@ def train(cfg: dict, out_dir=None, job_name=None): is_offline = True dataloader = torch.utils.data.DataLoader( dataset, - num_workers=0, + num_workers=4, batch_size=cfg.policy.batch_size, shuffle=True, pin_memory=cfg.device != "cpu", From 
62b18a7607d955eed60ba7eff70b71162f5acaf2 Mon Sep 17 00:00:00 2001 From: Alexander Soare Date: Mon, 8 Apr 2024 14:51:45 +0100 Subject: [PATCH 29/80] Add type hints --- lerobot/common/policies/act/policy.py | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/lerobot/common/policies/act/policy.py b/lerobot/common/policies/act/policy.py index 7fb03576..e14a1e88 100644 --- a/lerobot/common/policies/act/policy.py +++ b/lerobot/common/policies/act/policy.py @@ -176,7 +176,7 @@ class ActionChunkingTransformerPolicy(nn.Module): if self.n_action_steps is not None: self._action_queue = deque([], maxlen=self.n_action_steps) - def select_action(self, batch: dict[str, Tensor], *_): + def select_action(self, batch: dict[str, Tensor], *_) -> Tensor: """ This method wraps `select_actions` in order to return one action at a time for execution in the environment. It works by managing the actions in a queue and only calling `select_actions` when the @@ -189,7 +189,7 @@ class ActionChunkingTransformerPolicy(nn.Module): return self._action_queue.popleft() @torch.no_grad() - def select_actions(self, batch: dict[str, Tensor]): + def select_actions(self, batch: dict[str, Tensor]) -> Tensor: """Use the action chunking transformer to generate a sequence of actions.""" self.eval() self._preprocess_batch(batch, add_obs_steps_dim=True) @@ -211,7 +211,7 @@ class ActionChunkingTransformerPolicy(nn.Module): return action[: self.n_action_steps] - def __call__(self, *args, **kwargs): + def __call__(self, *args, **kwargs) -> dict: # TODO(now): Temporary bridge until we know what to do about the `update` method. return self.update(*args, **kwargs) @@ -244,7 +244,7 @@ class ActionChunkingTransformerPolicy(nn.Module): # Note: no squeeze is required for "observation.images.top" because then we'd have to unsqueeze to get # the image index dimension. 
- def update(self, batch, *_): + def update(self, batch, *_) -> dict: start_time = time.time() self._preprocess_batch(batch) @@ -277,7 +277,7 @@ class ActionChunkingTransformerPolicy(nn.Module): return info - def forward(self, batch: dict[str, Tensor], return_loss: bool = False): + def forward(self, batch: dict[str, Tensor], return_loss: bool = False) -> dict | Tensor: images = self.image_normalizer(batch["observation.images.top"]) if return_loss: # training time @@ -309,7 +309,9 @@ class ActionChunkingTransformerPolicy(nn.Module): action, _ = self._forward(batch["observation.state"], images) return action - def _forward(self, robot_state: Tensor, image: Tensor, actions: Tensor | None = None): + def _forward( + self, robot_state: Tensor, image: Tensor, actions: Tensor | None = None + ) -> tuple[Tensor, tuple[Tensor, Tensor]]: """ Args: robot_state: (B, J) batch of robot joint configurations. @@ -410,7 +412,7 @@ class ActionChunkingTransformerPolicy(nn.Module): actions = self.action_head(decoder_out) - return actions, [mu, log_sigma_x2] + return actions, (mu, log_sigma_x2) def save(self, fp): torch.save(self.state_dict(), fp) From 0b4c42f4ffa6c0efcaf30f8b407789150bc001d2 Mon Sep 17 00:00:00 2001 From: Alexander Soare Date: Mon, 8 Apr 2024 14:59:37 +0100 Subject: [PATCH 30/80] typos --- lerobot/common/policies/act/policy.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lerobot/common/policies/act/policy.py b/lerobot/common/policies/act/policy.py index e14a1e88..b8276214 100644 --- a/lerobot/common/policies/act/policy.py +++ b/lerobot/common/policies/act/policy.py @@ -641,7 +641,7 @@ class _SinusoidalPositionEmbedding2D(nn.Module): """ not_mask = torch.ones_like(x[0, :1]) # (1, H, W) # Note: These are like range(1, H+1) and range(1, W+1) respectively, but in most implementations - # they would be range(0, H) and range(0, W). Keeping it at as to match the original code. + # they would be range(0, H) and range(0, W). 
Keeping it at as is to match the original code. y_range = not_mask.cumsum(1, dtype=torch.float32) x_range = not_mask.cumsum(2, dtype=torch.float32) @@ -659,7 +659,7 @@ class _SinusoidalPositionEmbedding2D(nn.Module): y_range = y_range.unsqueeze(-1) / inverse_frequency # (1, H, W, 1) # Note: this stack then flatten operation results in interleaved sine and cosine terms. - # pos_embed_x and pos_embed are (1, H, W, C // 2). + # pos_embed_x and pos_embed_y are (1, H, W, C // 2). pos_embed_x = torch.stack((x_range[..., 0::2].sin(), x_range[..., 1::2].cos()), dim=-1).flatten(3) pos_embed_y = torch.stack((y_range[..., 0::2].sin(), y_range[..., 1::2].cos()), dim=-1).flatten(3) pos_embed = torch.cat((pos_embed_y, pos_embed_x), dim=3).permute(0, 3, 1, 2) # (1, C, H, W) From 91e0e4e175236b859cdc463d8d3418b22d9c2ef8 Mon Sep 17 00:00:00 2001 From: Alexander Soare Date: Mon, 8 Apr 2024 15:05:40 +0100 Subject: [PATCH 31/80] rever change --- lerobot/scripts/eval.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lerobot/scripts/eval.py b/lerobot/scripts/eval.py index 72966211..802a2eb6 100644 --- a/lerobot/scripts/eval.py +++ b/lerobot/scripts/eval.py @@ -255,7 +255,7 @@ def eval(cfg: dict, out_dir=None, stats_path=None): transform = make_dataset(cfg, stats_path=stats_path).transform logging.info("Making environment.") - env = make_env(cfg, num_parallel_envs=cfg.rollout_batch_size) + env = make_env(cfg, num_parallel_envs=cfg.eval_episodes) # when policy is None, rollout a random policy policy = make_policy(cfg) if cfg.policy.pretrained_model_path else None From 70aaf1c4cbefeb525370ad466aa8876c68f1a30f Mon Sep 17 00:00:00 2001 From: Cadene Date: Mon, 8 Apr 2024 14:02:03 +0000 Subject: [PATCH 32/80] test_datasets.py are passing! 
--- lerobot/common/datasets/aloha.py | 11 ++- lerobot/common/datasets/pusht.py | 8 +- lerobot/common/datasets/utils.py | 4 +- lerobot/common/policies/tdmpc/policy.py | 2 +- lerobot/configs/policy/diffusion.yaml | 6 +- .../aloha_sim_insertion_human/data_dict.pth | Bin 0 -> 46089944 bytes .../data_ids_per_episode.pth | Bin 0 -> 1629 bytes .../replay_buffer/action.memmap | 3 - .../replay_buffer/episode.memmap | 3 - .../replay_buffer/frame_id.memmap | 3 - .../replay_buffer/meta.json | 1 - .../replay_buffer/next/done.memmap | 3 - .../replay_buffer/next/meta.json | 1 - .../next/observation/image/meta.json | 1 - .../next/observation/image/top.memmap | 3 - .../replay_buffer/next/observation/meta.json | 1 - .../next/observation/state.memmap | 3 - .../replay_buffer/observation/image/meta.json | 1 - .../observation/image/top.memmap | 3 - .../replay_buffer/observation/meta.json | 1 - .../replay_buffer/observation/state.memmap | 3 - .../data/aloha_sim_insertion_human/stats.pth | Bin 4370 -> 3922 bytes .../data_dict.pth | Bin 0 -> 46089944 bytes .../data_ids_per_episode.pth | Bin 0 -> 1629 bytes .../replay_buffer/action.memmap | 3 - .../replay_buffer/episode.memmap | 3 - .../replay_buffer/frame_id.memmap | 3 - .../replay_buffer/meta.json | 1 - .../replay_buffer/next/done.memmap | 3 - .../replay_buffer/next/meta.json | 1 - .../next/observation/image/meta.json | 1 - .../next/observation/image/top.memmap | 3 - .../replay_buffer/next/observation/meta.json | 1 - .../next/observation/state.memmap | 3 - .../replay_buffer/observation/image/meta.json | 1 - .../observation/image/top.memmap | 3 - .../replay_buffer/observation/meta.json | 1 - .../replay_buffer/observation/state.memmap | 3 - .../aloha_sim_insertion_scripted/stats.pth | Bin 4370 -> 3922 bytes .../data_dict.pth | Bin 0 -> 46089944 bytes .../data_ids_per_episode.pth | Bin 0 -> 1629 bytes .../replay_buffer/action.memmap | 3 - .../replay_buffer/episode.memmap | 3 - .../replay_buffer/frame_id.memmap | 3 - 
.../replay_buffer/meta.json | 1 - .../replay_buffer/next/done.memmap | 3 - .../replay_buffer/next/meta.json | 1 - .../next/observation/image/meta.json | 1 - .../next/observation/image/top.memmap | 3 - .../replay_buffer/next/observation/meta.json | 1 - .../next/observation/state.memmap | 3 - .../replay_buffer/observation/image/meta.json | 1 - .../observation/image/top.memmap | 3 - .../replay_buffer/observation/meta.json | 1 - .../replay_buffer/observation/state.memmap | 3 - .../aloha_sim_transfer_cube_human/stats.pth | Bin 4370 -> 3922 bytes .../data_dict.pth | Bin 0 -> 46089944 bytes .../data_ids_per_episode.pth | Bin 0 -> 1629 bytes .../replay_buffer/action.memmap | 3 - .../replay_buffer/episode.memmap | 3 - .../replay_buffer/frame_id.memmap | 3 - .../replay_buffer/meta.json | 1 - .../replay_buffer/next/done.memmap | 3 - .../replay_buffer/next/meta.json | 1 - .../next/observation/image/meta.json | 1 - .../next/observation/image/top.memmap | 3 - .../replay_buffer/next/observation/meta.json | 1 - .../next/observation/state.memmap | 3 - .../replay_buffer/observation/image/meta.json | 1 - .../observation/image/top.memmap | 3 - .../replay_buffer/observation/meta.json | 1 - .../replay_buffer/observation/state.memmap | 3 - .../stats.pth | Bin 4370 -> 3922 bytes tests/data/pusht/data_dict.pth | Bin 0 -> 5535316 bytes tests/data/pusht/data_ids_per_episode.pth | Bin 0 -> 1629 bytes tests/data/pusht/replay_buffer/action.memmap | 3 - tests/data/pusht/replay_buffer/episode.memmap | 3 - .../data/pusht/replay_buffer/frame_id.memmap | 3 - tests/data/pusht/replay_buffer/meta.json | 1 - .../data/pusht/replay_buffer/next/done.memmap | 3 - tests/data/pusht/replay_buffer/next/meta.json | 1 - .../next/observation/image.memmap | 3 - .../replay_buffer/next/observation/meta.json | 1 - .../next/observation/state.memmap | 3 - .../pusht/replay_buffer/next/reward.memmap | 3 - .../pusht/replay_buffer/next/success.memmap | 3 - .../replay_buffer/observation/image.memmap | 3 - 
.../pusht/replay_buffer/observation/meta.json | 1 - .../replay_buffer/observation/state.memmap | 3 - tests/data/pusht/stats.pth | Bin 4306 -> 3922 bytes tests/data/xarm_lift_medium/data_dict.pth | Bin 0 -> 1064598 bytes .../xarm_lift_medium/data_ids_per_episode.pth | Bin 0 -> 1894 bytes .../replay_buffer/action.memmap | 3 - .../replay_buffer/episode.memmap | 3 - .../replay_buffer/frame_id.memmap | 3 - .../xarm_lift_medium/replay_buffer/meta.json | 1 - .../replay_buffer/next/done.memmap | 3 - .../replay_buffer/next/meta.json | 1 - .../next/observation/image.memmap | 3 - .../replay_buffer/next/observation/meta.json | 1 - .../next/observation/state.memmap | 3 - .../replay_buffer/next/reward.memmap | 3 - .../replay_buffer/observation/image.memmap | 3 - .../replay_buffer/observation/meta.json | 1 - .../replay_buffer/observation/state.memmap | 3 - tests/data/xarm_lift_medium/stats.pth | Bin 4306 -> 3922 bytes tests/scripts/mock_dataset.py | 15 +++- tests/test_datasets.py | 85 ++++++++++++------ tests/test_policies.py | 2 +- 109 files changed, 90 insertions(+), 228 deletions(-) create mode 100644 tests/data/aloha_sim_insertion_human/data_dict.pth create mode 100644 tests/data/aloha_sim_insertion_human/data_ids_per_episode.pth delete mode 100644 tests/data/aloha_sim_insertion_human/replay_buffer/action.memmap delete mode 100644 tests/data/aloha_sim_insertion_human/replay_buffer/episode.memmap delete mode 100644 tests/data/aloha_sim_insertion_human/replay_buffer/frame_id.memmap delete mode 100644 tests/data/aloha_sim_insertion_human/replay_buffer/meta.json delete mode 100644 tests/data/aloha_sim_insertion_human/replay_buffer/next/done.memmap delete mode 100644 tests/data/aloha_sim_insertion_human/replay_buffer/next/meta.json delete mode 100644 tests/data/aloha_sim_insertion_human/replay_buffer/next/observation/image/meta.json delete mode 100644 tests/data/aloha_sim_insertion_human/replay_buffer/next/observation/image/top.memmap delete mode 100644 
tests/data/aloha_sim_insertion_human/replay_buffer/next/observation/meta.json delete mode 100644 tests/data/aloha_sim_insertion_human/replay_buffer/next/observation/state.memmap delete mode 100644 tests/data/aloha_sim_insertion_human/replay_buffer/observation/image/meta.json delete mode 100644 tests/data/aloha_sim_insertion_human/replay_buffer/observation/image/top.memmap delete mode 100644 tests/data/aloha_sim_insertion_human/replay_buffer/observation/meta.json delete mode 100644 tests/data/aloha_sim_insertion_human/replay_buffer/observation/state.memmap create mode 100644 tests/data/aloha_sim_insertion_scripted/data_dict.pth create mode 100644 tests/data/aloha_sim_insertion_scripted/data_ids_per_episode.pth delete mode 100644 tests/data/aloha_sim_insertion_scripted/replay_buffer/action.memmap delete mode 100644 tests/data/aloha_sim_insertion_scripted/replay_buffer/episode.memmap delete mode 100644 tests/data/aloha_sim_insertion_scripted/replay_buffer/frame_id.memmap delete mode 100644 tests/data/aloha_sim_insertion_scripted/replay_buffer/meta.json delete mode 100644 tests/data/aloha_sim_insertion_scripted/replay_buffer/next/done.memmap delete mode 100644 tests/data/aloha_sim_insertion_scripted/replay_buffer/next/meta.json delete mode 100644 tests/data/aloha_sim_insertion_scripted/replay_buffer/next/observation/image/meta.json delete mode 100644 tests/data/aloha_sim_insertion_scripted/replay_buffer/next/observation/image/top.memmap delete mode 100644 tests/data/aloha_sim_insertion_scripted/replay_buffer/next/observation/meta.json delete mode 100644 tests/data/aloha_sim_insertion_scripted/replay_buffer/next/observation/state.memmap delete mode 100644 tests/data/aloha_sim_insertion_scripted/replay_buffer/observation/image/meta.json delete mode 100644 tests/data/aloha_sim_insertion_scripted/replay_buffer/observation/image/top.memmap delete mode 100644 tests/data/aloha_sim_insertion_scripted/replay_buffer/observation/meta.json delete mode 100644 
tests/data/aloha_sim_insertion_scripted/replay_buffer/observation/state.memmap create mode 100644 tests/data/aloha_sim_transfer_cube_human/data_dict.pth create mode 100644 tests/data/aloha_sim_transfer_cube_human/data_ids_per_episode.pth delete mode 100644 tests/data/aloha_sim_transfer_cube_human/replay_buffer/action.memmap delete mode 100644 tests/data/aloha_sim_transfer_cube_human/replay_buffer/episode.memmap delete mode 100644 tests/data/aloha_sim_transfer_cube_human/replay_buffer/frame_id.memmap delete mode 100644 tests/data/aloha_sim_transfer_cube_human/replay_buffer/meta.json delete mode 100644 tests/data/aloha_sim_transfer_cube_human/replay_buffer/next/done.memmap delete mode 100644 tests/data/aloha_sim_transfer_cube_human/replay_buffer/next/meta.json delete mode 100644 tests/data/aloha_sim_transfer_cube_human/replay_buffer/next/observation/image/meta.json delete mode 100644 tests/data/aloha_sim_transfer_cube_human/replay_buffer/next/observation/image/top.memmap delete mode 100644 tests/data/aloha_sim_transfer_cube_human/replay_buffer/next/observation/meta.json delete mode 100644 tests/data/aloha_sim_transfer_cube_human/replay_buffer/next/observation/state.memmap delete mode 100644 tests/data/aloha_sim_transfer_cube_human/replay_buffer/observation/image/meta.json delete mode 100644 tests/data/aloha_sim_transfer_cube_human/replay_buffer/observation/image/top.memmap delete mode 100644 tests/data/aloha_sim_transfer_cube_human/replay_buffer/observation/meta.json delete mode 100644 tests/data/aloha_sim_transfer_cube_human/replay_buffer/observation/state.memmap create mode 100644 tests/data/aloha_sim_transfer_cube_scripted/data_dict.pth create mode 100644 tests/data/aloha_sim_transfer_cube_scripted/data_ids_per_episode.pth delete mode 100644 tests/data/aloha_sim_transfer_cube_scripted/replay_buffer/action.memmap delete mode 100644 tests/data/aloha_sim_transfer_cube_scripted/replay_buffer/episode.memmap delete mode 100644 
tests/data/aloha_sim_transfer_cube_scripted/replay_buffer/frame_id.memmap delete mode 100644 tests/data/aloha_sim_transfer_cube_scripted/replay_buffer/meta.json delete mode 100644 tests/data/aloha_sim_transfer_cube_scripted/replay_buffer/next/done.memmap delete mode 100644 tests/data/aloha_sim_transfer_cube_scripted/replay_buffer/next/meta.json delete mode 100644 tests/data/aloha_sim_transfer_cube_scripted/replay_buffer/next/observation/image/meta.json delete mode 100644 tests/data/aloha_sim_transfer_cube_scripted/replay_buffer/next/observation/image/top.memmap delete mode 100644 tests/data/aloha_sim_transfer_cube_scripted/replay_buffer/next/observation/meta.json delete mode 100644 tests/data/aloha_sim_transfer_cube_scripted/replay_buffer/next/observation/state.memmap delete mode 100644 tests/data/aloha_sim_transfer_cube_scripted/replay_buffer/observation/image/meta.json delete mode 100644 tests/data/aloha_sim_transfer_cube_scripted/replay_buffer/observation/image/top.memmap delete mode 100644 tests/data/aloha_sim_transfer_cube_scripted/replay_buffer/observation/meta.json delete mode 100644 tests/data/aloha_sim_transfer_cube_scripted/replay_buffer/observation/state.memmap create mode 100644 tests/data/pusht/data_dict.pth create mode 100644 tests/data/pusht/data_ids_per_episode.pth delete mode 100644 tests/data/pusht/replay_buffer/action.memmap delete mode 100644 tests/data/pusht/replay_buffer/episode.memmap delete mode 100644 tests/data/pusht/replay_buffer/frame_id.memmap delete mode 100644 tests/data/pusht/replay_buffer/meta.json delete mode 100644 tests/data/pusht/replay_buffer/next/done.memmap delete mode 100644 tests/data/pusht/replay_buffer/next/meta.json delete mode 100644 tests/data/pusht/replay_buffer/next/observation/image.memmap delete mode 100644 tests/data/pusht/replay_buffer/next/observation/meta.json delete mode 100644 tests/data/pusht/replay_buffer/next/observation/state.memmap delete mode 100644 tests/data/pusht/replay_buffer/next/reward.memmap 
delete mode 100644 tests/data/pusht/replay_buffer/next/success.memmap delete mode 100644 tests/data/pusht/replay_buffer/observation/image.memmap delete mode 100644 tests/data/pusht/replay_buffer/observation/meta.json delete mode 100644 tests/data/pusht/replay_buffer/observation/state.memmap create mode 100644 tests/data/xarm_lift_medium/data_dict.pth create mode 100644 tests/data/xarm_lift_medium/data_ids_per_episode.pth delete mode 100644 tests/data/xarm_lift_medium/replay_buffer/action.memmap delete mode 100644 tests/data/xarm_lift_medium/replay_buffer/episode.memmap delete mode 100644 tests/data/xarm_lift_medium/replay_buffer/frame_id.memmap delete mode 100644 tests/data/xarm_lift_medium/replay_buffer/meta.json delete mode 100644 tests/data/xarm_lift_medium/replay_buffer/next/done.memmap delete mode 100644 tests/data/xarm_lift_medium/replay_buffer/next/meta.json delete mode 100644 tests/data/xarm_lift_medium/replay_buffer/next/observation/image.memmap delete mode 100644 tests/data/xarm_lift_medium/replay_buffer/next/observation/meta.json delete mode 100644 tests/data/xarm_lift_medium/replay_buffer/next/observation/state.memmap delete mode 100644 tests/data/xarm_lift_medium/replay_buffer/next/reward.memmap delete mode 100644 tests/data/xarm_lift_medium/replay_buffer/observation/image.memmap delete mode 100644 tests/data/xarm_lift_medium/replay_buffer/observation/meta.json delete mode 100644 tests/data/xarm_lift_medium/replay_buffer/observation/state.memmap diff --git a/lerobot/common/datasets/aloha.py b/lerobot/common/datasets/aloha.py index 102de08e..639acf1f 100644 --- a/lerobot/common/datasets/aloha.py +++ b/lerobot/common/datasets/aloha.py @@ -158,6 +158,7 @@ class AlohaDataset(torch.utils.data.Dataset): self.data_ids_per_episode = {} ep_dicts = [] + idx0 = idx1 = 0 logging.info("Initialize and feed offline buffer") for ep_id in tqdm.tqdm(range(NUM_EPISODES[self.dataset_id])): ep_path = raw_dir / f"episode_{ep_id}.hdf5" @@ -165,7 +166,7 @@ class 
AlohaDataset(torch.utils.data.Dataset): num_frames = ep["/action"].shape[0] # last step of demonstration is considered done - done = torch.zeros(num_frames, 1, dtype=torch.bool) + done = torch.zeros(num_frames, dtype=torch.bool) done[-1] = True state = torch.from_numpy(ep["/observations/qpos"][:]) @@ -192,6 +193,14 @@ class AlohaDataset(torch.utils.data.Dataset): ep_dicts.append(ep_dict) + idx1 += num_frames + + assert isinstance(ep_id, int) + self.data_ids_per_episode[ep_id] = torch.arange(idx0, idx1, 1) + assert len(self.data_ids_per_episode[ep_id]) == num_frames + + idx0 = idx1 + self.data_dict = {} keys = ep_dicts[0].keys() diff --git a/lerobot/common/datasets/pusht.py b/lerobot/common/datasets/pusht.py index 9088fdf4..b468637e 100644 --- a/lerobot/common/datasets/pusht.py +++ b/lerobot/common/datasets/pusht.py @@ -193,8 +193,6 @@ class PushtDataset(torch.utils.data.Dataset): idx0 = 0 for episode_id in tqdm.tqdm(range(num_episodes)): idx1 = dataset_dict.meta["episode_ends"][episode_id] - # to create test artifact - # idx1 = 51 num_frames = idx1 - idx0 @@ -207,9 +205,9 @@ class PushtDataset(torch.utils.data.Dataset): block_pos = state[:, 2:4] block_angle = state[:, 4] - reward = torch.zeros(num_frames, 1) - success = torch.zeros(num_frames, 1, dtype=torch.bool) - done = torch.zeros(num_frames, 1, dtype=torch.bool) + reward = torch.zeros(num_frames) + success = torch.zeros(num_frames, dtype=torch.bool) + done = torch.zeros(num_frames, dtype=torch.bool) for i in range(num_frames): space = pymunk.Space() space.gravity = 0, 0 diff --git a/lerobot/common/datasets/utils.py b/lerobot/common/datasets/utils.py index 18b091cd..3b4aacfc 100644 --- a/lerobot/common/datasets/utils.py +++ b/lerobot/common/datasets/utils.py @@ -92,11 +92,11 @@ def load_data_with_delta_timestamps( # TODO(rcadene): synchronize timestamps + interpolation if needed - tol = 0.02 + tol = 0.04 is_pad = min_ > tol assert is_contiguously_true_or_false(is_pad), ( - "One or several timestamps 
unexpectedly violate the tolerance." + f"One or several timestamps unexpectedly violate the tolerance ({min_} > {tol=})." "This might be due to synchronization issues with timestamps during data collection." ) diff --git a/lerobot/common/policies/tdmpc/policy.py b/lerobot/common/policies/tdmpc/policy.py index f763dbc6..04aa5b11 100644 --- a/lerobot/common/policies/tdmpc/policy.py +++ b/lerobot/common/policies/tdmpc/policy.py @@ -429,7 +429,7 @@ class TDMPCPolicy(nn.Module): batch[key] = batch[key].transpose(1, 0) action = batch["action"] - reward = batch["next.reward"][:, :, None] # add extra channel dimension + reward = batch["next.reward"] # idxs = batch["index"] # TODO(rcadene): use idxs to update sampling weights done = torch.zeros_like(reward, dtype=torch.bool, device=reward.device) mask = torch.ones_like(reward, dtype=torch.bool, device=reward.device) diff --git a/lerobot/configs/policy/diffusion.yaml b/lerobot/configs/policy/diffusion.yaml index c3bebe2d..6da62e10 100644 --- a/lerobot/configs/policy/diffusion.yaml +++ b/lerobot/configs/policy/diffusion.yaml @@ -63,9 +63,9 @@ policy: grad_clip_norm: 10 delta_timestamps: - observation.image: [-.1, 0] - observation.state: [-.1, 0] - action: [-.1, 0, .1, .2, .3, .4, .5, .6, .7, .8, .9, 1.0, 1.1, 1.2, 1.3, 1.4] + observation.image: [-0.1, 0] + observation.state: [-0.1, 0] + action: [-0.1, 0, .1, .2, .3, .4, .5, .6, .7, .8, .9, 1.0, 1.1, 1.2, 1.3, 1.4] noise_scheduler: _target_: diffusers.schedulers.scheduling_ddpm.DDPMScheduler diff --git a/tests/data/aloha_sim_insertion_human/data_dict.pth b/tests/data/aloha_sim_insertion_human/data_dict.pth new file mode 100644 index 0000000000000000000000000000000000000000..1370c9ea3e37d797361ce677782a5b74520ba40b GIT binary patch literal 46089944 zcmeF42VfM(_3-6x->@MrZr<$K)8nugsXf9|;-{xfj}L}3=wa{CE6RRjn*F9I z`_1F8g=(gegY35?KpzsWN}z4@v)`IP>%L8_`*vFQ9UBs`iaXQncSYInCcU1XoR~T< ze!Bf0S_j|vQS;``6uW*e>3VSlLslMc-UbOe4 zb@u1$9DsGcKq_xUxelDbm9%ddWfSmJ;BZQT|S#I%K1WuLx7 
zhrHZ&LsFBn95fcN?Nqw!my7Z^%oi6Gwc=Jtda`Ry4INbUWt!{GC-S*|F^=Y*(@R=^ zVykytePBS)%nz<=E^JA6-Ml%EyJvgSpa+w-w+}W^9obsi99og?`s?02?&x!m23`MdLF<6xCoOm6PZ-qfd^Rxc zbc*Y{Y8AH{JW3^dHz7=O_e)Vq03mNIIpEl=l4IhsS+HiAz z>(p9RdzPe?~)1|HRno1lY6&oxKSM_%+o{{c4(2~b} z|95N9k*vJd$(MUrB5n$4{%YWk!1@!(u0{VS;0k*D!SUmw($*6{Ep+spvcuxs73Nqs zA>B3gojk5Aeqhj!HF>RzQhQh~zdf~iYs2Bd|BO#|RbOAo{rEq>aP0Z4v~}GVIgVmW zt>wV~W;&h-Pj~IOn8y`;^u2aU7=&zfD4w2hV_o-!Rf9SPULY<-||Y(ns0MkEpa_8kUJ0 zvn<_T*K8f}QK0?$d9K1?g{N zx3A{4Mt*d=rSx1&^MCL9Ja9w(T-Va!h1}mO{_2=;tgJOODa7&OEq}6{J-WlO`1*8L z!t6Zm_S%<&USFTr`snm~EH}RNX7k%md=WTn!dzGE%Z1#11OMW9u(YhT`RRN|=DELH zdJWp=*z=n-*HKFzchwuO1icrT*Lvu?do6$J`)+e~$(Mm&{%4MB&y$5*#Q*;6n7pK{ z^|^;GJMNqQorSyZpkr}$nrr1%dEBu07lU4WD6jRdlE*AHt3PdyeD-gFPv*{X{r>(! z?jLXd#bK!~YwcAU(9(E+fc2Z6RgQm5NOQejnadsgY)H`eDY>nWXFO^7&hmNl_T%3K zK5_jV*Wa%$Ik{Ito7ZkeOnriU1|N;RqOcInN-*LKDk`Yxv4=8SAyt1X`wUXBAeluHKzr5Od)u#=P?Y~QLeU+KR&EnPv z#r|naYmaTOS{kqaMf1esfS?BJEZ2sW1>Dn#54G$&S=c)9-OQGQZ(nOo+jP`%UxnTE zL3%cK-7o5bo>{iG_2$FzmW-pnZZ1f@D(KP&iLO7N%jcd98q?wm%5MGgja@BsQ?Iu^ z{D&sT?|uVJ+?d6U|NM_Zt1{DDubs8V(y->+=BK{ACg{m~5?ntn&Ex)3ez4`$t!rER z9%^m*{(~E=f4Sb_7?Ygn+V))Sk;ht`M!FlJP8(Ww6f5BOL@D}T{_n&Z_IXKC6=#Oi-sN3$g z#Ql7<^Sb*#v23=aIS$=;L(uipC%DeEY~;qDd)4_-YM%4!Euqekcy!*?K@HO)TpPBo;{JV0sPm6Y%Pf0G zCpvR2cUYg^=yd$!bhs;eYzDXfPj^~|fBk^<*gyYm3EQ^cF{0pKfkFL8xn><$&IR@v z?M#2@5$j*(+MQ>I+-c1^)#^yqwqrDgwcZ2WqN>#vh0 zb5ET5Ptd$=XRY-g1Xz3Zu5?`YR9N7+s6nos^;5VdZ%%eb_WO->cWJV-xaU3AtNT9C zGBA6v>lZQOxp#a2HfZH@KeKMA`>*9UaeEz8mVQ3y+kd>|`uU;>+}P}B=W9d0v*uoy z>AZdEJ=TwZG_>WuJyuubu+d!b%*#PPS@s93?cMJz8!O5j`}69LA8L5P^-95LF7Szo z&OaAj&3$rWlGF09d#p{FX)S4=1i3aQMRKQKYYpo2n{TaG)qQ9Av~s)S?D>g~;0yg- z|9Lu+yZYL3&fw8EaEl%p?Yu4NUhA~Cx3y%v80Z>!Hk_;aPi>Inx&ZEB*Eg2po<)wo zocyC>_V;~V4}KNKMdplgj(PkhuHdJmorAu(*P7U{w`IPT^3Xva}^hI z?>{VGOet_=ZywyzE2@v{7oUc5aTCTmAFH{U%d(Dj&RKV#^-%VamM4Q=c0KsJFz(B@ zRt4=pe+@T1`WwrhTMHdet>4fx4d*_JnjCa#^>y3>+&7kamx>)ncQv;BzVKPsce_Hl=DcWU(vNQC_G}#E{JQ1= z>!jB{X!*y97hSKlhI4ExSveaeerwQ0|HQr#Zu- 
zZsmUW{ut+ztq)nZ&ibMy`@{>bD}o}pH;VfP-E#T{ZeYdtmOHi{c05pYjq|QAdb^@S z!?-(E%yhn$dMlSvH^zD8lE7`6C7dZFsB)H~jv8TfPl#aNK2m zz}Y{gx9gFggmEnc<~U!Ay_Ngt=}FF%uXB7qILoZPUEeQ_;Qm^=(7EsEE!?xepX7Y#4y*OST~|9t1@?Cx zdMJt;esXZowU=(-Zu&gHdc&`3n}0iCw6pfL-mV{IL~>XCCeyj$pnAzWB15e~ zm#=eX{<@!QV__6mxN~68kEY+i1;KUHa&y1HRVmY*+d_J~A`eAzO9R(9BVW6jOP)N| zdH>_ntd;paoIQ5+bM5*~6!-ow`v&#-_* zwlUnomp3`z3%rplczCR{A$E)PsO>iAGmrIi?H+C83Y`6e?p=HxxBuh+TE2ejOwjmc z>zzM;vA1i?pT}|+(l59M4~KrxmxK%u9FKt$Eys{bGaedb+6fcY}IZp8oUyT3EeL zZVhqleR&t?g@}5#8@2V`vwa`<{;9cQd-YfBZpC(m_Vo0yJ=BYyN&9)~ZAlOHz0t$- zWA(0%*)BUjY;RXl0f* z?J52D+)dK?O<#4J==Y#}N$xS9+(3kHW?_XH}{ah^S z;W^x~ejE4xsXqrnIo2ua&7Be_T+=PT8?+J5VXvt7dfiM{_316p&wDH%?;6vZ@V8Z> zUZ23JuA3L-!1Zvh<<8&PTcJ;)-fzE)buCNS#Kqn9pykmOPqjYrmm;EvcKQ0A$*#{H z%Hkk)ID=k%wH5j#>a7coadE-xiQdP%K5+ixs~WMro?9olzIb*E${>zj)e0;*1$&|_ z;E^>%59OtQJS*x=yJoCw@AVrwu(1=)9qQ~O>Ye*&v}?(^6+{pE7Ifd)pNjpw{k|yI zC(GAzF)zL3L_fjxAnGmpLxk(Ezga@`CXT;k8MEm3mPfw&zZN{dL2rb*Hs`Da9|bw@ zf8iap8_`2v?|)&G3t}kId-#q4R@;r=5IwMAU|d>=>x!DC9Qf1;y4Ep^62$gwmBU?d zeG#V?tb8aJz*H^a&yPTtD693somRfIL z@L!?5=9_=UngoJ&d$>?S&h; z@n6Dy4d@|nAHDmc>s0SBWIpJ`_g7$lEvN_Ww`2}{##KKpl;}M*;3(=4_WcLSqJxDc(uPf=%2r{m4!Bns2 zpNAa~XPh4o?&TV>EsXdZ{ebP>`pyI^#AYYz{cTMIX%F@AdU)s6S%GnPl{vq>p|=bB zf_&}x?k1x5#{4v~y(_{aMZ2KhzeX(#0>3!zb9<9I(MDVQQN6MMw^7vF&=5)b*$3_u z@jA-5<>x_DK)442xa<(*jx2Q+CjtzRa{#v3pFZnqO#6zNY@O2x9wuS4A z_&xRA7L^|Ig6*ZBnIr1GnmU?;7~%ZLego0_?bn|ZyCg*Qbrmha>nJuT;1Q~~d_<|` z;X4|LT_A=MJv;}zo^JW)H5}TL)mz}Wn%J{y{+pJVmyQy>>;FBD=wY4YI=b>v4&yJY z2iGIfyW;$}mK_t1lj~tuuL(pCuXpH^^Hkz%B)*~Eqras6{KXckH#7WhE9#-&PdFwL zy+>dDABjsrRWnGtsE0Pg^Bdob>h=1pzZLJFk5s??g;)2_{nhu+!aB3L?HU$9|K4=- zV?(pxfgAp3G(YOEoF5GsI^=o9{7Ck&L1RZaYkaRZ*S$dxA9wifQji2x(T?G-`>S<@8-AoI1AIg zbQ5qNzx@%vy`SGcz;A!dZy)5h5AoZF`Ryb8_ECQO7{7g--#)=_pX9es@!O~QZBKsN zi{JL^tc6{HCZ5U zx$w?AkpITgEBaO|lw1E4`LgxGJEN@C!=_o`7;Do*YplJZw^`v?70CBm7vEJ4$23`^ zx1ECH-myX@aO^Lw53l(n9RH0KPK|4N=o-#$>A^vr+%Fg1#T6I*h>L1^h=cmM)8F*u z7T@(eH(qfYgc(-}5!Ja;}WZlF4K?nac 
z>Z$S#fIcrNpZ|N%uz9Fy0n-Ae1xyQ=7BDSfTEMh`X#vv$rUgt3m=-WCU|PVmfN25K z0;UB_3z!x#Enr%}w18;=(*mXiObeJ6FfCwOz_fsA0n-Ae1xyQ=7BDSfTEMh`X#vv$ zrUgt3m=-WCU|PVmfN25K0;UB_3z!x#Enr%}w18;=(*mXiObeJ6FfCwOz_fsA0n-Ae z1xyQ=7BDSfTEMh`X#vv$rUgt3m=-WCU|PVmfN25K0;UB_3z!x#Enr%}w18;=(*mXi zObeJ6FfCwOz_fsA0n-Ae1xyQ=7BDSfTEMh`X#vv$rUgt3m=-WCU|PVmfN25K0;UB_ z3z!x#Enr%}w18;=(*mXiObeJ6FfCwOz_fsA0n-Ae1xyQ=7BDSfTEMh`X#vv$rUgt3 zm=-WCU|PVmfN25K0;UB_3z!x#Enr%}w18;=(*mXiObeJ6FfCwOz_fsA0n-Ae1xyQ= z7BDSfTEMh`X#vv$rUgt3m=-WCU|PVmfN25K0;UB_3z!x#Enr%}w18;=(*mXiObeJ6 zFfCwOz_fsA0n-Ae1xyQ=7BDSfTEMh`X#vv$rUgt3m=-WCU|PVmfN25K0;UB_3z!x# zEnr%}w18;=(*mXiObeJ6FfCwOz_fsA0n-Ae1xyQ=7BDSfTEMh`X#vv$rUgt3m=-WC zU|PVmfN25K0zS3C?Plus7Vs$$om|-qL(SC9E$~7oBjeNNO#Zq#eM5(u{B?U5cIy-9 z*8IKLtxa!s9xTwk`5R{Xu$x<;oAWo!B(Qq}*zNf<0qoWQc7OiN5ZIjo^auV-0=qAP z{=uI)2zOTk{e(X=1a?yZ{f0j?1a?aT{f9q31P(J(ce22XK7AnD$(6mRv^0bOWs$kx zr7hrJ{J|jHrLAptPH6!@<4<{d=6)BqfZy>qyo(#$?4Ha5{>R^NlR%k=yY`Mh@@Ep* zwV7jk=b!wUA&~WPSI+w>e`W~m%Fr==>(>0e#GHtb9WErHz}$9a3v_S(G{#v0KiGC< zyPJKZ7U<^usUc(dQwvS6cQp%id;XMBPR5^7*ww6W_NV*vr-*WL{uDyf-(AfD{=lCs z$|?Di3A>v0yI_ATBz0QH{DMC=%BlEcg`H;BZrTuq5C83~6YvxMB*--QGee+TG)eH0 zoLf5WKFDwQ6Cl&#PolHk_VV8f6!>*g4D=uVc*wN*6NLVYPrJ`d&0l8>!Y_65E6YA@ z?j->>jwbYhP^8)#3ROpUT7ls9PHRxRA)yNJt;oaUyFGsrdn9Lwn4!=^!};oPxwn5H0F|AP?fdFXHS6&= z{cg+_`UCzH7vcRH>q{hC#skQM0bwO`_Q{Ba=$kcAit+iy&-YLK}acf6{l zT}rAhC=1$EyPhiG}*_pRvGsMWlFnQk>+3c59aeFqx>s1fEw{<;!@TF&;; zo8G8tHE&erO&j_;-JCyd0O>uT24f6=8g%MgsY4~avtjX>Ngdjvta7GN_p&nW_U?3d z{s!w1$PdE`^RWDB;HfyelXiKp%lYfPNw9m+$&d1#Wx79qo&YH4uZx60ZT>n9Ksj0N z?V0?M_Za#Prb+WIP?(6Ywo>rdHA0}C%h`^1P+qU;Hp_k1A3unH$e=cXJZ_S)RQ&yr z1PV_&be%2ry908%t-2@hRge6HKQ{r?;O_?;go&TgjrtDvm6KKV=r`%ML{+DE1^&XH zb_f)C#kU6fM+4hm(ZsYB1#Z7H@J7aL1|MAC$K;?;#Fk;M5sI1A` zeK$R8beOtnqoX@irtXTTC4R`CK?ocHN!LP#LR~G-1iqm`)~0W&^s*LNPrTXjOa632 zAR?v=Clz@JAUP(_(&50wN8L}B|gJ_WFgAlQDZ_y>P& z5y;F#)RYNER7-pcpbz=Ou;a_>NL}94D45Rst-tW+_8_c988x4lq^lC>YyOA@JV)Al z?gUN0;jgU_NarCsjFE9xPi8FC)zTDr?qVH{5jy3b7c>3zSeeoFGV4eDxe)^OjRe=D 
zmqL9tO^L@o?ob6q7d>uEq3`*wKk?UQ2sA(prW!`ZGZY#rs%0MgxT6%*T^r$PRMo1& zvoinU&zL~ZEYpEUcN9@>*K2TYZr6ba8r0*a$lv%g7=*zhBS#vT#e1ParP?Boob2m^ zwYwyideoe#CB10*~D6%YzO2L`5Dn)@EI89^i-kX@)>* z2t-U5K<|ZZT3Pb|5B%)QLpA#%FBF?=^M+8a=f$2s@~063dCL&Q^a%7`=!M;his;UMp}2Hy3V$h)2iS_a(^4)J!|t@(R-^^EYyc5rF(4ES<3oF(rZC3#A6U$tirQy-O-)MY>F| z2Z?!a`o-#q70EN~A>QS?J%2+C2_zvpXq11pkIk`)9_V|i*q-_>^(=VBh zb_=pYyxQ>t{tN-c5GWZ&iI^+|dM}h3@Fu72T16>Su|ne3E~D;cR;w8=t76rFuE*e@qBe^XIORHK?a~Ex&4%$*R?mw^1Zh_i9J00k0~OEBOb1IslHO07_@9 zftVBmy%$Oic##tvtACe|M<^>6wA|b()ji+WXvp(Q)Ea)mpLPfYqA)VHftVoHI};|q*7?H+w(^UVC^v&i6Z<71E~&TRP1}9UX8MH zEzhdUWGc)KbEH(t6!KX~=3P!*qs8J~O$x1-JNc90K5g!0JQ;EWNd8gY1)8?sk9vm;k5*P+=28iWJ9o=B|cw6-A!+ zhONMJZKVcV8=qGm;^n>{@FxS1oe6trl2M}!6ucK2R4Rxx+$$>*j`M7bRd}YMd$g=K zbVv7C&jkL!pZfqB5jJv^5o6v9-SLNpf0-h8YUtQwn9c}UA2sL~{K)~7LZHet+WH)3A{$MdsWhQqiQbWdQdMH$NLa#u>&s`z`k9lr{+#5@mUkJqHfQhl0ffSXiHNcK+yD3=womCsyYES zjjgYm;Op@N{^TLhdluT%)P)Q+y%YM_n?9*d13Ib5CoM3Q_yd130KI3y4NV=$sHQhU z#uHs8E5WAD`Mi^U!5<4C&w{q0+AY*(OyEy*5H=KQck_oWRZ}OG_`GM`n!f?B2}!bx zp}Tz`)FM!<)?=a6@2-=RXjA8W-plUIU$DTR6al?w!Cg&tlw&kK6#CqsKB!Plomk?7 z+7(rFbNrR}X#iJiFL-io#K{e?e11Zu`Vz=J<^??KVlMECWICknL0$hXXZ;=y2xb#m3)C8a(Qp(%9TR#lq!sH_+a95Al^rioU?=W& zNXvf29}R)JRzk=+K!}j(R@)w-L8Tol5`j}@I-*&>;*SuhZ>5d`#fZO9?}hp&(y(;eY&zAy5NA?^$R=(?A>sN!MS7LS3zn705`_+woNw{E$C` z5NPP`_6c<%!)m<~`pU;1)}x1enZbVhyVb4vdx3rYp~{QzMoU)mo&`5GHKLrP>8ViacccDmMEbv0wgCEm6}^!$K7 z;}9s!gvr@@&w^)~>{LUg>9$bmSYr$nQl+x{`-+O%-gUof^^tzTpIaeN7>n7hEEstY zidvJY*n6SUTWyoT9--8Bd!Mbu`@EaY`3HaQg+P`(J`Zq+hVmX1Elsgv?}cK|_>5;T zD7D?*_pBNHqupxIU-f+85vbD;pBrlwm$p>1KS?`my{py%OMyE4y9-D=iv z_|p!7$!jsKTelAWE!XV2`iDV+3@YHWaws z3>R#tmPH$HEG{U({}<=y!+*s^`T0eV3>!kBAMvL_;95Yew6u(-hDMr>&~%ifWkV4_ zG6>^VGvS4-DZoNQAxd>M+Y~Bw-3CKAP`_y-iJ;xqmK&P7YuB!weA=;tShsgf5f~B z{_5HS(7*VThd>@bi9ZUUoIe4;_JE@2Nm3qeBsCN&yW}Pq>|sW2>&aMs{XCB!%O9Fo z$zNTbeihgOf8&n{fdYPQ@FxPO=Z|)lhZ`|wC{#79?@C>Xstz{_bSq;7fZuKyecADM z2M)n1{tN)buJ|2)VhCJA?){kizcMfoUZRTlcT51#kcIXL4RpgH84#fqG61-lHK<%) 
z)OOv7pNv1WEhG*8i~;mR{wRTKN;LTc09E`QTTUh+hAgyCsLL2ut1Ao(QwiH9Lve&Y zWJVH2C8}M5pB8^R6!DKNHLOzU3k7}||6WG`*OYj`UlRe)kcD;%^&lhF>In&d3ZdXR zceh5A8OV&Qr}$~|S4IKkr@y6ismlOBzl?vczs};v-2ag=LKXj-nh1b~EVNsw;E4A< zU#(sUq+%H-#$>3ZGF|s~RQ0e0+|ab$nArP2V%Z%#V5N%yNck(}`76-}@Ok~OTXcQN z3lQow6W}L>KP7)VDS&kS?-(TD4*@h}q5VQl0EKEbg?#*jV_0xl657tQnjYkhDhB-a z81RPx3it{9=>zDO{2_jA@^=IQMEnSU0AQ#HgNCC-8PCG2+G((M60}tn3j*A#gc5%; zp%EJ@nMS4V6shWw3J^a-{s2J2AKrh{@fZHpi-6smzoA35!XLrUfWMtI0?PS2&H|`r zK?#aBey_f&&EH@_UFmzFoNSdsmOpim)JOZWMB+`6BlsEfS4L*RWcnxarw3qn=T8Gb ziXX$DFaY!SDEPn#hF~)OO7ZWwF@Q=?^!U>wP}G-0A{Nt(TqxsD76O&W`F>wPow!T! zQ}M@}7VrNs;XR;&KOF$OJ%6eYsEZ$2{{utm@h1mR5pa~C=<}yTpjd&76|t1h$b<_1 z6g^TL>f>DzIC+N0uPy#cHTcs8&=2^Nhd>@bjqu0v2N6&e{}d5W?rtS0M*L|H!cq+k zDpH}4V}wfnIx_^yu@m+HzrA}fDd!*3^q0iHQUFjH|4NFr?g9OQKQ;sk_)-2c7R1L# zV2XtQ!bvV1UOO2zB4pUIqot)wmu49I}8mB#^cQ$5f>L1zfrmYEGgCk&_DPS2^8@wDc!he=9tko+vqW4$3Yr9 zX7p&-i;M^xId$Q}=!B%$NmF9uXU4})kDD43GZBung(oe_&e@(D6B#yAU_tV%P$F!H z_y$0sI@wmgRX9u%c5F_;Ur0pMoa_x-RwpM$j-D`X^yo>Gr^d$3OiW6goj7Aw(v<0o z7EBvCDm*gEHW~~rSlnhCGkV4*fkAlrQXK)i7k^L5Q^+JKO~fSOSF#D?Kdb{36(`IY zGhyNcB#_`2vbDT?`GyVaH|7-<6&DwkRvf6@wJCXO^yskUtbIpocC3mVHC)0$aI#cF zbHJbH1hSRTirEq-uh%L$L=blHRu16su&5Q=igp!br`X2ECd^%*xoOMR?A-jalG4Jw z-0Ur><>i?nqrxI0qwso1b4M6hyt?oI^)r2A$93R=TZ1d zt&qsO%<_MjV9&4rk%g#+qLp*TjvqgE4DuH`qM$sh;P9bC2lnqTFF$aosQ%(w{vUmCgl!{tBX0bp=9 z;g9u~;7Dm(Q&Tgp3j^2|e#L73Fap-&YCus!)Yx&hsL063vE|5L<)QtD4jkBj;9zBS zUBmvh3n!0@7@3lL@JPk3^^u`6{wRP-XY%}cdKW0G1~`)K%XZqvQ#^b%nGLx8&(_7F zreQ_~V1&)KVr#Wydv;1_)P$)U%Nkpb)K-BJEB5W#z2`t>!IJWFuq}+gW5%F4ke9dZ6U!gH0Lc)H{53Qz3yXpQI5KifdHE9Hud?D$ z<)MRz4jnpNQ@4BV@(E)jMyBQ+Y$z++poo9qBMndl{yYtVa*rGEr`f8c&2}m0e;y{O z8l#+yzbKn+WtroRqU@BB;iD(z)SY_s#PK5#0uKU!zhmfg*KGrlz{f0w~8%i$4}W>G>N8e}us4qi||AtGs+>K~+^%`GLyABvsWGtzSPj zDl9Z-&%xR~X=zaj%%eE!Aa$UU`13FX8hKnHQ@6?-zz92tzi9Um6ndRlnVBsJng~d;J{EGpwuC5@yar2rKa1-^{y0sE@Qf_-!1n|x~pME-Z z+B9-YB>N|3N^I<$*|X9yWoBlsU%!6S?8rF;K#4#4Mo2vFY;Hq2aJ_O%(eop$fxp( zm7|6aL;R%hC-8@pkh+4<*_*&Y>z8L{g12VRo;@cvc1rwcnLlA?2BfH`@4WNQE-(=p 
zH*VIfS<93u6%$-WX%iRa;^_*B5SKkBEHonY)nW9sLPw1pIby^zKKucI+lLOzFGo{i z@{B(P3*H0B@&^1F5$K+<7s4lozs9ALVERk(^OQdYC<*{QxD8|gB>X8V zR*IQ@S(fNxNeA*L0N6HvS>+WOB>M63FNGR^_;BQK#PEqTlhfzPPqFde-Xf?L{SdtMbr) zQWD3zA2I?y1C5IvQDkb%7Hr9b1+Qf_F$v-P>DATg@CSE)kRbf2_^YM-9V$f= zz-^%sf6Q2^=Fi=Eh~#tuFxX0kQce_0MN-aCC>_?`909Ox{$`fv?%(YOfBXW>YA|*5 zoK3mgw-*%`ZrLmkfsDHm!5?G@9;ZuRO>O>a2!4QHVct3ze~XI?;ABMn4Ee+GCrp2Z z`#(@7@$V3f8^B*+8~jP2cnzS0gvbzPq+%h*h=n3)YbcZsYj+L-RPhH!tEpkJt1ZIw9_ET9gOxQWODx1qJyVQ~3DjCVz;ZF#V|3vBNEcc3?*C-`ZOhN6 z02aeEBv-(XpMZ!S|6$@^O(CXyu@`)%1cD#nx1HdZw>6FO$KaXo@@*flWZ+KG<4W-?&Sd!a@pQ&SdhJ)-1;GYd3kwT=Xk^)&OXHD z7tQ!r#*e?mxs2bG&K5w?2CxkRrFzxX$^x%SENQCwlWDRCwllAce|r3tA3RV|uFqcq zjS+18vv17<01LBtSchZ2L-+&mo*u*kH(~sEct;do619y99~m-oIQ$`* z0g%QFW&qr=C9f3l+YSjw-~9Ydvk8A}_%n!qB>aifUoHO1Wc)E6Rgush03{$asue26 zgEYj3x{}a`Il|vc95u-E-%=g^W|kkM0Z=>sE#djABLIr?5AY%IR|u1k;^NZ0Etwcs zJDk5qrl#64Em^X7@sg#gWarUC{01cFhya$Z-kuNdl#w^b06-wHAb*yd{E3S%((;Q2 ze{l7(@ekZD_)&udLFjo16f`vfV;a@sPwbJgQ0TWWwmAN-+|?F;w(NeOrrrXd~9-@F}VDK6u-4*Unu#ca&iigs$h!Q}b7_L}UFK`J>@aTz*mWhsy^F{sebx@<)X_{27eF z5@OobihOE|d8Ukoz5tKbLF1puUx`lqvqAjB83@Hswf-!`KN|jM{F}D{-~;kXN`Zr& z@bqe5`JUXv9nYWB>1@U1!gK=CNld38oyIin6$ZfMDgnUUDtMKzrn(B=T|Qh1uYT{| zxuZ0H)zn&8|C5csy8My5KWX~w9)COw+Rmmz3BX4^d|IvKbb5)o4^s z;vZ5+#$Uo;yn6m&%Q|of1EBl-O`gBGxOC_4^8JUast;FHRoB!WIZ{`B;D_Xo0CxxOMdQwB7o{~Yi27em! 
zU)z!jT7C_(+0bIXx=f0Zbr%xh(N9t>UzY5CVGzEY4n*1@tukiK(34aoQ zfyedv0{{)&Bk=diBab}#=wpvP_W0vZJn_VnPr?too_@M#&tAQH_3r)5GtaKCUyg5B1BQ7pBE)M3YWRV2s ztoUkRdtVN8I%}l?7{_150DOi4kno29?#gBPJB0l0C@xq&t+q}!{_68bhhK5}3paX@ z`1l7oBmT4khMK}b z?rz8TB-C0OlMf@$Wc^e}uoX;(R54 zaE@BzuQdBm@HdNCYkZsh;Rh6S6BR!52n~Q*{6Pei0-z89fj`4);m0bvO@hFCp;4`B znY;0CCiz8|5CApeANZ!e9{8hw4XChoa(v3tZ6!PQlpi=;({PmVch*q_^Gg|j@J<~j z01(nXlIV?8c~(}|=Bz~-3l`xg8MKK++NMKVj43B4JNv+ag9i^Ff5ro_1R(NP1usG5 z4BkZJAJ5}0t_Si7^2`~R#GmkkW*Yza04M{H^2Y**;f*AU z9dw8$ER!e{`6C0c{02}MfQ1Ms@>iWPneqqYZ(c5}TNdQwuigMbmOB2#!opw;J8&Sq z2PXUp;V&E35g<|ec>c;1{3VPGiAMMs0MVbis8HcE4+{X25m=gnFaW|FgibvcKWQOq{M`TnHeh&)zrdDAlyq76#<92 z%c4c@;CHbR!GjU6L4#gq_~Y*xgr~m(e>nb@vHStIsGgECIg*`#Qh(~O!0@M@0t-_R z;TDkR4*-0kFNuGst>t&t1*i5cN~CAf|;xaf3P$Q z{E;OXkw1#39Ky-jMLTxw-GAUvWlnxUPFB{YnaK%>Nl8hwW+g1kz_h7+(7R*XWS+#uYwmf2+ zf<5`EL_mf=9RS^E6%Nq?$52yCL8mM#HZ?s7_`?(xJ}qT+!G3|iy1JFLT=+Nj zz+nWC&Oj9W!7(wbnjH8#b9MFMtfltUw8g8pqpG=%(;8z&beF%huxP~MUy{Jo&mg2bQ308Ac%Abw}g?i)Oq{~;yl zk~^~SaPq~g*(oWhD|TdWgdbwgnG`v2z<~I}^vs;qnPbJVHBjIW;-nD%D)jgp93LyN zhXAImnV+7PGIPA(7SW$t?xwM`w2!OIR7d!XGTbY+S#7Ivsn$1vqWCNz0F%pk6w> z6b6~m3DXy5Y{=TSHEmwf%s6=SWXu?Fu>=X^X3i|GuCAz{1kwqx5dL5uTwb0AKckBt z5g7#^)syMbId4wxp+jU6B9o0AJa~YDKk^7pz64GA0{{~ikhE}W;Zgdr`eSdueIC*~ zkS^eF$~VLeho^M{eaiq5I*`sQw47i8s+f*55D-49ufk53?DIKFd4Wh zf6D>D)Rd%&)bot9)hJM*$e(->nyf+tfW0B{t1!=D1bn{FS)b^$cgbOh+ithAM}!b4QKgwHGj5pC*>E;t;eL&c>2VN_KijPgh3^L4Qrq$oQL>m>2;(Z4~h1ryt@mv&+i%Yu0BF0Fx65fvi7O zs8AvDrx5_f5m0yZuR0-tBzx8CJE_7{)X7?EfRq88$jtD z1U>+Y00!^DelY-I0L1SaDB|DYrD6yR3>+9b#cm%C_(|bUgTFm!%Ab4=>3B*C8W$18BQSp2YQ1dyb=9!^1;u4# zj0g-I94v&tavT{*62@PMeBK|==Y_+ocKQ&|u#N@h$LU|+Vy~vo^bCZ){ijcly#k%yENqA9) zhv(;i3JHz$9>q_^-;zTxw;)sC#ZhpR7@x5qE*!Re8oyG!|Ks@s`Yv2(Cj4Ra(+htB zen8;Klg-VUnIlJzq~Hx53U4Z7b~23C^CN=^VmkN{{&o*YTD)`_6aNfXUrill0l4U%RgF>O`$ubV>YFs)qFt;+{}E zN+UB4zRf;+&fMhX$vk`Zyu6RW9>^b0;HU566$>w2D#zcM2dl~JnJ@q^6epeGp)g|! 
z508o(zp}Cheoh8&+~>qaM2(oav>frH{0ZUDX!OPChlz}qE*E8xM8Ejet5>grx0&O_ z_{R^uP5iwdjK?Ty-c`JI&C&%ig{$|9D}Wj+F#JnccngRFFbROcd*GH7dv^-15|+P= zX;Y&oj*nlpc+sMT3s)^!m^^yi*vPOL_~bo={irDX;%=mbpD_KU@eiI8!}v?%9|?VU zeWRJd+#f?adX#5y>5MtEk`iW2)gh2F(EC9x0LhCmbPj?Mki7}K@h(mNm=Gw3OW5P7 z5q9NUcZ3?)@kJ9S&mj0srE$-moBPwBq9sHCNera?71XU9GiIzXx{@tC$mIuM_!=)5 zAI%RVaT2UPhfSO?Gu6Hr+?1NS2wq`jT_eQrZ;}gJbwJ98TOl-AR6x6xj8Efo)G8cjEERN4X}ut976)XJ*V-bk3K>h2msO$2=+n# z4iyyCt{58=GfA*0Zs83;Oz?<*e{xE4+9F8coYa&Qn=N7K(j|)*LRzqBQAP$Vsx4f& zz7{8u&BsfFkw2r+7bhQl_#@Hp1evyy`Gd2nYU}K@@}1lBa<*;TLgF3JMhPV2Z&MTe zfyra=*M{IJL_yJxz58n#k2YnH8L&73QLF%J1i+X*2lwE*_NGJFPZ)p6B20ZfE+`2r zn4iA!08KLQ`2 zkAy$O57LJpHZ~*^myDShGi}Ov7$6ysqr3UtftX$<=@pU&z48jYqlTyl1_qIY?~*{W zk~BcnWa{Iyh7?gg)k=)6$3mcQYHZ9zI@d_fY53s74?o1S7XTz7kROHLg$uuE;lhmx z5mDn~Vwez^eGuQlg6|qvRTpgDf{&!|UL79Q&$rpu;yYCAPm92F1V753oA?Q%FGfGg zA4alFM^n?0>O;Fq_TwGjaR_xn;=zMI$q7!bDT0^%*OxgQEt%2^jMf0G01&6ZxCqEk zK`4I|K;#b+dtO@t1MLUk`SW2iCm|B@l0E)YM6XstM0ys2$ESBGdo=b5^zrz8_~8d1 ze9+L4Odzz4n=l?8hBE$@?J(se+a95ju0*}EMcDUP2nu5-jJ1)AEPF%4`|raE1AhcS zkw6?3FhTqS{vg2<;KIVylVT=`1Qwx8w1Qv;V-}y~X4YN=pAg7ptl&D-Ty5hS;?~4_;~Av!&G0;-K+QYXyePK$sC& zyaPo3BmgmF}9!3XcZ z|Nc+v>nVfqN{0+U4Ftp@_CT>v$QTP{jR-2@)>v0OOe3Sz#8?|e*vnd951(>_Q$+yP z1oAQPXO}5|5K{1ph7btP+({z*F#r2-FAH|RG zmz@fu->w7T=z91i6Zs%jL*u@E!ef)&AT8SmA4!TXg_(#l{z(Hch@u^0da{%Ror3GU#xy8Jcq{0U1h z!h<*R1dM<1p;&s6-~6?-v}7KttlSY?TT^w2zUE0D(gAz$KpKc6J-~o?xW1wBXmF{c zS>R6^fSCZujKK1za4dkq{DWY{S>o;%@=bdU_;~)Nu3ftT-qD&dV?8k-wI%Y0&tJ~@5ht4o(Ir38v$1P~MRDjvKS;@X9(I;p_>idoGln;lN(5}XqP_^vDl5&}sO z6sIC&HZ0?hjJ*q1u9ERbtS|G89)EB1{2_kI=e{KRwLSV${>p-n9%-zv!%s((Uu;z2 zOAW9<4Ab5tkQ(9rmHMXR@UVr6e@gzO-@ysL#1s|*$rOYE5JEOi+Rw|*k?s)wwD9Av z){GrHr=zhVS3&`$ufF~gfls6l;bRA0GV&7tprg=X7gHoQ(4V@fcrT<&XcBOWoKd5~ z;+sN+-<4@brCh}Gx#SWE6cH4IpmH3(NUcUL--GZM32ioeRu*o=_{W={@s19EfFFr} zn)oS3U)9nJfA>fDJG3=;2dwfnA3ch&VZsc|M1+|zECAvv^bW#b1_VviCIhha437CB zCIwIt01-g!>A7=w%YXnfXDDQG{8i&ej73I^;+on-+ktU;`Dw6G^2dzA+WdhBo0^J)#i<7weDSpxPexzhuch+fmf#)m-U0vWK{HtZ 
zY&v$FEC}MW58%y#1eOGs9w+yIiWM08Ar2Y<-57z%EvpWH>1zBaee`Z+<=L|v5kFfI z(1-tFx&-hYAroFQ@QSfdjUV_bD~rJZvb?jkA@g1+RxEE#*z>+(wPtb+20;w~r5H$p z;2XFk_z@j{=?BI4rK2fhw{1;Ei^7ChCu#DB5v1u@aj-1<$?zkiukyhMjap*#!}-Ud z16u$rNB|n*h7UrHHy_6chh&{R2@mgF2&FjwGW@9mAOVnh5n4S5G2&187bOJvU`y_n zi;9WOY{e8k^6y0<_=)#@;;ciQcDxJzQNzdbrxgN4XL~FZ`P0&rgqqlU-YSWLvIr>7 zh3Qm8x)Hnx?;Vi^U9f2G6yz^?(Zb1uKV1Nmwlcv`7^;xJV?2LkeGPB3(AgT{Pk;2q z_;+YOfQ9%u#nl;StFx6>b{Yn9n8KWBbpo5h{hx|IIt5`OpaMX03rKnimrF4JE}(EG z(zbRh{icC%9WwOs{Mlk+&}OzgIe+BRmfiTNf*(XaaI6r3#I59Pp91L zqLKRkueNsUEa}u`@WBp=f5cQW{u&xC68a2+A4G;)gpVYVJem=TeJo`8YXdRWH6iPg zqPx=l0v;t3CIf=PD2!_{nj!E*eDNId!z6nL8u^ow(0lDz?lR;y!&fx zY$yyy{s=%wL7Ss*i(`4qf%1*P2n?tZc;O$6qc|ed*RU|-!r;0Z=f61rp!^w4K`6td zKCFzaLi2NAyk$TD<<}Fr@!KkmzmhHZoHl=8P2`WnKKY#=j=dNS0KRP2r81|<{i*P1 zJA`uneo0C@@m9zGgD5!?pihWnzVwShpF(>jDL7BH(a3b5&FoTACA2k04RK73feuNG&!q94T! z<500o%=kp8Zk4H4EtCN$_C(Aux?-7_VT3fBTvJOC#A{G`Btj8H{&@V_;;*i*zOmux zvGq0^z5k;=61;}d50e!B&H{f0!EKGcvYS5$bIW07+tdW>&o#gvJl-I?03TWW6YxU- z;b}0ug|mBaderpT%pE&uJ=y?D0Z@7qP#S>=e*j=L{(O7~r2XP=kKtVL2{2Eih#tul z3-^AwoiJg3em=zy_~YS2`tX8Bvt#Vzms&(mF&WWmOqrPVfzV^b7y`w9Fd6r{BpyW& z#2|>K5Mv;i=Yw||dH#x5%5MK){e?~S^+y_yH91@{k^m^Z3ymYNz~8y@gc&p9CoeAn4&iOHlA^7vGpE{YY3a*W ztlYGD%eDgeN$HLjSeJAkA3vmS&S zC>Y4joI2TBym|AM>~-tcuUN5a)#3%V(W7T1X66^=<-sSa_Uu|TF>d0N#ap%TI}7g= z2W#QSzH>_A7NL)y*|xOIE`Xn490vFV0D1gyLn9#I*W5fOFE2MY6aM-&M&RHh4GnMBldkY_7e)Bs5XxV}PM$vzK7P^x@L{6+lVM;G z_#**O#a|ta+2rx$#S0Y`<@gwk`TjxlvZGGwr7C+^WZcY!@RnTd(bI1r7C3bvYjv>l z$MZ+LuMB^5s3iZ=u~VFa;0NsIQ1&Qp2p)a-M)xuNH8%&3Te4-#mPKO;eveW7`0w@h z?K^ebHr>Y{_?7%T_@R3oq<$-l4^*|da+fAfi=9Az=pyjvI+Q8Aiw1y&lPAwdj$C}_ zt+OXxj;6-Ow|FwihUbrr!0#yN%TAFvL&&5(MXm#dx~&>gPYG1?Qrh*Rzknb=4C5#a z={)iWiM(^bT9n{N9*@5T4__7hO<36=@rPcEo*I{4y0>E6`jt!KN5Jo7p6u6eETq_s zp%`=|{)D+NiLDS`weWLn-m)n%n2f!M8wq+yAS50?GX6FLcVmXc3?AIKj~qVno6$g_ zl*p%&MUj*V&xK!L!iTT&mMo6Pj{}FL@bRy;dIihhDzw6pBd0FB{l=LSEys@>dlR3M z3yI;c5%?pf#gXRS>}<(jm^0$91BJT$(GV!_BfIDO{!#?dI2Z)=_)C%abD-TD8&@&E 
zKL`F=`S_OtE{k2b2)@5oRRixu)mDZ|zJ=V=@M3yxZDUbf0>(e`M1ro2kVz_yfr{|w zu)(LJWc*S1SO7`<)4@*Gew#a?m38&_0XBTOd+)A|Yp0}C(=&6{tQjNVWwS*@t%A?1 zK_UUL)zNh9tndR6Jc#fIV+6jUa~|m<_RYy*UCHP5;~yN*Q9^zG+PEY-q)RDv!Z7^a z+cf;C;-AFdTWIx?l2!2IH!)3EQSTD?OI}OAzjnB4)#^2CR%iB-yZmXgZnH8tc)c(M z5n>=8{pfr5PI2^gI06E0A25Inu{8V<{Else6ipI-ap!q?EA8I1Bqq$9xp>QtqU>!v ze~zlP0)H$;Sr7nELjXK_N{c_h5BWp-PM-#Ijbc^#JpPnZH+7J(9boHr)q4RLru-B| zP>(1I&4hz2+7Q_Az=iOUjrGdqXYJ^R{9z)LJ^wr* zZqud>8)#`q{cGXRGJb`?Uvt%piFmr~>0@39fG28sW8g-Jf5_k41U?uuG?cLdb(L7k zrw$Sd)qAt`eV4#6@Ow6b;M+&E@BbwJlwC%1PhN9a7=N=wK6UsD003)N!H7C^9T|c# z{LvL+WBxjdH0^o?{-UDN`1sdSnF&8gRi1rRRMv%aBmlNvL`(2V#$Qg(YP@C9=TECz zz9Fnn-DbOm?W5kfOS;{~0aH^51bv|-=_;PLY)=99?Zky%g@ z)h2)1egRXG`b3i|GTsYG#fsXPUF@@N^1f9)O(>`Q;pxuO;!nE$)9zMG(xw$jiI^n0 zE`I^bD1Qi`8~E|#Z%2`)U9ZHSbV9^@YR~Wi(D`->e*kPKe;D*6{*FSqRGF01sg@ql zLBe)}t$iwdwVn#$B#)B#*EWAz;ZHK9ET_-koy&0ig%q+*9DV8FtDbzoBM@yuI=0k+ zKb3Vao`*HS)|R)J2?+AH0Pz#}I||8|KUKBFm^^>J7PLiNMF#u@JjU}E9I}^r_@){CejxrdtShmg z5Rw4sJk3mimGMvHPoasn)2+4xg>0)D{*$_7DAa&g()2(GN(_HEYBa89DuS%QpE$YI z=$Ep{h(B1-6C&VV41ZcvU-DxQ2!G8TJJM>`ZiSOpo{-EWtdImi*Qra=0S5fJUoAEv zRq08G+SHU)%$^F(Gp!m5J+P5cm!LTXe@7Jj(c$;9C*e=>iZ zI-Dt{*^UviwW?U~UdUJhD^Z?|tYSSOl)J^BN8!&6{&)iCYmL4aBjkCvVE{b&cAZd);V*E36#v#Belq^tt5!_{ zwjiP+G{O1i;g8T^35+;m@6F zwMc+A1X6_NWLvj{niUJ0o(tRACFsA5Ke%wU`136MxzC^IbTRY9QP#X;AIKjUhN z&-B`Cr%(qmA+l<>N+Hr3Mal8rcL~%9e@JFY=DcZf#E%ht@&@;Mcio$HT1H8mf zKq!9;5kHB)rY0@@wyM?Ap=IsT)IS-h)=MF|n!N02|ySKqcE%)a>c zoBRR{#V=6CAHFjSuO%?=j0?3HS1TN)wQYx{K4R^n$f#PlNVQIaFT>%NAAj+?^78vX zGW|#~(AE_B8-JQ7DHYaUc=H0iMpgWUE!;==3k(crezYa2c7D!~1@DDkok%F!gz#s_ zKdCw!M~$Ov*s72}kHeqe#XtC=DQgr(ess>obLZ&CFFIrt8KSXr@MTAm5mM!japk?Ub1~>2ASf`Zo(TtcKn5>Z(f8y zzvWNcy1+ut1+oAW7zi&_z#_(Kfxp10(F=Dv&K)jUYa{pC3J35M=~heR3LU!Zy{5pf zX0`1X>Rc1r{S6DXt=w~{F|TAk&mVl;;@BEo2IBv^FHAqY2!Gw1KTg9lg84M9i^9v7 z;5vm_22C*O!1xy!7!fvN@vhbr<=fYe5q?3;*4}2d^Z-_c&wH;aAf|aLz7T3wtkd7I zW0sayHP#iSg@(cOY&N-US5X_1zi|%yX`FnUMcsP+h0`#gWIl~pg~Ggxmu<6V&XD+n 
zzt1!=I6O3D)Z$%->UWpf;TW~q+N_p8z?Yg5Vj3OtiO`cX9TQtrU0GQ?XGCaNcvKur zgmEg&r|>wNcf)TtkAE7dFrcWdi^80Xms55$fGG1ZorQtww^qFwxzmktSur!(4kwk zCL`|@p$stv3(x_kJM9t58B^+Z!RRybI%eQ2=1*}{)hpUCn)WM0i_*f zt}6=l`|CXmhMFXhDJ>)JqpG;0%oqxl2Q&Nr#UB`uIX7jty9o{YYv^w8g?I%kt;Xzo zFJvl_XZ?&n`N?(jJ@4J^y=G^d%w)Fh;xX zDv@?Q77B>zgh2jew5s)irl$ScbuyYA?bh>;!5W^C5bO3rks@ESAlgU^2~nuks`o;z zrl72w^QX2EW1;RYWcX8`N@obwS1@Z`6;Xyl)zSZtz4v~P^+4(0QWU^`uQIr-7fizw&P{=T*JZh+AWTQNUl+Ou& zfeZ$PvT7_F3kHn>Bn-wN@Fq6~11P!N@Fz4fDdYo~Eu)b?ah@Y;ESrFhV@L3D<NXb42na;TA36daC9qjkQ?-JQH8 z$39R@%Ow>sSNx$ynlAET){Kt%CUtiy6qQBYobe~)mifGczYtycm~H+Hp7AL%X_PIakC^FH z9fb7Cz61ZA?kstn(L=K}Ia&Mj5ftsXTmR zXi7<>&pv;e45|pD�_E(vrpItr}ItLLpE&(}&Sg6W4t*%4~y{nda|*P|Calk5D27 zNC8rS6d(mi0aAbzAO%PPQh*d71xNu>fD|AFNC8rS6d(mi0aAbzAO%PPQh*d71xNu> zfD|AFNC8rS6d(mi0aAbzAO%PPQh*d71xNu>fD|AFNC8rS6d(mi0aAbzAO%PPQh*d7 z1xNu>fD|AFNC8rS6d(mi0aAbzAO%PPQh*d71xNu>fD|AFNC8rS6d(mi0aAbzAO%PP zQh*d71xNu>fD|AFNC8rS6d(mi0aAbzAO%PPQh*d71xNu>fD|AFNC8rS6d(mi0aAbz zAO%PPQh*d71xNu>fD|AFNC8rS6d(mi0aAbzAO%PPQh*d71xNu>fD|AFNC8rS6d(mi z0aAbzAO%PPQh*d71xNu>fD|AFNC8rS6d(mi0aAbzAO%PPQh*d71xNu>fD|AFNC8rS z6d(mi0aAbzAO%PPQh*d71xNu>fD|AFNC8rS6d(mi0aAbzAO%PPQh*d71xNu>fD|AF zNC8rS6d(mi0aAbzAO%PPQh*d71xNu>fD|AFNC8rS6d(mi0aAbzAO%PPQh*d71xNu> zfD|AFNC8rS6d(mi0aAbzAO%PPQh*d71xNu>fD|AFNC8rS6d(mi0aAbzAO%PPQh*d7 z1xNu>fD|AFNC8rS6d(mi0aAbzAO%PPQh*d71xNu>fD|AFNC8rS6d(mi0aAbzAO%PP zQh*d71xNu>fD|AFNC8rS6d(mi0aAbzAO%PPQh*d71xNu>fD|AFNC8rS6d(mi0aAbz zAO%PPQh*d71xNu>fD|AFNC8rS6d(mi0aAbzAO%PPQh*d71xNu>fD|AFNC8rS6d(mi z0aAbzAO%PPQh*d71xNu>fD|AFNC8rS6d(mi0aAbzAO%PPQh*d71xNu>fD|AFNC8rS z6d(mi0aAbzAO%PPQh*d71xNu>fD|AFNC8rS6d(mi0aAbzAO%PPQh*d71xNu>fD|AF zNC8rS6d(mi0aAbzAO%PPQh*d71xNu>fD|AFNC8rS6d(mi0aAbzAO%PPQh*d71xNu> zfD|AFNC8rS6d(mi0aAbzAO%PPQh*d71xNu>fD|AFNC8rS6d(mi0aAbzAO%PPQh*d7 z1xNu>fD|AFNC8rS6d(oun;#cS0lki91+vYbVVxED!Ru(&o#w_B$T)vt$Rz&4M)LY-Qy}yF z=}}JMPdAP>^=UrY=TD1rDu0@h{5{$f$OC^$l+*cBjH6BcQ8=IHSQ@d3<$^ys%7yro zjUy&&MhD{X;lCqw0y*I?fXu)jg}_KO1@N(4q&6j2$_;-4G82CRoh9p)-wrhVMkog6 
zhd&Q8Gk?OEUwleFvoL=nu?YVx%g;amOu+U$Pv`?t6nb?UL$yb&K)8Lx1~d;OrlGx> zE2mhReg2+*eu9S|yg2y75SXHHX>}zze8Pn1o=dCSxcx}eP_uD6idjGN`~~c>oD(sp zF{W^7Rad?JkO0&tp&X}mrpA8q#^3Xm#?$l)okmx~^4Szu<3ag`viBUCgi>H{SGpy4 z{5@akfUMGGEM3e*Jt(fmgG9ZQ)F?Csy`ZGtosF~3pSJwMp^GuFIp}l>RX(Hc>=6er zGy!u#Sro`Pe_{NPKPzO+HrfabdAlr*ws#sD-&$brBe5l?{8<>|{8@~NH7v1V*xe;| z7_)hJd}~R}o+DJxG=D!ggg>?b+d`nnpEW*45G?&FCU2)dinT}6@b)rmHwg`y=I_a| zE&vT-&gXA55omIDLT@H%W*XjAmShZjn~d{k29RF^8W@xKGtg;QsjL>y*>Lk&N@X2U z>0BB#T2^UY>Ot1|8*3rZTZT1xWd00zYPCn`(DS;=-^gu(qd}+EN>7>W^A`s|mA_FE z0?qu57=S8S(T<2e_SnReV|g-O1&WP`td+*!=m>!}m&+3`Q{Fao(el*hlY{t&44Mh_ za0|o=@%JzZ6rXfhogK`(3_0tl(F|$LC@1_y1kk|WLtBKIpV5tWho|M_P(St|BU?g! zCM(Dbf94SA^X7@;A1j$;%9Yki_!)W;+3;&=Lzr8KM@d*>?mlj5e)uzmzyLF@OK_~@ z;)+&!{lMog{@5ObS1n~o=)r3q3|o4xlouD8L2=x{gO=sV#hs6-e$Mz)L!f@b4*p;xtk*8raSRx63Uc*$Q<7VjBOh-z=GuAV zPYHpcQ=S+qkqVqfsa}cM(nyFpcxj_VY#ntnj(YjyPY!|liBEJf<}~U(B|VPlSy#32 z(5~8LwS}h=wNLGcMMr*5hhID99#!RQnguN&;Npg@&{;VMo5wim5G$zqD z+$N!y4b8m14VDot3ZG)kl6>-~gh25$7AV6Ivk~Yt#+21qeMHF^p}~wN8ly0;{Fw># z5Hl0#G{%tBR&|7k7};TLCmXdqzx>IIFg%{X1o06wEy7M?R6XtWQshFtDU)Ky@w>90 zZ!}A$`Fl!y@L_pXo{B*|0>lg<&}odOrlVGT*ScDa8Uh_>9Lbbz{$6;&LY`Nw00<#a zMl2KpoyJIFIx4wf%COCG#?ee!=dTEW+9IqVriVbMF`AmzS{X*nId$s^M>S-gzhWU! 
zB~W8biGjLJF-FzXR?q3h%o=TH8@)US{KX1^I%JYkx=D|ZQq=_66d4;e7k|$^mp}-$ zNgnu%6#_$)`FeybP~aO&&!h_AC_%7vRr0}KF#@H1NSHDqV%U-@fGP5aVJFS*SYJtc z6w)Ps%?p2#7hw}+Tt1U!v=W#$e@ub+NGIn`w9E~E#X=z8hgcY6>#WV@HdUZTKo8idE&2V2y`F@ zTMd_ZPNS<~*cRKzZpfh5m3wF9_9k36Ao}pCJT# z$}q%i1SU5o(495${XBx1D0Ai0@oKu~7Nt8^oqY1A6DSn$5Hp9sc&_<#t3}A9& zFhG)=!@Iso+X%$HPh3*;gLCrdp=FYsuG%K!{8hc^0?=E8gELkT(+Ny&3^asg3f%p9R1w4q$M` z2E>98nA{i)kRT@9<`4)(VPq_Vm@pgpi)0MT z8ok8eS5uLx!*D%TD_k7L*VLV}tSp_vh?(b)FTmzC7>Q#1iG|dH7&oVHwDqWzn>-sX zQ<_KzQq7=LD)h2}Emq%?%i!A^C_%HBe*aaZ_N52YGPL;j8l@jFu%*wHH zkld(rV?HpsHdJPA8kA}@#BLw-WX^>;)CM%Sicp#-{z8kedj3v|AOxzlm80ZFr5p2s z!MA#mxuK)9ktC8?9`}|1(PzO-c#7U7>cO(xD&ee+I&qnSbervsj+?$uz zt0X+m0e=dB@=iFWB$FmN8YDM5Iu#~^dzB(lFWxJ55wC@{wlW)jV67X^kO%&v1Lz{G zYLbhwqwIk*^~y%1%E1lK?n?KRE#1!fzUFcF*FHtf$*#0)!bkN zOUpQ?eDJ3MCs5nKo2+3L|GKMB$D=>JO71`>KSh;CpX$_te{>%V|M2bL>CCc%Z zNsOUM*a}Q;hB90Akt;V%EUnS=!k-Qx3xOe#B2i?7Otg$|3{ApXVDj@Q<|;%qqoe(#-Rxu%H8|7(wS46scruE#nxAxK}oZTrC?PRz5VTRD*K%`BPZX00iC? zx2gOUF)C)grQ4_s8sk9sb&RHEtE$flj#C^f5B$lDsQ@B1vREOCoJP5AsAXiMJVepF z>g=M`N1%c_BX*SLsJY-zW&r@4G)Yj%sR{IoOHt6Vp=Cs4FnW}n=WeJ9xhM_FU5EIy zKcV(?qeHce)Glzw&|NKn$^W zc*JRRw0qbl0h{49)v4u*zfxGpDk!Pa;CE)gP89idQpseRzn}g(q=73IO-gQ@R2>>g zDoAdWIvG`20bBSrG>=dZ#i(97}IxX={<(A^GizaT<;o$f_o!;m;@@WfY$}qni@aKm> zCQot};PnGkm&2PV8QwY5hgvIDi%jlN))*mTpTiTDvU2LBq~J zEX&9_;*W$yq4U(qXQFWc8QRXl+$qgF>3^Agw9nkUvKVbh^7V zqZKmjHkmOkAII#+hMZO&vsX*?T=Hinus91AnW|k7aifhfKBgrEI*qoul(}5v%zl{0 zJaWpPWf69|yJVx8K(E^b#x(q!pbtB6x+Ov9dXv2JCxt-il#{a%t)(f-`7L7`rHPMZ zR;SEyT>7*!&EGTf#~-1*|7>)iQgRj|wlqY!pk-WRFz*q~Id}`1N}gD@`FrW55Os2a zAkbGSISY|nh9Hw$#xwfU9@$I?GFL&VQ_DJkZUS`x6&7^qQZnj{Dc#00>JuEHV;62= zTZxm(K7Wxypr=xD7UHy2A=6sMFqY`rq|L*Oxl59c!`kJ5KX(WeJ7IRV$ytckQr>EW zT1GbNj&)<8S@p)zkF}19A4cywTr(H^MGAppEtX%EV3Zt`a7(G<Ek)z!#_3+f& z)!W;__!dtMi@5wH&JG(;Ze`HJOSS=4w>cV{R5zq_LY{_F1Q z=;(sv90-j$;?F?fEte6Zk6Ur_#{nH`jjGqAy;#aZL`Vk?RgJBnLm_Q=WnRp-UUt|Z~RFiP~caDKOaDwKT0gG zbus2Nh6c8~(rO9y5uw1^nPdR`PQmIcufNA}2@dh+01&5=JO2C-xRYJ`N!NeJKp=dJ 
zYVq%u0MN-oDMm-zT1XBebfW}7#H^!pyQz5Jh@ZkAY75E0pBum&^2Z6>*<<7n01WYW z>kYOEak5aF(aIQhYc&}!Hrb6K=bLWc}L6Mx6G_{VNFY!9^;1-XoWFClVy z{G{tYwnl{F-|gEBKqm_&8#RtR-+SHKLLfIQoVdvdS(UcY4?_LO1<#LLc?y$!{l}C& zejM&}F#tJ#y&iu(b^xES|GLHKw>$%(&X@qdApGh4o!|iS`+v6}0e=XflZDcaMgT>( zMxz)1ppFcVOhWOz+%iVq)iL0A+QAuWdumrR9H;8|C+yLsJ*!Z&%=(krP@r!vz zH7fimAyBWE{;@`#f5`C*@#jg4*MFGs8c^fU0$}F(3xz-{e(e4q2xa3>1yBn(Iw*Gj zECl);D6IIUUPdu${An}Ns-}GC1E*#@{EG3{Yv9ieU=H|GL!gJBA^gewK?DrNKP>{P z?$$wZ@n>FygB~PQfcK83I-8#1Y_k<}@aK{}F6|S^Vn-0QLCS(`~v2%maUN z2o(5n{?@FWKfexBJ^WY8a<%Z-$)t&u<2T>z?cKC#%er-|*REQ%Y}ukki<(|v@$$=a zswdUf8CVcS;`PNBV-qNv!mAINROV-WW0*gwrBtgdS%IAQ`qIS8%DQPYrY~49A5znz zMa|8NU)!*5?f!jx_pTT}VIutb?iv&r#Pj5?n>TOX{5`=1U{ALRzg#H%=B!&cvuW|%mtUDXf8qSOug#k?d-e>dSEyaQzU|P_ z!?WvasssyxXGMv4eK8#XMR&@peN?Cx7|WZ}_^YgISl0H|{vFMW>L*X1I(hQTv*yg5 zw{X$oMN1bgShDz)*VeCnwQ5preM4a~2rd*a6dETl*ee)>hcCksFkAe6Pfe9C2WbYT z0KcBS82{luKv#Fuvc~B%rXzt2zsduBeQ&(=*6uy+N4mPZyL!)ExO{SN^PHKJYnofn z-W)i-y?)Y!00-gZpoHguzxV{oo$!w35-D#Rl~obOGQX+-PN-?vda&zcXIo2Q%G{>k z?B23>|ADr{9mjflkF+0d+rOf(Z%gH*n!5T1e7~dI^=nt}hTAWAC+14$DGY=7@b7kr z0!n&G^5~Bsjq01ulkkyRk=V1$>OWGj_wN6(8&Shu+m=n4Hf>5H@>e~vv#+)D%B4#e z&Y$n=yKw31;I$i9_b-2CMqSnN!{=}GpMJA`(s&64FW|@}m}mUOCQ$B$;UgEwd5f*t$MUhlsG684=vb@KShzKc6r`;JVU1Oc$n zICVO}w~}4`;q5=>0buYH<4^XNaAfd0FfcH66a!cczwR)97y+;0-GHv9hAC4E4fXZ) zQ~Hp<%a_hyx^UtAg^QQ_hlbDZS~qKIT~*8Bi#N`l++AO-@W%nvo$2ux_bO271~^iV zm4o7V`qls8TReSJO8?B#KZ*2@Uix2%6Hh$x z;~)R%M~^@L!yo?O2jBnxV~>6BdyhW)=y$*So$q}6+u!`0U_`r0N8i&$`0V~Wxx+4u3mXeXzzXe!3Z3Jq!Q>;CuIiuM=gLV zekT58{DRNlSomWEzBUQBW=s0|7It2}dbRJu*9qih@drCu1^`X-j2T!kD+0Z0? 
z6vrJD4paeuF+-rs5uc-MF$6fX`RXFEp*a zFnsdxiV0P9Q@U^d;SZmG_~0glz>DYm&R;qN{GD4;*U(TmX$9PXvGF%_`J@yyB~nt(!??JHNbnJJYC;hIkC3>b>MH&mMuqH54W~Kf)nmP zwQYNS{kHBwOoc+>)K+x?Hu*^6FJ=f-dV?Dlr6--gpJ4ox!=J)mYRUl-_90r z6ZhAfy8`Nh+|khp;5WYU*kdbReU)7jDgVrVW$xT%OP8#|v}Mbd-Me@1U0T140T}Sd zp9mVpufJ^?KfbE9uMeceGzQWtxaJ)gSPVyZuKWCLs|-P*gQD;kk3hL^ykq7_(TtBg z%a>nM_%kiPQRANkVC8C`zuwaz)?y~xR#eeTedo4KnlK*m3&Nk^4>uu0oz+YCf`fLy zv1JQ*Yw6OZ%jV8~W&UKvpK!1MQp024_{KLz!A4~2)Fn%nY}V5XOz<{ZIb;1{JYA6( zaczw?)pgY`jOV9SJ*lc{;>68f_yYisjvL?6hf-pS$DhVRasXxCfIk<3(Tu>~GkEz| zgum~?`ulCX{ENyn=gt#o{@O=RE!pF`U}p&)#PGKNzjyI->nRd&jm z+092T4%~kC!oKBA)9S~MeW?n-tMrn@-}Ox|!}gcs7neT`6b--_mw^(1j6bbo-7Fm| zS=JUy9x>ym>^@-o;a~p6;7$8V+<)Zo!L4lBk^Vt<<5dw)4{twg!*5^!@!PV#?dZ|g9lx2`-8^n8 zeA;&G_=%GzPn|w}=Ip^G0z3nM*Du2MHz|Kg2-NvY1fa*C(y;^?H%kR_);A)5Z1ejR zZhrmUhmF5D@lPzl#rfOVcVV9w*I1PE_!F*}u<9WC2%K~fF1rsMt{b;u@a3i3Tf2_5 zZa*%;hglwP9yWk>{00WPyASVg@9FM3^k(zS?(XKPIBVSCv(&LcoV0p$P!f`Jf#kqHz%)bfv{S=S~F6Z?DE*<%SN=J$wgTyJ2u}=8_$4N4t93 zx3{DJg5JNj4uks>(K3U_*6weI8<*SKPH$+)!XI4yL4xoX;%|`i zcc~X8fXhM`e-aB}{-W+f0;dax;58@=a=tkz3UW?kP_5J)1F$%M3;PbAKNW#L?*_~c zkb35_y@!t;?dm?Vf1erxC3ho&4`m6SYNc;ruww@VKfv!u`)p%NUcm22$6JEG06!;xSHk>(5CMO^ zy|5{U_!nY9a<&9YLIMLMly0Sty&8%+XvK4~QMaXfY~gQkP~-2yIUWE_*MI){%i|yC zujN4d5e{Jc{(Z;L5j>3w{hkMtz!?P4!fL~{4$DS51{zc>u@e|u$9{<=G`|;1` zPX|zTcEE{=zX(@>0Uc_$LC0PV)f_ZqIj2#pTgss={%QQRp1a5cAiv3CyZ%%7JJ`{| z0qllt$YFt>w*m1z{&3>oz!6Lx{w%yq4+KBJ?!t*We=0DnjZ zZfZTGlt|5*6*x4$O-jw$>}9u1LD3V;C+hHgd2 zSjk{^whfFa<`{q5aMfU+|Mpt=TiADz2S9WD+vxE(!~pd7AK*ja?+9!{y1RSZ_iw?t zT6X@vIcLRkOdB_D*syU^C@uHUL;Rj@KI8+~yW?mFJX6M=90LG>z|M{(5&83Pz67^l z4E({nSB`(+e&I(03BnjZ1d5hMz@$M<{P{C-8^yfSc*XU1+sR`575Xkf0E7iNI)A@u z1^Dm+5by*3dV0XCunj3Yf3F`uwiVL$ZQG8YIDY)_;lu5RVJ&TkD0U9gt91-K@1N$Y z0>CTh`p&}Bjwer?JaPOOMnRZ*`|Dx;IyV~bKO=uU{Q0+E!u;Xw1C2l7ZX3v8aV0!hJ5`fKD1%QXI!lQfx{a4}H3%0+}`14pWJDVE=0KXOU({f`B<3lmuyGFIPb+)wi z=qhLN52<78FXL~1c>hsoy>JNwV08Xwt=`w&d*W2z`72lZuUx&_KQMUX#!&x-hshrU z@Ix6u;IIGC*oieYa}NyMefQmW?%%z2{Te*?c%kpi$zwf7w!EVAcg@NlZa%czFJo3Q 
zA=&+x%NYJb@edsX9<}-hyHqZ8|K+E@^V9z=rT^!VG1hK5*;X?f)n;`X zZH*k|2L25D-@?Xo$ltjb{B78KsI#m0#F=xK`iE}ZzW3nW4?eheWzF2VHS8&@1^5^Y z-UE3)=DWIRb%=i~0Q&Kd1wf2{JOIi70)GhL?h_}@4&3?RJ>>7kwE^Jo9Ov&Vrxc+Jj@P`tBj6VeMiQ0tnx>K>ic}f9wH>1R{Ws{`6n8 zF0?}zf8N*WwHD&g*-})B`Wij{VF2v2^2eTnR`}a<_(%`%cj5BDwHtTtzsvZ8Uo>Yw zEC^3CvBZ9F;q;j^XAVGGy?Wicx%1!~@ObA$AX$V+G$Xj|Mjy7T5%JG(0mv6%2|&&t z0(j^Oi+^`m{A2tb>+aC`gL5>kzrpT9<8KL5Yg$qM@COvEi5nk>ubuurd|7ULj)~*000Ob|<$)_HD3i-2l%lNaKoo$`QXxw?VYHMG5;f2a= z#jpPgZO9*70Uk$9J^m)De}8A~+BK)P&4Vu+@-*i+vtFG$Z~cajBiC=;efypFKK$^V zfIq(cp7P?K7XTFiIe#*M7~WXo*ujK&!m@xupFg$$s~3P`0TvO^=dXXwEY2USzwL+N zzGY_ze(Mbol&SMhtfmI;VUPJazXoRfiSXA3_Yoj*`aJ%QY5X-+RnA2CBmmK$R#dq0 z@qZQovMaz~3nBpsdl0?}!8O?9&)O~L&uZqQT3B)#Ep1H2Ooql+tGDk0(b)Z$^X&fL z5Z*V~%B1xF*?1Ac4Fo{81M&E)Ex?yD4Zm9R>gqM?+S(4cU+L&PdbG3k$o1QIAH4J4 z2Or+G@~5o8XZaGW0Eht)66X)U*$v6(j~#K-g&_XfE&xCH5Yk8NHcT_z8M@lRzL7Hb zwIz!eFKTL9xMtg>fkC(v2-gx_UBKbt%`<)O;Cr!k6=M;vU;XMA5`W$`gZT7U@Q3T~ zF_}N$7Mr)U%&M0+pxmDp79{?{TVSyT5to1-e*oZjpJee5Tbo=DW!|V}YjUHxKT|2- zS3h^ztA#=Xyc-)he|G@5#juk#@CUbMfj@Q&#^;aYsX{obt?T&7Gv_Z{x_qdk^H6K+ z-i6Igixw|lyktq!<~5l1_U+wkj(_SkU@-tWfAT7f7lHTv0BF4f!vGu$H=>){H!YCh zV%gd4UERHB-n(&W*VeU5np(EMao}J()28!u$BJn|a1;E&?t?w}hYU7xJp_NS{=!W^ zFaEu?Y8FIImRdHgYMxUF0-)z?Gb!B2;-9_+*7v{|07V3p__F{QVN|GM0>|0X)WA~K zFn7hO#lRn?hT2zKc66Q>{0$9lMtZ@ zpPqk(pV@={EmQopb&TK-;-m#%fe~GExtcZ?&ikF{|Ogh z5df8YF!BlV&@v?uUNTi^#YGSV5p&?cF|7wEe?!- zA^sMgWBkD_m_569zsA>|S^;OF@bVis-sN7pvI!QM$xW}VTk}@y!2>H_U%YT0JbBXC z2rdpFf!xA{-TnRN&T#_y1~}jk09*-QqnkOgz5#wzPie>ZyoU~7y2Lgiimhtw*rzrA z*hg^cEojak0NAvarFCNMGQ0%HNtj0Y0r0{JnR1d!xo*Va9~Y zG4P`=Je~9O;86~*|KJy2R#jHQ9BU^|9LpAN&fgmV;EI;TGq~p^XNOVXMxQ_RCN#SX z4FLWSl6M#8#~1;h_{n#I^%r|E&=Fx&UM1Al*)mkySWrJ>ehcEaLgEi*$$t&!jiX`y z3Vp~Q20*ySKV{a8IzX@{;BV#|ci!>%TL%2i+0^&tmtTJI#l3sIvEkU~uiu-D?k3VD&(}B(bR1A`~{ua(%?&nv4sPe3Z zi)X0(Z3^>;_}%TTK!>7Z(ZS&7Pr%_HqvH_(2t|06cW?MIBaqzy+z0oe*No%%!M{`B z!f`I{IPU{tc=M0}w`0Th?RPNUWeMp+{$5e?hv_(t-jbgDpGvY+H>0E^lsL^m4t} 
z3HWYRJjP2O|0l5k^8hGTU=aW@0`d>yJOIC~^H&5hEkgQpF)`*eDw8)J)X!er+|2M> zy=ocoC)n%g{LlaV&;R-(Dh5g0{}v8${&)Z^%vj#?N_BM*|E6udCF0*wV1>oMF91N5 zKLdUreu&p(@Zs#;yO;R=7bFkjT~*~c4q?sm?~NVSJziC#4PwR{!Tr;c*Ca6Qv7q?fnoe{04vUY%mEx2 z7=%Y6jvhq-@uwv@f3JG$@BRLH&vCR~8aJt7W^?nLii(<=`QGZ=cTQ|Sww{XzpYkhrn;#*_=PV0*g;Uq|bo^{_vwX41xdi zf9g!Dzy1~ZLja5O zcMh+o>lQRO!^dK^b)3Pe+pfRIe_7+od>;Q0zuyJ?Syo>KKX^{>-FMHNS-iLger$^e zzj5Oh&Y!ky!-kC;RQ~SXxqUl`es__-Q>T8jZQFLS{u(}l(-z<$00Ms;!257h5#B0z z$pHLaL;n8d*Dp5X55jbJ_nbX2|G6<^$Bl>Q`0H!^Tki)W@F(a){{H$O|M4HU+Ltrw z=kDITvH3U4mM(6Z+o%EIJC8H)!+*s1=kv#(L+}CgZv$ckzxg|M z?AQ)ZGtcwmpSSeh_OAV4Jx0^rlij;^ZdyD0$c{7q9YDh!81Er0yadDrm<7O!({M?O zvwI)jB{F|&UY#>@#_X3Zr%2c>zXG|ol;*j8-DU0!hS;o{^D+ZfS=g@^7sdz z6T|w;;~xus`2I$jLGJ$z>E=z3!A%R6Em_>OV2*`A&cF}<(F7oS5QgtTFapXKfq(Z8 zM*gG_sKO3+CZw`-p%MTy^_kTwvd;nPtC5y-xYW_{~pnUg!CbP$lt$1`nMZ5ID=DW&0V-){wq^VF_07R zbL5X-0rH!GegwqdcxTUId26tD5%~Z9i-kX*Kou?jh{NI^s+q*by1Hqv0v7XT&1Qjr z`JwCo_HX}&8VCS+2n2nQze}B+gIlM}p8c{=6kp*9Kuquv|M}*Y=9TLqfpb>0v=jKCSunyAN_3PKHftzaU*6kj|P2}A>y%orxYxTv=hZp`>^m~_WTiO2M!PTn=magnO zakTx=!Grr*yaU=efo%P~eH;G3vF=LfFeyuF6)fc*`Kb_dW90B4`R zcpC3(@4bZcg!Px*gt>MNZzzd7Zp5@9^iC z*lz&+0)7k#68t6*JfnuFj~VkXEa9^xkp7jWr~Q^ve_qx}5$7}9NNO9~2=vXFJ9`G- zYcwAk{_|h{@)taN0U!&3-YWcic;k1iTeqjFu3_5j*-{8>yNJ(V!S5Ph?eE;TA3u`9 zYjyajes!U+3qQh;zgh&IWB76YBH|}jUyOd7Ka6Bs?%lq9qyN&$p7VIccLze9NFI3b zCpp2%16}a2|L$Y=?%m%K+<`GIz#4$K1;(3z-WG)O#{oqCAj#*of6SQW7vS^fD{N1~ zEbJkB{HKe*TNx3nS`j=i{hqVOW1pbU!|yMD`SYLuJUraYAS_IsJ`Fw$mHew5NaZYB ziqX|Jpk5ylj$<34!IbG!3hYhR_SW#9{sbos{4oH10&!Kq1o02}g9M)dA33t)<=HR$ z1a_gufiJ)`M2!ZgK zJ4=i|2|xxK`r?g6CjJHhKj5#Yf-SyQ{PfkAExtVbar_v6Z7X2)J9z;deGR_J#D0)! 
z`1;wi;$xFjU_Evgek5sTFYHA0_!lg|oIfo9icdj&{yt@w3?6^t?c}}g-zpUd@D%=7 z_-p96DEI?8fxiGge&xrOUI+pneSZSa{NWFO7#idZ;?se`w``Lm4)oXL#_(NlrSIe& zD=qmqooZ`aTYnFy_XjvjMj)QGN+7!qWLpuBKPDf%qo?>Gkn<;pK;IRM4e@UPBgmcG zJr&mIXWx7Ti|>Q`_t(Ih-wJ<&DZuT_8TjKu7cQPT3uQnZBzQ{o7a;Fsu?rO_x6@X(T(__+QEZgfBiL_7@i;_5YO0; zf$B00i^Er_7_7gTn4+_mESrk~Q1CYh0Z6#Rd$sa++v88%dJ!MIu}{Ff4?eV8FY3kL z{rmT~T)KSuc*Wqr)l2*_Pxc`lum>MV195By7!a>q8@_(CqW9ii!C$ZdO94$3dW8*45)Brb#VcUBH(sg)# z<=X8#@L`J-|8)L>@4<<0Vv3u9Yzra*2q7Cc?Vl^>$PXBQCj7j2>zd;yUPH0!w?P4? z@7lGm7<@i`2%o(0vXz(l2OWhD`?4iq1N~`5CApE`LJNS?a;`ze%{Q%ycc`>%X!#P) z=c{)Ffj)wM5Y*STA*f3!Rb|PXY3^xGrF7$E6-x>&-*qSZC!Dn#N7cn`2S^z`;CA2;awl{?l*41!CdlE3d$>^`PWh zj#$8>Wb$sq+URt)^x7)DNe9lhag5Ca3_v_JJqUjIr$2q|UkUm_Q0DK#K72ZGUVFz% zc+vTjR$()L;KAFsyDR*y2U~pcv6ncjFYtH&^2PlX$KkmH@6m(1>;~ZNTX)zELHz6k zc)N!L_EhxVVb_1!9T@&Y96SIYOtr7;1^_*>nEV=Ka865cKq-m`$>OoG?J1N=7) z#QRX9&*QH!dp2rTXjl1TZ*AqmFBJSB@_}PT0P@x!D%1C9IzDR2YiJpNPy*?s3*UEoZHA71@kzdn4V0{LSA zAq7QO-~0F8xPPHki!i;{Jp4=h}jBhFO2u8M_PZ z?Sb)<0RdFsPwc|)K(PJ>T6j4#f1oDv$6}v)<%er8MgxGaO?Ii`G}WJ)M@ulO{HdV8 z1myYX9F<`75Ddpawhv*8a76x;Sm^U7hd}h;r=I|SBKiS zUn5TmDguHK1ORL;1ZJ*tIp_PXdtP_sJ*c{TqJ}pgjIC z_5}DjSKoX0R>OwltrPFQeg7VAO*nhF0pa&vIDbgsjlpZTZ}or$KL5bP7ID6tF$2gV zpavj&uONRI0Ok1So$CSkQ}OWiboo!x_#hhMAH$FH$KT=p#>1m;5O=%2AN`EvP`}L2 zq+$$@Ds>w+DggR3@iS6ezs%1_MxJdR7~}}zdocJ&gd>RjdH5CMZ)oV+_2HYhb{7i# z`j7iacnzZ;rXc)%1pIYY6kC0ji$4f+Z@|v>_HDTTJOJ#$#~bVr;A1!c1bzr0d>RZ- z;hZ|Ns^PV{TaF*+{g?p^0$}hYV6Xx+{s6#!{QCHDNay|U9>clfC%_(!K6)(EEw26W zb^7$x9UUA$;Ln2(>BBcX%8s$myVc@*O3L_7lgj*T3dY!oNd)@ykTTJx-N3-58i!mf1$AZ{IO&B2Z6iccb!n0gGX{%pN%be|HieHMoIM4-&4rYVdbxCfN zvYxM`5-D4<(ZpX7#Kd$B@fv0((2^z@WQ9?FT|PL-mSI64UxxqmJNR?~Rgefx&Bg~^i_EZWl1)!q(2QFZ#{`Wf?Pyt3hd3BQlv znc@l)e)2P?EN(ISyq)d+`%63FD;QS*J^`SIAHMJi2>9K-yR5za@Zl}+*RMH$-)6s+ z`s9|efi`dbkE0sCc~J&qfK$3J}I#~A+Z-mRFrasU4P>!&dM zzRmITey{h*C+8eIX#E%jeI<&f!gqUY$FgebGhm_s*p);#o8R ztd@Q9eB;KKzx(8)_ujd8`}*}yJTlpf#~)jPf2Yyc))H`rkOfCtu8cBY%+CGY7Jw3_teq_$Tn;tH$5-ZNmY7=(U-1=B?^IbMD~oZ5!uL 
zgzw2b`qWcXAkAGf4uej>pV<4d*b3p*gx|e=`}Z!YU~4bp#)2La2+6~bt-p7HyT%u1 zj~)BuPgMB)-;4$dgT#I+**685+QaY-O!(oe_Kh3nCcxVh~i)n zwDH#x@OKY&zkYqY^!;<-?`<#swSdd!u3Hbkzjk#1o{Jj1Tpjopa*x4->4SsUyXG}v z{9~U;@H-=HlgeYD7XI!P;HRS${y2OxfGqx5uv5k_dQg3N=ok_io?%NPGbVD>43Hjlf5AK1cePzK0IUuJrQu_y-lr zWwi5GG8sz%+d9?c1`ww2eU6}wKmYRYtL5@I9)UBrTzl8!Z);oE-gWC2LZ#=-5EQWD zI{1-|-TLikbM!<0Ffq!Wc!CkPckf$o@zQer`mS7Bj{0_03zjBdg?pN^F(6G{rfA=qMfiI-$XW!7!`o*U# z0KWYtD&ZxCze9(1;3bQlKU25fK-gb+%#w|zQSY9Tb-K7<8jVnX1HVzZvSsKqe8)o4 z_aFY{U-;u9IG34QuRY+OhxBjrN3ikt$dUoZUqvMXSakIj@z2ajxk>~6UVQ`qpxoB2 zJ9fNohQSMfcRok6mwf(iEP;}SqWqct0#dU6d`Y!TawF?l>rFbuIa^cmQEe@uob!jL zyTrs_aQSDR*6hW5w=Q0UDX?4ldt@``4*`sTpSS*&i!^h;0e`^>G5Nxt@dDt3&sw|+ z@WT1Spcn9W6Uu|mf}EvWUZG6JlEF5giq~yiBb?+-7XOO#X9|CTluFLd-#0en`U|P@ zO@H;}i*I=I0Uv>w2`O)>gTIivUw#hv0N=jQWny~#rEQD^ zisH@SKujqpk#dQQ&75LmWD)-O_T%fX1`Tep6*>5O{F%1C?8_by{_d7{r8W2duW-`(2`Q4u zMiv0ydH<`Rf`h;4-Qo~ImvJUkw58Iqd@4LIjp{VU(8x6<7*6BwhQ=RXe!q?r{%rUE ze1CtRBUowUkFCAdYVnO8e-QSTVmCWP{NtY)Ni8Ak zH{zKQF>0f#W5zK?*T`|EaN=(MyzTGTal&6w{_tZB41!JO@aN9b7Lpq!6|fWiWR#9=MktTOU###Kfj^JH)uz?g z!w(dRe(dW*F@hPNKS^DgACdRmh5_)s&xS-PhQBdu zgZQ@-@l*JV)@_&s9KlCL3@5)vGnzW~TgEm9GYZ8&dAe33RATXO1=d)46Mx-8z5e6d zk9grPDu0hW^388P`Y8Olg~{ty!!M_9KX>5*JpVd0B=8#?96{{-p(Or%b!9jEdG56~ z2!J1a^0g?9!e5kbO(eh=f)p{F@*2r#>{zsnZ!B?2Fn@(Vc;lM*ix>W)^XEI=&wTeg z_{?DC#EFHqufP7NUp#^tnLnYf;^yFhxs6=`8u+`#uK!H&4?5ru4|-B^Wi=WR6B9~e z6m6AL2oBsQ1ND%<4*Z=LjDI2i*!I_7U4m|%xqlr&Tv8uO<&UdtJzG#01K>wu1I)r7 ztiQPEy7+T<%Y0@VvqYl>F%enKqjHE$t0=3Vd`i%q@yCLmfj_?eZ86WvTAC~V&@CBU;Kf( zc5T7f_T^_EdpBS>eq$8=@R?b7EJ1o^T=eGdR@5@RmeA6USVj#9z(2vy8tnV``-@Z3Rt7KIdd1xiQg+ zRHG)0zvcKRRhQzbadW5K74jE5{N*nG!52+sQMCNzLtlRSIp2VI{P9f(UjG4qkZzBu zsHrf-0C%^}F%uiL$)|Ui;AAFL*VF-j@W-n^O~C0FM#hYg{#CrqkaHE(BA)10OnKmIBF z;pJca@Rv{itZthO0q}G7F6H5`0Q}*i%lNCEf8y51cTVgqG)yw`XLlMPRt_T|X79E{ zqrGGE$&9aR*nRrg(W5p6O24cp=^y^R-u^7;#( zz9k5Mx#iESF0gRu3w8r$%oup60&ZgL5d4j4n7r=9y-%<7>?*KpZOs85Mb>VaU2DS+ zleZM?8oMpsXn9YV=UY=<*mnBT_4c0C9)IxT7PofdZ6NQj`@;4kLHNryf6p5{Bjhux 
zi^i|Ng7+!xGI)Yb2gbiKW9n)qZaDe&yM0G@HHvQ#%e@!vmRFFQc=_Zl1!BfiNyBLD z*fQTKjhlL}ULWdOSzQC$v!(29yPa#u{>C}*XE^zyMVY?;^1MO7Kt4mPLSy@vUl*1v zToCXFf1hbaMQwHEqzxx8T|0GbIn)WuR~uUn1}gOr_HniegBO)q>OaL4*O_-pBN z<}@{}n9DoRIV#$%Um?Ag5@O*~Ny!-Q*gp}>C@1q;%aXZ^mR8r*H@rG;`yAW~H#IeF z+0=)3qG9_Bf8#QK%6k0MH4>NmLPb>(EM5K8*KNm#2QFXkAI2X?eEYo*KDyt3Y)YZ7 zPPE~#;g*tpM2rez8VfK1X*#7CRmQY#4aSLelc%&FJ;i=%>h9g!*REcG-(o}ll3e~} zKmLIi6c`k*n2b|_zY~nVYd3G-zKa0fIdk~sX^pBw<8~{L8*f6^mc?0+nkr+)Y8mrP zE9<6Be{1&{_&YD`yFWJvE)EV3?||*E^uvqVWE_V%h<_?%l1sd-boQ*|tFMPnTpzr0 zx&PV?xcPSnein7O6d*6BOHo!rdzGV5J!zNGFPK(L&v{S(|oT zfS=WU8@`eD;P$o4{R6|thQ7vME{UJ=!Jnj2NIqFWX6*U;>m8RaUA)pic>N{tD_<%?I^ z6K+3pvHmi#gubeB-jQNF1a}JNFI%*F-}%97!@~gJ-M$^`W*fZ{_@#KadJSdFL`LJ3 zM7Jp$tg)`C03p_KuCl*Ve4w z_Qs}-E7q^aA5D$ZQq(neo0ic$B|fBOzR8V3KZ^`r$KI(;-F9%-&KG#c&mP0oU|B@1M_GX60(^$+xn)2LULj`PJI2q>MKp3R!kF<+;I#wmcgBH{#DOARv|@BY)mR3N{(JFf5s)L66(dK?GTSm6Eu$tEFpvC=HJp@C$S7dBjfVNKRf&xo+b9sT zgh20POx>o?(x_iaCu5vwruQFX4W0=Q%e>L2C~X#ejZ8@RM$@Rtji#ZXEaUuzHIfv{ zx>4dUd@3Vh48MbAbwflsjiK7SAoKj`8p$Gryp}Daf|!1uBVtTZz})huX`~{SX`_mm zb}A!e)J9FNZjSj=G}0Av8)a2OEg!Z~M@$QW+I;2Nrr1)^FW7&G!R1j!ng1#qE&1%P zzwB&-v-3|r$Yj+Rq9`pE0%^QlppaoqdDKwL$VPbxDW4Pm0vQYnWz|?V77Q8%NEnPk z;7x7}22gUj;ZJB}Qpg7|TSg;);yg#xST+G0$By9R%Bc@ER^^GkvDBjAD1$~bVr65& zi%WrIN{(BVqTnF8F;D!l&=82C zx#iEOuP1_8z)@!u{25Q>p&HW@Fx&hYJmXVj(kNR-A2HLZJY=Imc^+1o#D)+UNUs<( z&REPS31+nAENS?4b#gN#_ljWS}PQ+fEt(3Fx$pMCx`8B`HQpHW$T zq$P{ZTQ#bPg+icmrVpc~Ca(Krl-UL?GtJ-spp-PEF=5OI7%5;;2GjC;~vcR^ohT0`!DOW&oqC}W?g-nf42D>PY%qu0vYFT zJQ0{}05i`Y0hnn3v(F!ez$^op2mXk_>=Kv{{%8@-DuFrSk3wKZ0n8156aq6zV1D@X zLg08xSyJHHEPwv{tgbYK0KJHgN3{a^;tv+#QLQ#jPFEmj{OPAh$D>?<-0?SIlnYMN zQxwP_e-ns6#lxffAdmbJfuox_d3O2ak3yjABNN+%nDI&!iI(*$EeGC(&z~0M zRQ@z0`Fpe}kO%&hD5vwM7)P7>qi{aYu{2^6%LRXOlne1E8%Ipmj1I)%!+%HW1aiV( z0GWY53W1Sm3gBb8NNq~4lpFp8WG4OsI!o3oza41!jZh5C4}TtHX8wdRzxb4VW?}wD zViEpXmY;wAnSkwip3n!PDD>(yhH8&kfpGhX4QL)nOhbD$S5C1s`}{rs`~(j_cyaKD zAuvVZ(&|cb_=E}1J(pIuar=>`p=RTD6tjNj`3u-%IVWOHV@%=Fs;+wbApxjQLOD+B 
zOpX2IjlbtBji>1qI*qP|<+CZS#)I+=W$!sO38lc^u5?T8_NXSh|>rdQe=A z2Z?$qsZnSOdO=COI~!-6KW+JiLl*&_~MXaeSfvM7*o{=)bne^$tt zZL|>>@^)DqZSOQRzO}&KM`BA(`Li&_`Lh@kYgl5#u)9m_FlO`c_|}q`Jx8dXY5sn0 z2!Ct=wuL~CKWlu9AXxfUOx{j^6l;&B;q7JCZW0u)woX_8ABGBaQgx*Zj z%rv~KEXf%5HW}y73?RP-G%zOdXQ0!rQdupYv*G5ml*&4y(z!Hfw5-y))Pt<^H`YR+ zw+w6Y$ov`b)M}5=q33m#zmeMnM}tnSm7X%$=PwR`Du1IS1e*C9F#uJvq8$-`?6HX_ z$MR&n3KSa=Su2gd(Gdb|E|(`>ro3(FqUEX0CkOEl88j2<;TDJ$;_qP+C_d@1Iy;zm z8FJQ9qZ!hgQBL@a2%v$#hqeeaKcgG%4o}O;p?>T`Mz)0dOjeK={>&lJ=gkwxKUOly zlq;>3@H6xxvf#EFUX*r%|by%7Ntl%1Y@#&*EUBEaAXj z^PnHmU&*ftC!{vZ6@U67tez@d>1hShp$kr_S}Ix9^FCl|F+0NHg5tP?2QAB!i#s1v z{haZqhCuy<9sI#YSg&2K;}|gD6y)mhrX;s4M?T(c%(e5zpArH?r#vxKA{97|QoRzh zrI8SI@X|(!*gEQD9QE?YpBw`96QAf}%xTnnN_rg8v#x64pXFB4!29X-uMRxJ^PY8=8528!RJQ6h6h6 zCHdq}34!8iEKr6aW+TvPj47+J`iPP-LW3DkG)7@w`7;yfA!a7fX^bJMt?CF7F|xzh zPBvlV0T4oxhV>C6bwK9yDbL!R;j%vs}f5k$eN}$G=5(9OcVvMS%t)A11 znKjzZHhOsu_=^<+b;u;8bdw$*rK$pujhl zo=FwJQG#IUs^o*eVgyS2kT7LJ#IPk*08``-!%mvrvA&Y@D5OjNniu{eFTy6uxO^td zXeBUh{+I&skxtH?Xqg-SiiJSF53w-D)>)g)ZM3#B8sfXy(qf{eJbtk>&)AKnwh6O2 z;x9r7w2K7q$Am_EGovN8kIULXn_}#-wD$22^Tc1#5a>V*wi+(+oJLo}ur0Qa%hez} zwTW?~s;P^3W%=UIO<+7_%2>NxL`Ck`ac+_Oz>1E3BAW8XpJNdQMXIVCvLrV;IyIYO zIXUgh<|+B5v4-DwW)5?b2b0<>oXiJLbeqF$OEzSJ4VN_^)o~rfiKrJ>J4Kjih~wc6sGbS%k&O z!u~@fzbMEElQBLzjX@vrb1vtmXPa2kR@E_&_$REk$Sr?T2n^0RpvVB3WDHt}oYN>O zC3|&vH#JkTnTb}8X^1LGUo*|$FRET}CGl)@pp8UP7{KJlV1Oh!hj)FGwh@SXpSYyx z2j}F^L(3#NUA0Zd`Kx--1)#SG2WPAxrW2Ul7z~gkr}@-;$U2ryRwm?1em=>(X%ZXzNiaH+eQ(rZkZbq?$pgROn>`Te4iZ z$LF3))|1xi7g_%O@n7J{unR!;kA4-XoGN>FD<#SSn3ZGYAh}WL#(ZFMZK%xLG$_?* zh}}Nu$(##ys10at6`?dw{Dl@__57U_K?qc9D@VzVN;l>MgKzaBb3;dI$sK5Y>;1v8 zRwLnI_W6?npbns>iIXC&?noXQoU0YZpN-r>{MLF4xi>GbS4nuB1O5~M<(+U$NhVEl zG)QiAbSg{+_bNrAUc6WAB3=t?ZDlt6z*;w+ArJgT2hc@W)g%{V$&FF`Veqe16s3pq z){!P7vMCLi3;t99gAf?fs2D|d0-V`viQ0^wJ~tGU4lmX>i&`QT3jP!Je$QE`fF z5t6k`WDHHhR$%ZlE3(xev2xSI(i%J`{Fwm^i4=h%OO)d+lNdvjuoalx3}v?JBUf&k zSX!g!g+CoY76L;eMWVyU~gn4}6M3 z7TIWG%x@XnXd22`X2j1}b7Tt*^Ga_5+2&8VAk+ARn}H#jqEZxwOlld^7@CBwz~JZ6 
z5dEqQ^Rk^n{PK+Rr?LljVTR#l%99H%%|9{7_PQvpP3WU)dNIgN7LP|L_hd5EHU)!9X>k3a==M(ilfQFFnc z%mM&7X_BCjQxoVFm!hC!L(7Q9VDu5*5M^Qc;jiZWQ4& z=u6Id{mMl_UJRPeS3y7V7mdHtK?%rFbBrIgC^$-P4Eh+AIphu|X237i>HgC0-O+E6 z;~%J!oCRk~)~{7`Y?{nyZ8aJU2JS>rs?*C8f8_(kff!=%@QBmsX!o#90ye{Is#D7q zf2FXHRZvo;!SBp~ohb6_q>{-re?R?oNCQ_anv~o)sX8>0RFK>#buy~50=DpL%G1m^ zf4>e9n4AT7%W8+YIH8=m^)FD+7P^>%=4!cn4E=TE$sk8%Y??1y_r^b0ye{I zT0?mK7A#$eu~Cl}Bb&ZR!fZ1Yzk_zNOnau%Yt3`aSsWlUr0{!F36Fm--O3cYJxWSqY+ zfXP{i+%gp9llm7g&&R>O3pr=i87Gkv2AX8h$F{ba((t8YJ=a-b; zx!yze`O^T@SWwS5HKWRy)@=-BBDCU-4$LSkwJ&r`V&zqJh%hM8|^1jUW<-ha9oCQbH<;4Gtekiau%E| zt80v-d4 zjA{5cK_7PDbW4KH^(J}cPYQw3DJN$kT1!)u^IOI?N)sQ+tWKHZxb$gdn!jh{k3T|r z|Jmq3rQ|F`Y-xycLCd(tVBRB|bMO{2l{~R*^Y_wAA?oA;L7=Zvauy=D3_&KhjA!(x zJ+he)WUhizrI zKaAdWxMnW+ixdLIS}eaR!6-Q>;g(Xz$&I?VijyGM&|617mODwFk2qXD_=^?-W%9f{ zz~Kv(928SazvJXae`a1L9t?Wx$j9-9vDc0?U|#r38UiH%ji3k-;~NEl#+H$cVr=nh zceU9f+Qob|cNV`i(y+PV&m01qcVT+-%{SrSBS*Vg>fx!ktGBmz%Y;cjfyqI!wB(Q_ zH&%-e_Pj1)BVZlcX^1?`^A*dvvZ(Q%?#@p9e|JX*{MX&p(a{CTIS?9i#Giq{U4U3` z?=hZ+uk&<+r<*Ko9)|$3MHpX=3E#*@0~R`sXtlO1YSi0C218V|5ACvu*2P|*AGhM< z$&)9%bo@9|cbKPZUOMU;2yMCIPhW&NeuBSaT%qd7&jTg+H8f6qPV z8r)_p#+_&9_(}XRc{ToqiUF7}{?riY;TQ170aW=D0G0xZ&6BLW)@5}X^(jRJL$2l; z+h&v7+w&fNGJhzq&fieGy$hT`-uRP3pun#Pe?EXVf0S5W>tf7l3=M2|rPUJZBSL|- zGsyt>or2X@UVo3{5**^s0U%B#cl`Mwa3{O=ldk`cfk5~c)#BeR0ictGQjCtawU8V{ z=tc>Eh*?MHc2n`b5kG}L)E1I~KR19mgCjO3V@sHhV*dA&x3UV3$UP1tO_Qc@tHUrSfLWxEjWUO18 zk@2S)h3BHax+rtl+}(5hjQkzr0D9ZsUVf{~0bnlU-%Bsa_(|7)Y>f!TzuUJNfKC=l zHfkJszW2Jdg+OjrIB}B^vMOz(AB6gm3!Wdh@)Rcd`j07l{5ag{VgPdfdOiMn>;OJt z|8--r2 z>Vg9sRke_8}o-K~S-;?KMY2R%rr1cf3e z8Fl_fG6bsFi6g-8%xO&e{v+7_viR2v0P69tr`vQ5m&H*em&`Fny3z@BasfcfChC(y^Qr+3f#g^iO7g~^Rmrb3$1IC(N0 z)z{Tj%~`i@X4B%iFTXN({=)flUz;~)_Usu@uTZ;qecPd0SYFqSu`@mE>bu&nK^{X3c$)lZ&2b@JqwXU&;AZ{ecFi~+K?%({Q{4YyzLPRy0gQy2#E;ot2L1(fuX5Swk?}7ZQ7JZSXJ2dQl}nc{oIl^!cj402!D~0J?qB}O zjJm4jhtJ>WKmBI?r1268Uciw{FwgjlO`zNh!$&TV^B!ArQIK~UgK8z_RQ@J46t*5b zHFTt{rEco%1v~biz21KXBg4g0eHVAM_8plx2?AiDaq4t{Zza3>!`pw%1Hj-Z 
z#-Hpj;mF{1U|?YCCV8e ze-h~*z4X5zJ^5?Dr8A=dSXU@)JQOn?%ded z2in5=J7wzRI*&en``H6L%{2|M z0N2+y_VsN9{w|-pbotW7OP4NP85laX>y7DC>Z(?>UmQMm>@6++fsYKJ2>#-RK-J?8 z{*0prt>q~_^0S{k`|LB%JpJ_3KmF-XpL*)aCx7yjC!ToX$3Onjj~;*ghd=zm55E8X z#~%CM_a1%p(eHluJKy>Cx4-qRpc_|C;jf`k*mmsRZ@b!Bs%j^{d}!$XkKes>147_M z0I=`kl^wv}%YYw9T)py`(BAv_gAq6cNhQ#yPRb1Qk6Hj#{7n4G_ywQ8vGB(Td~FhL z&6f1_E$qB{^=jXR%U4*sI@q;)_mqa3>O-e54xV1QvY|=(D2_WQ9H;{RVunDM$2D7c zRH=ewe1zc#{CW5>{y2V*GyFXM1b*L?x^ZPC0H3d%Uuar;Vff_X6%(rJrgY!@!yi8X z@WD+8ffvvBoxgMl_&c|xuA!lB(h9f%W8-h=@<}OZNZ1A0{?Q7chM&YAdDcdAh#2a$;@$>%iZlEnAMZ9&T-e1Si~oYTNeu`fc5Vm4g}Kf(AXhd+hC$nmeXHUMz^>{bw_6VuL>wGBrufBE~*fBWA3 z6J19-+7E5o+_GX}U*Bf9aZuSJ<0n4;#Q-=o)H(n9zMU=LCho5{cLmf1xuc^Iz;Arx zvBy@t`YO95QvR9!%G|ljmM&R^Y0H)^yLa#2yR?2812Eu^KM^#HUw_*)etcDHUmr+` zX$++QpS?HzkK;J+ef9F*n|oiJkM44kb8q4}_Hi61j%6j0Ok!-oKp+4T zAixC+K@b2zf&jRJAohJBNUX%ZlGwP3q^OM)CCid+$xeK7{*T<}sp{&k>aOmY>6sxh zlPZar>Uz4WtAF*Kr@Xh$jKho}3 z=Kf^eudn`(12Fo8&R_2h5G#+!b{19C!thA-j_unJKg0bo{%{g9*B*Dc4jSmZfhGxDKYrRpNR$c0P?&6e@+DYW(5BJ0f&EH`1>C8 zzu(2-pYQzX9?%E=hDWw-E2M3KEm0H`9UB`*9&U|~-xiycl9E!~s5u4JVtw7s=St5M zlmE_OAkovsw8e|_GV(Hh^b#@LJv~jnX68TWtzjI|dbPFf%J~!L3ky?I)6)w&Zq6*< zAG=bRyC-SemUm(Syl5?O{4G|#3)5eUpTGQ>KrsR6$1sosknm?J*lgyug(%63DX+-! 
z6J{SU{m_She(=Wt^v+-7@JK1~ewzO)qT+AgjvU5p-+Gc$-^Re4iqXJhTr)b4_w*E#8FbuUq~E7ii|s6@VIlLcttl)XXLDQQe6Ak;(6CIQi9QA9notA#vf~n?IjJ^M$kwNFzjw_N!(g7`|jbI#;%UWntl#Gj8cE|vH`Tm zZ)T>uyQRLhr@O29LP2VGcfoF~HBRtJXsci+E#Yr=7^WX){$|DTUypSBr#ClazyhGp z1hNtm=p|`VvXREc=rL{lqi+1c=FOWnZTk7ofBrKJZ@huaG1`z2`(&~t!?1)!nZNy8 z?g(C^bPnPRf1@MUjPAGB#9wqpo7OyZ2AohaJDZwc+tk_B(^}Jt?PmnNzk8V;@JDcP zy&|T_Y~RWHR#?F7Y`s_r{B;u-uASc8oDF|4`hx)O&%)m<3XIFPe{T0y# za?OnhzLYL_w=I1$vu(BD`~bg>)(g=8c6YbK$>{hw@`vt^O@G<=4>A+~HxB&<@VD6u ze+DS70W^@%8N!H>%wn9GB^jTNMq{^tV+f#yKM-wZhM)g>hDSz6eCDqUflDo{Z|wj8 zyLuW$7s&C4(GA95Zyo7-0l$v6ON>7QKS%y1t^9!y27kT1FewKAXJLVBYz`Ec1R6*P z#c~B}dkAJD7me{ovn{CGHvG-bn)n+Vp$i;NzMORxpD1fc?SNgCaXxI?^ znJS@g)|I!yy%b%5UweDoEV!4Vb%W zL0Aiqi`%ySt)IQVw}r3GrISsaWax+ZwYIiiJ>nOCIQ!6-U!?x8PwRh4<8tk0ZfpjM z96&Dw8s%Dx6&$a{Y~-}^C*Vw*KiI?rw|CDTxJMLj67{4c#74(%haZxGMXlFUwr~LU*SGcpew`4o z_ibydJ52cF-JgU1BkoV1{>u346Zqp=)Ivfa02)9@ie&}kP7msZ$jdrz$O> zP(u&#d%d7p2e7xcvkmSkBX^DgfIwh-TfUF{>5DJM@{5E&`1bPt51OB~ql5%z^xp-t zoKnEJgUa~RYvgQX^$x}-_P^Bw-uUwjkAVY(4%m18jy3{(H~<9vfWMv|XjPbogq*)) z{e5R4)KpjZU+eF0X=!O~fxfgAyx0hY{fPuT?N7lJ18{O=cnI!x9Jn@ct-lYwAXL5e zn3ccwQPefh=8A1)u5_+!m2<&PR|_;cuk4aDTd>U_%08dJ{3V1P%=nLq&|!uinP7cQEl@&=Cmu(DV_K~q>l8zguiU-{KL~YHjWO^ zcmC2&Tp!>7+Azi|sWd7?pHG=$&_`zRAU2}U^@3p~^@#(px<+VHa zA3j{0EX~Y}CwE~T#M@wS4dkt0-va~wk2pZx{}Bg>{*O999zfs^0lawa+R)6(!w1OU z()jK9MF@H2e)KQkPPMFJhn?u`zG zN=>x?75$%-zhQWwVRU4G0(jaq2s+gcx1d7c`rovQE5Kjh^`TMZ@Alfg_aA?-HU+Pz zz!$sK)m^!ArKGepD=RZA3+Ab0i3H}XcxzywHwWIiGi!9f*#DY3;71&Qgg*pupoQme z9Qo_-Za=esc24Mj?fIkKuRi^SfgS|S|3S=&KWV#|6G#2T??3+Me>n4}Juz=Sj<)Rt zr*lr50&SCh_kZXBhi&;IcR>sMU2f^<0sh7&X6Bbx*6$Pk;Em?w#e#4*6A9#f3pWM_ z2d_dnaiXj&GYg)8$CVR?q!%L5jNp=mKC@yZ{*K*2Upi=@c6J5*! 
zO8g(R&HOy@M}G~dV>T_jsIsA_|HkmxXpT^(l$l$fU zuC^10G6>T6CkI1SRq5fF82#Q;>;+DfU!Kw2ADjN7^MgS7!@k(r|G|TE)|O!@)aZ&o zr(yvyC$oDatG92TGX7-q56>_@QOIT>Jby9pdXCg1&^UTiVI=+N-t)IF1Aikor+~lJ zJNMsz^mu*Rkv|bYTrC+H8Kq$i0zA*t++0>xk%i3RF(xM92}cMpHOBz(;a^|)V_h)g zZ@P5L%*y*P{##jw@n2g@dHKl`CB;P<^r?{8gp?yq=gt`{;tvzCJD>@F+Tx2o{YCg7 zlo0=Cbc z#$Sy1`#UF3mR_&Uf`<)h$T*s|KQpVmqOD``_Uhex4<0?bXYfb6-|L$H(;T1xAmxt- z5ZxOI6g#L84VY$7sPjiUU~vG*I$-7jb^fMH(=~SINcrG?EY764EY17X* z{_tUsN&Ezqwxry%!G zUS3{)e(vc~40XeGb#nhFjsd*^NcrP?VH^am>kiO%1%?2)1s0+US}PB7aFJ+wYgc#g z;De>{^Jh=y=N8qRx!TZ5v}wQ5R=me>Zj3*eeUKafkilH42jdU=Us&|h{NJULH1L`v z6jhcKWOxh*$oP}85IBF*Vs(n-eELu&*hNN_A8E;%#U**bABNNv~O6!`qpsrzaaL3yY6e)YiB3-hTAaXkY7<+H`Xp=br7EnHd2F z>GXq5Kp1{Q%mdb!!Zd_D$&Hf>5;zYyG2VLWlr|PLj$rU7>43>45XA4%~B#<{=9WE*=KHJ}P86Lzu@@~=_ufLu>Nzd%gbk#1sZ{5uJ13$^!--sQ5Te345 zdkA3Bxf3PFi*oj`rqUZ*)}le+PgX3)pUmt-mB=pa6kL3!WEUSVIdM?q4;V^LPB?T9 z-o=6b&%$5M2;mQwU@l+0c!2gj2@Fn;=iM_)_h~ClRzfGUGxtDQ>7~YotH+P!C##3g9T9{`wpl7zC1j$8C)^|wF%_!9`9LU@YLlwV5U4p-|Ke-9>VQcV1L_HB>e z3@?45VMNn|dN~~b!5d#nqNAaX2{AESNXJe2I|Bd~7v=4v9)xRbt4j$Af7W6J{;Xyn zszi5j0Ul;a+Lv8~_!V>f!S{f^4d+a!l|Rog@`nx(*7$d&?Mnm%;|=~&&#c_j_&Wmp zWmFD7d-m+<)3vqUt>ir@6L3!xxe=hG@?`0W6DOb>KCo|J;pMJ2!l0SI;p0#UhM0Ys znVFugTcgbo0){mZ@O{e=1_st3z&%yJO!dwm9RNOt@BxG;Po6}Le)!>sA3^vSf(9V) zx1jNtsrkQ$H&5*q_}h2rP$KYjnc=5RKlFVL_w|k9SNT_K#l`4h8G|R!T`VO4791i3 z@{MhAM#16FQmn|I#q2|s*e)&zOiDjkNboB?rtydDVYu2pgNi{g^}n8xiD^0lM3vKW z^7e`RRa*H&{8oFTutBk5v4NqTKZFK)TnEB%@sI#lTTxT9f?<^eq!0OfyRdNG08ktN8uuV*4p0X$>IT+}0}ve`zSqFy z|0XMS7q)rx8*%Ry7H$Xp4EHDD?*>Zwt3NTSdd#>rvoZ*Vkup2BO~sW48kH2>GWDFlt2SZ}#Jl z=jKQcjIXx3DD=PgW_bR7kyTn6vw8EDDCYi#v1cHG^}pc%bpAlv-+#N(Mi@*;J9W0R z=`@T10l*`9?|IAw@~uW2I~e>qBPO~)6JmZC`MS&EN$H1=78Jk~q2%n@3nl6A;+HZZ zp{?y(2q@AQ6h8}pr^aD!L8ibJ$uLOFE~BxH2#3Tr%zW2f9U<}+#kaa z2z>Bhb+xK0Ha3=mw{D7j&(?b1;So0d;QQY`RP&2J zps>K-x8HvA&9cT|=7kGQmEZx7!8?vJcbeUi(Q+>N`{WZd)L4*FtoeZCFou~PRru?p z(b!EEOHRu_eB>D5S5Mza3C`338x9coqX4eMq9S}%aL54s%_D!m_?Nc}@P#nl-91BBv)|mj 
zW$QM$k3T6vUwUuwfj>qc^7qZ}e)qfEt%XGT%!{W_6&yWsI4?Id#RP=jc$5LZ{oj16 zj6WVg0WNLBbX0K{-;DZRlE`j&H~ZluM+?prXzUfXw*D6MK>joWzx^D)v2fF+x&O_% zIZbZQgbuh&pL8a~!Hgv#Avt+Z^~4N3P6l_}H)ka#$K+HFBYu=W=Kh>|U-W($NN;)X z9%YbtzwFxD+8Vf`C(}D>2lW*|$qI~i6BZ5uu>&R! zFzN;jNwIeC!?%RzuXKM#>b^bMH5KLMWo0#|$_jSw-jx)e4zIiix1XGhZ`@5X@MF_o z>i^(6G4#LG{}I=R-*1!|h)b-?g8UJxJU zhn_eOR-fbd?ae7Jtbk@JE-r^#Sl~G45kJBo!%sH(Fx;PjA5I+Z-e|6dCAb35mAcDy zb(b$)yiNyxs~UdV$0}^EE`v9`cI`@IBU~YFZf+6z6K{g1{E;cJ?f|2%YWE><{Cxuc zuf4bkCJFF3NkKuuG5G)Jp+m=0J)6slE6SkbJPK>-hY#n&i^fTSX%v$SJL#0XyhP-0 z628r!BYF^!KEx0C`wfJzmzF4lyV5dq4raf%OXdS90b~B&|MFpm4Sza;B3k+ug+<>K zBd(2!iF@_~7FlWO#PJt4FMj>?*Qfylkh(z72l*RsZ=XH8D?R;PrYL^G9e^0%68~sH zQNi(Y2+%miMMWM@Ze``EiZTc%%gakkVNtED?BXm=B3D;>qmVzR-WMkyn)@T(?>?Ef zlKI1(si~`nj}Kq#Y;A66s3-moXrly@{&#s9eqi!8{Mry)g=p{U9~_-oytQ0PX2ALc z#Iyn^ae(PJZr;Fk?YeQSC-lE$5oUfK7nIlvIecEf-rip3*_@A$i1c>DgK{l(@JkC* zc+dV#SQ{VVr@x6+H3y>hlsDwyQWoKlz(?pK?ho;U@YPp~3k9S}Qud|qe{T4Ka6FZQjitydW< zFtQHV1Rzd3Pf>8b_fXE*N{=DipZ!R2z>(7&9PC_i?CVTwTMZc}+{Wh00B6wQ( zoU%uKAEQsh@2jtV{p(*ZEEEt3J-herfs3JB`wAOeI*A5ibjoW`Z$2Vy`)z~_yY}w# zkS|%&rG+oQgcAn-2!J|)*ehTF{|Ed*fGfZq9kuVKzpE42g&G@e0nIW+f2E<)W*}S-td9VZKLbE?*|=(Hp>JlU-jmiY5Q(B_!Qjv1X&Yql&CSnK{;0s% z-{9At;RlWJ>tBEQ<(I#jpQjAMEgb@Y5(M-len&l#MVyVoL0A_3(>brcnH6`U#pXeChdfxmlB`2&}N-)L}waLt_r!XF190S(*Y%pntO90f|n$v_QFiW?Ej1o znDS?Gfb1%W&fiC5$e{7ZzE0X_{aG%7fhX`s++T9rO~xO{3H%xG(UBkNdcg^3^nD2} z^Tiim%*|2;@$Nv@wq%mSHq4*yjn-9fp=|dB3pweNP6-K(ji1BmeF0}l2*k4%2_)k{ zG8NJIBl5vFdYA43DSx~R)SDtt;{RsQgRCs~MA>>j`^ksV`QBMyFNHq85q^Woz-@34 zeq3nm=HL*d0d){yC^T>rZl;Rrg}HDQT>>P95S)!AXfuGi0~C5-gFpQYSw!d0#2#)E z|F?a2sBX+@5|$ z>D=BXlkY~nsMgT%{P}Y@F+4#+AfB=A1I2C_I)~3tG3b9W5Jl7Sk7S|)Wc+4nH6BGSWvollU^fpg&kq+2{3(`OwsR25~ z$@zuFTT#7htBgOR1Lhnc*8_`J;dlU}vVfl=g*&nd$bN7yhsG3 zS|Weg@9M4(rX7as6X}Dt`s}mMiZ<(R1Q65X79QLiaqYrVoRQ$ZU_NKErw~r&GdL#% z@N>Zj5(0@6)Tbh3HZ1T*`reb(H3EM`eW7ja`1@Gn5Aic!_a)xXd+$s6>x;Uzv^YP9 zuZ|{9Y)s)z4X{8A(_VZab`kDhnO|Oki!Gf0GxKLW2gjboWQ%}g3c>*hE*mH9p9tqj 
zHwb?+{IqXtY5%nYC{|K6BvAV1=RYIx>GUCde8)?AUeZ3;P}pG4iVQZeJ#A5OZ=_3T z25_dB(?Qwdn<)7xI_!0jn#n06HT9#h4(I4S&{AyG`tn#hix`nV|zzEDl*i0A}0C5$%pYT@-P7`yJ z4%oN`$32Ki0W>*41Q2Wb(MLFBKmfTjWKryYt@shKNN>?SGkXa6^8kMgKf)h3OneRyzRxG-WT{=pSW2c)erX_ zXty4$k_Eu!+bd*25U+gzZ)-?kPgL&;8ULA9VCaiDr~~w&2PQ*S8~#eH_)+@kXr=n` zA(VRm%kgJ%fCND9Mri9C#EC!iFG?`@ z;FFA&ySj;Ho^FaB`TLAHKYiS%&pPyJ$LG*KR`_`SWG+x|Y`={E4` z?|sq#jgJCYh~FK3b>`09J9lYe51}K6Da`%5cYsYc{pZUTE z@qV!MLc*E#wUOQ^GNyZoOLuUwj==K}a=}6&phk9svbA z;IeVi$U6R;oxPfGoVoxXY!LrPq!RdBSa?S0b8vp(8Dr$n)m~G0S&C zsFJC=#&`ku5{w0aAnS#3Ek^1Bzrq{O5kCyVJ>ZXNBJYY zuimw4*WH-=TRXG1Hgq`~JIMFJ;o+qbG{GD8p zLrmnJ)kOdFxk|7jAP7#75r_)_-(H0}g6dSJpI062}0kM~2+?sJ0M9N=8>3b00_jvk40 zv#}q3?%jK$t&QRb{AutZefWh(+0plDOD%d!aS^@IxHLT)1f$=GaRlo1;3B?NN#Bbg zh)xit(0w4t^XpF+HU7G*h2bBpzwlvxerfUc^4j{H`w!|po{OV>efWdG-SD30o~Fje zhK9?FX8drFwJ;C-5x;{3m`x4SshLZ#7s#uLl8pUD5eLBG@KK1>om9 z_TnQiu)!M2IkLP&F>9f^! zSLz$u;gQn*by%0Yv$lTs-a5X;cr#-VZ$l_uxHdEm41P-RBdc9p&)ZbUMqOMpa#5|V zxD+lLuu;aJ7sOa~CH@*}#?ay_X+)Wk|16xHCEYM1kaojgehOC?(Eo|>fvh)S00kY{ zqxT=&?Y?rQzUjini)YW))Kr}G?A&?qP*q!3Yb(5>>c&9%zN~%kRa}+f_ZaRej*{WW z-*Za*7NJj@*{-i2Zih!OCILPMpoSlQP!9Ei62l8NSw2vo|Jwdwk&(b$9p2&f+0&7cVZ02+z5gKf^sPIzm8J^gld^Kk;4Zn@TMs`#EO6>T*6Lv8PveiY zzS;fJu9EzveWyMJ!57#cq3luI5IlPEjc%j+TV0LXeX72`zI+$K@4FN~?e)Gt`AJ4Y zgY9Jyd`o^SJm{VU;kD}Sv8nZYEtQA%XYM5rx-kCkjaRXI(EzXna^-nx>Di|rK7Md- zZFzC=Lyb)Gq47t0;7?8TH5D10A!NpuDHcMb?NJ@6rv#d6X>7WvFCeIO!`KT$_yqZb zK<+u<6(#tQ%i|xy#a9!5d#e`={;<_jGqOs02S*w%R-ej_`96f#UfTsBvveyu9fLnM z_a(j++^Y<~wJY^?hoVT|i?|V|hXg{<@FV^2DsY$bcKVhrKlytRKK&KbK%o)f*OY(q znr<>82`%shCcOBn^;AVRz8pBdNb`Spr_b{I)u0MXOYcAZ_=89H*H@NqKf$ZyEO7iS z0)IqW>}fu4YBJgjV@~{q&}hpab%A0X`K{pBmm-MbpcAy?ugKtU4Rv2!tl^$N2mbDA z{;vp{EVHZ}-oG|A1NTMEPQ)2)3$Y)-jp?(qi(Of{=>NzS3A!>uCaKg1n%v)-2VNZ| z@JHd}0VMv<20P*SUN_<<=H~GQYYb}(TV3ohrAA0!!Y3+^7tMW)-&hFd`G-FIvtBNOb4fire}`TVnXcB4V8`Dp`7?yS zsAvSxYwyeapPZ9Wr5gP0KZ8FgclK;;?J+qFngd+<1kGB|`CH0|lw_~`$=d~_B<1On znj-Fvq+nBN+$Pr9Hn=~kT~0`+{Nd^5%lI>ffAVUj=hdCfJA}co+w%9yY04i0=mUOQ 
z{~IdOLj2l~+w#}M`d{u)-lF#W zz4GcmQvN7_4ofdU-^=0epCxCmNAqhxj3XG6!f^EU7_=j5>d^3i)UZ z-pRm8df|^wKc0U!D`^(HBnSRp`L4!aRP-Qs@lEReUJ`#2bqy9wM&bbPJme<8X8))2 zXUd7%*%li@BY#v$`x#YoG)mw#a{6I}OdNmMYb@4s1woYYr%!Gr^)e?p@dqn<%mWUh z`;$$5$-^Gt{#HZV)5=T#XE($M&bbPz5ki9g9CrQ7mJ55iu9*KUULcs^QS`N z+)*8kerV)WB{-akzaF(CPf$zRl(58=Om~(a;$AQY|dF zH*zXqA?A}23br#sx=;N1<^Fu&Pb2VztoPOM!>27@pxe_AMg1QL!wG1Xk25~|&o@1%#{x+X9{NFjmPvFnDVyz_L5p-18;rLJAjIx6D zoPHaP8d>}wUtL=x>_q%uG465r7QWp=9RJbjhkx$RSN>jkSpQ zL7I(>044n0CgVSu|APW(gBv9Yv5*?2h_MQ}F|xeEDHt2hCjb!K z?_G>y9b^Ce0&xlYp+x?uy0&Kv>Y@XD%qGCZ{XzeWovssq&c%{8vpZ&hMjOPKXO$n7 zLL}=&$^P!E1lb9H#OX=+qto9ad97?yBZ@z48QU{OV-t)3Khq{4l)o~>&){!)S(e{h zv9dfgEuftCCj-U0G?H(V%lbO|Kf<5C?#~lh{DHdm+Jdp^*~d?`1sICoW`RGvXBKWt z;O-e`r8yVN_L6-DkkcNqfGBb*7QUpilhD$z`_=kie6PGX{v*?mB8S{kQ>5`HJxL>R z_UV(S^gC+dFTQMu@V9w$0{76Ck@dyL99eL0baf(es0rb(5PwqjF!ma^&hbSde}22a zi1UB&peZkkDZaD$*+-wy35doYopj*%5BP(yyg4d9O5y{Yi*-DvYon?1!P|6kGTYh9e6{F5g&gJH2-{h{DgcfEVEpKF=Y3gug^j*|n9A7dp>-lDFIN=TI!Rnl%@ukoI(C)bh%U z4cKT?U9G<6gKmvWK0iF%(|N6wOz{pc;{cHFf8pwz3-=eX{K?e?7Mh=u1(?m7;id{$ z#HeNbZBE`Tqc=Pv`q-B_VQgzYz{4@BT!YqRZm~^23+q^k3KBi*e z?)~A;^C|2JV!m{*#nK)4ELz-sPKKEDRDxlY7Hm`BT`85lQ;Tz5$K&E*dUlw6ZEL88 z|>S9 zYq8o6!OUqOCaF+RjQ*TyN@mye#6|r%o?%bK2uRt1XE9Z z_PnWoVP;}tdI4XKc=y4>$LrI5yF7`BED!y(=H%=z#3&$UVgV|^RVNUm$e5{Ei7_T| z=dRYy>*Q5atESHs4L7(G#jmI*aZg%h?HIgP_bxn)pd zB*vcS&uhoWZ%$6nF5W@_*X~R9 zX&D9e)9_OUNAz6ip$IKrK$epWcx~s4VF<}C3;Ak`noqV{cL2=3f4|- zcXh-0bG7G7PnH#6ifCk~@6+d7LBA>a1r`YFe|#V^@Ckf842^RByfn8X3ywyuU|uvn z?p{E?6mvY1_#*+7Bc$DG8GnU&GEF{{2|jyM|4J9FES#UBXB zotrrtZbpatIySd^BYuO;s&U)yja(sOG{X23PcF>2+&8!ToG)6kh~qCM#X-W2XLP76 zI3p)(h>Sa(^b5dfb1D(YpM!uYP5^E=Rg8gUARyOHD1dMYo%X#EEB>RO0$G+Oqi31QElStDKq zORI&KQ9#Umo-f20L;)j~Ka)lxVqt9*5i_023o@FH>R#Ol=1xOjx{XUHH?C|r#pV?m;kfrP|pI9~Tg;{ZY|V)$bk$rRE73@@XUKX#rk z)EF`WQ?|Uo+m(ZUs4XH-6+#4f_KjIn;QA{z^ z$dqJkZb+kTecd;=`5e{mf)1@)u_}k?qQFs|c(A-2E_Y|cBUBJlWZ(h}qWmx$Uqee7b8W?rnZ{VZnW!!0>RqWvwF{IS0hbbd6jI85zC)cUrhwLfE&)p_>-Q> 
zOEm^h!0_fz(lRoH_!1Zy{HabjTnUU2{*((0Qvf4|Kji|$lEBE} zPji9WR0t;p{xO`d|Nf&{X>bANB(=Sv6^JbUpcCHEYOBhb6^JnY%%`WeH@E^3$KUo1 zF1V_mpg`pDw_On^wD88>h(!Jrfg77SzIKtxpK^hG8*k)zgz~3c;6{dyt6Ny}_fOo3 zu;_LcSc3Z8$O?ovf0BNd;Rl}^S$9=8sz8|YXN8R8&uUbyzR?s2d;ZK`PTy64XW@@GzBpaO^GF1~ z7fN7+@Ml0K;ZM0hpS}$Sd@qo|h~bYxCgabb)2D{`&$*KcKiGT`Iz$eC8f0?*nDIqO z8m7mv^7kSD{HJif|M{Op*xveQXnHY;+d?e3%?%TUcHaPXkZLWj3ZJR9!hKS?ut!M|xqMeLEP}5ZrVq=HA$CK32Tm#Ft zBu|i%B9A{O0D(UTI)fyszs0=3)|OYzQUQ5_Jad4lVb7nb`-M#h$k4R$2(&bJfNkn$ zcFW;y6Z|aLm)*miKP!HOKN~Mc`Li+lB3DTEv9@)5dl3W6DQoS(O7ttY?C|z!eiiM*&f(2pw2VInpv2AT{B0-#C9Um4%LQu} z$n;S6QD+leV_cxdYx&dU&VAfRD%*5Z3(tWD5j;2;}+O zFfNd_^>!8^pL`>QG%g&~w~ZjyC;a)70|-N_V z0jHfg+Y~<~vV=T|wKE)uMXjHqsDH>Aps9gG{7zvV=eiUQ0xf6e+ zho-ywQMP4QC_zgz4jXvPau`<$Z-n#x&t&}t*I=|@U5ep2LwUGOG3sa(cMZlC*ZR@X z+=e-d$6{$NkE>-j%33pAg1j1yyyMsrE7w|#D0yB#G*w%ax2 z>5{tmnG}Ki@h;G$MhpZ~lCkM%GoOw}l8bakCMT{MoudM9daIN242U zC3)Povn8Kawk7=qCrD1wzof|Ik9UFk?SLQ*LClUoN27nL+U)&;;dl{;vpG|rlOvTs zIe{8tasnNVJ`3uwEv`#rT41vxk?cEyQ{QdHmHsnb! 
z$cg+}Tp-V%#pqp0=RKTY3S*n|N&8?8cmBKpD0aec35c0pprg?TDIIqSW5%4S?s(3= z7{i`FZ!SXyI?e0w`P)eyQ~O2ARWN zk-%S&T%eUda}VvHqjysfIl+Sku{ifdc|U7!dqf6*-Ut*X!e-8xMCNp|&5S|#bl=RD zoKPD}+RI1?T$M=S&-YGPf*F-jV!))pHD)CQk&s~XN2=(5{n(kg|BM*^ytzO&50O)b z0?I8Kfwo4!4F0#UU5*$#gWu`g2?UatE5@H_1o7vS3$)Y5i9d9KcKQ5G_q&Nho556l zzYk{0@Bd~b@#ljJbP&i9F-N0Ap%RndZ5*l$lDhfpRb@r^mlj$4c_Yw2Z9=wpsEG2p zT!%CBxeVOVp&TDgk;b26Cma(S>mZAJqeG!`lV6P-?49LR(o_99oSw^VB;xq9cYz>L zECgGn+#79+bTIhU%)#8*p-$A~$HBao<#mS$0CKqigVg0GWoL- zNHx$9le<9oMkj%tZO)AwWUF&k?5$4Ku+Qb2B~tk_b;6n$3^Az-bZ_)cYWZGHjT<~$ zOh8jrhkD3Q%Js1zn{g%`b40vjewY$ zK=($yd^ZuHL2Wlk$-LD+Al+1q&%+;?i}Sm6=>a$>}5?3!i_2+DHN(3G5krVA%GNmKt>q5{$?SKw3{VW zOK@+rD^W<7wX8Kw$ii)Kdm5=+q82r5?qb#w-+a?uNv2#s3+MNb!}<%X!05rc7KnDj zpGF{V%L70rH(;uJqkWmWMb@;IG<|QIh=~Tj^V<}wCtC_M9Z=uV-$?lL#{p>Zfr1*2 z6aizx&fcsMwoQAu+>9*#%m9j_I8r3;`a-uPjcZEs zzdC#Y{^vI5;7ilu=3QKFM*x4k12oM;{1Roy4u{>{8yyN|CQ0)ONo+s=KCuY@a@g)I z)P`=@?(3f+68N(R(5!_MVX-@$7;|ql*TH66(!?yuCdUg*W;NkHgLL$W;7x9iB zd85dlK*;IRXs(0ZHrKYMs9la1mTsyKZU*bvk-?t{K%GFdO5Q25^N=K`E2Fs%w%d|c z4rWW+9@ncuRxz)ceB2>I__G1fEP@Xd*?2jc(~Z$w2it8~Geer~{x8fRt5~oO9x42p z0i-U_tddU@Ng-o7{WqHHV7o1A=8b?~tfXSLC*iLltoi%7aJKq7G>y;;AHp6u#h^h< zrd!MgpH{PbqqQuX6iG8jJbtm^&7S~@i9cM5F}Jj@6j=%6a{6gB*THsM#-HuZFVG;V zSMZJ==KP5)SO_%hMd;SC#yaDJ3MIzZI z{`@ieL5jJl{m2@+UCot(?I0d3{P`1DFaszU&0frlQ80_-;uhBk79(f#D{5+Mzw(A^ zXH%VUGl++Z1paswOaO|USiBgY(`w`yvmoZP(XYl58r!d&q1suf5#0#pp(27mo&^y= zf{meXG>-0Tj5?U0x*1;&ZQW1tXMcy{&p9fXay#CB8&fHU%&r?*0{`h~ zWFC^wV8peoMgkvW<%Z`c!(Z{|8-Ia=!n4E22|s)SA9ru$OWA;NO2T)@{E>yrqIb)1JX(#MxlvnfRrdp{pQi5Kyu;%ae ze__GU#j&}AV3ZN47wfmtsQ1Q+8MIkrL9bSQJKaD|e&ma_N zrA|)?Lg}U=!kj-VfbLoFIj6v(4qksX~9lU2)Jl#0TIC;BM?u|Jq!NkG^QGooIV<@4f66+bll95 zP*g2K_|rOJIe$K~;5S9Omy_l6%@~R%4YW)%cKUXp1VwI0~SRgzk;BV4GqCG1?XxdV>i& zeKRCSjv)T13$#@UAq#;KA+s$u5Tiq(p*88inIoY%Y(()#2((uzRG>KV7w6t+e?^38}%WN96IRyFdwm?pg4X)4?1M!M42|jkdW$YY<4Y+Xz+_B9K1^ z7wFjBff;Qf!(!bTgVn}FxJtCApn@-rVxdS%J3Dk;pVGKt5x+udQIJ3n?;pQxn${*(fxl?w}f^Rux 
zUQW;Hw~?#(i;S8nbJz~NT4BxKKk%17EOGtbXhS9UEclpH;^mB-{u+&XztETiZx&O) z6$@|v-g(DD9iPAm)KzlNg3mcEknuVFGwRiTu@MVoP6>flE!_EYCeRF^z=B!2AQ{b! z3B~$lG*|EiZ9Cy+wFOwI@aNAb7pSS^o&|q$ijbLd`e6*PY29YGGUlA(_BbS81n}qV z0@+NMoUMBn{L9HtH7q%OHU`wN?lU{nCb7CdR9sv{@aK~YWPLIJDL5ndpjdNq1-my| z+Evo__DPo9z9!fg>5s+7qeTXPzPUi2JS`4z=t8*%MV3=9*u7D&8H@PS{%^8Oet-IP z6o;LCI&h@$=hg*s07^k&BKmJ+07`TEWMs$IKOGudX43Qd7m*d^Ce5d#Mht&)7g%r} z!-WeM;BQA~Hwis7^mg_3_Ev4*p%dsH6q}qBGWW(fc43bd;V%U&pMRe|ALXs6LaHok ze7U>59slocYlDB?U2Sb$5WF5ag7}jVcpebz?d_vsVUdO<8g7wrdMg4*I$>L_@Qduu zh~?Vm^lG%^^_dK9NBcuNS$sXpTU(0<1_rKap}(J~yFkOd7CM~{gtmy{&)f-9{1|_I zRG~$IzuN|Y_ENd02*|cC)|)Y4{@#4k>EL!YANiyBar_Z^P5jMy12D4q6J4N&pTQpm zP~?vR7zikKElJW7oUD#UOO<>igWJvN*mgE9yF*mNkLM5NHS;&uYF`FcAkz5bTp+{G z3x7I*cI^>hdV&*Ujz-IY?VHlpVkyIi1lz(m1K@WZdSAZ(?Z+k?|Mh`D_!TwzzuOEzM-~DxI^<1&;6Q}g$N}JE)S+;DllQU_KY>5g7J`I7X8e$6Wc>A;{2y6r zs4-{Pw+yrXWdfih3jrGKAS1=v83}(TquzABMjR+}Fgce_@ssk`M*-BP zzrA#+%K^ZM^ndTX!{f(||41KU@qf$91VBd?0ydgB(%N1t*3Jb|v%rZn8CI*1+2;*w zIb6zHTaRDIz>oikvi<$A(nSEI{Pk-5_1FXWWBa#TY<$Z;6^EctXQd0^M9}p&kYBHFHt_L?{TM+0lym#{2_o0KgORufDy?b;^&pW zB?J)hBm4n?aXJi=cG7JE3)+r88byvsUL&W(Xz(W(o!GG0B&Xs_lBFCY0r7L>4*(?m z;rN^Ozwp=21BN$$Ter&GAHmOoziZS3iu|qc09qllfbvUu*dh-sySeRII{7pkdH(ns z87%k^vli-8ifzdRKWF~>$Sjyl|8)NB01S8jBmh$UIR01%tc_9df)RAVr2jSi--!=JVyY6P^{qP2ql8qCvJkZlO2B)KMQ}Fv^f65fMY-te>MPyJ%1J#Xp0|N z{{x}y_!9y228u0zHU#P@354jWT0}6K@iZAjx@p6ohMzb7dL{hH0gM3tL>H*xCvkr~ zf8YTv{?FtAMF1_JIPfQTfkp`&I7T9iaYi$LR{tpM9Mlate}LcM4GiY_hcW#n{;wAR zH2c4vZrKOOdT#GV1^v}1Q0F)A_XXQBJ()6g)QN=F>w`b6!B7j?>-idlA zF=_XXin6k@@>8cPD=SYzD7{h-p%j0+J9bAr3YM!6HSva+%-8w0?eZW(})!Jkf` zj$cpj`K|W6Sk5P<71=uH#8nAIex6D_;_VmWqD~~@$rH)yHj?j z#OIv5+TPY*m>9>_VvJVR>0pt!g90EcP55*;Du=GIS4d(ae_P`c^BOPI)f~&+u`_L7 zcK*@glNB`=&o?w&xp<+h;&8#x(21DX9SKRvo}C~#Q{3Z8*?F*zF$gzbT0LNR@%P`w z5dE$Z_F*va>#0Nk59FK|Dd}!!QO!SWU#H3{WzN6epCr@02YhO}qLG#Gc#PwR9KU+|cKfej&5TgL{34C0micd1)jz(j* z0Am7w+Y*y2uJ%lKH5SJ2d^ht-$ISA=q;MOe 
z@jRIg`26Q;1y_-*;C#yez&i&a7W^57!oF;43^bG+PyPTqmM2>YXHv84fF6p+~7`+&CE=1TmS|9x=bfSHhKQ&7m#$p$lt=k>G)*m zfRmCElZTPN$;siH6BFYQCZ}fyFP_azNdW%F=DNEsC2{>PX&)IV8h`$}K(W0Y_#=*2 z+?(IxtJM4-{)J!h1=+`W{^C5Is?OU_2O0~bf3$$-@OLeA8M)H78CF3U;phlzxwpi`qJ#=&9RZuapZ3}FFr9jIdMlZEWp_D zH#afBxeX3GBRaiN0W{&q@dwXfVK{#Lc){_G^Sg2=hPnp07D~h7>FAh*q{PDE;haMd z4(7nj_jof5I%Y;M7iA``EY4y`PEO9*X0)?i%;3*Y7bp}63l@QgQ1QnBxGmk|@tp5~ z(0#*`oO^z4ZRPe-?~(jNhaerp(9r3`Bv@N0;_+jbf6)QX&9!GQ)}JeaW}@wN!GMm7 zb#6oeUw!olKPcY6pNx8hKk4sfW*$DAUxMMxnKKtJUaZScJVF38_@j3OE#TvCmD{$( zCLweq&kQIraxMJoId^3iOKLfz%6Tleb&*RAh zN~Wi)2zm6+&~RCzGnJJ^B_)RsAI{9od@p;a(4Mey z5JK_~UVZh|4KNYey*ocY|Fk(2V}P&G@qOhj?9#4ITT@DWVq)Acx6#vz+YuWZ6LVT~ ze*j?T)@`xv!^4C=KlwAUAoAx|C(Hqd>$IkVIjb|Cdd|@CrvvDfzs8}FxNX}C5kCrG zQA~7fY#e#GH9j7`^(iSSMU7fB(Y@erck{W@GsWaTa35!A^mH+8@#4IUyo?{cL=1ON zPZQ^4{)6rs#t}_dTidRjKXJaWFf}zTt)OFkX8HcumBQRTN!zx(6AR!)Yk}hrJYYIZ zKP>#6_3r=l=GFi)0q6&T9AStb1TIC0`eqCve+)pc{8bE%lx`>9PxF68RQ&DRp&4Rh zw(rX+xY#tX^6j0jwo5tb>8Yuyg(EfR>N{E*&XQ3N_XnGs_*e4h<;{of@SB-I{Hn_9 zJ31O_kG$Jmuyr?FZR_j5HZXAg#*M+DhI|H|gulhgdI^8CB7d2({@2WjnLk$m62R;M zq-Ft0V?#HFl0OYVp1&Mf{|o#LYxvRrSL2U0#rBd0Y$Ir-hQ_9*=9a{*6|?Uiu4(A( zXsqey;KL~OHw?j-1)x2CGc(=YE%mM4U7bzm3sSqg3wC3zae_}mTLn958Gp0GF#R?8 zzgf!PI2Hr*k4?7x37*OVpzj3g5K)Hkub#w5n3+%0_$XnupGS6DnqBQ}b$@I=i}CYg&o$`X9Z&qX@nn0DlAr*DGR* z%=VQbeqEg{7Yl*EZsNkV)0>;K;SWZC7;XZ8+Vq3XKj6fP{~HJ4fWJ*n{BfXI`SW=m zqPII^$Ob^&tY_dNjz(_30HXvzul#v{zhPha(-vTALG;ukbuFC$U`PEG(FJnNjR?LJ z0C(HcH#6H-(+bPyot+)67mgx-?Qk+WeqQ(knN0lQ_fF@pw-@Hb4*rh^#XW#J5;{W| z(NpyZXV%m7h@(;8EAR*c=mUSy0S{0B9sM8WuM2@oJyPG=0RVROG>R^e;}4@7jK97* zaDD{8_SQ>`KLbB!{w9$>t^1|^5An0`XJLVBYz`E+1nN)-#c~B}I|ybylf-DY1$Eme z{u+lznFI8hzoM(H9TdRU`YU~;A;_N%!JnxT`et2uYY+Cl9Ubj$ZI_EQ|K}@z*!{BU zFX69*oU!5m`iTFtpdgT-(<$T6mr{b2-v1Rfw6#$HyI~sAq64V6HI2Ls|MzM>F17<>t)|NK*K_!_UM->garoKz zKRWuGLigtrf2J0-0YwU+cLH@N%*Fb4RGQtKS1&Z!5tIKr2-6N3e`6!VL)ZcP!5^%k zF#vm7>+4|jO@gV(L&+ap+4uoohJavN36G24v1QvEufM*hg{#h`lT97e`?a^XwYIig 
z#rZFc|9s^S-Jc_Wea!!1>v9cgZfs_h1L%!FyZ(`g9T-Xt0l{s?#sxxIUK@7e`7F7Da8_ds@5 zW>$Q3?DnYFqh7~we2W1faeidzM|$73*1E%lKOR3P|A&h&#`Htd|H9W7z6qJpTH6|} zXld+#0MyHs6w3<6eI#Z*tFzJAE#Mg8uNs_Q3F(n;$ocaS{%{FK&flqat&vCvOy?iK zUt1gS2YoOu>GrnPSE2t4J%8URE6|c;l~eWNd_t z0Db50Xd{IW=fJ>UPY*OJOhZD?-Jbt~M;||~*`~vX{LQv* zAvKzW@x|bExa0d8JZ9R{)rF(Owqri}Kf)h%fBN*-${!A2c>f1@XzkztiVc5GeXy5e zy_mgM2|OpqPk>SC|8)MY+xWkVA-tdbI=lP8JpW++kGenV|BhWEvybkcUVJPTu3ile z-)Nb90sP&Aa38`02=8Nf2;mWi#}GcKd6NUMac1^pDgrn$H3zp*%)*`XHz&s7zQ-H= zeLbz!8M83{6Z~KMDarK@!XI^iI)6Cm+{Dj+5kE738Vhp%sL=rMFa5k)E_HD9Py`yM zquSb zl?ge21Tx$9!PZWs1pe@U!SKK8;eXS^pXlL#=feN-%4XYQDcQDWa#TC3qtPytL%MJL z zbY>3Cqx1Kj@BHOoe)qfI{j0zF-uJ%u{qMtrT|fB2U;p(FfB3_{`I{g8=tuBu*Wdl! zkAM8*zyJH6KzQx7*M9obKmG4c{2_tPN42*)=Cscyx0w6ia<=|5&0YV6=+GEAz~NzA z{@@l=2weZ0R&fRR>l+vvMgDHD-FyG>2WwOCdJ258TV36iDp7xX9bHT zcxzz5HwRkyGj+fK;LlkX+&Du49B4N4cdf6x{mlN^Iide`;7{v*`MVD`ZOSKlorTl0 z_kZ$s;ZxB8{`|lG*FVMZXMgr*|Lwp1zYzXE2!HZ|{ z{_?%?N1o`nYawcULxYR}_I-4KaK@Sg1P^F9K;{9z|I=SM7E52-V0TdRP6u~J+gwtE z%>U6l^hE#HIC>Ku;1G3ya{lJ$ySw4`d;B$^j*i3wCo3C!`fm)uGq4M{R@U#o|M>CR zl$k%cr;Y+R2w{i>I*uA{Y;3&JSYBEVI1(TwK9VddAAnGSp$P&!@i;b${*O#RY&&2B zK;RD@;OxnLGiLsJyV_2lJx++$|Kz~kjd=5EjDGuR^lAFbGn)Hz$sas8XB+UTy2M1v zAN0T)fLaGE>48Zf{Mu{CpHs2GpOe|W(e^~7204GwDyIIoVg#ON)g2(mUrY=%NNRdc zj^0QllD;qP!XolFa&rp!TfKAt{YQ`2rw5q()BN9cdjQ#z$tVWk3i)b=7lc$~A#!+- z7I?;%1jG&^_yPRgweV-@fIoPmb-|3kxs$2O4{-cP{NJhavaHPg85#T7IVGl)T{%~j zk-@g{bNesJAei#U+#j3%YW|P*zh?iJMV|WQdNF8hdjP5N4eV!0FRS&iRsjDF0uBMG z2mJnP$e(?&gg<+;V_rw26f>Jdn9TfASpU0@{V%RZ%*=@XuVNV5T6chyzipk zZ7R>%m8`W=#?iF>(4AJabu8Xqy?gJ$qeu6w{89f0JunG^2Q&i+Y*E~9;)As$Xxmyk z!B68)UxCp(U~UEsJ+KC#&fnb0bmVUt`KzCpo|&DQMrVHhTv6eB+W8nC(NR%vZQAq( z#~*Wl+Vn%iPtyOs6}25QAKxoX0I)r6QK7~+aG>{>2p}8cSpkGO2%Ur=ff|3d#Zvxk z&Ge`?*f<((^2m}w(YQEIOiYcw{ zoNQ=n9-nM!YwzrAuWwmgUcGbg!NW(Z27l~t4GGirxq9bU^LqWtI#c1^Nn-!BqjoS zB>c?aPj`PN{)qqU^T6&n|F~3ALcra$>3rppcexoTZBH8(sPSiHqG54>YzWBaz#4x5 z;P-w){2%6)H9e$hN0nRM8|CH6Qsd*|6ZRLR!E^hGiSfm?xEiu@-iANVFg7!Ez&CIN 
zsPPAnq!TX*5PFOz%5Q^L?+lOL9G~pE-q_sOc&X@U?jdO0{QTU5r6)>DOY4T~>g4>1 zV?Y5w>fB5oQ1gV;4bm>Usp;-rn16@^KuHJug(%ZVadxzGRMo%9) zu;)N#YF6o$adJz>^xX$%-t~AkZ{E!FhqDiI;~#!!wCOJeRGWVg{`yZb_Rw~|vgCL{ z)=t(1YmF@@g&G-uUL7FwfE<4|0QzuL*hL18V@_EHXB|Fa-}-W+{U?QduC=vkU_Ek z)utdr%mdb!!ZZXPDZyX`5Z-Xna_STr3i1cIO9p_t1H>yJR{p>NKFQodv*D5|yz6Qo z{>v#ZFDKKPo}PM`lAqik2TL{kC+MlItkk9w{s@4y|2^AVU0RChk-$Csi=5&7x9?I~ zR1_R2`Yq--fxq)7N{$y73R;7cU;zfql<5 zj=!{v_m*iR-P{8g__L0jJym|PrM=?QZl?$k7C09k{NF^Og=2+!6WipHob1*PrL?)mq4@*_{oz4dFO`kB4KAr`|VF4 zdjDt^d7u@3hF@yLlPt<}T~}anHV?As-%L$Cbkm|LtG@b#2XH@cf0#r3Ao&g4}&c zY$T?&xv9NqD)Profw`+7bO@+l2Vs+tzxsQ9{O1E=7C?Fa{4<)WFWZbyN8R~ z$V2`#_BuN5{QJNE`#SM{7XETZCb0ts2Ur0Brsw7F6ZyMxPtITO!B0Phru`TKwlual zbbU{74gj7A7lmP%x3(~0On|SzI&|q)iXZ&l1p~*-c!;d8udNZ@;3CR3V6SF%brk@I zfbI(B>WlIs<`fXaFLA3_#5RJ_G)iO#I!sfA($a{%{ERPjQEiXCq~X z`(sz%Fcm{ST=XUU{VRk&{NWF)D}=%L)RPqrjc2e6E+{yfm+EmMknu-4U^)e6Jutlt z$L1h-A&xFYqX+!1nLjTSnLMP(pFc(!a6%f=cNFjg?^jlQBqN3Ry`qNo{{g|CKhMa- zH|RQ3JmFhaRjLIH-_I2 zsOH?K@J7F<3k&E0@t2H5|F?3xn(~KiMMV{yJh^GpTT#&sy8CnD5AgfL?|=XMZlkb}5*+!0-Op+A8G_n^)rg2!Dtl@CV_+>S|RLEN4^jwr-7$CebvQG@nR{0{2G( z#Si(@5|{%VyzoxN>CdyPdvxD3ax!x{Xc2Sfn>pFgnzCK70&h&Q`8I`J1Dzc1rZ z-r=Li3gD&ehY5RatN;41|MqXl9|iD#Ab&Tk<3CRm;g76AW5)PBS=oE>J#lmmCiUVi zt^XYdS4I3Ec4VuRKOgbK-txiVU|wE4%p|Du+qyL;JNsxw#i;_uAH{EFWo4P+N4wwk z^}!2AYip|uec;dR0D(UQ@RLs-;S}W4rw1nhK+4~?ccPLr@QpCt-91BH*>7#x8ozTF z&WiM4xIa4m*hJqw#Pj!r#}D}X=6ApQ-K|@N1j7B52ag=i%gxCU3DlcW<4?cYfbb`- zLK|0tjXwBKUXk&~yFdwIhS|MQuec==pRgx8=TQFPBPY_2Q1;qD7Z{mn{80c2e=q~d zL$}6-Q#av-igXF)Bmq!s0JtcXkeIyl(!>DVZV#b;Z=xqAr*akXL;m*r)cd0MBLVLm zy?<};JTgeUUp5Z)YO1TV7;b2I z|L%iL32AA^!T*hd{{#M@>&4!exIe`2cfbAZZ-2A2R8W92?#j-}J$PXMZec2-wIAb8 z1|V^O^iBlL1L^?&$^R+kkMnUd#B{TJqXj!HAwGV0)`6Tud8uhg&ym*FZ$S^8KjHw1 z3uOG|r@%Y4xp3+xyc8NPufh!ETrN_4q`0W4sIc%*c47i7i4>nbRZ&q=Qc;nWotvAN zfA|u`kN7`7+#kX3J{iB=Xs(8Lp%r+p)LpKtyL9Q|b*3I+5wv!-~o0W?h##tA^ZV=!{^VPBlwq*_e1^wzu*4mH^2G%>!n4?;LeQq zGBfsh5;$x~%V{Lw-Bxr2$j?FGRzQ6c!Vw5v@c;b_t{{;n0c_C(ThwYvrU`D1i3v%& 
z_h%j0yARomYia%F8~7sX{GkKX0Q}=0k-x^m_L1C_eX0960&|92;F1_zgPoY{zj*0# z9W2i_HZ`;~x3+pbxu+{n!NM&r-JUEfyGX+9Q|QS^|LX^S52*V?{;(UpJ2iFn@bTg6 zovqCc4VPDdGBWKz0O4m%36CqVu?)YowzRa^(l00-;p#>Xch zfUiami2R`gWUuPdmIUx)FnLqK@bJj!7+iaqoSvE<9MpD#m+nq+%P(n-^_|*nTN;1p z`v`pkeqVq6)mL9FEKmkL@9y23%uPkGxiu2r!0>in@Dm=U-}#fDYDGu5f7 zyA2PBBk<{Ilo(l8oydS~u*T4St zmtW4$A3KJ9aZ(aomeZS-i|T25BoL!hUW0o5P)5XWV?vx~*Ultluetd*;PokgCIE>G zBnWE!5pnZ&r>5#I5b1+J@ne4x!<-}&e~@>2dI|YkS?N6h&5BcBh96t{)91c8^VLRw zhWDfH5AnNGb!T$2d30>>MmPC2UGl4v=5drga9ef2d213>J8H5aH8ND;*G9}74KPT8N8K{{bdAN|X* zhfx`S(Eom-@yEO$^2g(+tvoaDXCC~K-uKRW)dY+qXxOtpM{58~Q%~{Q2|epUoIP2Ksbc{NM&8N27g4Zx*L=;iwMT z1!H1-W8?gn@C}1Ot_C1;f!ZJtXCVTC>)bQqWDVv(d3i1Z$nz(-!Rcu^f8a`1R(kg9 z3(wx%pS1VAvtEk)Wz523k>g~NNp2DxggX=f{AuC}=b?$`7p52C7w@C`IRE$mv-hU| zQ6y)+FE8)CbMFg(`kZ^unRD*kaeI2&_KZEf*lruU4Yt+LfFvXpVr!sBIC(e z@)zHFV#%x&yuYV1`9euo)K>SYP z#6%_V0TY&g$@di?conBkY52o|SRkq8kKs4ovQGAXiu-eozS7nUE`7Q8o1Q}dccm@u z+9)hC_<8`Uh4oKwFK*DmGb+~rI|$<=*C(gW0)O29sROX{5RCj&bm0I!Bk=l8Tm|d- z3&d%l&IjJNrS0!OSDg>!mDbmn!H0lN^J(O7 zT;dO>TG-?0d+@n~FrzP@{K58K#nhFaQ)#f-GJYLlV?8$lkx^U@U+0^FU!v-a5gk*tmD^zC(2t6&3LMk*oL` zkUAb}Sr{3CzuO?@ecby!WkWCGS6$`c$2;5Pj5vP=7JQAIKSNH-7?)a$hj4alc|ZZ6 zJP0!%_?xf4{w-gLIQ$I^)E+~}S6Ww#eo(`mNZP1be_R9q_l3ouSbHe^>D(WK2I+C! 
z-&A?emF|;i?Xb;^4;&zAjJfeKq>tYNhB0?~3Jq;ExT!w*3DZtZ`}NJ3b2Ip!2db?IWs`c zpKA(;?wHL09sbyZ2*@7-sN)aUA8!7vsUGr|Q&3#KDkmpBeV2tFGI$rq8!`6chO@lt z75Krr<4KJRKAAtAud+KkYNO4cE~jJEVjrMZL=dNd3_zR$qaZSXI0IB>1`4zV_%lHB~;mX>@?>T#hx{+`(N&{v^KrQ#fZ6Pz7*Z7w`uFdifjZSZ1P+`#-yHwle3(|3Usx zbd&ev&ab8hG7AS}f7%7c*BCKkacfe^@NXD_d>_ULpv>PsVW0_wOMGoqYCZkG_lzH-7fwgU$b@F14niubaLD{>z5w4nhb1khw(gis0dH z-b`z~F8GrGB`f&Zx)p@)?@?7Zm0OGrN3yuXCnqew< zx1ISv&Yv^_M?M7v{uqENe@qyaKM6ix{yfePmmb!x3()7l=j}A5v)w%!+^F*B21TVP zp0$?1R2X!!Mr(0((dv1YCMR0AEelRP+vfKR|M0P%o@k->rCv1WKIx}ur$Cyl^-5$piSz67Q<@aK#_ z08rwO(>E}H{E4^=KLdZT(Q+5}{yBZ}-Cm$?um(EB)1R_CN|4c8s|NomM zO>V;{tj0%2hsUOIswc=BALHH+(c@b)lXx2#{<~1R9DX&o@9bH_8idvE3!va{8lF5n zatzJ?!J#?I{BQQ`<;&|~iyr?z zEAqz{e`4LseIJ8Q%b(1 zn+7Lzef-*JTU+~i=#AF4(~V8dZJR@@8d_SK@zwDsU>Cot>g36zM=N@?_zgGac}8FJ z)(iT&$;PwiFR}*>_`VB!`ieO}G4e8Zi2E{En(*b}@Xts18zu;IU(2hilHP?7=`JAW z4;MJKfj>T*YfSB!-<%ZaCftH;hM}$%RLFgt1*N^OY#af9&9{~KP5kZzP)Hg ze#XbU(fvs)UmSd4xEmhsDCCnrwvN@Vebvzyx$52A(A3;A44w~s9a4uQFS)2y z&~2ORz*p_vyc~am2jOM@veegJ%d4*D?}OIv0(z%_Cr;jeG|^mhWFP!22;MBR;tC&e zrziI>=eq%@Z3g~81j3`Q9^Ae4>GkoE;rp`WoWEg*KfDDXmK{U2{8Zi_ayBWRP@|qd zf5gNWMrxxR4btAyYYu^Y9*F!Qfd>8_!DAs0QFd6$W;wrqTX}Nw8t*tv4tCpv-FLe?)KJLQ9@Y=u5I|@2#a%m`;$gs*F9gJIlHI=g5DG%}CTs%v z5mvzOf=srhE!%P8L}mFBnLhEtUZAk1Ms|SwQ5d{4R$suv8yMj9pOx*c*s*U<`HJ`O zc~A&>7JpNd2Ua@#WugiYZa@6u-kqCM6QkGe6-(V=@rT1N_&)(3TlCgSr(vf_hiWu| zMq{nM6mbI8MoLG4+FN>!`9N@iY$d|^8yv(x3L=LRxGT;a;KPTvP^pI?@&mX7%z%S`aM=MKP~wjR z*aqDF>W#T`=6wGf-}r{$PW^oCwYBhaGJHky>#9LI%IqD0SK;@=&rNGRDlBlT?gC?=!1V1y`9waU$x z;$>sF5Mc!V;V$!kTK)upO!bkGJ)@sL@Sx~G_d=;} zXU%#Qe~>dU&{VKNGx}OT`>=eVaDsg~=+R{UZ~)fu_p&wODiub3kWqoZIywWd3+cYx zalXB&BCnT!wZ=yea{jn<2U>aPe)u_efVaf&&(ZGU4{ySI;bR5)bT;@KUYGvg7>t1t zOGuMKp|blxAR=huuLd6rV&Oi@-qHe(xuGNA1g@#w#mVL0G+-z41w{BMwJzvo2r1uJTCD z9*e(;k;6Fu^RV!k&j8163`i!4zmcyPeRrl>CA$>$mY8^B3}8!*i&qp>ZD_dk00}hl zH^81tLja5AS}g~-d?2~Kt`bG5d7k;p<(Rzj}7B5;}1Pxzq_r7VBv|035&mz zCE^4Ff3pz49WbKKJE|Fi?G=a^eJ3aT52`3J8fB{uL|WnZuuTK^r$QIqb`GBIp2b 
z-<+EIqQptF_=8*C8zC_)e@4bkb+Vd*8`;FkX{|d31-p}+kk-z zBj!(6ES6@hGd81Du+v^OW&(ws75^qdi4uSKF(Gj7J~TxtBZFUXd}PO&1~0Z!i2^0x_23h)20Y?Uc=qPh)NZ52|YnZH>t{gCsQw&0?E?aQ~H z`Tp|@;qT|tg@~xo)}Q{tNg&nDs@@pAJA{omf?Hqm$QZvJ?{qOG!>t z7(FuJDS?cTDGYyu__I_9@)noVB7Mw)t~Xp=_**h6pT9;=@G$Yj7yWCk}&)s zegI$;s}G;`ugw8>f*Z_RFE4cbWt|}RjM6K3;23Kjefi+C+h0Y5OCSc#-V8?<`zWUbbV$ zfv%3OF5H0}WZrM``aqXGUO&eOpZ{?Ygn)`9;y@VD0p5NpjyQif>67?t2dl^ipW<98 z$%!V|s5C^>D(Z~h7~{o21N`|Le1flsh|c{*&YuRWZ{Z_@3*aw3Z~yq?S2Xs7C&~N? zbwxk9K6$r+1H6ClX$e1I_|4v~@HaN*;xD3NU3G~=UX3_%eMVr^A|@IX#3=H*nj#MU zn*?WZ{@Cfv{|$?r>~j!)rTTPGNeX}P^Mpf^zzS6UxVpwmMs-8r0PjDo;X-@(YsYtV zdHC}$mK9~HGe)BUG2xT-weo9|UAS^y@FqbqW%&5ZFe^{PAL944M5!(OK}zw5J!8IP zG@=81!ujL)$^Os8U-)8;m4tR3*n%9e7xxM*-td~*i7ApA~p{+K%W z(C+ZCTomK4L{5n$o^t#xS>kiqV0`lB7czf22XWHJ`P+U9!m+f>tk7al8R3fcWUv#0 z=d_E6cO*cQGSf4&E&ehPMfTL%u-vd(aDUORK0=`spTV{GqAhVu!yGTLFj+K7INu zYWJ6#{299qKw0wSE4Bdk@|Tr;yzSLlF9gM9l;>WH2Cz{kbE3jJSF{~vD;AF+K0%Wz|Zl2b8~Vk+wR=yZYax1 zpYIWm^@^#}5@ckEnd*$$X!^qNi*XB(v2gMF_6u#Tjg2Ar%f6wImp^cSrS4qek5iRD zy}C@>M^9h@41b)Oy*DAV4b~iE_2BF6(7D>c7~KgM1$uP{An*G9!71!hW6%$*4K6oj-IQ_ z!j%a4KiG;MVShR)BLfpkpFJxVmmI}sQv1I-8UY>oG`@-%>mNVO+27PqSHHdpT;PWN zxauv;Ov_l5xpn`k#xvz`l2^d+#qu-anbSc`)1bH*qq)-D6*Zkh;}@&57J+Kn{8k+2 zFMs{|yiK*wo|SqpK5R;|^XKUq2^6oo%vk&QX-Vr)-<96};j#Aia~C@PPvv6r4ssu|EWcv)LeZF;z9hG_rmA<0|52;a{_ZaLYZt)ZHP^Wj z9i{+(s(>y~G{$9uzdqm(UQq}D4m2KHy>e-;npe?JHMw9&kkN&hdMWWUMtLz=ilH~i(F#qF0LPaDwpC04&r-R`CuJ#TLkFq~kJo)M6 z+P(SjFL$z7F;7nUjKmw=h(!sC+*p-e#exUYwYdDB#+b8Y)utVFBa@Tt4c=2DSG%wD z4VFE7#%~PEMMV%#1^=gLgmSuqBLg?UouZ=3%2UlfL-^-7;YD57&mP}W@SZCd9=dZX zrzG6y9SRMMdeN7Q}CCD;9a#h#s+&Xb@d#7_H3^sNaV2;;m;9JK|~w~ zJ@|2HX(@~36`efW+TPOCP+ebBsOe8UNzKXg+y5rmsBMy{GtS1Su5@|cCzmJqA6C0L za%tbD&0DsVJ{u}^nihF1W%v^U+Hj~5u85juAB9$-**;Vw{yCXd&0rtJnp58-UMl=+ z*CR!O8-;$_Vm-$EgM0UumzQnbwqq^SZq+KaBjV_s1U{ae@S^ScAHQ5CgXzkX(5Oc& zOn3XT;A^xBj=zAW5-~lM_+tW^FBZg4E~B}<3HmZIM`qLrD-C8e=1MjGKtP{sO*W%X zdxNv!n-jmm2@%j(XlnUC<7FnLAwewo7W=$~N;&>=UAxGNpfk^?G|zqD=Zlsk$hgE$ 
zzZi_}YqO41kUt**bNv9EaYne;W#~1QoG4>rG%{RCD)MI*5D-fuBk*T#Z6b|kvi2x)^OklY0I)SJN{NVo7hbDT+4>wnjQ6 zY@@E$;6`0lke1~9c{P$0N_L~fpZ8Lphtc~DmeutTo0)f!yg6y{IS$gPmSQC7v1^F)`n*Avo4e{h3f7#VKS7%?m6G>L1hoX3~;7DV|9ECJ>%C&lOMr@R; zkm4!ApCf}tp=32Cj0KHG4iXxp<9LG`oeGp#%J3&N(kWyEm|R9Jf8siyt1)2$=FUy{ zL&^!PV6Ms&`(mj{&QTH?^@t^m18;Ko$qkGTd%6juy2awR#3Gty{m zZ}9F8o|9wmDCUX<6)>gvLycUz$cM>h^l5Lc_ecXq?7+3&w&C2*1 zp;B|sfX0;LZ>gU|$z@DHcQbKxTlv+UQxs=QOp*Qdi$3gE3|K)9P!9pcin)83ljZOL?Kj_z9TY{Aqf| zrbwqzav5#JbeHmijT+^-TV)b!T%aSpVoY-WyfQe~sp+-kGD?Vfuk%G4N}zKEPIas4kwX|0np zsrmb_6p~(nSrkYCQh*d71xNu>fD|AFNC8rS6d(mi0aAbzAO%PPQh*d71xNu>fD|AF zNC8rS6d(mi0aAbzAO%PPQh*d71xNu>fD|AFNC8rS6d(mi0aAbzAO%PPQh*d71xNu> zfD|AFNC8rS6d(mi0aAbzAO%PPQh*d71xNu>fD|AFNC8rS6d(mi0aAbzAO%PPQh*d7 z1xNu>fD|AFNC8rS6d(mi0aAbzAO%PPQh*d71xNu>fD|AFNC8rS6d(mi0aAbzAO%PP zQh*d71xNu>fD|AFNC8rS6d(mi0aAbzAO%PPQh*d71xNu>fD|AFNC8rS6d(mi0aAbz zAO%PPQh*d71xNu>fD|AFNC8rS6d(mi0aAbzAO%PPQh*d71xNu>fD|AFNC8rS6d(mi z0aAbzAO%PPQh*d71xNu>fD|AFNC8rS6d(mi0aAbzAO%PPQh*d71xNu>fD|AFNC8rS z6d(mi0aAbzAO%PPQh*d71xNu>fD|AFNC8rS6d(mi0aAbzAO%PPQh*d71xNu>fD|AF zNC8rS6d(mi0aAbzAO%PPQh*d71xNu>fD|AFNC8rS6d(mi0aAbzAO%PPQh*d71xNu> zfD|AFNC8rS6d(mi0aAbzAO%PPQh*d71xNu>fD|AFNC8rS6d(mi0aAbzAO%PPQh*d7 z1xNu>fD|AFNC8rS6d(mi0aAbzAO%PPQh*d71xNu>fD|AFNC8rS6d(mi0aAbzAO%PP zQh*d71xNu>fD|AFNC8rS6d(mi0aAbzAO%PPQh*d71xNu>fD|AFNC8rS6d(mi0aAbz zAO%PPQh*d71xNu>fD|AFNC8rS6d(mi0aAbzAO%PPQh*d71xNu>fD|AFNC8rS6d(mi z0aAbzAO%PPQh*d71xNu>fD|AFNC8rS6d(mi0aAbzAO%PPQh*d71xNu>fD|AFNC8rS z6d(mi0aAbzAO%PPQh*d71xNu>fD|AFNC8rS6d(mi0aAbzAO%PPQh*d71xNu>fD|AF zNC8rS6d(mi0aAbzAO%PPQh*d71xNu>fD|AFNC8rS6d(mi0aAbzAO%PPQh*d71xNu> zfD|AFNC8rS6d(mi0aAbzAO%PPQh*d71xSIvG6lXtA?X!JW@LXbk3y0w@B`v6$$K!l zn@{{DcmE}O>yw(lA11r{)c)k=Z$5Qkk}Htp{LLofvLeC4Z_JvV2bcZE-*;}Od0;j1tukdsl%V;0_RgmCIx<&%=dqP=vEqBfIEqf zXS4#T#UBj9Gg@tGoLhku=kjtYe=Z~S_e@hD75G!UoSQ$z zIMdXhf$e!4!}DubO7JIpIS+rb@%ibRoHa4{o-2VV!k+_~hCgzF5q%pR_?{zyDZ`(D zOvj%?XG9(G*Lji)KRA3II;0MN7G!$@7&X_3l`wVD{hCWP6c*|0Szd1bkBIavMxYEW7tgbiOpNql3U&0AY0sbuXECBs_ 
zYo_`?x%2(npD>%;08cz&wPL}^&!2+doH+~p3!4eGHmTO152 z$KRX$0{(R_+lcdoW^Bj9y-!xAH z^a=XRF{UOxf3D#d4t*d))665#)7>%lX_VPL)f*F{EI5+Wlbt^=evChZm*e~yjFHHd zkaN8Koseowg?g(urkIKdo}SeFz4fjJKhB@t%US&Cjlo5XEvK%xV=FPL+`8(`X;Bp& z!O6+Z-vS+f0zi$Mv-z7*1ZsLaf}V@lDVo}Slj2n`FDJ?Qn;RW~5`P*x{ITfgvOOxBRhy=dy=0&J%B(Z zgH4r6 z6}($95@#rnbf`vsjq0iKIFfpQ!2;>P9mTptTEKd(>`pnNyqG@&Hb~8uQv7)bVdY|d z_`_MCa&AHol&-e{Q&O!1Xoiyso#9F&PEJ++^aNUn=?U~TMl7h$iO++F=Us=-aKeT@-`>3dQf$xt_C;#hF>MfqNG1jeN1?{#tG!|PyrV+^mP{~3NTC9%!_q9d>-JAYvSR0rWX z3Sw>-=xdBXO5anGm@&Vm`(AS-#-!&joC{P5bTOv5Ik!zQMxv$PIewj(yf(ipj?9(< z{Kd-!dia~M04n!{UnsqiLFRK-D)1L47wF~BJwp5F7~T{|PO!2#78ksz@K+t~jMU&S z9D&M0*v*-c$eqrPg)t7F!MoX$6I$a*dl3n-tCA}GMLq~?Fyk_6444$V#-fBc5)yCz zSQDdfpSZH%pDDv%I2S0^A$rPCK)uBx&}fXx5Pc7uax63%qE44CAeOvRG0{X*h`)$j zph+7){?G-Q@EyepDT;F3_Q`NoCr;+#-HyXyl~MXA6bGMeG1i^qUz*$ zpR8|^of=hjcCMh2l;h9r0zslh5R6I%HyVrdF+|nP@jTh5P23bkWns$-dqN8GXK;Zi z1tMkyFt{zjIQPKQDn?0}C;|3k+`b6WHJ8-??$N`ZvXF^=pPXS7eq{<rv@S5XF*2$3XZdw*{A>$jTB`cALw`|`r=~1_iVGBaIEZNol#Jqh=hN5doE9MF z)4B081!$L)#it$8S%IR-&EGG7;TP;@qitm*io5^@H`?U~iTHG`WQuXJ7^GoR9;qQp zR5~SKG^zRf@uEc$5ol|pAm%18xX~^@NJQyS;{hvKwnhhJkcP>5det?LsUo~k; z$r^i@Bqe*G16>7y3Q{vAJ%8u`jQ}oU%aghGC2J6KTwrjcU4DRwu3t52?vgd9_$9fs zM}Mp-J^DhtIZ}i_1A&V;fVPvDJAa{Xol_H`@3z^6nz0utU2$C}-djg9kO49|;?Lddc{1zoQe+nor{%|YC-P4g$I(V%Hp{Ay2f{!|t`1iE#KoFav6C`QQ{mC@Y>^Rc(1&F4Npi?>zr z^vX%kpTa^o0HdHtCELYcG{z`Mad&kTSrd1vyHRi)$I6nQKZON1fQr%W#ljdBvq~-= zNuA&^O17w?uD*^cZ=z0iwFwX7SXCv!FW@+$?Yh!<6?%kPhk^LX25}XC!oUB}<=$JK_(U@x{7&P38q_{ROmH0~>C_acG z_C7u0YxK$YqD=xe&1YPjmQwu1!a}lw5?31R!Zg^4B>SAWM3S1nH-6>Oz|R+@2REi? 
zcxsXof*YkmW>i$b=KYLwGn1UZUwH@&&Vql=3?FkwU}KznGj7Fsxf3aItAZDl^!&LA z49-HhoMr$aXFy||doy0e3D`8B@v6cqCk6OZU0`q)!sawWhB<>7TS18A|J7b<3>o-x6nb`bV8>YMpu zQ@zwVDRFOEQuFubuS9Tk@$K$779>=Ta?a?CuHKjthc<2ANc`KE?EIzK z1X|hzXCaE5F38lJ(HP_3p|O|f#m-KNy>NF4$&( z=7}VreJQ}7?E;xR!C8ndrwo}VXT-(?^k~dQda$z-VlLWSKuYi@2*k?^&O)>~ou!5* zXM{#?hrIX{8#i|(5lu@G{;WY*&tHTrL`{+H<#ahCGbW--V=dE-oqZf@!MgmZ!k^^= zwf+zAur56~DEgdM!SOemN_-yaHgB#-Lh7D9^Ph4yxA4t0Ik3Gz-S~^baR3bx1~>A8 z4aLS{G!~h7g9|(RFd;Rk5P#eS8dXBb5@1BgjK#)c^eHs4CL1_+BoWo76n~6Bvr35q z#gD&?;70R>B-Y}_&N)s%y(z|@y&0&LDmV+iIWsb~wONV5jeeyj(B|-_kHxj>RO3%< z#o%PnD^ze6{B!zQGXopF?SC#Qdg_$a__ZVT__JN02EgDfgvse+&V*p>FJGfES7Hqc zX{LjCH6aE0^KpT`-5s0J2pJX|%owjej`59He)p1A#Nw4+jR79z~4@p4YiXpK(0 zpKHvAH;*aih9x(D@4VxoPEHU6+A0NSA>y1K$mE>S8SQ32--rh?zl2zumhAlb6X*s| zVZkk3oQ!V9lwzYYx*PbMj{R`++G1=}^79vw3$#=U&O$UfRmfa9qcFzUwL!Cc8S_sG zS`W*Y0{r>AK(P{LR~wv#=yJ+S4NuO9jWKm>@XY?SX{^Dk%E(9w{vvXLVl0+F6=xJ2 z6mL$c;NV72ziRs4oaD(JX@a@PXe<%dmKyv;<^pB%tT@173l$s`T~52;;6}S;A`(si zzb!IF{TWp)J|{<1ajNha)CEcaYC#bqMsE}VYI8)|qz%LAcHh`x7h%tS!A7j2oPsQf0G+I1mM37)COfmp|=VA1f$KQ4wf<65C0EkUV zIsR-Hc!99q!Ay=d`$kObNsaY zwQ&HgwB>hb|FCH>z!@5uN`^FKC5c>Le^I0Mj^g_w;lj;y}7iZ!`FZdN$)C&O!1 zGDm#iEk{b3Gq3DC26_I+lx=T^oh}9-=dab`uf+`D_sws&nE93;fUDC6z|V1iZvM`4 z0Qvsk7zE%C0rX`dcB2+RQLNTz`9C;E=0*Y`lBk?D^0;5gfZqil{t!TcpWx37U`q0b z_=V+f6ahs17=Hj@h7E(JpKP1Lf^}kqMwKI$*U70dI{Ya{KQ=rz&80+=U39 z0{|I+IRED3FZ^rrfXU6@ym>nJ$MEyv?=1I#Dt{9)fL_Qvp!`A~=IVpU?(Tb$&OY5n znLoKj4hs>)yoE-TVl0{A=g(goTLrV_pUs~Mz+~r710csw;!g~~)*Jpa&Em{`4-;DM13qNfa^3=;qJsAC;5idZ6bI@Vj^cgM0nqEPt8* zYXtz^{;#E3HwR1w{$v*@@ZwIc70JE42sscDD|ko!~XoGAAni{?W__Q5Em&Js{qfeak4s9+cJJ7|v%I*p zY|Gwdxyy1hiw>P?INe^Hl_Bq2=4_8&4IC^BquFYFII<_rbl)gASC$t0v7m9~Mxl7hp3I^ff zORon^F8=;e4GZ3L!b%Jdel3;g|6w1Xsd>YPxhqz#Kms{_@9ypFJbUhJduu~W`}uR7 zmwS2#23q&7eJ>~d3n<|osYm}A!i?T@PRB=TMPSb|tN%#O-rE0T8&She`#)T|eEHH` z@~x;S&md-*3C`^SfSuXJDT?6`2Q{rsh#hK`QY z3l^q>0}SOZTLJKuvDqJP|FIqb2G2A83sJ0_b;2l0EWY_*~=e#z#-fXXxfmo zbXh1TJ3Bjb!RgNGGrd=?T<+=V?&|95xjHb^b##0F^6W(=b=@Nu+mERH89?FuMe70; 
zCV(U5SUF&jDJjx{YWy$X{3XJ=>SbRvJfy`9}xuJl0Y?HjmwbYFh% zV&Jc9u(|12wlw~-{?UPA@fWQNRQub9KjwJVv*jbXNwa?RqaXh82S511n{U4H#v8A{ z{@QE*@-N^2{`bH4y?_4afBL8Ie)l`y`S!P8ef1yz@s(Fz`G;?P>sv3s{LO!s3o^%L z{xU+LiZf#m&s7&M$jDyaF#PcGgB#-`SHT5d>bTThdl2|rz8LTWi8IRDg!b0$4@Tf1 z1eHLWIw{iMH)8=*@ze1q;=xPINm)PM`iODr3*Cox5uKX)j3Bp&T`?$_(S~u*}~7_?;n6a zz%Nq%7E1uWvmmr~>*b-2x`Xo1q&BvXBBsL z7JUF=eG#mD%W7fL(SP}PNq+Xk$N+|%oSdThPCuJs4u4U)K&3#~u!ucy{@ygXKimIB z%bx_`{57Fa=x`&1<_n>m4TmQuC&orwKiu@e2at}Tqhn82Hta2w$oPrdzvuu52OA1U zP97?OZsPrQ#DPwVotcRMzVyS|%Vs^wKWpC4&)>Xx(^d?7_wGG<^l0U#tPdH0 z4uAZKpke&^+wS@E7iA-V02zeY5Xz9h{t7s9_}k#De+3;LP-On1BTy8HpSWY@6CyLt z`XLU#7Jk<7%iW(e{Ql!B{}45Q4uA``0b_!{P-r7i($`nP$m9QXbRNu3e|J9O=eR$? zA1*=$8`3vdLI)k)yL)%Z)~%a2Z_dxpf4^{v(w}g!9zxEmFTM2A3|NRPTefM_rakUZ ziUGbxWh-~qiCeoiZ8f=>Sy>ssn9naOBYn}Lg$wss?hgPwGjIN)hR#k#UzGg0SWx+k zY7mxy!+ly;!IISvMB+ORBAI;%C9X&9#Th_m;B%f&19YqkBqukC$%D+nD!HFA&3JeSOS1 zx&J|T4fBYaQ}w4$9Nu=gxOmm7)vLEO_Vkb6>^f1rVR`oax$i6j@D^A>;txFF8d!dK z_}drW|JmJb0pbEM3IZj<5I+b~iV}^?I7`CM?Eb+2@#){+N4WlAEwSc+VfowDajE=W z=KU=HSHi`A_g(0QMGN0uS+wP7&AEv`O*frBR)H?74I2FI>3TQME~cr{Qm8_el+Z z11f*{y7AY|iJQMb05ZVL0CKZ}q;p^!kv}&3dmR_Qw)<07zp(rj{6)O}kN}kVE7Fa> zz+b0@A0K}${zO;2yR{1Y2s){%x~8VKE^FSdf%i5as5;YFeV|=}52M`Q2n1ggfM)#q z`5uTzX8r)4;BOKkJ(246i?*= zFmeKIh&V&|ubm`Egjr6r+OowmA%uYect8W>o$@nFrFrsnzs^-OsCAA7vx z2)-Bq{|PF$8{&!#wCzCrn$FZ6Ee8IYnG3frZ*b7SAI$zRbOV3Z@jB|_ zzuA8LNuYT7i+CMk_q$-o0HAHQGe{9%qjX-3Q3haG{zAZCXC(Yt8!!h!^i>~L)|~+W z8&954U7*z6h~NtWaG8<5{(;j6>S6o*%$dgeBOf7u4RA3weqs0nnOywg_s-_8wG~{6 zkN=ZF2@ar*gv}5}>{L4J+VZ)0P_ z>C?wcEdLiNe>nV#3RZ#^tK zbo_N)>g>P)I12t?2TcIjQh%}%X5TEhx;&Bm!JUm)!Q)_X?SyA!rq7-K)|+oGualZ{ zY3_kIlxr|awMPT~3&=6{j$hwjgpzc%6juy>`7ba%Ee>H!Q#pk1)HSh-UE#I}5n zoe?5(I*ZH3IL6)nPtRXb$0f@FM$TXH$@=>G(+zA0Zf1^ES;3l_bb_GTJvYQ6bp*<1%e=KR>ykBz>k>nk@i{$%|8{2y+< zILi;s_zPcO_$K5pu=>{NMNenP1fX56rdUxho})3_S^bU9X)(tbfBV7dZDk|U1wDTu z#vg9M==uAk!RjP70Q2<+@OSz&@CRcsZt1qxpR7Rtmw5jEzO?j{U7r}k7WgAZj6M9P zx?TX-dhkp=+}p*;AFO>F8^8sg=If8}{PE$Jxj%dP8=gOh1q}(p7{CSEPEK2_ZPg(b 
zp)qFaqyljK-Tz@Y{;Dr^U1Br9$oc!In!|@{VBoK%1-ccMA&KX2@7d~o`{0L=?RDUQ zF~AM#ODtTzeE05w`8IsW-$4Cb)}mgRUtBy7Pkf(+pP9BaHR0^=^v4nUKgJ(-fA;d% z%O6f(Wd8?vSpDDwih)1BF*r=IVa(y11fEmkC&noEe>Q*T4gPOe2R=`JUOau^UVjMx z$K4ZER{U8k+`YQkd7-ZNIq-J_!c7RbAl$}q2f}9i&W4 zs}R5|R|nxSiUD|XzWYiKJok8^y{)Bwf8GGh{}lh%@QLRB2jh>sKbt?Cbk3I7zlfik zKZ^xDf86K*_{}KqmTMgxuZltASF16vZ%$K*e(9?JGxFC7&jMcRjDo+_+fFpMwx933 z+}qpNdleqT93CAV>g#&W{4oHLKL#M!8!jTP^^`5Q9x$7X-(^26b8_QVxEe+)8X|6p%taRR^dzhU^_?eKrt;cx8le@fy1 zn)Op-v7Bt9nbm5t`Wj7{e9|N1uL$_V6$l@GTlct}fX4r=-c?!K(A0YN;-#Lx!O`)_ z>6>@%OkOR|x1PdUkB`CNlygp$-|FzE3;$<3K!v}%_hbN(zk%(`&z|e(o49=w`5UqM zYi_7m>*8-n%bz^_D%&qMf3tn~gTc(Fdu;yx{_nr}&6i((`CH$DC!Jn-1%BA|>Z{-W z_IJMXo$r1Zo`iz?uHX9}JQn{i{{oK)yarDSyz$0=|3^RmkU;-h%~s!>=4^V4d;Hxg z_Fwv>`@azF=mH1W*=ght9zlg5jlZjRodEvY&UIWy{>CP6+`fBn@+!QZ0$=P_S$X2b z)~)5`1+W|~03z971)C-KXkg3_2YUE(4Zr~4Ps9*h-Om9$SL^2QY+G}~-n9dR%J}QU zpEdl-Pan*ly@~0y4=&H_|MdMLr=kP=$$$HA{~g2M{_WrXkN<%`^ugc!{N3Nd4}sY4 zf*6}fr^V+hul)BnekPaUjr;Lu{hZ#LiKY1FG~_WRyZoOYe~tt6`ak{@w8G!nzs{>LYP_ap#0f8YUYd%6D`8yW0lE;k4#AAZTv~(9|M4gERR33fxf>l|2X}ya{)UE{o8j?${A)msjaloq@2+lX zzt91{fgQd!F?I9y-Mf=l-Tc8bbsWHp5IR`k^Qg}1>gp5KJIi+hjtoeRk7S$5>mcmH zPy+#e@z`}4{U2L^7zbbnK;RD@;K25k{ciqRn@;c9SH_68|0#l}8}Z@Oh4$m83-<88 zJY%`Pfc(J^=Zpb=RhN~;`EvlY24Kwy%*No?Uq}A@iUt1s%)yPu3(*?%{6Vj{#@}6+ z;P(e)He|C4O@0@Udmj63%22gC7 zT*d%gAz!udf{jm(85O4~}J>V;^BY);%8GmN8Z(d)c7BfR4EM|V8?Ejs|@fY_a`ukP? 
zx2qHS+IE1Pzxl2|zf*9syk=+7(j2Ro@;+L<7KYPZryEDcu77&t)@PsH@bbs~AB?~( zC?3!aAh5-8>&73}vS1xs=?p)MKYItp9)P74FpR(!fHr@F+t(m}I*BVGD*W zfi3kfZ>`X}5UP607+ z{_tr-2%Nu5%>OYD=mn7RCwoEL|M~C-D`4P{1Bm?f!*5gIcTo#BZru0*=#jZ*|D~Ru zt0Tj0Fq>_HUFf<~o1AM4Wn}?+Ed1Ev&vt(<{+R!33&H8Q{y4UED+71-?8Cc1d{0_| z^8Pfiz>Pm<3k{D06jMO42DbPE0RQ1N=KnCauInMqs#R|dZq%2jOU=y4T)cM6YWUrL zR#s-|LEH_QIBeiA)QQ~;1Mmf$0b2aQPtuu}1PDV;7i~HXuiojr+}+dLbiTT_y82kj zM;ktX&fTlp-ot2e({se&<0J(E>c|gk(ayQ6_=$e{OKZW&&Isw!S!1wRx z!z#FZ&mO1YR&34brsmd;$$41u4NVDJJ6Z@8%Yj@O#p2N2sc37w?Mc9yq`apXTvR3c-PfR{I6)|&Yf&I)6#MhmgL*lX24d>+AI9h zCMFK9V*KGxBCPRuU+ezza{hgRf2H64;wiZPZ9BF)Ee$GKFh@8};P3FZt!1Sp8&>e{ zwfj@o-Q4&VY_(Veuvi1L86Y@77=gd*>;fWxI0=;b(-mv`KP=zNM+8k`r+9NloU)<- zUQ$9o=bhBO9B%Lze7NtE zo!jdgst)b@Xw!OlI}`?Wq%JCI?(4gBi37-oU*Y~>81C#WgP+l@T9lmwFV*9ih=19N zw-y#39`fKP@^fdu>Echl2Zy&ntO5A`{dF4;b>L0HGr0|8Uqbi_!b1p;9zA;e_%Xi3 z^xT?vfxmQ%znf#bm!tdR9uVh$OV{Krnms$$#2<{m7qI+}#u9A(0>R+n!xap`;tyaV zsF2`Afs?@b<0}wp6PoQpa{$FI%=b8f-*U!ZEI~sBFPQ)9F$Xt#iqa>%U$h1A+qy~c zhi?yq09wJL9|#}J?cF7R;0QTUk9mD)IO6?(5Krk0vK4+t%Z2 zP9g=)OV%cdKYItpI{?EB@c#V=5I%>nuH%a@BmlALz#jl;@n?;{H*V}v z`Fn3;9=f^XHhlkIz%5%gtjrcOF{{sA{l!y} zKWPap-38%OK>I!j+z4Sq@VEZOp8rLFmV8| z1UG#dfBz2Q&wu{&^$Esc=Bn+xs;c+m5WHo}M;lj#{0J2Mu>qJbfyD^SZ^MZ-2;PX} z8_@{h%WnR{P~`HEDu2-!b-*cUNZ&_*A9%kVr61xbh&ovLiHh+Eq@?99~RxopcFMr?V|DYTN0~hlyGp|+pSI#RX^QdeL z6VF_>W)s7&WM{?h4Qp}CEpC|nkH7roFUTJNXz+iq0KRHDz@57=MT#@CV`6_3ITCu$|4pn>TOK0v25ji{@?FY2f}?;P@eb zR)TPV7mvKNYtNp&;Qu1og2p`<;Q-fleQp8R)KmdmEmd*bb_B6v>wXbht ztl*cp^qBL`{2X}QWMSb-+(|C9@e|8m_Us=rivF(`$6thx;rI2Q{`99m{&9TFV({qh zPZ)q7tThtId-yGQ*`pYN#TpoH!-@NF4u9wY5y1cFZ@hr11bQfv&B2X+{AFgY%==*D z=8rz!0xxCX%-B19{onunKmG&x;{g6Iu-FMntAcMr+@-IwtL{{HxfKm6g^wPFV0+TH6v+`Mr^QJzYm-HjH1_QM8j z6Pk~}ViU%>6YPw^e=|$RpX>rPh&krqM!VsjNao_@g+(81+Wg_RH6L>JPJ=EmGqL#N z05bky1+o#{niM|ih8HUGEtu^LK&u1brqtrBoF&JuoP)>hA)H*16{H={> z^hNK-0zNr<^Tx%)$RP86g*eqauz!Dnzzxvj!AcPLO`fkidT7s%jVEibsyi_10Q{M< z1m;VSbr%_b17Ci|*TBHv!$%_kV9kd0>kA7C@?o_Av$d_ID^|R>X6Y}tl*2vgyuA0{ 
ze}B#DHLL3#|0louVD{qGtINRuT?YRL{K3$Rqc3xRh~FQ6|NGzn+vw<)EhyvC!h#L! z*R5TqEJdvT6a47_WDbx&iC}p^8^FK$FIxU2AE!giHU~F)u(J{}GnW;tEBavLs?|u( zhxPS;0zGX0m;+=kQ1G`Y7v8BYg-^QSrOdJHXy|xmX?&16c>L`n6((TL`wI4 zvTN7Yt-E#=6mHnCant5w96#p&qHuo*Lm;KaGcQ#Cbpb@ivu)P@~^T@G-axDWRk--KcO0f3!{4;^Co@7MurwNkj? z;$5qAvQ8a8a&X_S0~NbqC0|m~&2CIG2(9HW8-I&R3+AjkwRI;xI&+`F$LK@vhx`G4 zzyG&?`?ufyZghk*xFqlW{JfQ+#S%8G=PVNNX)8Vhl-D5O0qsSIL?8^o|M$fFm zuvHTrajPepCj>QSEzVxHwqV_gmB?O3UHu>b2wy~-KXiZ=fPeW5@>gBlaA`yC%2g{R z0*gB9;FcKNgT2z*e)QP!O4y#QuBoc4t*;M-HtgB`32fZr*6sEkJC3q2@DN5aHvUF| z-!1O`kUtzoKfQYO)aJ6z^JnU7tE!Gq0A+01fdImvHDx?bz`;2DrM1z~k-D~vJy*vj zCa3rL^5-}}dj@#1va+Y=3Iy=g=mC*Gbb#VjUDlQWeq79%y`{7B(&a9=_tM*UweR9Z z>m+#T?p0~~Wp(w*GuC5U7Jumb7<~$Uzx&;9fBW0vVa{Oay%j5Rq@@UUw?)ER7}z_2 ze*9y28_-XF0`CNZA)YVCf5rnoO!D^IKWE`hJEs>VUy3+^-b_}C)FkgY2IJzaB}>;V zU6#YxtAj5edrii3F3qa-q8G;snOx%sjR;{vKAkqhc<0t+_42!Z| z{6XHnzER|FVxn~&bSo}>1%6`d&tCiD%Ga9xIo^-EKg4gkV!F4t_Hx(73(f4W>9W5n z$^IE0fY#ug^a>7vm+Q};ztC~g;?J-Gc7BJXj=(m6NFeyX9)2I>8eXu28OAPt(5NQx z2M8tp7=BqTmpFQyKgIXC==&|h?_dA**T4Stvq8=vPN}UP7oQFQQY;3eAAjHirAj=} zsEms zLpS5z`jfU^q{WBr{uqAK<3q}e=Y#AL2LoF z=)N=XN;5y^`;b2HejLAN&z?PfI`Gj);0A3z@Xv}ELvW)%V%fn9mUAu!`jEYvk>CFI zcMu!^aR|0tpiLl05a)j);2Jo0Z9yIzggJfuUx_`8>iC24_e+aE;r)=mXh+}asfsHw z<1Fn2+N9fboiGvYxO9mLD^QR8L>>eEFj}5`t5_<-y4#CJD z0(eab;e1|P=EP<8FwXefv~%ZsIkPwIsuA?T4?FLn+HBQ}z7Ki<)*S}=o<4o@+*4i7-;oP5g^qrn6NB;5#;AfFNY>|og=sGUK6AA!+ zA9IC6(8WW;eIxJ}@6*~O|Ht@q?g2{x+Iuj10M~cpQsN4-NiVkGq zm#safr>D^WU1>|ZHVTUjz8-*TVg1wFiyL(CjLJ2@4#N1z^~tHTz#sR2>HzFK1S9_x zT{u9`2)w=%SHXJz`1q@#kNdw}moM+Z{>j-J_>38Z zsw(!)e*%x*JbvV$!3r~>65JR8Vx~$NoDmvp;j85UD10E!0&yCs^MUtmY5V)nRp$eF zrSZ*1m-xe}7WO#$9(?Y=y6-F29`3CdKKX;~y^5(TJ*U!Ovt|4`!p3@T z1R|rj9KOys1HlUPO-;3@wM|YMY(E+%=o9{zw#@`OcpRJJ+X#yN}-Mn>z zQL%CF-hGGaDk>`A^&?mDH6V37)Uq%#0)Mwb%=@_ad&-7h#IL%_!H;*g$r*9}3@rE> zIe&(nmN72177yX<*7AS?KzR^mKJYhRfBjp&5^?w&7^pplj<2+?7X6@xJCU?ev;MdS z{_hKmKe6^u_|v&R1`X2VxWB3Lo-5ra)7oL186P-6(in5&V@Myr2MlBG^b{J}7=IP4 
z0Kp#{fNx0vGWgXI*yRC%KL((QKZQE$0Nfq&cVuXAH|u!xf6Vy-e;3EZnpZdRV`DGS z$7X(ytE(mWAZk&hXCb)JE?BKi90e~}le1))69_}F44~xz8GrU5{AZcJGi(C5{m?;N zfe6?%pL+ga<7Irj8OC2NelGWC8hye4^<3#{O=|^bHx4ciZ@Mu8uTQ{8I}L8`)~%bj zZr-^0=@eiI=Yjuo_;Y4}oIlqT5Zy7G0XqD#2N94z1W?Bxu0P!TSyMgaFQ=fmd{s_P zdipL4KVak^xL<;E#YyCEs$v=)AUl_1%{F&%myth-Z~qj|*#uMpT-OEs0f1iq20E6R z=;Qv+?whU5`SE{{KNQ{M{kZe1se#PG0ok8+f$=p)j9A>7R5JVDY3S>w?|}cZ zA-aRmfj?v}5xgRJxSKcATCWTKBmj9GDu1rqa0-C%JRk$m%^&!^jt^YU&+>iv!Tg`# zWt+kuqfdt)u6uFlW$>}QHhDZE%HyALGs^shLCo`=P@3dwkuzVwvs}iK5osB$^?+ah z`JSY2H2gs}XJ-sz$lvFmbN+DYfr}4VJ2V#R79WPK7oZQ8!l?X7@cHuRaelb;uy$R5J_kN; zrzxH7?$O{zl|MHqDn;?EwFI`J#U#)*1hYBd*GB&EuUGP5-Qg97eU0eRPsA$bsR=>Y+JVFe$5 z^#C#l=$ZoRhTt#Q^U!Yo_$9&{4))n2u!^72{|(>eBd@aM4D{j5&kY|VN_HDA#8ei7 z8(sXV#d?gYIhI=C(n?0x)U8_rW9&jv?R^FbqIEU}0o%V0f$<9*eGM=KM(` zFkb{a0J1NEX$}0jcphK!f1JL70pw4_UHBRJgN>HExcATLlkfHdeSDbJwt*CN<1ZUFAUUc(m~+&(Gs=eWRY=>62S z$Gy82e>Q&np;N!ncWQDQK4CRJIyyWyjZ-~A-uM{zeuy65nwiAg!0_LN(&g~0xqWBP z8rC4Jc3%Jmf79^f;gO@Y@K7PX%OC!l)AjY=&^K6P_o$rBiCcZ;B*?LtjR6p|y|Ua8 zJ1)?&6Xb|6s-K7;_klKnfBe%u7*%k9@$mN;ev|X)(ZjD`Ujkkb4-Y~ge{VI0P-sbB z!N!A)r}3xc=g+qnt;o;#csIH~Y2}N9FAR6X!yScu^2gS(+O@Ab`XX1on;V*%TZX~& zfv-dAaO6ez#}45iYa6<4a~=4qy_=WgPw*hT%wLxJ+G}~$)%<v_irmt zPF~}kbfq9Od)3BIYWw;Iu1$RT8N$JCd$9X%cZV7Z+1kVU!5aeTjJ~*Qhf_QZxblF2 z_)W6gS06$_$lioaAV0zi_+606wzOqCPMoMLUn0{dUf2s1*3`%jkUt88cgE@qSa<^i zoc^=2y%jt5?I~aJ9zG8WA__G0Ad30*r;?H_tSzf``{TI(x9ooOOcozOr@RiqJUkYLK`Uod~5`}#yPF8*Z zcYqmi@DDCKzynJBaRA$ZyI;LAcg~#ef8!h95ZtMsuf4VwUQULuNPb;4NJp8y1Mn*R ze)zd*ZB11^ys=vFH$Hj*-amu^HBUzsFbD@Yb_a%3JIMTfWw}12ueQWKg+=`MLwBT} zEg!{XG#88zC8}1r`BJ=W3>PAdz(3q&{!h!F0FbFZGO`C+$-5&IT6=s_@CQGL%iq3Z zXKfw4^d24*9q3*t_3f-#ui_7K1_qi6HfTm)>t`R94-`(YF9$uE%pVTG8vb6kMqH)B zs1GtK@K;A?;B_J0mpjh4H&x{I@~_tT=t0gOm+n9-58V$x=MM0e`29KBUHsuqcrSdc zAfL_#f5Yq2{~LobFk%U5QYch*9|%MQP5jm1V?iw3N7-9i;4wFJ1f0ONl_S>p+mlyf zy?^a$6JK~JKI0j449=Q3NHR~I1zA3pshTnK)(^Btso7)kH!*S;=YJj+9`hOC*o^_nB=I-$6{GLYRI6l{qTUh{Z;SzK zsd4d&f~pM-mmVO2CjJK4b7=@*v0SU=0GAve8sko)=fvrj@}2CLuy5-z2;qqb;3XSJ 
zZt3@5T=-4)@p6ze2ngM(YW`kp{x>naceRd{YRdsmKa`X~{>n%2 zF{=^e@6H`qu9*1K6)K9-@GeZ;0XRplm~T$kxltYZ797Y36di(Jyfrk8{r<6G{AK*1 z2kdvZ6%i~vF)?BBcd|sBVBl{S0=NT4)OklWL$JL95u@+qWPiMvil!Wgzno&u-{9Er z;g#arluye6j(;ho+Wd{Qv{crHT5-~1Gz8=D%htbQ1w3>3 zvo2`kCMJh{c}@f!;O(1JQ(u%gX%>HQ%X=dvhUL%5n5j-yQ*a}jI619#$Dm+$l5-rq zR#Q$$7yN}nn{@oa@|Oj#`?K2hA-p~rg44>J93y`(?deDV#{qQjzqm)=7w`Y0 zdWJi|sRuRosciiL<8Q?L>59eDjCIClvK4i_{ob8w(&CUc)!WXfoR6fc%3VazxD;$lGrI$ggd~=ds_ZB z!Aar$AC~Pd7LSc}#%8n&*3~JDdYj!Oty#1;DkEfa@i*k+uja{P_RAP~iKneXN;LB~ z>!lxZ{?Zm))USQ{_A}pqej)t*T)Geu720}8sbLOq`m;fCCf|Pn{|B?a$obO&Xsi>f z3T|}rI!%^hqF^b>i3+1f20SH@5i*70ZxDZ$3PIlDa$2O1`S3UEWs5%spn2A@=o*Z|Lzv%yfzi5VE^ZLV@qrJ)z zTqLyg~Kg16JjAHfSv;MU?;7)LZdF$ncj=!uEFw9dipa_MyIhXrd~{NquN}RTtyz5asvlhpNU>lkYueevMV=YztJ;P5-Ud=y_0 z!qy+^_{$0sr)qk|DK=5Y_);W&n=~5P7pl7$fjjem4eSi|%vy_55c7Ufy1z*In>Fj3 ze+Ms;efOP(OUlc3>^RWX(ba`Jkb}(oO#Ny2MoX2+ZF!C#$5bGRIIBmamcF?N3PEZ zj9SD*gMt`EURP7Zp?{O$EY2T0o%z3Ek&}H6!mm`H4k}6E4}P9-ND^3q${$zPc*&@4 z2pr)3r!`z?4}b0WjxG;>{>8GQOm)U+G$1B?vc6V+ZL$kj&I{foD5eY_e;H=wY4}6@ zo|Y)Jg+E9s{;+4vmyAYqfKNDo96#CrnfMD|tg+It4f4xrzL;1j3f?5nUju8hi9h4@ z7!0Y!ANVT6b+TkIK6&`y@gVCy4}V!z7n%Q?JzM&tWma+FiZ$07Lryb)%qRQQ>6a6} zP3D~bM&|!a{FR{ltMw^bYf3f#v==EQ);#+BtEVphfI3|NW}bpqRgbj;ep%U4ju#gV z4vKHiU>AhnNzNZr2OruU9+rz@{FTTlk;GGuza>k2E*p$bzWhSwFXtdm`Z#~vPeC}A zmYEe=>?tE$v7QWeLhzh+5%G=$Xi{c+X12v&2BOHGS{s%dRtxSg+SNxWl!E;EsJmpz zlG;aKu>~+6esg60GV`|{syxuTHI$Pf3(Mk=)01W=B*+*F`P3PcG5U+iSO`CIErmZc zRb1@wH)1OQk-?`=pGEEdQjwhPG#GjJKYUsIqCB~;;~*aby|Xq3^7xkF&j-^7=AHs0WuaYUf+J9t+lZ+1b^8# z6!P*1?yuCHEBtY)@~2mqY5V92EP&yUQ?vIbWVXSYV{ClW?AdcNvn$&kOrEbV%g*uU zwh;3gP~rp`MZtz%)PrK39D&h5w*F$$7iJYTb@iQ9`D4F>XFP^VpFIuW{*rtAoula) zA)i)VW*mM33lNLGOt$Fl0RGZ47iCnoeLmb#^MU>6%%tMO7t79&a)@Z~oPs~?rNqOi zE!fcB+|c^kuEEiBby>I)0sjYE(If0nCuL+{Lg}+-<>Hc~_)Kd5H%B9&BcH}sF=PGX zr#btZ8tUrT7l8}hupd{wg_&s?i!!(FKh=1q98U5I7`|A3Mm%#mh-n%W7h^P6n!BQ= zb7=fxb=D$KEt}tpBO#a~myAK>#k)D-P zSh929zM`U{ef#!RG+e$sG&FEv=-D&r53wrw{0@?yKevF2d|KnSCr@kIhWdMZ`-k9- 
zl#^38Zr>fhbb5I%O#9@ZswP_?{#=X-V(vQQW^`SPJ(=ZKtX?P@bJLgPmey4Dj`2Tg zHQL|Z1%K@V_`BvhH=@H7;7=9M1&YSFOz_tS{J|><0l6(1j}92n;>G#eYZ(tY*n;DKjBc3+Zw{~6|gJV>aBBO6pc_$S8!zD2DnpHR9Shdxn~If94EY} z>-yQ_TMFKD<-$XEPUVz@8@>EV4T`HVsw-W#DzB)#X?zO)(h$6>_Qu#?&!w)OYYH{}i6^N!d4Buf1RJ$Y5_QJe z7}b?7&->)^1pmWoH%Bh*+q8MhmeOZKrB2f#kEINMLO>f1HNq89)9jAQX&{QI(rxJfmK=Z|d_{n87w>LpwCg#YD8eyfu zjK*B4#vcgibFIl{^l5K!7JPH!H#i{z8VgM=|7X0+q%a(yw@@j^U#@ExSrK&R z8I|U_5Bz-5k^~u-_~{pe(S2>!aSHP1BVeu{fHTer_qq(d#*!0dOpHc`D@jHE%mM;p zNn`~6%&kqN(M-0Ve%M-b&c7=DOfsNCMvZ2iks)Tfl!O`+pwV9!e+B_LV##9Uh#6X& zV56bd-~y&1e{(e#B^1&Mm}sM>J#18B^+s(Jh#6d-@|K@6X z#z8FUjW$K`X2I4-hlFj^)f(KWs|wPRoIkHdl0wODl=$;r%JVRKzrnJ)9-@4Wp0jyE z((~umNEX4Xt4OmyWuqaU{NpdXTIcHQi+3W)YV=SPFBTkW zteB&arcSw5PtJ&qaurfMMfh`M&?uCw#)Pq;(a1qUV{{yEaHCU!5=$BWgho1rYygwX zsO3*w=W{hCOu*c^34cgAffdYEd17BIHOVGG=Ws?EjqMHI-NAEm>>b5iv7iE`6o06ZOBeYt*^EBz4cgtYP?QjLQ;a_u zx1^8z@D~FMfUa2?Un5j%&Kb~{a{MjzlPI~23FvMnPH>8H<1S$8@we1Rqoqk@%#D9{ z^Dn25LgDK4%jzd?uECg!{FyZZ{*uTDESOvSqKrOG3ZiaG@@Lct6-pi>B4%vui!&OV zgsqL%n4& zG4FN0Xk(nzO?Lh~GRTM}xlu;Ub15(0=xI_A>64#7mkg>1Gta1uKGKlI+N~N@#5^ug zxzZPr(=!DL@L40;B*bKnjooqyQ;E3XlS%04YEUkOHIt zDL@L40;B*bKnjooqyQ;E3XlS%04YEUkOHItDL@L40;B*bKnjooqyQ;E3XlS%04YEU zkOHItDL@L40;B*bKnjooqyQ;E3XlS%04YEUkOHItDL@L40;B*bKnjooqyQ;E3XlS% z04YEUkOHItDL@L40;B*bKnjooqyQ;E3XlS%04YEUkOHItDL@L40;B*bKnjooqyQ;E z3XlS%04YEUkOHItDL@L40;B*bKnjooqyQ;E3XlS%04YEUkOHItDL@L40;B*bKnjoo zqyQ;E3XlS%04YEUkOHItDL@L40;B*bKnjooqyQ;E3XlS%04YEUkOHItDL@L40;B*b zKnjooqyQ;E3XlS%04YEUkOHItDL@L40;B*bKnjooqyQ;E3XlS%04YEUkOHItDL@L4 z0;B*bKnjooqyQ;E3XlS%04YEUkOHItDL@L40;B*bKnjooqyQ;E3XlS%04YEUkOHIt zDL@L40;B*bKnjooqyQ;E3XlS%04YEUkOHItDL@L40;B*bKnjooqyQ;E3XlS%04YEU zkOHItDL@L40;B*bKnjooqyQ;E3XlS%04YEUkOHItDL@L40;B*bKnjooqyQ;E3XlS% z04YEUkOHItDL@L40;B*bKnjooqyQ;E3XlS%04YEUkOHItDL@L40;B*bKnjooqyQ;E z3XlS%04YEUkOHItDe(WZ_vZgq9CzNY|AoAK?{D`$ceae@ndHfn#95i^0XrDbiWbCX zz<>Z@1U6bgfY^5spj|>MizF6FXhC9?Fy8INyEBtaCdgukluBy}B zr;pI-R3+A_s&8%msrUI-Ri8cwtAJI&Dqt0`3RnfK0#*U5fK|XMU=^?mSOu&CRspMk 
zRlq7>6|f3e1*`&A0jq#jz$#!BunJfOtO8a6tAJI&Dqt0`3RnfK0#*U5fK|XMU=^?m zSOu&CRspMkRlq7>6|f3e1*`&A0jq#jz$#!BunJfOtO8a6tAJI&Dqt0`3RnfK0#*U5 zfK|XMU=^?mSOu&CRspMkRlq7>6|f3e1*`&A0jq#jz$#!BunJfOtO8a6tAJI&Dqt0` z3RnfK0#*U5fK|XMU=^?mSOu&CRspMkRlq7>6|f3e1*`&A0jq#jz$#!BunJfOtO8a6 ztAJI&Dqt0`3RnfK0#*U5fK|XMU=^?mSOu&CRspMkRlq7>6|f3e1*`&A0jq#jz$#!B zunJfOtO8a6tAJI&Dqt1(GgIJOHe|g5*^KOume`Qx3jE0Om*q2<-NU#1W%vAL`{=Wp zzaMA2`gZ);&EHZxfmyCVmh-pN5}4fpW<7rvz^n!^`}wmjFq;8P0)Ljk>?JT6{Mkh~ zTM0}Ef7S(NDS(OL&$_^@BrrMrIWBOi4cVl?kF)vn-yh3LgA0(eYdNOQ0~r^E;75{w#s>n>l`T$>h(vKz@$r zb3CE^Sr<5;q2tDu)%^X0yAT###sbT*uk%@f?B-9g&NBSqbw2BE$3_*%a{l~~as2s> zcGl;c0$IVC4ViuP|EV$&niNd_kgRv;+ z!>o*tEGhggq``=o(U@_iwH-K9ueHyKLBn6h2}}Tg4tfrNrrDaW{*Rx0)A%!H;|Ji2 zXRKFjIQ#h%@LRYr$0V20XsVb&n>^nc>`L3TJO(kXtrmkJar`aJF+i4MGRB~0q$b44 z&iIO_7^6G_Us_QormW=gX95uTGte26q+y$x!`W63%{KsbhPrZ^xmnMjwERNS02!L* zAA!EUbZo4*_te+)o{n{)Y_R|G0% zJHX7vRYcLdF)OZqX**fY-=c5;a{MXij91yfttn?aaP71UD!VsjrrjgoKvwhj{Y4PM z5Xkd4Z(Jao>t!rLUinGNXk6$uSR%&pWIumu0D(#Zn<$mY-#l@FnmHFop1G8!dt-Kd zqe%dN8W-pQC_pChH$PmUZl>d#WNk~ju2be~TMTPR1b;dL9o#%keEdC20@=eQ+R66D zoq?FP*Ifndpn^ZaKgOkMkaoyCV?q6d2Wci!8GV;QEs?raR7whe8W+fb zbFQ2v#aD$a8P8(#3>~qk|1}e%@b?leC#JC_hd-qYbU~wXxk)h+la?Wx@#fsbA6cPk z(GYF>ib4xcp4V;qF<&>4GJcxP&p(sSxL5;!xC<1@W;7v8y$hI->m*A;h#E2!vMS2W}Kim`;uN+5r{ z3zXEzg&<{lNu$v#mR>9tB!nTv(qhy`gpou^B$7X^3q-`U02+-^v{lrJI?uj(PTRix zInGdAVz{hi^2fVC_i;cFh9IUR&}a;&s;2ZD!!Z}#X|6PIc2fCM6X+nOCeUaMEU2O2 zT<|d0eHbn`kSV$R@ry8f7RE(PwFn!Hfy6WxijfTQrc8_thi}Sw$pMU6&EF5$2OpYO z;qwp_!$VBr0*%H1Qksi|pITFmc|)N2qElP5o4=P|){v+CKu+Y(=K^{De8$vDnoF2q z%3_=Oq61j7oxciLn z0khKAn6(fiA#w9Zh8TW)?8>6QCWgOME|8r=)RdusYKudl))>kVeui~5a&?B#`P>Dh zQ33)IyMn;&|LGh0C#e(27m`r(sF;xB*;Gzerw%xE+;sxXC~ z<4jdhj4iBJRW%VVFIoJhA~2jb87rMBq5`*TxU#@)U_nDW0ZmEc&$tNZ=H(e=iEcDB zsy2n1dvhdHIg{~^ezx2%7dU)D!NhIq`?qsn&T{N7!x&x=qzPhDGMZ! 
zKaC4SDG)I&fYFU9RaO=VG0$-pRgOi>bi2z-43(}maOfvcN6G%015L3Ir=th&kW}A6(W44-yqPLocp|2Ipl2rbr zMc5I8A*OVJ(T%~RR+lnOZhW@fw2rEVai}jUcy40(6I>wE!$V9#AZKLdy;q~rD;6PU znB4eG5yr(;F^q#N6)BqC{JrwBDY(x@yUK7B`2mb>blZ;-F-$IJN^`RqWnf$#ZXiOG zD-to9)%^V|FE0>*t~LTKGG{Uorb8>w4sF26lr99C3Wk3iD>a`=P&OSGl1kA;z}bUCb+=p zMz{S45%t9Sa=Dg$wg4Gi^@vebClQ_?1qfzgd_`w=3lc~#^IEi3FP zvV`i95~>a&H6&Kbdj8M>Y5~k6$CE|wB`Xl~Twrvg+kS+IYF-t2a?6S$rYyO7__U(+ z@QwIuB!oW=fq4`_*U2dnV+2Mw`ehBCqnKA!ma0L2Ia11>9m9hv8EF)Tn;8C-#}GgY zJs=~Deg5Vnjm(=b*U5-()U_z&t7g`pCsg4QTuCpN%XFfiEn3Xq;!7_@Ye}m0vuytT zak&2icVOJ$zZZyh!k4VzYf(+DFV8KliAXeruCHm<&Gt1Yb?6oh z8_9nDH~@V&{h z6tlsr-yGfOZ%dP-nC8UemzUlA37|;);jI{Xrh`)CCy=Wd(kPEXn^y6sEj)(?#kk_@ zJlBzKY;}qK*1<`u@pwZERu^UZW4S(&K4>v&2^}{nJO%e36n8) zl?48H6eIvePJCXBujx1PjQJ1?Yz#G7!eoc4nW@6Uh-f0tt`fl?&w>ab!N$`oEJa?! zikbn9UhhF;)WO8+W`cI@FK7AFKcV<@jtZt+{wuFwF2#`9T_eljUyVlQA^8ek+^Oni z@G;hIcD^$FEq}rIOCJ=T9X?L@;WPMnbR*x&Jd7iEFf0Xru@3g<`q`cT7T*7XD$!Xm z)+Fs}MaQbqjM`fB!Jy!dWyLzYB=MIyPz;D6_J$cT8V&WHwTZ!|c#U;viQ+FE7P1wT zSZQz@Q((uk+%mC5vYNji{=%n$$rt5EH|7`kdg3yo8@Wd2Rg}Tze~o#VSCleT*g;X{507A`(#+YX_uI3nQir2WhaN0=# zf1(SF&O*wXTF9_wG-J%O8B22t=)A00df*yK0)GO4(OC#kQv(^T8O6xW#Jq`mu=!)L z4>A$_@c=q3sD(<~sAi1ur(A@MM)feCZLXg>FDLeqWi@{<{(=Qh7vprtU{n$4HXGXL zjeGvYJlg!R*hiV&{4Hkuc^)u23qfo8y&P9Fq%rpSjG;k4b#6`!txHX0Ie&ftqq7jW zrq9dqHNzRD*_anemo|SS{_$lye~VoL9c`ks5Td38nOHN7G5!fnzePWGZch4*%PnL- ze-eNa3*z<0W)vBdnhjwThc`b`E^WR@2F8~F{#+MG?KOxUP$Hh&KWMXIu;m=uw)%*oyAv8s9=tkPGrrC6i+9oq^kg#);8RfDs|nHk*#o z&}e2&E^u-r6Wu0?KSH2hrA&ch;;$gOQGX$swaD0c=?wInF#g<|fl8^OvtX=QP^j$9 z$&79^wVFYj$D2A9Yu8ERkKKyF&7fbX=q#9PnyiJ9jsEe^NsgL2AveBpB#%GW1u6iH z&O(Zs26G_kdyL5wyG(f zNdB}0re;ASQ-KL0YSb}C`_#BVqfs}O4ChOn>63AcBcc3h7GdLbr)^Xd=rkL_7{~lZ zXhRR2Y>CjgTqUXeaW0U%X47~YFX%8&B`Fr(MA9Z{NBhXbTItzho`XJ+LhBLaup4*5I zGE+vnL(6vl%mm5+3M|Od#blHj6PgWWlm|FRX%lXKTbhH)e*OZvKu4wMEQF~kLMGJ= zVN7#sqgMAbX3mM)52=>`{>(0rorKBNMrR>hP5!9is~Oms*2G4yY^F_Njowv3K_d7I z^=FA!R+j+U0gdQ4tyLx+jk1or135*VkrY40fy0L(Lu*aGR 
zO987F?lo{P3l~>WWl`hv-5nkH|L*p7_^-RGy}b)U%AOO%pMt>CfLL#D9}QE}G~A=% zJ_(1HAb?~M)@p@s!S)y3;aFs0MttrogyGx-)t&l+WfurlBsi@Eg*jsKaM{luf*SE zDgcwkpXdS|{5<|BfFgekz;r;-%_PZRZn7GUz99u9gDYn0TW8~{8=?+=Jbx&!%->|2 zz6~5e()iK5fii>1+&aZi z$zLA@&^iA0(py~y02Ar|UVW9vkK6x|HNxlr9zG-h8d*r&C~@S>z0<7D1yZxXiJ1() zRj3>|;cth_S-7P38U}vRONx?W9%(c@1rn%MB!6qgbt%h!&Vf%qBu z0{{tsxc{c>FZ|Vc!0hI4$r6?OBlsEkyFxvn$loI#KtE(YP+nGtMe4w+%X822$*XMS z`Qu0Au@E5UZ!}Pjwq=5!nZG`A3MR)tmp>hV+0LH=K#Cv7A6tN(Jqmtc1WPbke?9;A z$PAzl6g7U3YX*?#2tW{;6}%jwL@)>9Cayv{{wRJv{v2s>|AzthfD(Ti0JEMyp9|FD zNACZDP&)oZ08@dYpMzg2{(2SssR2v?f1(R?@Kd-y zolozohj;uX{;wAR zl>J{%w`vcV1pasz$nc~5)$QA|r3gbY{I{IMmcwHw`MEhukM?#RI&@^;hSw|h@7=#| z_s+_lm6dzfZ`xL|5f(*fUF2qz;o-i%VggV}pj(yWh|6=Mre4K21y?fms0iSq#jh@Y zwWxSie*OOa`wtvEeCW`jeGuv{T!c`E|8{p?ejy6HoXCuF7nPTnZ#nO60DHPs049S! zmp~W4p5F5ZD#})rmaZsUxeCI{vK1>JRa{h8(ArZ&laS9@>BCKwVW$?d~J1 z%2t&XR-C%j(cWKGRKV`Vc(dx#!6L830w8Nmcs2COq3e~(NKE8!NkP$$rZWvE-rAnO zV*T1Jm2cMUt3Pq}bYtU%vuF0#@7z5&xHmU1e|d39=?V~>DPCGywqjcYV-Oy`^n1YU z;_q+8kn@@s)?)DR>uEs$5BCAOy0`BtTeEfz5=ik|c4TPi%GE3Vy&XOM*RBrT93GvR z=smLOwUYd|ng;LR={r`GpX2ht=GM(%kyjvub%CrdzS4&I94@Zy)vL*hWb9P{aB+Uo z$x9tqJDaLX*KF8!@O%edgs+2)Jw5%mYX=99eAkQ+3e>i9F-2WptqNciz?^?Nf^~y5juOK(4 zXK1K%bY%F}t(!N7Zr&OlpPU|RtS(<$oVUB>#=VhiCwcy~pdf#t3FHu?0P-1p+@Xrk z^5RCLS1iq#z~9oMlKM+MV_i*Eg)3g$e4%sv;ne8J?VCe`*RS?pyD{7`IM|+(n-31K zv~1NHfUlP9{_yr6nE_z%8sU$hFE)|h>-hNis(B1xD*U?r{GkWDi+2OMwwJ72Ra#P9 zTwIvbKGbx1bY$e_@bK+hw{8vJnYerF?7q#bi}R{mZ%+^OpAq@ffI|5T>jDKPfFmI- zBnV_e4wn#(|Hog_@E2V8e@OUmEd1Y2_`e{0{})_KhA0nUZb|8Z#^Ja7nyT`PR<3Wn z^Y;5Q)8p_KzA-p(ZQ#~uNB!VnVF8F+Qo3^0iXw+Tdi&V{aB^~phY#W&cW!Kad~E&# zDB#y6T?|R`{LwccS%Q(jsj0(-C9nV&7Z;TbA%CNzL$^mph9QiOO$?kpwz+IM@ONvn zyX#yrxBim(QGw#{7uE%e^KIadI9{Nzx_9V{nvl}SAX@F-{l+9$9et= zN=uJke(?F#rmCER;?*5fpMUwu(}&Y{zy;nIym7nbB=EO-Ip7Bp7u5DK?VXQ52!WFj zL;_vvxXAd}yaiCiPsJaPpZED2aesust@&^&D=ykHG*sVt=gytMTO)S>!_hlqQ zHWe3aZoWM+al3qTagO&<9GxD8qjLER=>qlh?XxI)^~!L@=Op|He~90A9sC^r{s#C1 
z{DShgoCEOHoYGBuZr&YiJ-IZuu(W^ncfbGjXCKVon;5-)>&DGtu^tALYlZ3}EV#&4dl-dy}>dICd9NlC?0Z=Q8AkH3&EP-qZtSfn2)e=q9XpX>j^ z^2Y(VbVF%r>FG`g-PcP?wx6DvdGz33@2<)>-hg}zgM){Qis9ZuHIE5HeTVVdZCo$;XKVwdM5fX_ew!V5KX*=S-A(pkt0XWo;}-8S+t7) z=1IQpOfKZG4jUR=S$KQ7E{LAR@fx`0_jzHEVzTzD-LkMPk z?#Hw0@1;!YErKLN7lCiO)ggp9ZaOh-l{<5WrpXdG< ze|QL)?8x8Q026ff$e}~kd-m+yxpVX8&984+A-MaPbcXtktKYZubg{tkViIZMsEhFB~ z@qg7+{AJ5v8uD_Nt*zL7w)yI#KRoGbKUc9~!@70rs&1S(b+NOx@fg|laDQ;RiGL-3 zp51v^hu`=(;&=4G#m>&gle=E)-o0cMeA?F6f92}cYuB$23^rCW@D%(_AG)aEZ$jj6 zvugd7Ig$B`1Rw!S4$*ST^b5yd#$Vv`2L~X}UxjM@1^$K{ z{OJ1Y@W-ZN*`7w6BbcPdrsn3B)}kf#6R+(&(RjJD=|n#VA4aLaVF*4e0QLBdk9T*s zUTo{`y4-wv_qy)x-K%iac)%y2y`EjPiob~=IQ~lhZ-VkSjLpFLV}X`G!BhDF3{Id6 z5oHMfx>>nd4gCH`{i+nwp5C*bAv6BFxpoNT__)!lZYjR^1lao0PF;Ijen zJJ7j15GgXzw;%EAy4-rU3i#_LF5J1i$w>`=u=~Ss8~AgMAME@A7f$@&FbD_yEim!N zf#T;c@HxbtcgBzgK-cV6;37sNS1!#c0Wc+hrNG}%5dNGSFegFub-Nl`F9U#`7cYn| zkehBq@R^DB z0d$dY8N!I0>qa=Uo99N1Mz>V@5d<&*f3N^wr2rcJALXwLfm^rhVp}Hw*wxb{x=i|@E0yo(lC{YP?p%9wo8g>c@X19{U zDBEI92jZ`3=q7W3f%&Vx)YeG>Y`b`&k4yylGcWj)2H_?f4z%^)+S}RL(cXT(+VOuu z`NQRx9e)Xbd&m`g{;!YtKOYJL2`-%~{(|fRT|C5Q1NN%m&sWp!A^nKL|CN%R8NdH4 ztsjJ^Ifw%!`1R`cf4u*zZftL-0CvMMq}2t`oogC-7XI(~O1#+)jI~LF>A%R~*LGSHFssTj_U}^$gDCA~?^-8PDb=^kej!6D*8yq`S{N1`SG>8ju z2>##>8UwJW?P3G$zDba#Jd^yvCmSz-$H5@o2`?zjU$pe6FTS|Cl^f2vea)TJ`*n1* zx3#ri!t*ce|AO*|?$5|yAM<}WyWB*|lg*5308ogUN49=s^=)r!*h%=~@iX~9y!qlCKNRaPynW%F zusO$>TW=P9lbsfTZo7(RS;JUHVRoyUjb5>|V}!rs;Pm#873sQ~zf!^<-hxr{cd)~m zB(eb0^9S(P-VXf18jQDed)qD^MgNz1{{Et-=3xCnZP*R}h!J5A|1`BR0DDhfZiCNu zaq|af-_8zjf$jACk(xic{1W%)9)DBw=dqw5ff*yXK-bABn{}-!!~z=ArjBa>*Wcs2 zQt{Vxq6!sSb*WiioEATbbo~|z39k#y}(EkzssQYt|zkdF3`@;J_z{8mb z11K8)Olxq8W>c6`4+%Uc$4{D3>i=B+u4(*V{UAP1evLhSAfG>&|D*1Y`oFi%k<&+a zPcMEg7CyZi7`om%ItTnch42o9cOkro;e7}nVE7QiM<-t50Bjnc*tZS=9JwMnhT*x#>-~K_ZO1oG!2VD0e;o%EpMMbksQYvI!%gP`{``yh$^1DisQIHt55TwR zdq3#kPNjpRT@h$Zy=vl<1+x_Yk++!!94y!UYC$vf}AKXa#Uv-1?zHhc^Qx10-O ze+Gv6Ki2^Y{C)Tl4kzmWccKNZ!|QE1RBiIjfO^*CVBlm!0x|{ai9M}bnq59z@Z^6fA9z@1aAFZ 
zUw;Ak>$^I56Zv~E^YpzBKbpA%KTm-_?AFk5;liFhb#>)%94!YT$zlaJOYqUav>y)i z@h2_70N~Hr65KRS0leBG^LM4MyW_~FiAiDoHSp&wzx>k&3l>xoy^g`<>HVK_T7*n= zfK~rr$nfcZnfY_RF)wuFB}{OdYkF-2i%I^^#GmH?{r-i-^0PmYoG_ex(^JMwqt1e}L2UnXxaxJcFdi~i3!eX!#% zc|*bekMLKSia+v=e%*9Y@3>4?@&`u|Mfxm4(!zZN3-v|_u+>#cVzzHnK}yK z0E9sj=ss$wsj2Bg(}B7JfFl7?;UmdS<*g9vF*HMfZ#>?*iT;lqK(q_62O#i=4sc@M z+Hsk`-mdn;$7%`D?w<;Hx)C2f&2=9?%{ff}H2l#vZ-ayq z{>uKZoP6t-TjF4{^#D?1ME>;668`k&07Fvp6fP4EGcODG|E}Tsi}xeO$3_2FKLm5_ zIzY2!oC0Z2>H9L;ICp3zt9i2V8{{J;ct0qX-NtD5eaUV_Uzr)*xWKa z+S=Z6`Etj_*6D|jpFDl{{SO{{{L$?nbAPx6#6bDOrwt)c{%#QeM?9b(K*AsI1zrDV z;15o~z#jz=`5TAtroi{2a(C?5@doHoxZ(JX;o&>eQ+=?T?Si||t(PjjYbz}(0`y4u zna7{&{v`g0|LZG-;&}czw`UIlcfo?whjzWjoj~b)YFMDgTVeNqzk~gs6I4?Vc}8n= zqoy}SHvAJz%Qx*_58vA_Dk`iwiFZRDo!0PII)u{<3-EQ^0XqD_SJH`>1PDvLDXMIT zpWYd|d3$)Y>snJwQ`5QXH@Ck5lUrH2eOuk$y1KfCp@s%Ee~dr{fYiB39?n;qFM>59V ze)q_0rKSHso=9J?z~K*1ALPM59JF)%bqJ;YkMP%jkgN!A%zam-{ z|CV2T=PsS3;R`#fw(Z$ncd4WI>4(1@>S?`je7!u!+mH2(kKaHB#r2mtK;{82*1<6Z zzEXn04HRldYOjYD|kiRkN=D!}k5e1Q-%2;;$JvXH7Z$NL^Kh!yiIa zg!55Ud_=MSmL9wX{GkK9dj9O$t@*h2EamuHfA;A^I!U)z!w3B3yN(?^u&=eF@zk+5 zE4RU~LtzkC>WYf)v9TLBD1dbNW$q7_;h~{g_!`~1yy6o0Q9X(Y`%6x|EpYhokq2Lq zU$o#wi9hi(IQ$62S%5$OcC;CEfK_k6Mo=KZj{+qj zP5%7N2I`ge?IVhVQ`NAqyei}Mirs+Uo=V0a{(2Y$&sO+NOv1IR4%`^cW`biwyo+s z{ENcKppU;=fEYvW+RdBS(Vdw7MRrqrm zJCHwzz0S@j|M{Q)IZM2skH3l=qqqQr1FQ!CH|(ffEAn^YshYpuZJ&JxQ~N6jIMX=e zIPVnTaYr( z{jpEqFc(85eCSK~`!@)G`qQ5tKOzhkuG?4N*mMM!;N835+_A3IL||O}r9ez_WFmiI zj4I%SJf!bUzz@9N{+eAI%ZT5rZk+vJ5cK?&-WVAjb{$~p+Un|!xt#x7bLQy->i>pu z{iXiz^Ur5ydbgot;rZk6Bm8}UpU=Mc-s8t3bo+x@sobh|2=we zobrckEnd8P-@XM47B0?dblsncKfv!#fBfSg|M2i(71>E_I&-G!P)*hDw|4EU++3z# z4&&p`Op(Z^#9s)bz?wf%xN1Ws!LRzj(L>ue;hJ03G4t>L@gM(z`~iR({|5))JB|Z9 z0B_gQt#$j#3+eF}SLyZV*#6Js4?M}tW6B>+ufqKi{t!Rl55l{TA0Ismx3ejDOP1v2 zkm!0iH19254DOEviXZalWH1LfaOTze!-tQ6{|j;p8t=g{2e|dtFC758x{ksv@}sx~ z#DB~33fceV4tK*|b!_a>gYsAK=&|tCr6urllUuf|#XHGcT>RMampuE2jH3S=#q}5A zBl!LC4}bW>?|=XBfy3b0Lk9_fZ*0;M7?HnE?@3OTw8Qh@LyU(kx{>cfju#fL-T20i 
zoo~Li8-A31CtUHBLF%R03S*%FDhAaZsaOFZV%z&nxfL&ibIbPKjd#ypw$<>9|`#6=sQmbP9uZF z`)$Fk-ihPK%NcHf9t})BI>_wTsaa!0%aBQC%n2uEOg1lc-3_?!6j19}Dq z{yzU=8USqGzHQqUcuEIO3$R<;Q?q8xYa3R+vbzpGlis-T_19nDuzth(HqZa@zkRS^ z`TF&>;Qwxd{{#MD>BZHTxIe`2Uw`+z-~G$Id%Jg|j4QX4Z{N0c(<=$}{w9to z`Qv<#0=+=Gejd`aOH<>&v%n-@c=A=Q)ZW@qZ!QAHnY( zvVXhYavXjOZFlK~hVu;#=gytI<}AFp^&^3-zd#>w2z$XtS355?H@CL7wO?*Yxd2NJ za4Y*9?gM%ghVTaf4xK)Air~M0Kb+NS;DW2_*OwGsI)CQmvHBB7>)|9{U45H;Fi9Zf zzk6O$Q@(KBr9B7m(V344e1tyqe#jr-_q%`jmw)-~Z|_Z023Kr+ee=e(rOP>N$jmt; zg#G;ElZ{+MB25C=q6t#e>dU4XQH@2*i&t$b-@0ZkvRBaB_WR$%8`0$t9iRi?fBXmX z*HqPUV|&@!b!#~SD~4L(BQf|4c4V~w?78y|aC^3?xv{mSt*x|l`{6?e;l?fAy4|;b z|5*|yK8KZztiK`fdzZRDVrHfHHFIKmg&NH6=Vgg2Y4k zm)7pxn{MqJ7{2r1(ae)$M*chp=DpS_HZ-5DDga0nEf?Ly%l93`xpudun!wtz_Mq^xFv|{Ckm8(h!d#&*1 zgSVi|A6tPLfiqNWxC?mX__}qgJP(LF@UbzJ83R015AG0uqWtRC($WELhUnv4WA||< z_4rZmHijR&_u|}o;m^L(>g(e7_7ez4XCA@5#o?>nHy%>IN8khg9y;LNo1Vtsz`Wgk z?fTG}eyHK_r#S&r{@k0;ct?ob1jKPN{)W+efAIxHOSm@RuMWUNiMI^{98d z@*<&)%L__Zt|&(KT3Y@Eygubm0+6^sf}q165qHO`b?aOgi1b0A__4o;VMVdTAJiQi zyNCQedeplWrWKFA3_o`3&prF%$=BKadESq@Kg93J(I=y$EjMosT<<3TnlAZQCCQ(u z2^bCTNk?!AyxDf`+V#PU4u6^xu=hPAaRqh(L;}J84b#s-?&Ae}m|<+-jZQIxK0wIv zNAN4^xk1sR{0Y8KqVG2Zzkm9tU;p~o-%L^laZByYID7Q~;9_YQP5gli?vLR2r0&VcXmbtxt8`ay!7Sf}XQL+YCkdvehXx19U2FId89qtA)(2lw zzPY|1J_iB+r{hlo5C=q#ATEH!0|I{pK;Tcluz@$k90H#6hx$Jdd&z?0PQ35pgu5Sm z3op0xBfbym1Mf%i`{tW(zW#dR%{Rdfx_sb2E20h2jb_A(qc_agTn_Xhd(G3o`OR-3 zcmU!O?6^ReK#Cyl|5(5?aM`Bvjbss~^wGZ@dl*&m2kY;r4u8!1A%9_4-zT$2M_|WU zGX%78AJYxNMs)DT4f2o{c|P^_@HqTq2NR3?pUk@ai3>3K436?gmS7x-y8{%LVB`-0 zyw8O2UI$irah^Sl6aFd>9C)o{L1le2qYu8=`4Os3PQB>+U>4xqp`q{VufO{0t1rhr z9|LQ;7C(3Z$!OG9Ol2{(3%weu3&x_trl!091n)4|oz1d!)XaD!uGYW~2LJbKi#$-VKM%Ka%<-zT$m$lt~Z_*&#JIb`Bzbb|x%gaUv+ zMqJ@tnBu!rW7F_2-Y@Rs{2$@Z`wW-^(7gwv25{SL+&q~1li_ptD{pN(0XG24@=r4U zU~I@AtUAQ^Dd>Zh_t)V1zL|tJ#v2`pQ6DYerb-H895i?&%b8q9%>C8e#u z{x$e^^znWI@jZzf6Oq7Ah_Liceykk9J9_Dof^O4d*s0c|M~}kKkKDnZ0coY7g@oy8_;(xFzK?ppugTJj_%$_p_|eJMIU~xSh6SUM 
z@~5fk7-Q+R>MrhX9S_I=kr^v=MN2L%2IfI9vJ>YN0)JLK=o-N{2_;?e&R=Lh@^JYZ*D)y9vk zy+9w?`F+{c#K8wqhaxo#(T#4yVs9)JyM2SfEWtd0jsqn8xr^|hc>XSv4dA|0 zC-DTrU{k!R`GXrT4KmIy`c#cX2N`yNBT7@IyC3;NwTI z(mnw<_wKvzy!+16cix@_ETJ6uKaW3e2T1vowt(o4$qvxtk35Kg{2_oU{_y-E^XF{! zkiU}hs=9S0CHeXF4t~hshq&IbwHI$V^QT^hADlbBYL?*R`BV8Scd|npUH()xJ);u) z2)!bLxCJBt;uaVM;Q_==AUTKd{E>ZM>E=zF& z(Qe)B@J-6!%oE}t@!sGQ_#gT)vW5eJ@OcUD6=#sY*`rSoVN#ZED#`kecNyuE`0iKX zR1OdU+OCXaVH;JBHk`z+h9n?ik_@&CdOa`^et|;1BU5N8gV>LJo%L?$2F)2^%tg z?%{*%|7LIWE=FJXEb?+v~oIX}nu;S2G9jF-Ive}q02et7Q1rI)}*>bm672y2hN#%ARCOM#g0Js}K9 z8j-hOz*3U2dYU^1D?Q*J|MU^3Zy5fdns;XmV#wbwe@XenqX!;7;Ox-3MRoYl+bw~zQg${)7^2j2qX^QZ`*$R80# zjLz7@cAcAm~44QqZ>v3WKcwk;_S5=Zbh?AptJ;&J>Va;{NcY| zNrUqVFH4SfqW{Cd0|@-#ofqQ&9Q**lsm?8kkF@!d-5ACO4naCfW!exTR_zk4C^nvd1U_RCBiQp9CKG-5kIZ}oBEKh zyuvMKpbvL`GJJ$6-fc*Ti7Z4nO8kk<`i!DEonGP6s_*f;K-dK0jX-(|0R;8@0bSn5 zQ4Wa{Q&acOl$JXD@dVy?KJ~rH7GK=!PdWUVS6>0YnZ2_!kI(hO`Cyd=l>{u6BEcEi%a-v_=6iQ zAL6}#N+17eFVHvH3=`s;Pu?A6$ms8thdxt7VtoLDpa-l9bP?tN{Da3|SC{Y%D*agm z*Tqf1C$RVNhYjAl$nod7!29U^#IwgoA3FTG_|b<>O{?$h%wBxL>fyb6QxBftR*#YQ z@B#IHh#tK)GlL%k!++OnR>QaEj$OH;IfIbijQ}$Kp1_lbXU?|3LxuQT{_w9kUAumH zL&={0E7xwo53=LO&)09?hL2vR;XXJFN7(-@Sg?9^8{DGDf1efkBZohB?xntuz^CMo z^EZC}Vln#p7f#I{pIO#cyo9c=7Dnqr*!4raCwJR$tu*A7uA8Gt+tH+5mab zfZlf@PhSz|$5vkA4)MOsNEiNiIQ)4NzrzG!(f2ntHgdm(5cDn}E(YY z83o+nCqTxB&ulM$3%=|Ek3DbRvgvXc{F+>U|F1s!=+jR>`PJuNKKzKE7<>$7@6?qm z2;k=wK7qc;W_L#UYF^x#D=&kOrEQFWnCq4Kfw*yomYcyxm{EL11gQ^n3H<#ZK7v&R z7Z@LZU&42CzWCzv&)~iU{6IWB2z~yw^%zP^S8Ocbak8@=ze;}XT7Siw%>{2ALifj= zd~xxG?^Lm$kpi1j;`*WDe!#Y>ySEJdC~omB>Zu1L-*}$1z&Y! 
z=W6^49)!Q5zpeWH?{92sqQ3{NybI{x0$#ZI-WQL$o6j7BuLZ#`i>w);EAEq-e#};z-aK7+{0D7x0-nGLm9tPt4=s9DCe0;SF8bbCSxdhS_c0fM` zndw`+YX5}`4RtGc`q=OL0)@@ZyaS|g9N_1lzl*XH{#*dpo}GQ@@aOzq z*~aoc#|N%7o;tp#>bY-Y_`wfWLfE-2;O38`aO}dxhBx37U;-Tc!D9z_K#o5OU>|Vz ziytmpxbS=5`qsA?cjD{!zrP9o73J^$0D@@XQiY=v@KgBX@O9Ic=ElwNi`9(3hxbmv z?+;->&GS(O49o#OcpsKjH}L#@=D0qjucg{8LLw&q&>e}T`BYj)eZvS*rh4Uv&&6vW z;6a2C_^%%l|EJ`S0Z3Gzo<0ntq|;Gay6OB3;}57@ z?gjip&csAl`F6$X>wN9Q@qx?-cJI7;7I5AU6%{88yn^wMzq^IuX2_%8eYIoc)u@I!b%e54`6WP`tcTKh2&hzRQVYsSZdNcb3K@9BZZ+|UtF0yi~GJL~W8#%kyH*Y0$lI0-X(`0qpl z7LonsiN#CKc&jh2!Q8V4GJnrK_pKLS7gHkMb0)AxU%)9FQFA{z;W3(OQ z3<5&8?)>@l=hUsol}q`Ta}xId@y(sd0pQg*Dm<^WC-3UOG>o-J%4x{{T~HTzW*YxzR%wOxz|MGQ_T!@fU}=8yG6Wb{HdD7*0g=5XLK6&#w*5jppa7W4+&c2_`@fMCcB+poEMirmHQLN?xwW-J@@=! z*#A)g34Z+HgWPy|;(5QBnTas#W?W^t_1C>1R}weJiBJbP^O2IjN+^`t|6$v5vzXTQ znV!*YSk)&ps%`F&lx8;GP)4XE@po6^ulcJl$v0#8Bc8GgE>q9nbI<>n^0zo=Kz;V* z?>|2i{{E4>5EfdT*d+x6s{s@5jTQB(W+q3b1|EN&c(^fVT2l({8 zuRI~h{*T-#b@{vBN9M;Xj4RV&f&wq`aV1UeCl%AN&q&)haexkJNsp|D~FFi_{tvTFGRD@n!eApj6%cipxp@9 ztDEOWIHM#36p3VnN*;fuo14qFHG^QGfBn0l&-T5Q-8K)=>Ct2zyuT|VF_kL2l(FCEJgXlO&`Z!KUi5ccx7d| zEH9d2qc9NGE9*17G0lsC0hsxl{0e^_BAojR&YuFSzs5%fbKqZk-uKp9-&WW&p2hRW z)MfJ|z0zp|2l(+vUsuxv!EeDnfxib2B>n<5EA_>av>TR^UIQ4Fh_L}hF|xYSP*~DD zBq&AsBgKjTn_@M|>m+S`|;)hz`F`0>}xRA?W6{rDSQ zKK{(jlBRThrfJk5#(c86S9)!{3m3{o9}+ZEi;urdk@gh)A%0(13%#XIB#J+r8T}=r z5gp)Hls}3e@BeiCrEXT+Y03eaYU(d09g3n4iSpM$hOFaHdp#OMviJjErMXU$4aToN z|K!U_GJQV&iW&!q|68zt`$x;1=2A7Q?=_8@dj5z{HuPz#3GXI-O|y~sKOKM7=>A#^ zO)E`F<4<{!TxRnZzx?cLi9etY&%cG2;HRn|^aFlH#axYN7mW^zv1YUj!uKTQkEnwW z?M_YcO%eX8`J5o}#PPRch2gTn_|>Ps;`u8%iJLyk-@Z!_&Mhu1DqZetBUQ7$3T{U9 znr;(uMU2{1 zQ>#m~{o*S)0K-2{O@23_un*204<1%7Sg^3LxS{WpnQLve#U=jQ4q|=-N}M4hYgjXj zYEYcQ0gM{5)fbbWTU60?YwU{1ANd|U;jy&lo3A6dzwBOr7b<3k$){A87^l911BgRk zAvyF80)LAO^9maJemOPR{D%9_nQ_ghZkCkbYOrYZnv6f?rNm)WHmn(MS?RWxTa)*$ zwie+@1pFV|ik>F_bW%Y9X4HK1O&z=B5U*M7{}w6)^yE|cDq?K=^6Qf0T^+4$+bX~X zZa84nVljrf(y}$pLw6qzG!^B6YQ^-U 
zILhDVZQC|hwtVwVjsN0P=43m6zM0`b@vBRWEnj|J-FtUzWORJ$L4W_%fx#QMuTESk zD=sYI>dL}#CQY-fM0_#uEpKV(km7(6pdy1E6Qq`8%H0|f7I&U`0ZQpuU!Ct z_ocFdk_q5X6i@<1VO+)d8w38}Ckg?;iOzHD*RCuR>k9gbCJBZN870KTONq}I@?whe zR;+pJRKvtW`h#Wq+o`cSa|s?va14 zc;@Y!Ek`!LzS^te#C$dR5}7y3h=l}&AFRl(V8H};Et3CJ7)w^HtK8o@Ju^do!FzW4 z&h3%0$=Ywep&tzMO$CT2!T$*wp`23iWS|E4q^P2y;ZpbTUHs=b;fJ~&Upc?K{57c- zCgqw!k&GMt{BZ+{)fnnZSFPJvQP=fw7XGCn_^sNf4 z539X1edAc=&fU9fzPVfD4J~*qG5j$BT{y%DRYc5lQ>E+B>?XyCxhAoS72H(1HPu6+ zt-@coA1)Hz$n;Y->oaaXdE`i4UG1K|`!_-FPOscJ0!J5S@bTn~H|_5K=;aa_bXT5* zMm1t7rrXGZ(daZBzks?HF+WNC5drlV3*u*&Q9s@cy_tw3(`tmBMl)(_C5=B2&~UBU zW;Bd9It#{{_zup9fZ9fr<^QyonU#hNv0!X=u7ye*e`V5LWI@pRXB37fCrrL*MTU$k zO!}o^l&{T6Cy+mbfMq5C=baHg>(a~`$td7x&FcN%BE`&jh-JOer6_I|T#Zyn zxJFg4(T%FEATP`L^J~N@l{sfI=h0I1?6<^I~ZIls{T%a^wezY;x z6!i1XAFOk}SC;3#@{t6Ge)lMpBlSoR>!Jc%x5u z^qLfVPcbPLRlr2?hZ;$`@X2g58pa!Sy3?U3BkCrMKOVQNj~n<)g9SiWuZ+kF`@ivWS|OVi;*Ix8LeZ~ z?RHHl5UjPHHFZthh#Ym*L(&)%%b!wTM+CKi^UlclQ(nq5HO41kcJrs08J8lJM%iU_ z5mQ~tGd3!er*4%1CLmi8+pWh zm-6h5z9B`CKKuETWDrG|e@0>T;g&2iZqX6|f3e1*`&A0jq#jz$#!BunJfO ztO8a6tAJI&Dqt0`3RnfK0#*U5fK|XMU=^?mSOu&CRspMkRlq7>6|f3e1*`&A0jq#j zz$#!BunJfOtO8a6tAJI&Dqt0`3RnfK0#*U5fK|XMU=^?mSOu&CRspMkRlq7>6|f3e z1*`&A0jq#jz$#!BunJfOtO8a6tAJI&Dqt0`3RnfK0#*U5fK|XMU=^?mSOu&CRspMk zRlq7>6|f3e1*`&A0jq#jz$#!BunJfOtO8a6tAJI&Dqt0`3RnfK0#*U5fK|XMU=^?m zSOu&CRspMkRlq7>6|f3e1*`&A0jq#jz$#!BunJfOtO8a6tAJI&Dqt0`3RnfK0#*U5 zfK|XMU=^?mSOu&CRspMkRlq7>6|f3e1*`&A0jq#jz$#!BunJfOtO8a6tAJI&Dqt0` z3RnfK0#*U5fK|XMU=^?mSOu&CRspMkRlq7>6|f3e1*`&A0jq#jz$#!BunJfOtO8a6 ztAJI&Dqt0`3RnfK0#*U5fK|XMU=^?mSOu&CRspMkRlq7>6|f3e1*`&A0jq#jz$#!B zunJfOtO8a6tAJI&Dqt0`3RnfK0#*U5fK|XMU=^?mSOu&CRspMkRlq7>6|f3e1*`&A z0jq#jz$#!BunJfOtO8a6tAJI&Dqt0`3RnfK0#*U5fK|XMU=^?mSOu&CRspMkRlq7> z6|f3e1*`&A0jq#jz$#!BunJfOtO8a6tAJI&Dqt0`3RnfK0#*U5fK|XMU=^?mSOu&C zRspMkRlq7>6|f3e1*`&A0jq#jz$#!BunJfOtO8a6tAJI&Dqt0`3RnfK0#*U5fK|XM zU=^?mSOu&CRspMkRlq7>6|f3e1*`&A0jq#jz$#!BunJfOtO8a6tAJI&Dqt0`3RnfK z0#*U5fK|XMU=^?mSOu&CRspMkRlq7>6|f3e1*`&A0jq#jz$#!BunJfOtO8a6tAJI& 
zDqt0`3RnfK0#*U5fK|XMU=^?mSOu&CRspMkRlq7>6|f3e1*`&A0jq#jz$#!BunJfO ztO8a6tAJI&Dqt0`3RnfK0#*U5fK|XMU=^?mSOu&CRspMkRlq7>6|f3e1*`&A0jq#j zz$#!BunJfOtO8a6tAJI&Dqt0`3RnfK0#*U5z@M1{-?Aa=706~}f3(DgELY%1mcJ~Y z!R#Ks7f}!r4k-Lin>TFiQbU41d-IW+j2i;m>h_OKr#|1%8~(pa1??RvKJ@ zoMqGVT7hKo2aE8$R@)9vRv=;g$(Lu-^IU<%@waTA3vP!eD3Cn8biglLZ2e0#4 zcRMz!K$i38hm7OTZ?v;M-xSDt{$wvF@FyGRoBDP<+0UQkhra%(-6TFd(XRyo6!y#+3;EcrWMUk2lVpuG!fWfp1O;ObCA-WD5SQ3k>vb@Zg(60u#d@ zgG|MrM`xgk`133z;RngN&>=beIgqLOW5&6VG)w#N^EVd&{v?~9fBuOC8_v9x^*2>`T+OJOnYVsT50g zGWc`pgGi16vK*5!7Cj?0B2MBD$zm@iPj~Dc zq-ap?Uz1MJN;Go<`2zrTkRg9sV>AI{sO6jQ7}^Zcgs*#DMu;{87fm96dH^YZIywW3 z7IQ)We8=2@xjBFL+8lF3QLI_b-%s(1<>eO_U<@iXs6}H`RhW}J#dOa}zqAuY?}n_n z2bSIZ0c3vs9R5@Uy8Nk(aZ4MxJ;ihfuA6>AMeoL}^n2u6$a4O2RQxdy=y!n}e|{q` z%bdnl(>-?sQ1x!ii*;z(&flU%3J2%{C_~2cH-B89dbVSoWyoWDg7 zLI5c8C({{LySU1$=Q^&|Q?;Y+-JF}M&EWO~@TVb=IYC|viNAT`0yT5Z4?I(GP1h;2 zV;fI)^M~$_1f>sT2N6#!AAj?LK;3M6#uef zC_v^WE6iU@ptx5xBe6hZNKq(Z{P`DQ{`!qB&S>OYNjn|_2Iz|~9jW+vj{Mx!pB zH2!=pP`K<|Atfb@MyYH%$#hT%-fud+1g#r@C0t$t`Qu%nBt|X-Da%V5jb5=dV{srM3?YsNqqZRoBvK-g{BbT&6a^8} z0%$af?L<#SooTtAGXgn#uG7(laB3!#Ki&nhPvP)_31T_|jmETvCJoFr9+~eiU0ssO zpPE1iF*SikW4byg6hFK{lvYSCf7~L>9))544@gsp=JKa9D$0k=iq9Dd_hjO0HcCrj zMUcy9HGeRG@s2YrSY zLaLGV{H1b%!3mW3lkt><3z?h)7M?*DAw^IWPXK>$xj-L(z7<-bU*N1~K*t<(8Tj&b z1(U#EI2R~m<|-o_Fzp)CkHkm=4kpItqBoVkD#_q46@kJzMCOc%DCd(jGsf^5eVTnW zVKlzHXOWSyRzu7iQzAm3*^or3eZqM zMYczv))>wZeu_0Uay9wkv$+chU74#JF`6L$0=qz!R3`p#7pSTeB`@?8XSRZL31K^v zsz*pBiNAm@P@|F&F{4q_pe8%)9A~b)Vt^q#Q?w8=D_Q)dA`s3YA@#{f@sP8ef#Ukh z3e}munV^=W@t0~5Ht3R<7u9H}r#6L}CUYbIg&X3%q}p`02$cy1R9M| z)DJVwv6f9a*bu#?Y%67f1oCHafk=^-vFOH>Dk}?wnCCc)D#s#ZdcEbQIA#mgA|;Z@ zpVkHD62x=_MmI)MKGocM6vP?}GFIB5Q_|dfkmgKflF6TvK#;*fOzi@r8%+Y6ZRW|1 z*=in&-f9|#zE&_xQu$LX!XOAkOz8rn8-q!$E@hhB_-wf^se2EWYZync2K1#8%b(8$ zq6Qvf3IaK!UoWH4Upz#PVRGX$g&2>bs9_w6(xI}lo4;3HHU;-FXoyfzIEwrLMmL61 zTrO;wT+Wo@Ai$MWM&(XJrj>0%X2@m~#j~8hR{|2~T)BXlOki}QLf7C~y15neu{|M2 zObI4MBKUCBB^6C#mb!45+^pvBXMl{EK;{A+NhHL?b4YY!7{PsIHB&2PJJqDzz)4@P 
zVKdY{g>A)OKI{2Q0ibgZaV}g&OmKnGjec2!=cp&vm&-NivjxcDsz;2fI*Hg6Uo8pz znOz_u(79wkVw?+%ZsY|G8&}P%B2Q>mVNa1IRF9NUbP%Z>sZ#dy2M#bbfX-Db5c6DM zbfctcsH9?ERatVIsxpQ=xqA4tqVe#J_-iDDKOKR2^nC0be}(%m9Bxz=E`yH-F#2T; zo}-voRhFtje>qahpB=+NAtQ~#a1+Cy`XHR=0O(8~&lMjhWq1VXydAx_2=oT zb0xhZT&5HCY|&!=7GHWPT1!%`pJns!kK_3lPry!4+zSLV$#qs)QeOE7AXDa(%;-ja zo1R5Aw{o6WS{;*UkgKQ7imySbL-nWvULY5aj zD`&Z714g9!0c+`tN$p`$UpR~TQQ(R;xfh&nge92* zP^9s4v}P2eGz5K8HO=#tKmApNv?PT;*#kNRN@@h6NC_FM8Qv%jL7!Cfr!PNe7RA6~ z>pQFY`?(+kyA93XjTQ>z;SqxdDW4>n4PO1`=th5AnjFP6&xh4FPcbgd6-3C+ZvI42 zBmmI`N{R%f$WI_wGo(=(f!HOCyfb{F}9xs{&>b@00mCG^9RF;SCZv%ub!F9 zWYK^|uULQ>n`|f=>of=GT2q9LhhF01*~X7={k_YBYLvxf))S)%9|? z7&B)1E5{v+#(6~v;g17F1dsy36yxylqDCWw*DKD)oT_}57iaBe=PPr+3_9*L&I?Km ze;z0RAVPo~x-bL*#VssFNL?wFOvPAxCb;&cML`qfo`**jn9@eb?=tH z!2B)t0~no!pf&woj;k5cs3zdq<{J2me_&a?{=T@_4`6f_0@w6;Ilg8%WBe1Eev6jB z;Qnv1N1!8obQVI?lpqsphB2m3OZ+WZ{zCERx0I4yutDply#9m>H@X% zg9K*4h>&TUO~+_xG_xihe=lXC+XVSP+zP@XtXC;hpqTh8h;GzhNMz)Y>G`9USi4Rdf9}mdr6-Keg1M&2S{T_F>qG}{EERuX z58ha+lF8%Gb%BbdI64a{Y8uRi5VW&kG-_*Q))0Wd_y|lOe+Cz5obL3DTF9{3XvWN9 zrfoeCe{m6*NdB}0re;AmD<+5vXtNqG7u~2EONR3U@)r+*3FS|-2uEijNKLhub81E~ zW|%Ww^MUz`V;h)M{x}!N=^UMfU^P`P(5)HT$PIihqrv%$0l=*0??-scPaEh*!ooQ? zItu}7D!iOmGpungq2u}r!C(3SW;cJYzN)1U4U9ln+~_O>uIYn}uNlth4ts7RVfafM zz-;HwOrQ*)z=AxhF&SmXgl0n-N7F?sa@di(-_X2IRk$?FL#sB zLql&@Z*T9>W%(|F(LvGFq>x277O)TYye16OtuT4@!o(B}sk0U?uA<7K#^<{`I`IG9 z?d|YicUOCR7lf4kCWt=;fu{kn-rhbMrlx7QN5g#*4lh9f$s(-P3g5`ljM%QWW=f-? 
zt{MbguD)xNMc0~~zi>&-)vH&pIHA9vs5?c&T_;>Nb%eG=@h30B6hFpaA5~~t;O~J4 zpkAuz6am@#W>Xo{=I^DKOr7g&GH04J#gF5U$Sd(TnF_#U@h7@K2S1NL3ZTdz127#> zbTdivmz%6cqhd%J7PQ%>zI8TjV}ilKkLM5NmHC@&6KV+w96-|e<6I!aF9m-tfV%le zGk>{>F{4q@u`qu@5{fobW(8>%@Vf@9FTeiw;}Y!S&j26}Bys$?F7OoD`*HifJ`f1s zqLTl6zyLI|kdDz%cR2(D4?bfMG^8?y=5;2i1w|6^6Zk`IAt?AW1DHVmD1oPXl>7mJ zKK>pYA%_qn3+WlPjA64{V_~7ss6ZiAmJTvgeyTPJek%U@CI3fmHJs2kmq`EjDgt<_ zCj@^F34lfx(lqKIBhBiJgg?osol3pI1e(sKZw)OJKP7*C6hPeC830VA|9kaS z9zSmXN7e|R|9kk50BB?(ZKK4IGx1KdIu}UI0wiWKR92ySz=WzDH*evR+G`m2{U1@b zzaQ>&5dbNFy$*jpdH}x{^tT4)W4RxJt5XKR&vSn=e^)4g^#0!i2*4i#Xk;OMqY^;Y ztkUTCKPbaf<6;mr!dDI&H?G}(QbsjLg`CGC? z<^BkM2L7&44=D2YhzHOQnGqDu=$~26C}8B{&)+GYKUm-zHo&1WaW#WSRSl6mf}fec zK5_~s$3K@p9e~-+p8`ONAIBeCfSo-GeqaPkFj;>+|M$oYAj5#X%F`I!C=)2ul#RUq zgMwbKS~!#juU?3lvL(o*`1$yAq{aOo2HXQm{AmEpdj5PaP>UbA{|7?p_!9x-rW>bC z0m{p|ut*i$1_TJ)TqhzJb0A>Hz)2-SACV@ZR1v307e|7t|Y$?J}4F4@BvE}gCN&cpd8!OJAJNxEaHE))! zEUehNb?w@+qUD7}WxHw)?0*9eBGlh;^UCn>UJLd7sR{I|dcKv*bED>7Icr9vB7low zpS^MY>qSL%`|1xJym<1|*@laa%`MHBE}cGm_H^-{y_@p#3sKsJ=9-Bfw_RC8-v2YH^dqouX2^=#unM^0{D{_^6I(iI>$Q@pgaY{j+) z#vnX=>Gy!y#oynGA?GzOti|Br*VBOhAMOKmb#LEQwr1@bB#`2_ET?2>=;oc#;oG-v z4h;_79vz#UzBh1c=lZg|^_P0@-nn|FByTCBflV??bFZ(w5|%)&vO^%B!^I7~&e-esft5>fqL;ebKbC&HK8fv{WI&%B=@U5FSZw=oWzk9d)!0xqWc^jJh?%up|wj|%> zk4-lR2aCP}AtZsUD!vlN_zXVo0LW)~aih^ImS&9SZ&5+{F0QRL7^Z$CQ#PEHQ-@Il<; z&W(+akIi2I1^l|Ciy=v#Kl%nFOEB^`HFdbK1Qy`p;-dVV!-T)l+c!r>M=;zO8##CC z;EJN$*ITYlcDEldhOG=A)SwXl!V<_g3uuc;F{SR+DZ?3efAU}9LcQ3JF2(WpN=eDq zhLKOFS{j$<6qXzt`||f+|MI=Z_wIm%LqoSl+Diro59Tfh{6OM@+CHYe^YI5Ea1w$@ zpi3PW86TUs0E+mj_~Y^OK7S+bj}W*uA5LX>0_hkUdIPTXHthZ21;g#;OUIWN9BH3` zu%o2J`zVf1j?z&%{DpLZy4hCbD|=<-JY%N#Th0NvIKQ;~;LX1F_S3bSipmc?{^X-~ z-+p}V)X^h{4;?&oxElC7lUG<&Qc{#(12WE z0)-~whDF*TWBhRdE-ff2DeuH^^#CX}Hg*SsqanUD4h)tQ6~n!SY92rK@h>{S$;poL zsf(woYisFrojKz{$HgwsM*yFH{)HE6HffE<6-149Q2mM*PA{3w9cxjA`x1?0=Eg@y30FDomnZggggmV&?CEvM>^)R6yy z`#3_QhimAJ*X-E1W8-(8A%?5Q#^6Nm59qF8AJKNHt-ay&-qTf8uf4W@{qD};@rUo+ 
zYN&c+b@7r#FC%z4PT=@M4_MSRG*sbehx`%uNBEm&{Hgq(j|G9hP%h8|5b=ZHWr(pr z#!T|Z08GW-=7GV%y~~LAbNpX56@S?>n1;OEWos*TpKZSS=nqf2+s;*N*syNhx~dx| zPBnJ6HXft;dw+4laZgUev)c{o@EadT{Ei;D*xK26V%KZkyO(T)Puu$XuUx%)?fUhB z!P7e!cnbce4=pJla`{92IR1uv!To{%TcF`j%jHZ?c5w7#~ae&V&t6OETUn@;q5?2#DtIL{hD zJ$~ck-QBGhTf4e0H=llMU3d5HDjYT*@JYB(?_M;G@cs|>fQ`T(TRLR^di#w0iJnRZ zFgSrONQ^(RVV~K};3KT=^7@6;Ee&ctddn$=C8ipnI3uqeyx6D zV%?6D&6m5n+fKBJ0JHuiu&$$89 z1XH$d*ZJ1Vm%F+;FJ2H`AUEBJ;4@i*S83@RpJ+eP+TC^ea%V^D`8OGV&btdo38@n^8Z1VFBd$6UcV@;LKjHEM_#aLNUA|fH|Al{g*43;JxcRf0P^; zfR93g^Vc;@_`A^7i2(LAi7t@i52N#pzrM2<9sD}l&N2Vz;g^EH3BZpXe_^L5?*E8@ z$RF?J$P{vu&4Ho-Fd%^r7ecdK!z#0k5?@!g#hMoQ>s9l&ZD8oepyL1o_&>^D^~JVM z0$_LB#S4A7h|w@F_>%_VCL0bkbyDxw(cX5x#_@lF`9t?79Dlj<$B4(D3?M(*9w#dP zf&r)k4Em6`QTM8>E7goK$^2glx&O@Z_XgX=;4An)#{trtEV}(4U4I?^8r$0)0O1(Y z%J7?T=bA?5-2cJd$LA}_)pi~}*d&goADtg;{W?3^+uK?%?V zJhSUBe6)d!-~#zRcwxcv{G5gVKYQ=}SjBPf|N9^K{*ceTpWE_Db7DJ*uM@ZA+Pc@l zj%7d~RE{JNT|#tGRHza~fB=DdFQ@?Oy#lHtewq4Q3L3mK!$|9SP4LzStQBTD4saaxWfMxko#{Ee|hcQYzF8Pf4JaC$S$p} zsi}nrSz!*?)Kp)2nl3}k?xwNO_&)+(+}hMNscUK2nz3a|dfMjnEg9Q#Gu~Yu9sL>& zhu$^-Y%Q&U=XSvP)z_i>tEnm8i~g@obbs#rk>S_ikC0FQ>R<>tYUNL0!PeRH1kjm3 zUND~FSgThfD?boN*Bsb74abJjDp3FDJ0Jgsgurd*@Gbf714g!~BnRJ?}sT z2^^GwTp%k~S*(e@%FJ7&g44`toE3x!j=xs^&;d&RZ+izk3`k~x?)@kOkoH)aA>4)c}7@E!>;M-P4|)p8Fp{_z1$s-EY6id>`R&oB}vl*VzG& z+qSl}!VWun!8&k)xc}VbJAc&uIq+xqf6)KlL6P|To*BOD7G5Ah$q+*4y;h}j6mkZE zJ643n|CNF#EWkTtGJmiD0RX`PQv8~&>ksb#sQaV-Z(k8veE@iH_o@xSW4}tS%0kCQu0EEAZ{(+Iv5x8#n-bP<9K6u;O49|+DjWjiJ*G}DF zoofC^{U4qE$=ff6|BF)c$5=opQ~~?Q1n_%)-Yx%cRgpA}biR{0aHCU+Zt1fBGx!^l z{2%2H9H8&~c@CB|HMd^wgvWq~`UeJw#>THrj1BcX75*duKbAZo@HbxfZd_dX(B$0Q z?DUOmVdp?dBL$*ioK(m)aqn) zH9BQ-NtgM%!u_9xzw)jgd>sEW%yp@4SB55U-6 zBbzVA_6u2kL`5|>k2E)r!hwVTgPw63>`14X{C$@|Mi2wMo`3#@7k>D|AN}Y@KmPHH zFTM!h?Rx2@pZw$}KmF;;5MFuZl~-SV{`uEkdfj`i&Y4}?>zqw(A>;39WBuX8UtTBj z*Xhh3Jc0^A7=M%VOMt(Y%N^aw-;J4DcR#p4GYD^#h8MdPLnt|T@bF>y7I8)f5Q&|I za~9m2oBy+c&iv6Cpa>x0Z#*ycJn(l@=C85tWctXcH2%8rH^BGb-{^PhB{TwoKm4x60m3CS2M8X}bb#CgKKJVDuEo;VHaJyE-supS 
z(J`0OVEDgtI2tyZ{!iu)7Qpy@xK3C8;ES&K_&xp_Pk9z*} z3&Sr8_=Edya3qbt^lO(!hu|0I90M9Te=yA>lRRSprXw&15QgBN@*x=cb1N43b2A5S zbi5FyLBZbv@`R|&p9lFv2WatsD^|d>z$;g6+O&iA&8~IyGj3Dt&a%lO;IFH95cr!0 z{_cG+J4DA{fj>t8JG)3=51;k)09$31`A3f)-IkHDZ5!4SRaI39J1af-HE;SsPIl#w z55OOP!nR>Je-n9|IDdl!RTcU9JGO66hd?g}zA?TzKR1^FNYi4XfB7y01tM-Bvm>abtS!)}4FzZUKMD9tJx8USa&f5N!2;RsacO_>FFOvj7Rzy(9_# zFlbm5E8@@BMqzcK*oxk4ymt0NG65;sI>{B6x_S1Ap!V z@F&y*B7X%1J20%kwqw|E>QZOtz}Q$@@%ggi;-b2m%929%J*DETjVXL-@utWhoBi4N z8$tYt|2uYYBV~{Bcksa8O&$gy?N0|5nDIs2YC#7mjli-8q>ErW2LIq?;{PzWs_!9< z6|A-fZd8}2NJae8vfvA;2vI_MPGMnvn+t#a(9hffGXIBJNk~lGl2ydID5s?EQrp$8 zw)T#mOYK+N8fr_j_V3)ad-v|l%)%3g4;?;?tKVXa`(yr(&jEb}5a~1d!}(ujMMVYp zKbr&OThQhJi~#=pwX^~@bD-$ae^0}YT2{8TmhIoRab0Rk^eJXTwDzGnj|^*+=+t+ap zO_-PX7%cGT0H80mx;9B|RFI}jii=I!oSl=!zpOqg z%x=8LzE{+`ea}WuTF#-9wJp>4@Ao%V6`x2EI~U_SPdATtbs>Wm|0k_M;8FI&fD?SB z1OuJq5r+rNkrBAJw<^psmsAO#>9}3J1Y0w$%6o3 zPS&oq&^ZPPsvb4?qYjV*NJd}*Kw}$_0VwjPD%Q7Jv2r^-uI;s-u1;CIB`Z58clVAp zjG*}VSO~er;Ku6ehK4Nb4^KPthYqmx@WI17V{rqTjK7dO3Bvx$`;5OfnBqk5&aH2# ztF6v!EIGGxHM|omZQWXUMbv6|d7Cj~+`02oU0)aDk52#KU$zGB=H`X_>#z8Zu@6)@)yg6FQEciob5$8U5oQ|M>W_{YmgqWD6j9la|r{beg>LUn$`;SjUUp}6jn=RP> z5W)lKWOx@(U*GjQcqps3wxssRVF*XcC+liUOK}H~2996H0i+nhigoE*HpDJn8e@SF zklc3mEL+ChB;ghWc(f&9Pe1Ud9Dt3Rz?{EN9zcNRJt``CWC0NPn?wL*{vI^#CF3Xg z=Z)wUTlb}7G==eJEPqA+$N2kS2!Hs)WNkM7HEq~%yrSYH1#n;H=425DOIMlv88bkb z0+Rb|g??MQe0K+5%fxo1j+yNDT$^n@3_mJ@? 
zdO+Y00i^sDvhnwR%NBvZWtj(e;9)c$e(NoFf1{#c{Jo0)kJ9&tKm7jpzyICjBw;Wu zy{PhdZgy7IzCD>4DdKo2^%>`n-t48fdKrK8Hk=3`BM@%IJuCY^EJZ3-tdCS7TUo9A z!w;i;s{JS~d1L0Def!b-o!q-=Jr26{^?&{AU;ifaSI?Xq|H$tdfEU^^2k7yv%{h?1 zoZ8U+C1o~#VC4_+dptYaivADXjENuT&%h6#Rk?e25EumfN|3ds@V)lsjKOs|+1Wc1 z2!As)Ow(ZD?y8GsyW<4#46HAb-F6-Pd2=xK0>MPd~mhYyaLonY+?O0>xfq2)qdV88blc z0Qr4jwhe900blybvp-qR;W0kNVr~4%d>VOpM9j<6j8D<%9*caAj;k$a2%=aazSy|*+z*^}roetjIj zbj||*bLCzV{2zSl9Q+@rkK*_BZ-4vSudZFo%7U>tef9Qjdv@>Kk+Dvg2%5dfI{C-= zw2LtU(??x|T|i?S5IrF5!@Tsb&-{;phZ!-8Rq!VgXl5~DW?m!7jF?83SX^A{HrNW; zvv>cdZOC42-JkyS=RXVl0f3ahIvanUE_k0J>~-J_5Vj35q~dS5$Hx-{_m<8u{w^;A z{{{0uzqf&&BqXXp~-BJ%e+@He?>V`A#+ zcrJTgLfgzK?i6aDRlqufF=t zZzd)PgR4_Awrt;)u`vn$q-ij^ksSkn#t6*U!15Xd_o0~w1OWd<$)7j|oA8)1YptdQ zV!j!Tv#gQ0xHVh1?bwyMegk2zcKVNh{F9Zxzx~aTzs`PmC2BXkRJ)^n^MTIR zu#p6yFg{`DspiHr5DE)Vcvh#Z*>M>81NS%ELioeUAG$w$0@*hEGu?};&eH8yFX%L@2Sh{<6 z^1H_lmgS-UqeHI^zu)}&*T4R9e4H?toV;mc+Iky-41xcG=l!??6!;VGgP3Xp$0R;YV7v{z_zE6Jn3OtaH-%F~k zJ>rRig?Aggoc$6!s9y_@`Q!H$aB|YpQ_a-^|Cd$Q(17S+7{iI5L7(CMegph{`Q?|t z`qi(-#`f=ru{g=IZZ#0d&>>yNIy>5Y-juIF=pxuygvgV?@BNPhe_jw{UQUfzpvJiP zELw$JV~w{qe;IT>!Hon7cp3kAO9Jr7ZjK)sD9M>dNwZ z*o{N*2!&HHG?MXmb_U6VL-@&|hSsi$v9?>cZ=bgEXAHo`7K{ww0B*vN0hsOrB7f)s zud#Kn_`Z(3sHn0Y*qO%H>*8hs2KqI(k&#nGFEaS9I@d+nBm7zD`|Vda_!93&`Frx@ z$roRY5C-ECJusjU01!Y25-3K28$(vGy-D%0No$f*2z%AP`yD7D^2a8CxB?*n68_j$ za{b!%CV>UKE08}LfWrQs?izS|2e%2IbJ(9Vls`85vb`6Mpt=7d&j8sNj11xsTQEC4 zb#17>y}65Q$Pw3vz)j9f&j62vylWHVW21e|@Uz8NPTjg?@@LEdx&I^lnIkYZ4E-~% zK|}yAT%h~VAJGem`Gc_rMjH}Hba0^dq!WJ+DSz0>0)o~N_#Wa18!!;iZ<+W}A9!kR zV6dR0^YZ0(_G`M6ljN5rZ;atLh6V?Qdwb!R4&YIQhSv6u&Qjp-*6G_;2Z;QU0oXSH zFI=EELI{5neZ#|f@PL{aO!^pqQScLOQBk#51~`B4i~|Ud+b&--`V`LZ3xeNgpTPoX zs0Z`9{#SfQH0R#ugMqmaYu7Q~gB+@cJ z2BS&x+6`+{a8!}_qvY{_>oR%-$H$`J{~m(>Yo_a83x2-M0Lc)1^9bA@E4YZiN;fbv zGCGEX9d_*a1T2L*+B?W^*1@;y+B;gix_f#LpSlVDPvXz`9-IY0z69o*fPx1E{s@4` zpU4`<^F{td_#jTkU)ZjM1#m^hFyjyKL;e_j=FrQWANW2Pdl`LzA1>S;J<8_&h(9ah z7y>uCvzr)r!D3Flt5#V#0W%H4p9vrvg4r5G8iOtTc|1FF4tV9S*UsNfP;q8vx;dSU 
zZvubT^{*HAH&+0@w%}lAC;T9jahnc)7^0__JalQfXE*L_>ql1fjNRE-p!fr#Uh*=$jLeKIG4Ie$X9k<_8N8W9Wq; zV7R=z48lGP-2cH2tJtF!61^BLXZvyJRw*>er6kel@l<^YD}2JA-~rhjQ0)Rm{)pqt zJbN)4uG7{3^+GNSf;VycH$C0V*B^ZM+1u=oZon8Tkeh_RlbeT!2m9dnNFd-nI`Dg) zokQSY;OSBPLF|cZ6L90=2CPlC6x>AqBnOE1_{a*x0w6j-V-H4}0uufJz>kO;@;}V~ zDd?m8UAPLj$Z;325&a* z^Dc(6oVz=4BmSNXuR`$|c)?Om&jj+v3H*`_!SWbvbAcj%7tdg6l~q;nuUGysqrf0p ze@su4{TIRedBe{-`-AldYbr;vp$@G;*ad&7BN|jz_R4l+^w6pqYLJ2 z{E-3JF90uGzQ4&_MTM&!?hKvJ+=mwDueF=v7hD0x70 z6@ng+EJJ?(Cme&xEKuaHoIC{s+bK|^Py7i8B7d+|g8q+;ap-H<_A}r9llETJiw{`; z9_t_II~ff{O-%uC;O4HuBcB+XX7@wL2i^>Z1@kQ{e`Ekw0f=qEHW3GCT?7Mv1VBgs zr@#-r9rD+B>n78>!6sC4EbxMDIdiJ7;Rp;<%XVCUib1l?aKZ_6Ce}Nk>2g%(x0vv#dC{9h?fY~KULG&QpgzcD_8RYLK zjK9(Vtm4mj4nYMV~+P@^n8>EQWX9p z12SKuZ(s+h)|{Mc27ovdp{rnF5h5&uQCfl$a75hS=G>g^aQQy-$422f!0+Mo6!<^4 z(bwDJgYehigZ>YOS)dG^9RqM?26mufwR;;Ui{Sf+A-Z5z=1=JWaTf;v8k{`fg(4US zVL1Gd|4|8_F#ogSN8w}qabTj@)Q@~v@nb6wbKwh>f%mJ(vQfs1Q=$;K(cn+XHTSwu zzO6xKPJfJuAn|~VKeYpd5!m1lB!_jJ5nT8i+$#D%$cHX)4!3ltfIksGZ?iw!;sf|Q zJlod|{DH@tA$LW{KLFm`J+@;A{J};wJPm_%ri=WMdvHnsaS`qUU{L`+45Q+2WW>gw zK_B&hTrr71=KOHoEAKg*yMB)=vLUOjyLoZ=8APMc6a}x=oSf@y5e)v1?L!+gK)4l5 zrhtDI`4g|*AO4U)P8axt4duICeD;TnQrL75cb>`OgE}*^_XqUh{g2+R6I*70 zuUqfmCEL^_&-?EKbnul)ya@vTgK!h*y>*K7XAHo1RRAJ?Dgg1125yAF^H{hNu=zg} zCtvze9RWTU|2Iy@UORsHmDH^e-l!ZtbZ`M zxz}M5NL?Vk5yt^U{s@3x_!FjmX4LBc&;N zE12u)JdyGKT?#8Oi{L`|0N1`G;B0VeYI^qOtvh#4pPZc|^FP9$-~cUiKz9Ja{}BK! z<1cD6lD}(L!w4UOW&Ftek9UCJ|Hj90=0|q@Wba4l`|MK_KFln48ZVB02FvIVV!>+? 
zv_3cY1``!QU|D@4+SD5Ah>|9jyM2A%A!?3=tyyIo^09 zcYo3U&2;2+LY{lz9&w&`^CmuXFau8=(15-V0!;rVVU~#eog(A-9So#&QV&*0H(|&& zpaMX$Q%)Qpe;)e2Fz@5Y!Osgef=IY)BE1?KQN;BJ27Ct4;1BjE#|8S>!WV9Nz}O4B zE;4*1y9gOc5dR5|(ZZh>#ALJB8voi$+l~mbMTh_(u0bGxs)p#|O90vUJ2;4wJ9K@p z{X2H;#`Mh1y$<|gcfxnf4DSbz{!#uAKbb#*in;YiZ@f|W2lKz_sS_laW{4cVYBZR6v*5`crg)Au`%)6}b*o zlZ-lYb0?8NItH^%K=L%gpJo|<1tNSP0^tuIGXZ-~Pv6i;Pv7A1==k-?=`0I>^nC=^ zDSmA9CGPJ-fj@LcKDocy!|=F4Uv z#@d>~_S+xafB%CIKYsM+R=G$EK87_8_iB(ow(j8DeuTcy34BWa1b-uz$m2#g@&xhI zH=~_Dp-z=iFpIVExUaPu=fOZcNtYpi;5XvP^Dvx03x1w#*iU{0Og;Sg!y6?fCGge~%q4&dJ@gCoiu{jo$=}zufx~{=AL8bF+h&S}$L{ zdbOjQdOM(w-|{f&gXJ&sHwVA>g|82<-vj(%c=nl>UrwvU7Xr#du)&{d4FY%K==1oq zGn3cXt%-g2-MC_$l|XpVu!jo{{m}sP2?(>FKKkUtdw1dWmD59}l|F($KHkKiYOQ~t=_Gg)>}{O|+}gum1il~t9Msi~B{*S^7Dnf&t0=~Y#LAbMRg2Xp`s zUs>I7@%qfAp1~Uf%|~`^S-W8k-Y?@npY!3Rz2tJqKZ%W~1BA~WeSH7!t(nP*>4&D3 z^EXcXpM}0rfDZ%h4;r`q8&ad1KX=4J(%u78(gT5x{8ga;!+RPJAI_lYoIm^m9Prn8 zoAK8k8<&`}dvD#9uE9IAvs2ga+{VQM|JuhyLmGE~zKysXMJp?}-b8*#y0OHLL zEE`W@{6$CageUdSCnuxtdzRD3KVNwzy}FvMLGIs&GHc;|m+->NS26ID;Pd&?`s(_M ziW4WcZNrygLg4=IZu5Qvlf(qh-}{dre0cBn+|>1nj}0rVKgjqyK0bv0PxO5pK5{NX zCS;PMAv8MH>PkH&P;R7f6kvM+L317A7DJ*xmL z6F=hpCSmbG`7;14E3^ANscv5nehlBCP1&_;*9rKAxU#n9`s1aHzdK#@)-Dn6Ofw5D zearz)-mem5W&RKGn_%<4_j3e0kGga=_#1wf>T@2(fDsE-lj?gOkiF>HQOc2Z{ekxb zs0SpfU%Et|%|ca>zl@TP#_7IIXJ$=P+m+7l-kQ3)!ostKXI8%;cY8A7?@V;`9uEC2A{r)Gsa`vZk4M912%joc}XG_VCPy>TqtvN=r z7~u}^UZt5z{oe$6JQ4VN^5lJ(x0q%jMyJq1f*1{vaUNictqWcpmyL+vEN-xX)Y4WK zRD=8x0L3*PtseMGJNMx@WCCdzcn)@)!rgrvf7ygT1Tb6C*rd$*+hZi#aHly# za^UZo0^kq)9|ch6Pciz=&I|{ODSyh5_}g)ch6^XwDXt!pm;=21nQ7+yO-x*Z#8ri4 z)?%8482tS@6Br^R4qHMKtWhsU7qi>7(6y?yE_%%Pg9Cbh-Qxcke~%y8-5=)PcIF=N z2b&iS!WOI(fB#%CO87$nUGQV0@600Lk4i*l8tPu74sdq*F*_4JWd4%8@TV>oib4KD zU?c^bjkU&H>I_knyzo~+pACQfSmpk#t!Kwh{5=BzqW^=}(iR!~32R?z@c}z5Pv8Hs z3nDD2Y54TvN3aKY^I?vmg_1w=av0+j@5QptR@XTnp_*Fhsj?vq)0F6fY5OMZ@&ph)i<1ad<1NkGPueA5Vc9owV ze+qR?`RqpG0B?ObZt8;kkwu5e-*vFsQE@Al&i~jMX4xzjcutc)`=wwbjbN)&G}^C4 z$myTa3xDYTAdD!Q%J~KKfc46ZPQi%9 
zSVbJ*2VWS;h@Z?Kd}$A?wwz*@EtoM#6a}xn?)+uLw_%Y#e}-ST@fS(>#v98CfdHU@ zpEUYDW&W({O8i-km5<>mnA^_$0ey@=Yfjfdqcv}!?KSz^O&;x)_g}K< z_oMxCf8O{*1c?)jwU55W-pf<{Ej7*~CvO=0 zlnV~rD7Q#<<`(_jj1qtFG%tO+iZ`ng`TNX2_vZtDjKISPpknKpjJ{8C{3Tt)C4f)4 z%kW4vevS_CvnrNE@T22z3uY~p`IBpHKV| zCzxLb_o>9k{)ENH)13b?DUF`7Nec<4kvPD|RV=~a@A`FtKiK~>3NdpUNoFL}Mxh-> zrAD288%-6=%QK4g&FBk%e$D11P6HU!8ki+{Grz){N-8vACzI{lnSH=AvUTIGaBT-asC%W z_#?l%0P{cF_&f3$epQQW;^U|5`qvvo-^V8gS3tNRS;vqH4?Xc7th|!e6C`^oay%^+gY>a5B;O}#I z8}etPVu3!ye-R2U7OG5{KdP?dHKV#<{QU99_Z|4d`5(Ntm7XV*!*U8KW)y7_uBM6g zwwNHsz2|f)nB)w|IL{^>z#AY={N=zGr9b~XN4!KI;t|Ck{$)B}Gc|$({P;-@#M#pa zQKtV(%p)%ihNlu%%B8@$a)NjiI-_%~*m}n%xf%(9V)Ny=d$EMSdI$eU`19BOB}Ep0 zpsuW^d_;_&J^JK}D$+;R@i+bmyu20sUsQBlqMXr(Vl5erL+=eTPPIf_SO}aVsKc3D zS0gmXJAXNrYw&s)Y5aM;V3+|up!`w%JRY3?-Dm}Q34iVF(ecrV3Es;I{TTs*0u6J` z8Cb&;4E`|YvCsc1Vf6Le{Y9MrgCE^;(U%xMd;Ix0N@(&2!!Gg%KY)JV%!PAhXFQ2< z)NFLOc$LaoZ`SmHd4E)qGk zch!ox`1JG~2nDOq0Rn#zID7bs{JAw(ZHq|$9D4@&OHTUqk-;Ad$N7tj^&Gx5^|MZ)1rvmbWVo%VQa1k;?hWU6e4jD$a@OPROP;abrDL9il| z5EH+>@k(DyZ3F(vD*Rs#@Mq2B!*QhYXA_WULAA~M^huQ`DuI0oKaTujNo!PeLhRBb zmuBa>>rN_kqdD#UA`}~RBSTDmDf2h_)!u{^32AlRLlbQ^9{fdV4;g;P;1^I565_~$ zWXl&{j4O2WmK4_fEm2$@2SuqaHLm&WNmXW3eQovj?a1JfG6=Z|TjE#75ojW!UXVtM-^A^Eh)VD zQwV6vr+|SOZ+tPHIWaQWKQwx+rRDO~j;@~G)}GwNxY%{<1iNa!6pQBsGpC7|qS&Aq z{b}#&b;sLBr><1Q$Hu|Xky}`pot=&RZOzMXXgG7K>WeQ@?HBJgCEWS5^^5?DU0r6p z^u-h4Z)gDcyK!S`=H{Jycl%qiQ<4(Q%vRw{3&a-4C?IC9Ge|~z!OmBjTzKYC-my)w zo}}EGg6c~U#>Xc{2D-aPhO2YOpFCM>)y0S7u;owE)0?1wVIrq z80_sC8XP_~{$$c}u1|*%z@IFjET5&kOXJ&x=V|nY@0_v5GNxw;n!wH#OSVJ1{VO>dBLFNfRFrB7r|iKr1LJ zh8S{?%wEWYHp}K^Q0=*6vX#f zgz#qyC;(zcxgKVU5L>iH1!CrP2H9v9EGT0}EgG#k($m%2-Q73P-CuNM&*sf(X-U=9 zx%?!5J|l)dE}*HI6r-9*N#?;KH7XG^U7*>1GiG$XnPkRXja)zb_)7|4d5yx-($dn6 z1FO+6c#kYusArW!QO^?j`iKm2l6e${Mm2waf?_HFF)yHmNF%8UmV-#*kLYW;KH(75 z6^<;?#b|VHZ>T`QaxKD`hk+Z-GHkKP;t&47ZC7X}gqzW&y@9jfniKzmO}%Wfz>T(| zctV8n$1f`)J_GOWz;iCNa1qB}N{Wkwi_hrNSa3#e)({z!-SnG>(cw}ekUtjzQ``Vt zbjIX)bhnEL-ii&4M*l$kISUAgg^>|Jac*rWjn448>4&X#&iN0;pOXxzkfTP6&PWh* 
zx|EO_L!i+=7Jm)`QpCc=ND*^rZHSEywFWL=B=YyR;-a`hN&!P{RJ4bTN~B)DjT|ut z7sxI~RcsJ)D)pP!#V8vZ*6WYA6+L4j7WPJiqF}RNXrw~IFsf<|+^DJw(!!iSyGDXS z;cgW8vtPWqswBjIYaQ4}l|OlermR7g>$SgS3k&qlEdAs!+8nKCF83Rh#uSWswW zBB3yvjyG_lS%DOb82-3MDuoOH!^^1Tk6-748bc;v%G;rME_tEKd|T#;{9(yS=1~|L z)rf_R1?HDb$)p&yN=fEH;KoSekGKX?6iW;>vLu;@i_+-W-oU#%@SGHTQ!z^{Pyr)~ zKh(&gi+C7rMwj*m+THV^C?x7e7=I#eVIOzlZyqcFy3WeD8lh6>oB)<37n#opbHpz{H=D;XmwZ_Q-a^!^pDd;A#ZiMWpxub#laYf{5fj`{DqMbSa5Fb z(=xg=DG+rdl0Qd{P@(WKB4UoMeR@X6CShy+HAX0Z4l+=M!o^4tb7(CyI?Z<7P#{=4 zdRFf_om#}yRX54O7_s~*^<^Tc1zdDS&Y$v9o~ki;0){t#ik>kjQfU-kMguX`r95S$ zLV4;|iNp#QXi6^`!<;|64CZwzdM&(+0%G>-eA>n!sT=P6*<=tA3v;80nC()YzR}jC zK%@_U{wy-cA}l_mH2Mfb7HPL^lo7MJKk?iVAq*322!{QX}Y!d`)AbkGWD z1+)TM0j+>mKr5gX&mKr5gX&mKr5gX&mKr5gX&mKr5gX&mKr5gX z&mKr5gX&mKr5gX&mKr5gX&mKr5gX&mKr5gX&mKr5gX&m zKr5gX&mKr5gX&mKr5gX&A?y_hXJlVrszaD7@Ve$N%zH4ro3Hr` z@BRz-)`vBJKMQyDb^F7czh$}u!(4$d=Wm%NFuVZ_d;T4I0G07{AmKi zm%zy2PY=T3N??TWr(Ix}0vIv;X%`rl1V#>j%mpsfA)FNWSvcSS{h3v1Z~@jNeY~g@ zh%EkK5MI=3>&96Xh%o-Fm#2>xxdIW#-||H+xNe@LK;-eaToWku@Zvs*ME*2^i<>#I zb&<)Rc7b9aFXniJ@~2(kVuns=TUhh=bKydGbU6<^L4Pi01;U#@#W>6HgU`jRyKWm* zAk6u*LniQNH|kzrYzl-ue^xIi@nf8fCNSie;hIu zeb zp^gP`If^SWn_!PAP7h*IB=EOn$@1k4za>jz+{AG;x@8Qd&D!OmtjnQnYZK&ShfG09 zjtu?``XCbH0$GfkF(`TlP>9^wPs0IPTVOj5;HrGg5&`^)@L}AAG2qXIP9G%;`f1kg z4tfP%3bfYmaO|a&z|0ZI9{}hC8S>|73{1cvVa?xv;T2nlUmSojsMMeq2cs&(f}|e}5?y_M;^Jp%Z1AdovjQ40%yi^K&w z^tIUVP{kdp_L>~D_JlWo=>AAh`ar%AF~zd+wTR>#A&5Cu>SKDaS`^m&z0C1* za)FFDLo5Y~f>s!HziA=(8bMEs7NW90=sVF}e7m{244Q{VIp`G~{vV zbH7UT?PQnQ{4)7go7b}xy4MZAx1jmv$>O#66gRVmB<52ayqpwa{MiR#@%mj|oU2hRW!`Z= zV1T;Bv;C`D%o&Qff2~oU9cld8T%dH>E2JaKfveGS?tBjCgM#n%&Zib%^ZMQz$1@|3 zKgk7Ju6u*%|3#`cteH^$DEN5i$C%Qm> z7Y;9&Am&7%t8v~!lL8hRkI>gQUtJ=VKQ)02F*Sj%#`)?Tq4?nj(Y!+B@+Se+N|GO#%l=xPi^eSf++sONfZtRJ;` zEz9c(5y+p53q*<>84KL#Rb}M~e)MxNPgS->N;GR*;boaEWeeya68Up)R-@Ar4_O>-D~%yIJa{mlRqVaAOk~8?E(Wgx(V!VbMM?BTiu)DY;|jf zb1vU3k;YCA1N-K8$6RA?NOZS(hkMhekFxBf4_XkEf|kMLxhSVP-F)%aHC&}TTgcB 
zT*2gJAt0Qoj9M%8^H^CXB>Gv5;`A`*@0UIj$gW&M%t~P3Muo1vCpq=4oQ~y5De{qI zQXKf+9QB!sA~8vQvOlR|&EFeWS0ZadeZYc0tvn zCa7!jw-o#NVb7lz0NFZ(UAPr7$pr>(w9D#yin?QMsX~D^n-2*>_5h=*N&+lO%oYj! zxw}9@AiHEcVuA|{+$aj_Z(P-{iZrQMh22Y%ls%vWMFjzsW62c${DA}X4j{W~1!ATP z4BTkZ)UQKDzp9d~Wvb3`Nwa46|5#DD|Ap9dLv$fShLTY6wKvVz#Btp1S1nUqNwE{P~ z6l$0hooh`p&h;^3$?sz>=hH0Z;i2=%{#YW5KO2BV3KKwolv%iQ6`gBI^1m%&0siN< z<`7F`aqB4o9!COyf&;X!L;O)CHrD0rz>O}23MNGln@PfXenc_9AE&#WYBt4)xt;EZ zE#ml-U7$q?H^NrNx-k~G(b5E`L!)Eu5vME*mu_!Uu*i-G{v-z|xj;)7d85dgK*$-O z(b5FRLq#96B*!cZWl%INSXD;`f5IY|tc8u0utg^C6ghcFk~1Kqr3uc5ieAPf=L`#F zQ8g@Bbw?(D#vp8w$p?xYyd2FLh|$sn=R;LDFIN7Xufk7Dr0{3;fQ&$k8a`2^gpB9( z-)L!q^P!qQ=kpiLqG(vq>JDrE-jrnE+tA`}v~5*NhgQg2A_6w;6{5{4k?Om zUJRq}N=3UIt{{Nq@a9hj#R4F@K#L;2Qe-Dk$myri(gf#2ML#cs$=^!NpW`jUH!ZHp z3g9nY`13-MOt#=T35XVmBsi@(E$6x%+5vPqdBIHfRt3{5hq?dr2B4*vTtShI7Cr+v zTAJW?XzgmZa~I5JZ4?{_vF`BaPXfi}0cCIG7e!LR)|~FfKs(6N*MSxj)O?mU;b9QV zj|Bci#;gEJoS5qmj+3Y)PZK^(vyjN6J{rx_e2DSRhEs#u%|2A^a0+jc@ab@7K*}!k++&3?K!9D<+Vr$kiyExA>wQHpOR98yiXd zg$@)K#Bk(y=@D0>OTMRVQgFwTg4?o);%`1Igexe)#etXRj2{llK@4Hd->dK1lyLJ! 
zv4I<7SJ`S35&}00g)FM5Gk-yDW|;H$u8qLJS#Zy}%Ei1YU}KPbGib$W{(M>fy!tK% zS@ghJ@RrjVK*$-OQEvO9IdaA?$ir`J*!w>#fPu5%HK!wFm@_crq9IObhzopVOGY?# z{?SO_Px63)v*1Hc2gqp7K#cS6z~GnQ#$V8DiwOQi02vEvq2_H=GsgK-4#KWRbu*uC zs^-s^xgVbM8kzseb3oVb4uVldpi!*f#-~e~x^~T<&-_K(0Suf4Uvt{MoRHH`qnd!H zo9e<}@EaD^xWTp-r_iN<}U>83-|FC9H8L>i9CU`;9pJ= zGFwicjUnjKc^9eq^Uwd81fukTv*2$|bE%=o>7#Mpu!NuhNB%-kS9st5fb|CtbRn$f z&qo&grpWMes+_(VL(!%4EmQO7zyAXa34u!g2Y9eP2^P-B7B-^9DS$+uS{*6; z1?mC?0F|I{5&buE0F^m?GV;2GPron_^7#|>s}7fweX28J_*1*UtaBL7pFa=();Bbf zaEXTI#^&bcg5|LWfq{eKkds0dxN#M~v1bmW?=)~A!_VZ80x0vx z0h|vgPCZG|4l2>(Pa{_h3{(3OSx7+vzlLvZ22X7mLO%Q-H^JDDscC=L-ni9gg9f`UJH03(n; zO5oW`O8x*q8-F)Wl0}Fs3-dENGKR%E8sp+@Mg z#YLq5dk+CTd&v*}CJBJ9EX>pB1Q{vT$w>IK7#+LP+2976lg+s_v{3w%{IyU3+48rU zZgsf;7?J+(z4t`?g!vyCBW(U}a*_b(%EG*j7LHiQvtpfGAT>*nxRar>O4)rpP?ck( zEm?Zt3I=ihN0e=Cg`F+}Amy)_@ps7?z@PZ~u7SmP-s|A%lmYNF-Jg}eHVPo!|GNPJ z_(K3)S(v|32_P?4X=MHn&Jn3GFz_`ZX7)AiRyN>w)rCIG%u(I(fkG=5OgzmHQ+3x$xIUJ)q3rln9_5GFMOpqrGPZqlA%-KYOLP{@_6<*aZ%i zNyzDYR8H(Cjla$1d||AQ0FS{>m~8q9ot#FQmLBE`?fACng6e;9BMXyMNRz_91f<^mn@ zBl~|KloNk4fI@d;w<$n*$0@w63Pu4E1V$>0NJeX|rqPKGWxAQokC?I~13z#4H7od2 z0~i7P$u5xLr*MBFf8YUa{*QP-+C_v0skaoMIN|rsJ1YL(HcJo+HWPVFFk1N&U7&PY z2oBWzf%j{_iov@6Fqgl?|1|@ER{wXYNi_$I1pY)9$nm569X_&kYXXKu_%EKs;^DEA z*z~lt9TzVY?c1NbFJ*Pyj&0l4txHLWk4s3|o11rZ7c4}mzZ3eE<6(ZrIrFC`(9CN3 zRw2!Zy4NaLLmHIr#>Sz* z@kC~nJ0T+@W9vn80(hxO1z=?GXAo%Mcd7Ye-j0;yq@?7O)oUQEPDxINqr`-`l`$)0 zYdb5lkLDdXe735(yu6~kyriW5MEZs`t9G6*?igu3=vldx(qQ(mL5CS}<4@@V&8!B1 zLXsJEt<^M!HYV}+i{-H!^U5nLj^?jUT$i4C;%sGgO&xijvaYJSrn;!Sy)I_O%Gmfs zPf{`n&J|BeN=e>c%o&7-FYO*My!d-w4l(P^unvQX-=$*o|F93x*tBzR%Gz~nkwA*y z@)%ENXZPShUvE!$XGdr6z|iQ#wf3`nHm0oHSk^o?c=^0%p z%UE|?ktf#Rk9W5K2ao;|f}aHPtoTV76BEQZ15ivdYw0Xuv{@z}dn0~QhY^0(* zAz}5#qgVRxOic{-_jY%+U%A}Y*;@z7qUryj6P-Xo{^kQ97VON3Q>`S2WPIWO`{p#_%{gu zis4@%{9g$FjNzXk{2vJ4_{KM$d&9X>$sz#wPNHY?h1T0Q!2hjESaZC4_TdL}*GA9< zcD7&b>8st|-hL=%6^QFeTD>MYfzd~|pBaFoqn#pr5I4CqV#C8jix)r%zedZ&kVBC_ z`UgmcVB~Ln{6w4w2H?cRgxHu9guj8_?*4&(41+`c7tS6_PFS(I>dI(S?de3A%7{S? 
z3gORR0>xqhZTV0>QrF7P5sWW<^XEVR+0R~o{q@&gd-c^iDPG1`Y2|GJ``fEMy9miJ01AZX!ssk-tdv^PS5I71!CeWZxhzt)cS^#DI zRQ!qfnfKp_`y&Kyi-lDgu0ZNKJ9ojA_QKo`J}~rFmYt4Yb+UE@!fubpycI_~N9m{- ze}1|^r`}eiD{JNFnMUC6XUN}cl)slLe~6!jzaRL=U%UWdbZk<_vF?`I+H(ie6EcoZ ze{}!O&FKqg3r?Ome(d;(9N_Q#%D4oNCm}W$Hej6i8|}X=xD5e2GYWmNI6a^RKY>5^ z4i<(32M**MXgZy;gJNiCfP0}dOq_^;KhlKw6j=UxXo!L^JdD)A@!4w_83q1U@LqQN zxC{GaLm4FAXQ^VR>w3ji)#z8RMmAa}FG!-F5!F37rsYSd0Mv(?7lNLT-9Gne|Bj zY}%ZjzGqM7L16E|$&)9~pD!xTOxQ~RH2I@X1dZd@-;OU^wlXmxxl`s3sT24EIp4EE zp#a6?&p!givT?`ECHQ1~;hUoSW5e%DCVx2mlJVDX{!9R4to$K>0)Kh!9Y+&mmoG#7 zO!vq6!$rtwU2JA?QBhIh@#DvH4j#T1^#l{JM&g8Tb7OZQ2=vR#H?JoihQ{>E)M?cQ&LiL%2{vGv*6#RsMeS+B-T9EhpZO`M(@0{_^F}4J%hHU$-NxsPgjE@8_Co zF6`K}X~TvM*uU!02mUY0fj^lO3xEE&K!Y*@m@|OX zY@97exRLNj79Yg@k=3tl^#RLYKK%N{9|O=Ef3N`QIK=Ry<1gdS=!%2j{NN6J+vW0# z%F3$h^-J?d)@Pn6Z>X<0(`vFuV$|b2Z2+C|8y;?IsxGZ=Y;34Jw|_%ZQ&u)M8yEN_ zl;j&1O(UZJgE?S1@W+P^D}T)`uKdZK$_k+G1R5Z5{^WvfW+Oq2@VxQMX~pcRhb2pv z`~m`5y#5b_|HgpV`6h%nF!;FtLhGW7V6Xg@cXYM$VU>)sR{rv9S@+Nt@cR6bkqx^G zD;pY{YR=Ti02(9S)AWEZLghw7ERm6mM@wq}zxwK;Ea0!{k}v$h?2m*|z^}rIKagerM?-Bv*sNk0D9#wucNE;G6T?+Keho=0bRCX z@5SndhQ`MF(h}JP3f+weK9wPOjU#=-BeiF$n;IJ$>g%d6?&JKizb^Q2@PC}YUN8K$ zfU9sCe+e#TkJ|b37eIkOiELJfU^Z*zF;^o$DF~kkV9XZ2|I$_lf4jEzhbONcZiV>H zUt=HPucW3P0lZWpyFh_Ij4m?%T8c^;eswh$xc@Wp^TOW<;K!H0Fw=v8&XexH^brA( zKhevPE)+Uj07U_yj|4IjXgOU@u3a{uQc`_BS@yZ9^yKf(Vo2S_(roaTRY{AK)=*VZxsVHr|w z0BH0zjV!qT!@K2~Rw9ApP%Ayh)hHD+&jaTFB>wRJhs+;L0I37?gTK-m1Hel)r58DXa*v00 z{DoT^I0!~r_rc><#mB}hdFzeWUQ4Yu+jHTFh#&Ar#@{w`@e3fn$)#4Y|YrRB|U9(`j(7sxf$;+kB){t zEewa=HUMlbt%2uu!1>kJq5G?;Dc+0zuT6A+?);JA*WizkPygy*2svuyPhi2;+4BU@ znLl1Ip5a)lS0gJw7$1bcouH37{;K%v0RD&rRP%SNj&%|lfEyWq;QxR>7=sbO=9piM`6qTcm>1%xRnzgb0qmR{qcdO8#$q2RsZ&W`OSevH2h2Z(k{e z5B(qB(Ygd33(JrN;P2Gs%YD@Ve@!jio9%|s)6;YRLkJ&1__+J+HxWD*s>dmSgLR!9 z@VISjODpWKqZh0LCy4vcO}_I--Jb)0cK-+c?;RA0zwephyKVs<34owv2%+;{tI|0N zIfK9*E5hRcO2HEr;2koVKUjbOfZzZre$CeP2ls!}{Zaq7uZXNZ06e&R)dqL3+B>gS z4J-tIm3JY$58)n!4H0rTT)+R{qB7jw$ZHQU0j=lb64ozbGgE2!W3MgD50W0^j^M 
zd;lIEg#YF@{~v^Jed}A_{`R-O^Bv*_!7V-u;rrkJK0Jo<19%qsx#yn$_h(*kES6&L zXePBfSzV1znOxFk{;qKUXW_5Bs|O#)zs&eMVd(^g|4YslVQq)UV91m+1nUpt{~R6Q#}a^5_yluZYTK2e$=kQ)rf*yyH~4F+D@eES zH>Tu|P5;Q|i?RJeRv%GO&CMgt&7*MO;Qyd!oCZ76X(oT)C6E!s0I%ntf8m86{_saX z`q7Vn{Njr*!qdnvy#xx&CUPWKxh8w3{V7+@Hd{9dLH<@Df8D@cQSosR2qNX_#0sSftDBy{_rOn zT=`RXjF3tk;CG4tGaVrHfZs7ZAUZ(q0*MEF^=Cp6im2oC5YG+VsHn*`3H_gnzXB>F z`!}y0zRwB{u#GxEyZ@t4K}-Bytgdfrxzf?o5ByEdiu~cb1<6Ym;E^T_?d@e3%gV|Q z9Xbj)Zr#d&^p!|t7U3mZeULY<S704aYHd8z#pe;qLXHr4`v4d?OM=aUU2 zG&Z{UKQ{dG{ zl6N`;W^~M@G#LKx9FB&KrvH=qg9R{tAFk7tKlq|6K7NnC22@|4v>~&s`cmuFj-G+x z@#|Bwx9@)N0r!6lw+S3>CS z?&_K*{J9OlCV*A+8rlnzR^yg&o$!Gcq>IwGC zGj#P)&!2u__(cJKaNo`9|LE5)jSj&t&N&7&a{geNM<#j308B?<4j>G{@bwHj1S5ZL z#R7kB=D>}P7os#M_&Y$J5S97!Ab;opE&gxC3V0TH<*H4acF?}rwT^zqZHnDlHdzGx zb@dJcf78Ih|*ea{cKYH}&wv3Ez+pw0Xs;WxZS?R&A zdD9Pasw;nd0RH$BwhhDio5sjZv3+|w1bR8}jq%O-xw#BLnidoN%XeWK z%=x44ubwY|P5$t8bE86L^@P2Iy>O0GaVc^naW`SYzM>(CPr`6p#Y= z+$+eRbFqX!XR~WwS0leDA3kv{^UgbT{3ZUc&B_0H%3)gqw_vCPq~q^$m^#zGApd3@ zIdbIazRjDBq%EoGTMljARys0qV|wn^oqP9g0e{FI20H#;Vf?`mZ1sRv010FGjc$0e z014H-Bnkd7Xdv`a|L1)G<^TeJxB`JCL|z)3{|(lZ^bFxfCC(;bKf3Tt)<)x+FcKdf zy#)DsQ{aznzF3zZu>KJFyKpc%IvTU)9?wh#?F9ZDQNhjzBk(5xh$~zQAb7wZvo#2O zEr_f^hzn%=ITlO#b2PJB)eyNF)mc=jak22*Pa6}`W6!{MdntcYFyGsy@_%_<_(l`% z04aaVaB`TKC_#w*lXLK5X;*h&-@rg!U1LLC@s){59fN=H zGVyuYJ*Tj+zRiWde&}cJ0Ga>8tRy5RZpkWQ zU6fN&cd6}aS6h2W&!zUOZ4I?0S^IbH+P!;sW@h1u!-ozZ#?^1J#r<*q*c{MT0FgeE zKb-$nR#a3>-??LRfP4$u9DotPpTCw?z-A5G_sf`NKlu2>1Nt?5CviO&^2!xw=!fOtc{%?C{ z9{||J9H8L;7=hW1_t^J}TDR}n=t;{tbh5T(`u_d?rmEr-DPreheCO%r(XK9J(Bl83 zH3&S)ei(3quasb*lRV<^fH^V(_x4uh=aadhR6&P=KidlU<4+L46>;R%R_oU7fb|Or zY=^UVZf*|i=-;>z@6?@T{C#{S%h+0Bn>M9hWPdSy`C%{$&aVvD&NFp6*v6O` zGIB@do;!IE0L;nSwH7+ZAVJlm27lB6asbH)EC6V112OKRx4I+r^mIu_S4lV zYqw-&=j86*v4#;8A0G=Lw;0@5UER=-W&Po4NB+=Qsd=1bfiP-zPm68V4{878gBw z{P^*w5I%$OIfO61_+kpew9MaU4Vf~3Yqqb$2_45z#a}n>jQ;VDfBfHRlfk3siccRR z0A}r4pXdsp1Ak-*B2ED*fG`K70Dk|c#`vp-NUYS(n1Gm{Mm25L)Q!8c5Wk$AgunRs 
zRlr_O<*mQ}{VRMx0RGyo{B`ugJH>DdrXAh`vp#KOLQKq3MlSM~Q}NL#^$~=>{l}-F zFCWj%%@%Bb2;l*AGQ5kYukU&tJd{;iTT*-EFoYxJlXbPFrN;D+yT6W(14uE173c!2%}`yHM^!Dj(*LWa)< zz#{_p-k+K2?>E*T2Vft1(^85b{JT1H_wFrtGz)>Qo#7?lg%B_ShDitle@QvH11kQM z12E_BA>&W}wp{pkHp?%lK=2i^Mmzy9^Ff0OyEXU>g(-|AYCTmA~!XwKxC+fbB=(;t~_L9X+x&9$tw`W`9XZd+Hxp z_`}|OMEPU*Il4cBAKWv$b7!!nVCBlKO9^=}=*Fy!f#^mYss;WSepB50G5-hrb-o5K z>^eD&{COXM$qW#i27e%gKLl{-YEKW003{a@z)1iQ`TOs8jx`ODdyD<&lfd7-w=_Q4 zljt#ieH_1Z&I12) zn!U(6`N#OQi!lPzM_q(nKw}#aJs|AEy!5Zn{EvZ$88M4h@Fx;zW-(%BUL(njm`0aa zTwLll*b3RRcmJks$X;#TpZ@gcKMVW;fRw*F8-JcIc%LHdb>IvTwhb|)u7Vzjk0%K3 zEuCTfU0w$M3+8{wA9$$#?Zo?8+@Cc1Qujyr18#1&pWht^FWaKtZ)wVoty}lyAIr~z z@mFwvI5lH~FURlZ&CbrOGiOilZ@GgW5Xv+BpW-P%J_V!z78N}N2M7Sp&?U%4 z4QZPbbt5+vTgQfx#sqWN;-q!i>T~Ecnb;6F-Cx0-gKO$sf)D=Q?VNiwaNWoGNr(0h0lkEP+iASd{bd z;rRHY@d-8qym%27!T95C=l1R0yJye-efwaHgaH3+&siT8d+6}7ydyc;S(v!{DE?8> zAU=JtbocJ$caI$`%R~Q1hh7_gzxnmAfBoh7IAJh3dDF(U^)><-0{;ci`*8;-@F(5} zG4F(c2mF_R<|IoDd7;QKTQadjhLo$(iXA}d5`GesX z0?Ys@f4yhG|0SocTaACoJd}6Crbt&e?7j6A9E4wLIC1LqnZmQMmAp40Av+tU(MJx$ zjzSKE?A+X}tP;TQ5dhe?-7@+je{SxN@P}*Jp`p{Kp`@hJl9G$?dm2T9Gi>0650gK# z^q2$CXJ)1v%ByN>>KZP090rgj2Z&QmX$2zl_n@cjtL%#CY&pZxL_ zcpx9YmsDGO#1jPz?>2Zj`z3f#zZM?z$L}lPo;#$<3aYSCw~urj52>@0!RU*>)+ae+7sLNX01);dU`y^`}@gO*&zN4 z(LTHzccrbhxdcWa-~+xeh5>tsgfW8OObh3at$n%s@2{?G$nW9we=eI0pGQDr@_GmWp;#mxc?^lNS-Bd3U7Wbj>eu8Xoq__NUW+plo& zCEkzn_vFcwFTNNd48|pTU_c=NAb<`eP>cdMhOA(Flj38O)+DD8_NssPJ5WO8k4*q^ z1wsHM{IRX%`nBs#0t%ge{A$+doLV8bN@x20kSa| z8N?yBV0L=y+E9Oca~Ii=Bd!mDo1B@R0Uil?*CxitM*EuKXN#|#x^>Is&zJ#n|3~;U zM__Ci`e$5&hyY%=K=+|Pq8CIVWB=wY*-&SIN76Wb5`QOadkp@@#!TNQIX~w8ens&6 z0zB?#!~6DuAB1@|C(z!XMv4nVh^KL$J>m*BdO3R*p5mtw76`G4z+P-s)o;K48rJ^! 
zmq`2{4#36;OxM6bpv0di*(8wj2V)J4HYAYf;6Uw3C;lE%{;-n;1g#_RJ;V<-U?8C1 zGV!B6@YLMEU_nRc<;(5t*K{W*$uCRZ7{hN24Gs+V_QEe6z@rEat?eD1rNG~<)3>b- z5cwkmux|ifxIk}&5dI|khKKXu0W~q0^fCUT;ALe|QMFeFIDhbr0|<}XE?+bH6wdDp zg5PJK!2)Qg3>ah#xR3yhaVS)u?nXDn=2fuNlsF_WHny_rtFK_i50_5?1P91QU=(lS2=qe=4G4Qo?yRFU|j9)kaCrt4n|e!k5B$q;<=2;3hl zxQM?>H!w0XI);NCcI@~BEQLDSJIHU=!ME$$J6gNCdwLI_x(WVI;?MXVoCQF>1m>H7 zf(Hct2!P0+$Qs7;MgBziAWp_#*sg>Ha7D#1;}7sd{uq7c(94`3_&ykW8GV2sF5DhH z%I5uuKP%!G0yny|n;3Y(VotoPR#`a#GY!I@2_PGS*&0L|gDw1dJUep^c;&Cx&fiT? zab{+^Ih~Ae0)N)^uNU_>R{*}Y;9zGb{2&v*O$;j`@OR|7Q}Xx=J}A&X&^9o`{t&>AZ1f5IS=hrvh4%_F>@u4|Z6^9<`9@#b`O(k3+Xgp-CPuMR6aEAb$mW1* z7bx;a9AD1MwE;JeS>W`A@8##n*eB>bJ+JUl$u2fs%G zfjp%TBW>ppI2d?(6n_wV;@Sk4Ty7`n9|FCg5>dc&g{@*|Yn%ZspHeBY0(H zrKP21gW0@`p)BX_4%~>p=fbN{dCYSKfeHW2KMaPlbID0 zA5WGSadBA?@M*=0iqX-LkouTd+;U0a_Qq zz#jq7(f=v%LvM%tHQu^OI>>YCWWly<_$CR4?Ck35v(?qr*n02>u<3%ke%7fUo%;#! zL7&sI9DG&aMpm#1l^hGaU|Y_d>T5Ux!xRwLAZ!k3AA+u+_l*DtAR>xWQ#W9CNm39!2sdFn zW@ZNYy9wj3GytpkGoC|G0m%6K)QLYEeh41~(;=b8Z^&P3=cSXhJz%V3n2padKd_qRDWXFFWJ&-}4bcnpc1K4OS2n3efcIzZfo!M_G4 z4|t&n#z7bkKjeQ@!Y9oCtoTv*7=Ij?C^q#YA6ER>%EMgvLS_FydvD$!#gXRw`VHJa z`p(Q<&V0_yoSy0Kb7r;M-96o$>&9-Y0fEp`LVz**YF2Auv%oB7F+189BoH78#G+k* zki?F`HjBYb&-CD`o!&`OpR4 z!Y$pKz@Lm?yxE^`@d5m88@qfF_ydnO%I=D=e*nB&51k!D;14#c;b|D8bD7K^y9Z|k z5EtPA03O+AFJ=hDahpHNKU&vAaZ?p61kypwVUl6lDT$IA5gS_+1 z79ZT1vAsW_5AT0my0B~cDDZXX!F{$(&GI~W0MNl#Ch;Z+{0HGS(0gaE;Lja^@0$Qb z{!9Sk9}V0Hf#3oJ@ENXsS-{!g&6^`*x9{A$_sQ&xiU$idGGHiJmGYofdw8d1dc2L^lw z(B%*ICkG|^oP{sk@_?}yc3o8XSaumQmLUI=8l%Qv9K=+!+?sIhbahKjfK$8WrG z_XqR8k(;|t-@XOwirernu0hy?2jKCXfG|QB8@Sv(FgP%D?Z&Qi*N{Lffarp~8^O#w ztNYOI^U$Z^XT>12wehci!qaD$w!~ui`~g$ps(X)@J3_%9>y@o_b-4CG{07;`EAP8F z^i@?sm-zZqc7bU!`fHWBPFIuMIw~rLkw88MJDY&)X@uX6IsEOB;R6vEfB2XQ*t@ze z_w;pL?(Xd$ygocqs`1C)M{u3v=ZwD0{XLfWLuZtb`y1N^j~g^!zcw&10^|XEBO`pr z`38=;-VO}X+H>|S{0Lp!*-!D|=@lzJIe*^b0bKwsYY@&K48dc&N*AtJw5X;Pe&4R4 zp|P$0`2IbMOP0e|7)r}`)YUgP*VpZDzx&yv2cJFu;@Pu1CuLgjF|0wjTZ8;L>khH) 
z$LM>>;4|_k`5U=J5qG;$B*>qM8U6f8b()NlS*}gQ6Rp)f4+i34z6|-TxDm&mhY|c~ z_{GX#KlvFj_4JF!H;x@U1`nPd*|>4jrnT$Vm&CRnJa}N|jvespzdJuVT3)eXLuKU! zGk!xb{tEBM_=`9C-Wuy}Idk^>`STqYxwixA#4QhxK3M)Df4AWGzVP+ojT?YJ4Bvk3 z_1BkH;|l@RA=u^5v<87Yar}AwvC-k{3+Cm$`)>YGoRvU$(zHPc4*k&t^9cxJUp@Qs z@x%M@`pS_W&&nUc9~^9J4GY-A{hVRRl)zxl~N|~gn(^FE46iwO~0WsGrOYQKY z0xT~@iV36k$qp>ozu=yU=}aZ0zRsdv|fMAinl7)U;H% zzr;pgL~Ec9!Ul#Pj=8w%KmhUP2bPVeIQ-@0d;m}Ce>!Il`o3=q`o!lOZ!D{=b=Dw{ z9zmJ)@V-lU;pO)+h?C&+yT==A8>^~z?OL@8Uxo=m_`mzDAG(+n6$<_yJb&`|;oVy| zuMd6UT4DXc#^1rg9`t{*?-TH`a}hErlNwE<(OPRD^_)PpkhGxdhxzE z2N3a7_$w}6+i+j_Ki*^cOJV-?>5)qZ%8nLb*RprQzA-4{_Icxd-~ONU7r?FA!5 z>8e#~y&BzoQxIt9PXKtA={_{X?#@9m_%+0}Cm#8{Q&L(nd&Vq?Aa_-1^HWX?DIjsZ_~NH?o`{k z&Wo4o8XETRKd^t_-0!K~K8NwQFDGY%2|xbKy@MZje`7tbz4om)*&SdUdLgjs-|$iX z{ZDx1?BA(1q~z!=x5HN+IL>~A8W_BArrb@IBf;M9B_aAl{q=n525>uJNk}|0)H&PYfT_5#DL!hH@*t}?xwqR}i{o$T|#vcM0fS)t^j!pvp zxI}EGVeU2V0LMn2J7>a&%3pCD{>;TfG00yUjI3ao2i?==7r{U5xR_K?e;wDwgNAF#tR@&3=bAjX23hOeG}4ts#NpO(8?82Mu_hjCAd zU#!#F<~oxxCQVVQo5c7#h>95fU!U{kgkD{|fMY9v03hd&0~oaTf*X8&VaCl=ovVz$ z&P6)>nWRSt_~7oHCpE4f9)GQn7T5n_*=}Ab5>ThhUxb`)Ua2Z6%4?Mp1smxGU!Ahie=SnZaK2adj@qy4EGQ^S<=Q|JNx%G^%Dh-R!|4)C+D-DJd1qrx9N&C8#z63wbc{$7N0 ze+lsC5V#EiG;BSy(RTvJU)DuJ0{B$B43A{vOLTxQYMdm7A0K~DW7hpDe@aeYxpJ}T zG5Rk?sxzcfl_6q#gZb-Y{DpIW3Gv6A;Eoe;pGto0Pg;CT@Jh#}yjEV3UYW8Qn|BG`!Eqwb= z;a%D@em-;Nwyl+wm0j?bUwmq0fMeKyNjrO74srbnE(04-@X_4RV|@Of}erw-*^!HIX*Es z6GCa}cjBF!4!g{sP}hJ7CUH56i@^at{_+(+;`mKr&>?@b+4zfZCXx#gIps7ro>rsU z4z128Mo$K}FgX(UVvxUq0j8yaznAbf;eE!8F z3x7EOgV(n5^Q3Z|oKlJxMVo@FXL5R5PLSjAbJ_}KIU^a9Y|>`D0b=8?9KI<1@@2Vv zi3!BBia-3zv|lqff&={GRXN0+rw^uh{;#N#y)+n}N|>dV0_Um;@=^MX_FA#^)+PlS z8G&;1)p&5RjK4;U|6}}xb$`X##UH4v>Zu+vav{9-Yj|J^tP z@-qI~+j9zXiVEYGlm0Uzf&vW-%o(lWg)V;>i}~k&)iC;oc7Ivt|KLZr0`z6Z7tdb~ zqJ$oQFzh0K@B`?Z_Z>cXVqdH%UoRwnv3d@196hHfA?2SDnlyVB{N@Swf3u4~D||k_ zzaM23VE}%QxgvB^v(Ep)kG|OnWX783FJ6IY0>2oW^fCXJv-Q-CFK#sNDK63`fW_`_ z$r%L>sxpb3P^+;zCou_QfJBz|&YqcHuxwd5ggvv-0Rn#z1bg_2`~@}FY|BditUZJL 
z%_;usnadvvC-|F^7u(iy^GRRxC&l?QbhUU+J=Hl*lQEst+X*yZOkVEnm8~t!O>O)0 zE&RdzN@^_n1SDrCe>QcQwsUH}dR318FDDmXHC~YK@Hb`3%))IgV~;O1d=iWK22CpSygzz6t+i4gRkj_|r2baGb6D`2=KIaBVBU zdQ}sfQs{gMKcD?#$(boRg?ZDqw~XDo*s$B68_((Q7pd5k8y&>Vmy*0ORC^0&7A|eL z*fZ2t7sFqajTtC#;eWj=W+Ue70&v#ttx^$+i zq9{LafpdaiwM2@=b5fbpL(EWYN{nIJJ9ojy?fo~;RTbpr!_ZN&e}7q78S=NXaz|6s zzP&YHe?8xS@o`f!oj+gCNTB%Db&M@vzXJYxt^$8IZrmKbeedD@E2qm!iVMBWx^SKa zV&i0#5cAiWBBQ@x`<2ewzi(^h&c%7L;)=RGwJi_^2Z#EuUcA`XTU#;s>eYL?E(siG zJb$`?ihLGh-AmxFzvpT%yc`yOKz;P~-CGxGH!YZ_FU+qy{}#kDw-sCkPZAP0_vccjGJG*s@YdveYCUxI^%D6c%XLox<$UMl7>E; zOhcNCI%3MDq|zu~i?jbCU6P~%GtQg8XzkX9>m#GLZo})qZ(Z+!*QgEdDG4a>O#l$Pu%&HqAy$t4fKe<|Z(^#29R>iUQZH2Tiw2^r6yu8}N) zuh%lms34|a=eQVCC}7s|r)i`jmeEEPG3`>u$*9#DUEM6^PtizMDA*{g;>$VFMjbKD z1#0b;Tbsh1ihkbuLsTx;D$?9f*=UJp{`kwT*1J0Q;=M?w8hsR{iUm&^C*~<+s8g=h zmos6bT!j?R68=0H3<_ncF>NduH1d!z7(K@u-RM=I#IlAzp^-@;7r@Li8u=5~Ij+XE z30U%WI-W}&uQK0Od18N9YLa)9fkrc8X=B0hOP*v(j9Zl??;yG{oA_g{!4pM`p+;Jg zcQ`4H*7ip4?&vu=_MT!|EUJK6#UE;<=^`IynlYfgQM)@CiqfKPmhmU!mhtfb{*qt; z(6uWQXoO1bIU^dgj=#A<5@nV#4c*PeiB3^T$_30m{^kZ~G&iG+C8_Uj{>K@hP`o;W zvIdD;VlieTe|C+4zYH=03-;Ddlrf-5QPj;!{;V3ILYZSk#H_8IIHR>m*xInhEalH4 z16L?hj2tmbYaOF)wi|>3!`kXuH7Sbpna`glgDS%0Gb*ExG-R=Mt40+up9@s3bRu$U;s#%g z(poQPM)UVa6f$0c*C>zzqyQ;E3XlS%04YEUkOHItDL@L40;B*bKnjooqyQ;E3XlS% z04YEUkOHItDL@L40;B*bKnjooqyQ;E3XlS%04YEUkOHItDL@L40;B*bKnjooqyQ;E z3XlS%04YEUkOHItDL@L40;B*bKnjooqyQ;E3XlS%04YEUkOHItDL@L40;B*bKnjoo zqyQ;E3XlS%04YEUkOHItDL@L40;B*bKnjooqyQ;E3XlS%04YEUkOHItDL@L40;B*b zKnjooqyQ;E3XlS%04YEUkOHItDL@L40;B*bKnjooqyQ;E3XlS%04YEUkOHItDL@L4 z0;B*bKnjooqyQ;E3XlS%04YEUkOHItDL@L40;B*bKnjooqyQ;E3XlS%04YEUkOHIt zDL@L40;B*bKnjooqyQ;E3XlS%04YEUkOHItDL@L40;B*bKnjooqyQ;E3XlS%04YEU zkOHItDL@L40;B*bKnjooqyQ;E3XlS%04YEUkOHItDL@L40;B*bKnjooqyQ;E3XlS% z04YEUkOHItDL@L40;B*bKnjooqyQ;E3XlS%04YEUkOHItDL@L40;B*bKnjooqyQ;E z3XlS%04YEUkOHItDL@L40;B*bKnjooqyQ;E3XlS%04YEUkOHItDL@L40;B*bKnjoo zqyQ;E3XlS%04YEUkOHItDL@L40;B*bKnjooqyQ;E3XlS%04YEUkOHItDL@L40;B*b 
zKnjooqyQ;E3XlS%04YEUkOHItDL@L40;B*bKnjooqyQ;E3XlS%04YEUkOHItDL@L4 z0;B*bKnjooqyQ;E3XlS%04YEUkOHItDL@L40;B*bKnjooqyQ;E3XlS%04YEUkOHIt zDL@L40;B*bKnjooqyQ;E3XlS%04YEUkOHItDL@L40;B*bKnjooqyQ;E3XlS%04YEU zkOHItDL@L40;B*bKnjooqyQ;E3XlS%04YEUkOHItDL@L40;B*bKnjooqyQ;E3XlS% z04YEUkOHItDL@L40;B*bKnjooqyQ;E3XlS%04YEUkOHItDL@L40;B*bKnjooqyQ;E z3XlS%04YEUkOHItDL@L40;B*bKnjooqyQ;E3XlTdFa^FvA>$RuWMqFZjY5Vi@B`v6 z!+S8Zn@{{@cK>C1>oc0aA7;Ax)c(xoZ#s2ghAWWa{7okUGaJB+=Z^r)XaF;xKXQSY z3}80!M+9arf!VSe}-{Z;0K?R zS$Aq1S0Kar^Ft=_=QmQXPc{WIo+y}r)y@`#NZoO0<(la4>ALPYf0W= zpwT<+8yYjvhe>HyBdhpBFBhO#pfMm{noUmEr@59m#asAGJAhfk-&7ug_?r=EjMG8s zGkaYRb&P|{FsZ&NSr`pP{wrH^e#_lU6!)-CaA}j zOesmu4*p#FAd(vZS#FRqC3;3EMD6T}aDaLXe8&;4D#0vSz@H2s#se4w{sQPsP_mSt zrgwMBD~MB|UccoyPASpMS;!v%XoC#-vl^obn1Wos{!T%eA&T%-Z%YVKX2PdtBY$=P zIe#`f6BaGSC;Iz4#T5h>=da$H5?oLeYew_;_jtwn@QVX529p}xVlkRBj7yrKyT_$o zUWuXhfTUD6EVKCo$o%*@{Fw-J`7;?)Ep5W(8M-^+yvd(vs68+#`5NUCGMv9$6Mw=3 z`dy&JpWi6UGOlq`caL8KOtlB5rL<|8&fnW_8yuhupbnYL-{f(D=H5C`(wfA1I0qb6U)cnq#)4N+g`iV5VKX9V0x-rl+paXF7UIt zK!-P1ECYWNHwd$y#->@j+--NNBi0%dHl(UW*}|WNKmnX1mpD{EQ>A67a-5!qJOOH-j4yY}ZDY4q(o|QfPC9)1txpR-=s+9T@+`fjQ-MBA;_434IS^yq*Y8k^ci@Xj)`-nhpo(z zC7&IxYB^^*;^A6jK0Dj^^SMCfvS%ts+CiXEJ2#obWKc-F-ehV?G;iXqv7VWI{3$L_ zyY87mj0GCCf|58)v~z9enX0y(o9=W|Tf(JfA%C(9)WnzxK}+%u1C8EkNybuvgwljm z6d1J(r6G|HvXVc^1*)PTVpaeHjcPg3kD~Upp3iB4o;=>i(S>kwW+#8L3lw+Z@PZj) zHUa~UNej&i7;ik%UthAiWGjDW0v*K61O^(D)j3P?!wsUOLiX|}4Z`A47~%hbG=pd^ zeb4JRZOzMh_Qc_qE^z$>Czc;++4~#MSsTg+Q9%2Rp1C6?F;g6&At~HOk zEoeP0{f6*5s5vs5zjxj-a_1(>0OCAQM$G5s0*$I_5suQ2!bH>%K?pTVrt=pEKxGh~ zI#oeTcY%RMRkiR(69LCK)EQm~El0-l7taMIPN2r0j;AJE$is19VFIcMDT3kjEZ{Fy zF3`uHZ-h4JmvGVq&@m2GCVcs}6SIN8a4t~C%vDA=;IV5=K9WKjurVntE_zYPtCAi3 z#UoHzhv=LM5%qL-EQ~4mjNZ+@oX{Fy+C(HIuS&M?7iJLlfyQO@CG&*jH5MhLkdTz~ zhiwesK5=EyKeL9vcrH*{hZsOZ0S(C>fmUNUL-;PX$GiROe=j9LYNVf?eRO0LZ{*Cos?$Mg1_{oYHgSHa0|Uam$K(LKgBD-~y2%D`U}( zaaA^+5Tc(`d8)B3N}^ZW%s9(zC0nF}Y~;`C0%tPBYy?I(Mp8ar-+C0JG?qwPd4
  • ;j`3g9Hw?1$S$O8Wi5X`7l;~oh#3f!jDEcWjsDX^qy%(s>P#Wp zV>mUS9fq?*C1o~$KYJ%AxQ{_Ygo`3k zWHE-*Gn~JlB}kxiyYTiFoOHewREkP>}1nY6F&6S8aBaPQ`l1c=Vv^BaR7AIA@0tk{TB&0E{l{PM-wpmWlcQA(66Q>Q-S{T*jfJM6f6|d zQYbVxYxpxSgl9PbI^8F8CC8arPLSi#jdCg0xT$CTX}0XrncgWR8uol%yZZ~O-!Gp4h|=tg^)p2gI+dYX5(Ii}H|XHS|H zUx8YM=2j(;ng#rM0Q!9(DgXd7Ap$9abx43((TxFxx+X*C>Phao2}ZR13Ffk&rj>_> z_LIX{vWq_-fJ_Pxz%a^;-?@g))sn)uMJ^zGZas%wniJPgiFlk1{7DW_Ux$QIB`+`F z?C8dTLM4--hrJ}}ybw{$AL8_&Q@y6Rv7pmK*s_j4)dgxw1Q9kXFNm?|My(09L$_o7 z5nGn=OZT@aRb*!ce~JTCT%guP@hGwr2stAfwI)~(4Snn+S+k6n!O*l+Rh=FDNsC~% z7Is&{noRL2vUy0BGm=qjg8k6Y%a~-(FkTi@!%|gucJk*A!kSD8P-O9PJZBW6)&%>Z zshcM&fA*^gX~`D;bPwndsHu?EK7=^nZp`Rzg`BWJ{itfv7>Gz^Uid&J8&90~m1f zxS9N|N~KrMaQ_z%K&_VoK~aqwpV5t46M_!)t`0hP+-!QI;5dbKXFh)lC_WFUdZSPj zDFy2}gN;!;NbBpU#iTT!)+RhmVfop>pUjvJpu&l_{t!6HN{Te;(=$tnBATGlJ1v2j z=xjJOrQMuB)s|C4i=nURvSfvgmgXKtH@Zbk&Is}bEt9Yl(Ui9LC)Ul43^3sSixi@>5SWvZEmfmN zF{_#)9R(Vt^Cn+Zz-IVNX=AgAzx06;fEbSa0X-6E49GXpCIfd)Qfgb4Rs1ExLZ*U} zS{y`acKmQqPGQJs{=WaNPl+I3lo#EYH``Z}ln~u06*8%!cK%Y_%nax6T_1tbSqRQK zJHR|UvN6TInX=-DzeJWl-+vc_DtdGl;^njh2stAf)wWNXBRhU69)9yO-v8+UMrR>z zPAg=XGn#SI5GOSx06w-QBb}OjG#mI+JYaMd63A(RjOUDEOuhqCUqTRnDX%Rn_>%#2 zSTGBfw9(9%;LkV+2O7=IoY++2FOj()p7SP~|EY7p!0t|g(L|tIY-r=el4h=*_)D0- z96x~3Sx7Xe-^)okLmJHlOl)cZf2nU+MvuR5=J)}O&O*XDeO^w^8P1sc4o$v9;xBRk zm*WxW2p^q=5IHr-)SO|A$Vs*GvHhA^tln;b`W{?gFC zOdo&20lF@b$rGK0a5-hjd^r;~rlCiZE|T~Q=l?tcQTpgCgw5$KH4HfuG$sv88Va!T zmxj7B`~C;4KSbaQVKaXTvJjdg*UOo5CT2`WmnK^#@fY6z0fvk~qyGauoIZ&TiaDoK zaO#b=62~Lm#$Q_Mo|0%^PsZ~k|K|c|QHcYXaw7-OQfx9tYmwUGHbEP7y}ATugS*WTj{7ai~Ju>1z`}jtCTKKg7}*q-Dtm%^jfU^r31h$&#kP8%JvHYnx zqv)Xcb4mqAH;xl;dNrZ>GiK0BwH#*$e~Gz3nLH;BaJa%m2gQ`rEjYT-ZQ1xnj5tL| zOsLh_!e3MuC;?~$MTi*QC;&9(OvotemOevaAms}a4OK_L$qCh&HT;=fVCg{&pMLr& z{M*=ciiH*)TAN#2TldV!a|w(NiX|t9EV^;FxUuII5zyTRlb0_{%utXyX=+XxR~9us za;l*L|97gs9{zu-xxT&`Lfm?@h(7~?2LZ9x*3&!;4)Jh}hwCitnuY+fLD;Gl{vk&* zV!76wagBz&W)N(t_Ns#{w$k+UsnaUXo;};Jq{Jr&7P~|q8&Y9(y<0tXQbBXeg((IHKbb$2SLd(4PRXSlU<0y^Kgk6O 
z{NnKE0%+@xB-0Cm7z;ESDpvY0kpn{+ag!1$7w|g=qpv*vp1~p5$6o+|*pRH_&vk(Z z*xXN=|D6Vb@K03pe>Vhxfh;6r49HsmA%F*;F%dMha{`LDnY0oNhlroTA8HH1z+W(c zS;!wJ@IZ@^KLF6j-;LdD5faEkaz-m-Sgh5UpYJmoP>7dggDfaLUYQI(6Mttk|Hrl( z_F0R|O8@tB1n@vh2>yl{fPpL|X|zGcinSRTf11(Sm3BiAXf~U@G_-L1jQpME06NRx zR=(900AN=7zn}kH#!s66u`$Bu|AvPdfPpL|ZPYk&I^HSP<^s7{fh3p=lU2!{;DM|U?2<08;t;pVvR<}|G_yjH7*8;M&!(i#)HZR{LTmPhX4xv1b=n_vywl=FD`%A z5J1F_@dp6Tc408|l50~~a869nsB*;edN~b7k3Yp2#D>XcxSF_0rgEG##4nIP0Fd#A z^KU->!oM~TnA!YIn`Ux<48H*W+PDW)`MW6t=!Yy26v^oCS;?qikOasGz^=YSf2763DzKc5S< z;>Y&?Kqwo3Du7aVW49SVdB+ysHU+l;1p+tKi6}Nk*MN*##=6 zrQyKLA9%m^^BDB?hqwG?{;w4P)cs$}DbpM<8~Br5pumsww{82%m4z6J;Qs|IRsfHk zX53JbSw-|_Lz#|vS z`6zG!lNseMT(M%s$|K$cu;r8q!0h19CD6sMrS(YVnvyxi#dAvL&Vw+wWX>EoDk{vM zl{+i1zO$vtWfuB~fe&r>$k)YjD=I@#Wk zJ9AcEK~b!D4hSw3FD@>bv-+rD5FWntd%(=%@4ITqecuZUFnIX297X>R`vA?SKG<0D z-h%g#K#t#x+*oJl#qO(@FLhn)?C897wWojRTKjb-iY>q1BSxwg}- z=ez1E+uI9f=H}%W7RB)I9px_CzV#4nzu->H)rNEE2JzwFec%O@@N=n0{~E$%+-7#$ zl!uShiol*_R{xQly|e$vHlhZbKiN2U{`|Qm$lvUlxidC&cGh-Zy>jW&<*thtyDoS4 z4h)>CEL~7CYjO4Ifs1X2VtFoqqPrzHMD%A6LJ}yl;wNKFPLSgcfO3)-4>WqGB^i_X zdwX`#vLjUkeN`t53+FES=-iciH;1~fT)KFn{oL8M&P(;c-<;f;dEfwxOXj@?@NH(Z zKivLfJpc@zWBkeeB|6gk?CtHH=XUnwi^Rk4l%GHJfCIQ2(ELGc?!4kyQBhHTZf-?q z=dPZsSGu~cT)qV1N_SsR?d~n}W3v|5oEtoM=5Uec|DY3Xpdf$A0Eh+K8L`#Mawx{{ z{U3k)$NvSx|BK=Of$)E0_`e|h5rjX)@COk7PYB=o*0;X%V|%5FMFQ}hqS%tdXYSqr z|2MmE-p3cmo_==gS|7T=&i3re$Cp&utS+Y{s+j0VB~Lba94f|2H>Ki!o1vFjK8awE?&8M1w(hwmBR;i z&MBO^q~=`zsrpZfU@9XAGboI|umsA*0@~`KdStHEIY%;n?~i}^=g`&lfu>gbgxgEc1$a`R(*d%pVh%P;PaTmYJhv8JPhs1g+J25 zf)ZH%#(0nA^VZ$Qn!1#0AANW7S5B#6!{#^g( z;FqZXE06%3K06j$(TL$}B`DR?(+$DV5I;_~cf<;dU~i#Z#!uY-MF-g5->_ov_<{1x zn|XJA`l$z<6l(*O?c?dwXB8FB=~Vec>LmU^&Y%0BFo5Fm7mh%=Y}_#m2niX#_s6pPbB5o) z@%Y2xmyN%n`SSqG)%imJCH^YgJ3cDPn=u{n^W2}{4;LZ*4SDO29y)Yr|HmJHT)t(? 
z`VAYFFJHc75bLK9`{Q-bY)27cV+}qi?gKLNGZwhmNjK3@B{`$c*%g2NP6q&zJgRoRM z6dOcfLdNg?7kBh^+@Cx9Q>K4n_)XmZc^=T?uMPcQBXfWff936+m9wW$FGKt|faNoD zXU&?;zTBFh4}bL~B_-u2o!;VS!M~?!4s6?9!TtmHv71MCRq!6KShsZD(!ZKO4CnRq zz>3@-&|SkkqV7ap{n3M450;g^|Nf#yrHz++hwpYBEn7RkXxiKFAb7b>koZFnSXkBB zxyI2B`D5;n@i!#+GxGfU%&5UQ)AV> zGah>^#y!qN18B#uxA)Yk+T*p&%}vz@KU{d~RB0JD8yEO29NXbuG>^#s59WX;fj=>H z==`;w4&+buR62l(6X=2@_)`n^ncW0ABJvi@;1xShJxrZC^`{Wn;`RT7@INu&b^Zjx zk1-^;|HA9yi{QBYo$R>KE{0V$%If^>sCT-DuYlj*(buQn&T5pNkh$q4TT*U?6|a222%n*}{!SYMYvxn;VZGQ(d6c z-H70X48ilP^!4`D@2fr4+|<Vf_@QDECE*JYRZ71Mw*SG$NRkp*eki_|GzRdVLR@aCCwp6Ju zP~s1xlZ?O9hmJe=HPjsz{?EfN4u5@spIH9FOb-G&PrmKpz<(zN ze_A8l!=sf|jokY+)Yl!UaQt7w{Gt0(mcJ5zS3Lf70Oij1I5F{;7=R|gpbm{2bFId_ zTFw+63jY^l`_B@8YsD-EKf(Vw4v=rM*yewH{B`&{Szqq}2+NRK7eKeKd1T!EAKoor z&#ty{*Ul=nHD}QI!PKv@p}xMZ_QV#>pM;;q|B2xj@k@k1jRlni*JuPV5dw`2VI>MU zmRk8afkvg6Bo7?_r|^gOKUDr;0>~X;2>y=Oxd67*9X}!fRC_$V<1gIWz(H_|z7L*1 zyC5%j>RUg4^Ue9SUV9F2m+=Gs*!bJ#Eq(#SdgJ=n;g1c!?($a~e`Wutv7mRh0L2Vo zd;%RL{KYB&`phy(fkyenB;yAEw};(-Q~0ZFzv#>W6XFjS9ED}aYwPOj;XziI1D-n7 zSp5lKhIrl0W9jjK47~jJ=Fgk|9uF&5EMLBC>5^s3SFEa7@$QVA9N5#su=Q;hz%$3| z;JF=eevJ+2{_5(EZbbjrCcD32{@C#A^2f;Me|0c~e5CUyvEb|MBmuPZCkn3ut~6o{M9x%0JiRLs)MH(Ve}OQ)*oZ*-?Q=gQ{11j{bHH_IbAP0&_IF!C6Wtt z$~6}2VQ(^v7MbAma=K@wAcEts&L28J#s97DfQJFu3^15KXa2|d+jN}6hyD-mXthAc z!ZKtW_}hE-?B!a3zpftc&0d7i)z$UrF@(<{d~xyZpCEWns2$`0b~ki(z~i=OPM?7t zcJzV`-~@61`BdWkarbB8&+q@B|Ko$A@b{q?HU)((EEpL==;GIEbdFNa6maJhq4~e# z;0gEO9Ws?aSbzY4-~c&(t@`>y_&@Icxc}R9h^;;VJh*$+26wO8JI~i#9S{Dh??ZS1 z;UR?2AUuK~_|pKa83X{~Z{o_;zWzSAZus8D>0jwYi#7k zwSm5#s{r8nw$_%$Jxg@{1{!u6?!R&VxcgI=zkF%o=Il6H)6m@7)_&n~Pye;y z(OdT(JQ(fXw%mCNYc)Iu!={{RSbs48XLW#IC;-;r6U+_s+s^e2-@S8dCTl?X_;{Tv$Y=a%^G>^ak3>SX?`(pvU9=}$5 z=D_yavzaYy{QbmTf7tk|>_q-L?fk(bs1T&_cg~Juz~AY!9T$4sbfA(m!8{Q}l zFLpZ$;nkRUe*edxf>!uDQrmdy^tq0%E5P5)F_}Mnw;+4T0zA@$ zp}qaYkrO9QY~A`1;J9+717xB^BC`lD+v>xezK(nP1W+4*B>*{pLzVNdDExK6_}g3$ z{55@w&pz*NVxhS?!2dbJuULLK{DJq2&ma3nzpb;-2n4OgE$&Cd;U-54;H}qeYnm*{@{zQ`1n2k8c<_n@xt{dYFo~n@94VPJ9z!( 
z*xmb|eJ1>$$Da#eJA@7%m>)S=RdwuGWhK7jf&s}~q(&rkG2(`2R95cTv7;Ko#fuj% zj4=L!24D}s8vYH+p~{6fWdGLz>yM`TT}waO>^zuX5c^>1Qml>toD4qs%pXDB<^1vt zUwt%+r(YcWrT~9%-%a;_{A-tPhu|0ItO1P?`3ou*_zN;eH(D>mXfW`%nLQz@@)tw? z&;e@xZ{|#R7I@a|#f#VQzFE6~f95aF`{2azA>i-ArEcJF1o(US*;o%Be_0+&4T+6)k4j+G+|7)}Pzt~CG zmcT6-?g07tI|HW9yf4_lE4FXn{?Vo-OWdU8^Or5(x^mU=zM&fXA`g=y?R3P&gmWO zPb-0rF(QA~VmW_Svs0@XpFpEIizzif4}SY;QQ@+@eem60&fiU#@2xfYzsd{vMib!x zIe*h}a#&QPK#2WQzU9dA3l}e6zIwHxp}DEy=((X`V z#M0s?GJnqO&&OXM;>Y~o&Mk{Ldz`;5n>Q|wIRNthw6Ne9BlBk|R`6$WfljR^Y~tiJ zXECHAeoIT?3#kZE;j;4m`y1N=_`3rAEF7TY|1c{HMMcX?4>?^_eypLT?fiwd_KvQW z_VaB`^~Xv-{9x_6b?erz-@j|y)@|Ex^?Ouve}X?}4wxu_NT0_a&i|^bs;Wlr-Sas> zu?6i7zzE=v-dwuJnK^Lu_`jdPk6Ko@ojLL0sznRtm*o6(=9W|ZY3-gzXUgUX{-*qx z`9HS%;<`U^_ZK0c=2TQ*meeK4Q(i{Q`5Lh}3>i*+e7^O;DK z%sCCD8I$t!ikFm?mx?cIF$kA@0IxYP`oGnkmjS>FjsukZpF?0-^F!x*MQ2uTSQJ}Y zzIAu~>5)f|uAHhlx~oL)T#SG4No)Uw3&^16|CBWdJj%WeaDuOtVBnKH=J0?yHUeL| zRI_6Tn+qxxbQt*at$@Gy5&@i<&t7e{V8I$#zp&u!a9+A~>lW5=WzizMQ+L4O?~8M# z?$(Mk{vO=8c=7xr&R>jJez=T+^E-iS=h238Y-4UN8@Z=c9NfJH04y(E`yONEYjioY3S;m}3x%%yRS4(}&)=>A7#e`Q~Yd zKkESeIqX3re_wVTZoZ14`{W)W0|M~ZuJhM%3EnA&TQKeL9+>x+E-K8;o#v2>{FPUI-p_pm zEcpBK2?S`~vqOiT zX#fI$!w8_t-;+}t+4#x+`Ekz7m7A7fw8Y`hUH;1c&*ATXLHO-&hwIDmuW8}JkE^P7 za{xE3UouCALF=m2@MmyiGGab+bfeaM!(o2W;tiV^ew)iaSTmQiSJm|U-~R`Mf3onm zqw5NHfbIAl^?B>oy|3`MzV%BVf8bGE{+>KRe}nxF&!FJ505~DTX9MsVfd`|bSFX70 zkIk?Ty?7eO5B{CIe%-p|cr*rqubtr~-}@n80t}N7B>sxaE3TUOGY-Ilzo!m=vIhkI z5J1k~erNoBbb7hO-}Loc*5F}|7=9bIyT4PW!1#L}{U4|Ax4-?(Z+`RZ;bF$$(q)IL zKdvY%E#0(X{fZKKJXHEDRs6+4O!IOoe_@PD;|&M-a~7@Nu<1kee!Dj=ejf+j#>RjD z_kaJr%3q`7+{DL@uB-4uJI4XWV(*o2UN(c<(ESy!Z~jc@5Ab_FHg*R6AG#S2Kf#}i zA3m#c|9&?x2s_ZnkhN*>z4jRngA2;b%GMMz{ziEi;X&i?Hu%3cb6|nfE%-AI!14;@ zO93GAhYs-hSI&H(>Q^_e zGX|F}`}l*>4>xXDzjm2SpxkYd`P->6qyrdej&79O?vLlcw{$JXZ};jA$X-Lkzy905 z{Tt^G9U$|6F#pr}TYa$}2VelOeS3a>QPHZ8wy!LJSE91nUvcq<#wQwo*qhHde-3_D z_s8&qduI3Ub)VidYu3tXj64{0b7$p3bP*2K5`PYUH--0e{2%bw`6j%uYj-d57k>a| zGeB$_{DCn35Wt@EU0pB&96N#l4g-M5-~W7P=cyicZ}G~f#o+ILKCNI*tSILG`Z#{+ 
z+zH~(Rd`A8fAFny@PC3nj^D5T`Jey!m)EY9mcm%PZ0_n+8`gcWX2k+&BIxy8ME;(% zF7+{_5*TQXZuDC2i{!bnss=|3m)3LtRUGV4lKIZ+v{W1Q2`OAO$r=cOn;M|fG%U7>jv8WjSq9LdIVG$iMu{?{xlt|9dNcomb$Ms2Aa-+8ymn z7A{?~1m5$qhy|dqpzwpetbu`KKa>m@IXF(FR8wM zdu$3UyxZXA>@D!1emy+qkKb3o$;s)iTA>#BztV=LCPWXz08ac|`dsh#Pr%kQz3+4iW4mzv{9TIkiqJfy^MF`&b6IsJq9BX@Bv>K z!+<@+!T`f>^t9m5S^En2Cl()k0qhLH!u=tB46N>RC(5>+?`UhRtv=ZZyKx8}qwppS zjcoiK8%6Tq5Pou~>CABR=$*#|Fh_85N}egFJR9DJGgAF}4Z|@K`>GKx$=P2jT8GW6-7lEL+|Dw(SoiP|0#36Ri*vQRmJy+UW zFR%?c=K2u0;n9&%;E|DcZD??y|8gt*Z1K6hckX!nxidiF{}_MX2#gIw|BP!88NkDb z`9AdL{DL@S&cBsY7dE)yu{1%S!r$)tE|3F|G@D38a(cc-c6gp55l}! z5a{nuHzj}}#M3>`9|;5-U7$aUNC|0#1wvjCu$Na;^UuHf6|DX7FOm5_9Dv;sn6H6> zK!v~99FIW3AB;6H+OWW)-B;^(+xUCR`NK|@5Y$KDhln3+z(7F1<>AMD;NDwTyZ3Z- zo;};{{F?6YF#Bc68w2=_q3)}_moC9C9l)aqO=sFWI*$W?cRsnRJ3!=*4Zw*3c=#~C z5yJRW=fdm_CDzL5L+)u+o%#Brh+oy5^U^gcUzrJ_isSpfdtH0OA_hae+))$B)5i zGUvU8@0H-FqVUJb6aO}>=#m_t69xbG6#QQ+U;k?OB{lh zW)0&zWd3CMAkM~L*sg>Ha8*^W!yn*>{5kY_L$Blf!1uw}>(B@I;ll0Nvog_-__HF` z5ZxHeZc+4t<(zm|t-5*$W*Uq?4?t%KcGe)u7_9LZi+xbOIWB*f{QTVp6-P%$T9>i$ zP2x{q|Hg5DxAuUq-Ls{$6Mm3M+$M&V5coUx+$nqf1s@c+a<#3yw{KwS-aEG?|ECPV z8h>mGhz;`ti2NadpZn;O_|w?KL*#GU(PK;DX^s^u_?r`uKIG4He$XAx%nueG?$8TE zK<~+uCm^t|_6q+8JFIe#Y9z)nYG;Qy45}2G6i||EjKykx0V{mQpX33ZIiT4E%KS0M zxBkG9GPuq_|91&;X$aoN>EFmmt5|=C-RF3-KfVFuu0U=x{&p|v?d`q{zefUrJ*5vL zZD$WS7Z6Qaopkpc{xrizZpV13ea>j;`KSAIx*buCa!9Eu#^LJz)mR4O;1OLY54>L*( zvh~Nv2-|;=yk9*0^w}S*KVai~ch2?eLjwa&`7rCl87Th9E^ZOC#Rx1*?%cX_`)He*NhGuHCo+|3X+de^F&~S65x#jvaeoXJEsI4eLvD3&aCJ zr4aCG#j2|Q{=UAKxZZ&(an~OHaC=U=Oz^VYLp2iJi2MZ*$O=~L@{U3n%ZC_&iU;&o zA?N|wGUPYE!!el60%iVAvZr8RI|XV?h(8HI`vbECtbna2vK`Mn{pq z+c5qr1F(rd_c;U;fDV6O+4%F}hwwo_{zzexB#!M6jap z9~F>fjfsJsqFT$#ueku?OoXq3rA3Ie3`S`gO283we@iOLSHtBe%%3v~-va!ej@$(Q z7c~0DTYND7u5_XQgJBjZLucmzI64YD(6HLQi<3q0eaw)vXH4bK=m2pS2LBpt9`NuX z7zbfEd@TN`q)(Xt>G*N@9R36_Q=F+E`_S=oRvzBM7b*kqS5@kxOcZBCA-d7!&&aj# zx=_BaL0--b(|Xl`0HLN`#;EsF7Os^>D~nXWc=dI{(OrM z;BVX5<%_@{c)U?|SA_io;N5!Y>=*)nuu%<9!yui@Wd7JaI3s|#2oC`8&>nmk#>8J= 
zpN~J6KJNd7VhVqb^TTzoy65ce`aQ2IgRH*p7RBLb3XKUOE3iz)wfAY15;SUK!{(za*WfFg|p?v>>nEm0R6gC~?ooBZA;LeQg{Q-S=|Krky zUCT#-uR9O!vu$dY=fMMj4!$ypH$mV(2)BXWJ9`Cx?f`t>1R(Nf0ucXb;6?~MkA*7% zpZ`N~b|^opBf%Ho{|5Qk>&FitvtbL5D)H!o4w>8sJ{F=IW&V7{sz$v0=~?O40EsZR{SDt>JKCptj#e}jWK^JBYys`q2`z4*$54>PNsCW>=DQ)LW; zSn8Stt*@xKfdq0F$nS%QJ8_J_ko@)b4XuqC`0M5TJ!t{{5I;89!Rl`(@`pFW5Fy5& z^~M{!`-}c>w4=Ne@;n6hi1WPLxAB>SQF!Wr2lRaqVEQ)eyqQb|r%aE}I`JdDnHU8otrkdr}gljKvJ0j>TLL>lj4FUmFbw!t70_cpt-Q763 zL)Qn}zXR89jEvsiXyFgL6TV~SdOvvdkMoE4sr)fiysbZel{C4^kwevvBV!bqlDbw*fx0Fp!xc>fq@Yq57-+S;XBSZaLn~~V35|HvuEK) z=-SSHiVsh(Sn20%%!-aQ`BH-kbg5$2ZvEt9rl3kB5d1tn`n* z+GAJB=!@g+nT<`2%`JnRKeqSGmK_{FJOKmaZ~m_8n(FHL^ErKQeoOqo@awNHtEmA5 z(d)7~panpDWp&e$>!U4Q-8ZhbZeP3ny@m7eX*}`sIgeX5vdd-v6g8s`5MDg{;?ey( zqr*cZPd%&PZ;<&vjlO<>4+HNH9{2s5R->7}V8qhW-px0a2Li49RiXdGdm2xlj-u#- zKl}n5@Yj6T;jcX}zo=y0#)fkjy6=sR-MoJ9E-n_t*FJ`tmJ0Wm*yxLB4b(x{!0^K{ z7grq!Am04Ivhfs$znq*8;7R>Y=gdLh_iaI+_b z{!jLO0zP&wLMCNWqiHl+YYn8H6R0*)I*Qm{K+s!<*!*8F-WTTpB7O>g#l>qI?hF6N zdn|t`%)dT8a_K|8{8tI5(9!Q)>ceR+ybT{7xjBFI@Ur}4eE99V-y!+G zeM|@ZM>xRY&&$2u5d0y2gWkwn`PYB_*ClJ$uH6N{5O<=jwejQQ z4uAJ9@LRh~ynU@sIDOf1fWwb!Bv~E*hxiRS^S%e=l3hdtIvf0rILq{zgfTK=>1tAA z*AuoEJ=V`T($^n&KY)8crg}>Ydo~MILH<@8`+Sh^+jOq4JJois^WvqthKBw75A5GJ z_j_u$&td%S%gNbb!jC_5@8HMX-&oIUuYK!Hb_W=TUI=XZH++Rf3IFWfO(5&mSTJgEhWg&G#QfsTW($I z;<#)?1jlfL1*BHCvY;B|j{zvJ@p$#X-_nDR2O$%n+j+Qd%XWNVAr^bf48k06fBls? 
zInxiJ`-?mJj*S9;Ec`7C3o+og!Of+oD%Il;uH=;Pf2T{OuLQD-WdBq<4)9*Tm&W*m zN5$9+qF>cs!6#%b`~@+_zT)ncs>Y%l)pEieMXxm=C-T?0Iku%^FYfO9_$y=lA%JCu z#ul${ygR_M^>%tQBnyA9?E(J4|8W3S{tTn<*l2I6mVcyFbjoYTrZP4>m8Fq%Bw*e}A~ApYewP2H@w6zN3?XKQ0lQX_$MBJHWA#=gyh% zq4HN8hd*<%Pz>^y1|uujYpfm%s54DXio;(Oe>VL2bCdhiTkjm(_LxM% z4x%DP|JUbyIiXhuXB+OeklO44}Zk`AM=1Zev13UEtU!4@29S!UPNeX=EGXWS- zr^^tW<5la%0&{wK-Sg#$$SbG0abXN!Yd}u+0}Ran+4%zi-9{u3vHQQ*Ui-GgUrufZ z^2bJBW$(q=Rh~Hh4C;FF`Hjp0-g!Le>4N;RMTf`Vb+GzT2`ZM)|C}?tvYlA;oF0Gv zOTk9E!B?kj^k0jVGn_FFf9U=o^ck9CO$y=}kM&AkMPghsrPd8RSho@lf+W7+ZcwsQ*JPU4O=0SAe?A0X}$HE;8u;Pv$Q~ zv09BuX>F_P8A4`a=ZKG2&SH^o-yDA3hm$jvfBc>oNW+HU9@?csZ2<`9YeE zCdAwZg*PUDF^oU?@b+4zfZCXx#gIps7ro>rsU4z128Mo$K}FgX(U zVvxUq0j8yaznAbf;eE!8F3x7EOgV(n5^Q3Z| zoKlJxMVo@FXL5R5PLSjAbJ_}KIU^a9Y|>`D0b=8?9KI<1@@2Vvi3!BBia-3zv|lqf zf&={GRXN0+rw^uh{;#N#y)+n}N|>dV0_Um;@=^MX_FA#^)+PlS8G&;1)p&5RjK4;U z|6}}xb$`X##UH4v>Zu+vav{9-Yj|J^tP@-qI~+j9zXiVEYG zlm0Uzf&vW-%o(lWg)V;>i}~k&)iC;oc7Ivt|KLZr0`z6Z7tdb~qJ$oQFzh0K@B`?Z z_Z>cXVqdH%UoRwnv3d@196hHfA?2SDnlyVB{N@Swf3u4~D||k_zaM23VE}%QxgvB^ zv(Ep)kG|OnWX783FJ6IY0>2oW^fCXJv-Q-CFK#sNDK63`fW_`_$r%L>sxpb3P^+;z zCou_QfJBz|&YqcHuxwd5ggvv-0Rn#z1bg_2`~@}FY|BditUZJL%_;usnadvvC-|F^ z7u(iy^GRRxC&l?QbhUU+J=Hl*lQEst+X*yZOkVEnm8~t!O>O)0E&RdzN@^_n1SDrC ze>QcQwsUH}dR318FDDmXHC~YK@Hb`3%))IgV~;O1d=iWK22CpSygzz6t+i4gRkj_|r2baGb6D`2=KIaBVBUdQ}sfQs{gMKcD?# z$(boRg?ZDqw~XDo*s$B68_((Q7pd5k8y&>Vmy*0ORC^0&7A|eL*fZ2t7sFqaj5bM*GThxf0XE-NW6^fK$hc@~I`lTkvh{#OKo}ex>brXJVqb4<#o((~@9DZEaGde{=>jV9S&Vfr zfxrHqtG)1YSoi_;(c5=#U8vo(V4jj!6;Cl)iWTRiDyM>&UT4aT`d^c{3jV2H3ocFy z^GfEG9zNW|e%ESvc&PhQS5J5E-oaPH+PMiGW&wYyfU102uQ|rrSFes<>+kJG{_s6D zH^+ug?_a-U$$TZVqMu@cgYhtGh^cj^&KUAy3TMrEf5XnI0eH6_K6`hw|MI1)S9|xq zdNrtMlHfr$@TUl4-&i?C+zv1D5+TH6G`LaqH`fM@{X)@}FDVLH; zqkJvS{)==;k_yZ?Z~mgSTN|#AjNZBpuLHkzy$4>ShWs_jCng%t4*nzoCE-NW(?XuWBkhK@yK;34fk|5+Ghw=;5VEu}Nz*Am&|Xij7{uk}_UY(`erIt_x=_ zUc7wu;*~?&H!NAQbZK#IZG|`~%xBi{Cj|5~Q({~bCCNLOq(&oRo(uHa@5S7%_maF= 
zpi$`OAAeZ^PF}aLrAwDCa}RW*Ylt72I$qBzr=y;!>h&=h)FkgH1C3_>LW1Hc0I@ir zq)8*C3EDw6@yGPlu1`7yb)_RMIvI`j_NEIIELRi8JB)7h%J9Xqi$C}S_g$fvkZHz% z_C{wRFem;6dwTg|(T%>ML_(JFCoU^1KBISc^qk`@T-NbdQW7BHF=%+lI48!fd)7cd+7d)shPLLsAo z={6eL!$u`mZ)l@H%;Ex_i!l|Of}BSElDZgULo<5)@wTC7Jj62I=u(tw7F>-?NVrB* zt*1vzR|cBVD0jqpXTA=R_NI#55PEwO4L!3Uey@ zdFu~Rxm>GAb3bLHC7$`?FS}ar>fDR>BAIIRQIskcJZYSmr;wpexmI7!gpG0)Qanre z^JFk6l&QwFv0%{1L&9M69B*`^SAi1C8vcYvCWTx8Gs|e?Ph97?8q+3V$=m68E_uAl zd|Ty-{b8v|-cbe`&4{Iq1;;OWk|{B6Rg%1e=*Dc~kGTd<6fK4tX-VGUq%>OF8@;=u z=j7OXifOT^0%jF|sF9|Ne3)s*fc8f1?qn!Ri@I6HpNw0^#{>9Ff(1a=u1ug2Dz)c~ zXv{kP<_1ZWS;jPUHxnm1MI|X0F#GtM8=%qLj53y_zPtG!XMjTS>I}*nByNetn2r3| zH3I%J$OtUhTRTz4fF?yzH!JzGYJ>`9ju8>Fwszu-)+S+V!y2=cKZ^`pp-eGy#4N3K zjJDZs5DE-yt7na$)7BzKU2~Hx#;oPfsIMb}S-?qW6#N-4WkQXq6EL&+GxUs0kx8S> zGP;PFE@gs^2IaY1WfB`)peMaz%y9nvGI-Z%=(Wr;N{IQdbE1tYQa982^T{A1mf=Pj zG2f+3ywTUBDAH#>f0_)c2$RpKj6Twk#oDbJRm6NQP`T2H$f=1Nd@)LEy_^}%-yc!P zcm-agKnjooqyQ;E3XlS%04YEUkOHItDL@L40;B*bKnjooqyQ;E3XlS%04YEUkOHIt zDL@L40;B*bKnjooqyQ;E3XlS%04YEUkOHItDL@L40;B*bKnjooqyQ;E3XlS%04YEU zkOHItDL@L40;B*bKnjooqyQ;E3XlS%04YEUkOHItDL@L40;B*bKnjooqyQ;E3XlS% z04YEUkOHItDL@L40;B*bKnjooqyQ;E3XlS%04YEUkOHItDL@L40;B*bKnjooqyQ;E z3XlS%04YEUkOHItDL@L40;B*bKnjooqyQ;E3XlS%04YEUkOHItDL@L40;B*bKnjoo zqyQ;E3XlS%04YEUkOHItDL@L40;B*bKnjooqyQ;E3XlS%04YEUkOHItDL@L40;B*b zKnjooqyQ;E3XlS%04YEUkOHItDL@L40;B*bKnjooqyQ;E3XlS%04YEUkOHItDL@L4 z0;B*bKnjooqyQ;E3XlS%04YEUkOHItDL@L40;B*bKnjooqyQ;E3XlS%04YEUkOHIt zDL@L40;B*bKnjooqyQ;E3XlS%04YEUkOHItDL@L40;B*bKnjooqyQ;E3XlS%04YEU zkOHItDL@L40;B*bKnjooqyQ;E3XlS%04YEUkOHItDL@L40;B*bKnjooqyQ;E3XlS% z04YEUkOHItDL@L40;B*bKnjooqyQ;E3XlS%04YEUkOHItDL@L40;B*bKnjooqyQ;E z3XlS%04YEUkOHItDL@L40;B*bKnjooqyQ;E3XlS%04YEUkOHItDL@L40;B*bKnjoo zqyQ;E3XlS%04YEUkOHItDL@L40;B*bKnjooqyQ;E3XlS%04YEUkOHItDL@L40;B*b zKnjooqyQ;E3XlS%04YEUkOHItDL@L40;B*bKnjooqyQ;E3XlS%04YEUkOHItDL@L4 z0;B*bKnjooqyQ;E3XlS%04YEUkOHItDL@L40;B*bKnjooqyQ;E3XlS%04YEUkOHIt zDL@L40;B*bKnjooqyQ;E3XlS%04YEUkOHItDL@L40;B*bKnjooqyQ;E3XlS%04YEU 
zkOHItDL@L40;B*bKnjooqyQ;E3Vg#9_!fnXS0Iy-{lPQ}8Lq$&h`$W)!OU(x@t4{C zm+7s~X#Ref>FQJaGn>EZ)PWhUK!)=-oe0cq05hIH0x+Wi%zXaH1!gjU*}xwWn7IUI z2Y)mOXDWeN!XLT73P~{$sEs8{>TMRX6U50Wi)?3k}gC5K`vi^r=ZLbMfj?>C4?w5;Zw7LKh6EYp^Y)l-(+)v{{Bw5`%-bD zzj|v*Dymb<$Y}olo?VLxKZ}=h__N`uN;7UphVCA>x^cxFCZxJynav;bWPbj@|6%ZJ z#nUZD|LNnDV(R~Ksv4>>)gF|V+LmQFf54alKZid*fD(UxV`_yBQJS&0L*!5HRAcQy ziOE%`<<4~ea^E)bCjitTlle3GM@^h@Iy86oI8`3n|3Pb0*tE>&FARVxe>$Bhlr>aQ z8OL7=0nBv%-o{{Zfi7w?3T~o^$i@kaoB2y&C)=;gQgz&9dx;?}8O|TW&q5$>!?55< zBCwM_J>!JMY`vY<2H7f2EInmS%M$*KF3=C4#GM;WnUrA*Vm*zStX&?q#Dvec)|jv% z;i}@CnA!XZ_lKXRL0EV|G1GIQQzCIBN|Tv)H5P8~^fXj$uPmG;MNP|S{$7{)O9Vg# zJMSPxT81h{^fqJ?QNyQe4OO;wGL-A27;1=|q*W5}@b5GE`Dc|svkP=7LxJ1_JrZct zPfo?L+wD-88}~;fYs6ZeGNpq2wM0va*dR4qR`F*f(7WCM{&0ZyPE31D?(jtUd+V*x z&62B0eLQq?Oc}C_Kf@p_U3DO0fkvs26hsqe0VjL3zRQ?W)Ne6M!A&a;tBVzx{?7jJa6~~$H?;mrQdA;s^+4qN;xk<*^oWwVa?IaU&uY+wc zpj|qWz%0gq#BMMWVwFMci&%vAT?n**1ZYF-o56UA7wp*M%p{Y^WM=Ms|Bd^7o~o|y zs_v>f-F^B9ot~=1oa%aN>rcJ)R9AJMK4J=Yq&0H+Qj12xgxG^9Y7AM2*pV_;FglCHUxm}>ZpWB$K&`}QhG^#6AS+X*ozho{@ zB+$i}-~-(@!I-Mh(}fQTT+&{U7!*cz4*Dh5wAl{7V>=P{${61bRtB0lldl zfl6aAL+~M1l*m@32T$fsAaG%>Y{+Pg_)9Vea|>aKRyzK05|$JRl@xf0(_6rG4nZ4p z6%TP3C;pOM1v&)Egwi6WHOgvJrv)A3^yQcIF<@ho8UiN8i@#(9(m6PwMA2stILc|r zu0E$gjj5~gYl$0wJ|1sR+~>Ln)tZY#jFFsvcLN zc<`6p1=7=&Am+Hh@J5%Wfet0(Dob+LDa)fta~BVOEU7$rCEgM-;4kMD4AK=aOQfq` zasP$Gjmp9$@KGN|udM!4B;zVel2s_lhuI{_sV@}LQYmycHvB07%%KQcPaW`F@o_S` zd;%X2Z{%xH#$_YxO;Z);bJ}_ML>jkEAtT-zUwkoSRbsJUPKz&cgzN-ee^CFYaDh1% zfb=>~Ja_&GAmb0kczB~&kNikBww^S3Q9IQ|gS)t$FKonJAsmOTN-P%#{%imxKG5cm zT<3}BE*$Cj19*kJ;(^vEW|bdF2G^Y=FQ`s+l~-rrwuhdxUt$rdmEya3|q;}nda zDT9KQq#;NK*PRr&Rlb72g*^rM+N`)IPq>7b@#k5G5LF0$N%hIi)no~8)KtouBqQu3 zarpv7v3!8+x~xuD?3gZl09!2hb0%QVRiG$^jyO!yN4_2ujty%aQ;LV+yKHPEGp%nB9ZJI$8(Y@#9H-6S6gk?a(VVh^-O?xEIZo+_ieQ*az%)0xko0E!1d5wbvt2^G6b>WyIs z$TikstBJInt50|sVf}I7k7rB|pobz}Nsp0dEWD9#B5B+nY`p4EntLiT*gbM0Q8pV3 z{sd551OhmYUO_2x%1TQ5G&-#Ziy8hxK9x6h#b$6+u))ps^5+v2t}-ra)yt)DQ9nl3 
zu?+iU_&aXTjzxJUG4X%Egu{YBAf?4Ia`dpM)@W7C^144sS$85k)*4x+Y5Iw@LDy{P zoJgb{o-uXaD*Bdq{|Bnr!~M!Ds1ai5Xk{CnBqyddl6sxIt{4_|B{_#$qmykK#vI(- zv1mIRC;rk0iUu*nUNa{@n}X?osF9( z5?K?pqKxOyO<;Hyl9f~g2qi-rBOcADs`F6iBt+GORZk4~6J20<7Lt}!LWU*786zIe zNUC#Klf)uvfvY4A{0RVtXTe8F1!S~j7$Y|l(y}!Ib)1J z=_IT*%Dee^Q*GEJk;r?N(fqwIiv>p)?Qln6lo4oG8`$Xdd-}vA*x5;u_cF8jo6Y!h zJYaYh{Fd~3Ij&?tW8~u*L4`JK-be(syQ;`={=5K&XTf(#kC)?11~a-wV_F<~v9nX6 z?_Z|#H`^xA(k46$0ZO_c6H5j$Mn9mb*J#t`jig?=yN1l?&jp~11@U|%Gm4D4stsTi zyEi?Kyx6gnH1sb9{Mjy$$P=E0U?q9TJSBY_(=ehbSIKDeex_Wtw}M#k#|XsJ3(rE( zlFm{?Qqrf z3KgCOeMy})KeW-?|0zjH#vjqq?fvt6JB!0;?2DXB5%Lr{*F)~GC%UV{(*q9ZVd z{ApaEcDPeBDj~ya!x__ynX-0Y{6$4zEcsIsn4AUGsOTWdqtzqNhMc$#4$p$mk`gcHlniQ|O6a(@0`QkQfSJwTtFL;o<5C!bwz%P0 z@Lkda8DBD((eC!tMuPB{GJu)RpPoQBfC3BdQH{vxW=yCykkQ@2DLM|sU#b9RK7YPk zpfz;iSqM^6gv?bkfN_d=b2k%=zZ93jG2l<{0@+HKs2rY!U?usbhNq-&qZ^DVIu6QT zN-i)K{P}W$Y%bxnvu6;xT& zxVEFE1^?gC+zkJ9v^O`mLrB`e81W||@DL!@)zwYI|kFW!ABgOOr9b;Of zykUL>lgH(C=u?x|`r?a#-x-*F`T4g8r(h3%8UV2uvE$Enfd|R8A9wxN4FchxsLTId zV*qMdNX4irTL3`=gVg8;4=#_UdXCGcRUls^E_!{4E z{;m@MwJfA)R6$0nRT&9?9;0UvlO^atQ`z)&Qv9U+byEPXe2uhOaJ%kt2}<( z^&godJpS+cbpoK4g_MmRj>ynk)v8<|HM=0uvmvv(D)4bAtHn;6Ij8Cj2LAewDBIHm zJ6!}o%3qhoU#A+tANzgRz;w*_GjMg%0QfoX&&}Uy3LxG8y9NRHLjbibq;8Y~$f}hZ zE&m63cw%e}{73lW{^Gj20l%{v{t!ThALCCAU@ZAV{F3r_1p!3-2!8-zo(+R!jBK00 zf|b#yQRIk}bxKN%4u67C$A-ctIU#+Lq8>~}{IvW5fP_E1{-*OU{HyYSna$svIWqT0 z@YC>jntDKyzZ*P&UdXhda7OR2az+6o4}ac9as9ypSFr{Tg^4TdGpeY_!cWg%H(3Rf z<)6);3cyU~PXZvtkK>O`z}7Vi-Y|kGn9RS9|GS|FkYPYR-DBwA=q6Ao={EBI4>CHf zD&bHXoU%S+s+x%R^ZGwaTD<E&L?zkLM3OpzZ&g>Y8f`UXb9e81=w4eks@{Y`nHB{uURRtcZWT8@D2lAQ#KU7NT|ETxt?(PBr z-Ttq$Lv{@q2mW{$$nc~5ZQEX6UWlOx{wpA{0(k5sH+$RG`eWO7Y+Jmve0h1<@(pX( zEnd2?u&5+|dDV`?M{5=r^Wr{tCr6%D_gg5!;T$0YxW(iJ$dTn(G&X)9@tWP{KU4L+MTPk}S@1K|Z3CxvA31dRL{m#c zM@M`2`3obxhu&QJTH))*J4P{$|e0 zTUuM+*mCe-L1Iza=9pEeSr3kH5*D7E?S5LQvBv+<&-uJoEo`!VQA?5 zxq(4|Z+!CF;NgvnOLJeZ>zo+w+0XcMSWtlSvO2uv4SXdO1U`j}GfKE5Cu$i5K$0A= z`htA&Rlby6R9@RKG*DkxSiETY!TO%7;}?g|pX={CbGo~yf9OD6UteB!R&IV_Q3C(o 
zQSPGcTaUo@3+}{RY&nB&5Fh^C4_-hBuW}>$zaUJh#3fu0sVm z)c@h2(=l^~Ux=yk1B(v;h03@C3luJei)xKrzSN=ufBA{>+UBcc^>z7$rArPEj=p>A z%E-l`b8y|z|OwD`B~Yy-~fwD7c2z$s>szJZvT-H00z$x{`m1?1L=H@j*c#{ z2YdQOlHu3kg)1y3m2TcbpO%&_pVKV3mhEi>p$1f4E(Lm$^&r|#q$@G6k7Dr?Pm+X@$ms3 zK8QQq*|E{lk?9MdfM2`oWXK`UAN>O)Q!w&(`SR}k1Wdq1MTNOp^ELv1qa!20-z6GG z$B!R+J5i9mwS8#3eQ!l!o_#kCM@bFJE3X73P#km(f4Wx1F75q)!GhQY^%2M4D~ZI~ zy0J&&^>w*fd5MFQU;W{Wj~?8*Jc0xc4mC8d>g%h{E&%*M;=C$Y{#tbA* z80sKx{BZ!z&9nGx?OT;dB!H5UkyePXP}koAix1dasO0fuw|^19@$u%e;gbg|tE%X5 z9X{+p$Hm&FBY;mn`OGt$moF!mM8ZEy-&nr<&2?|S3G7wX)YKe4a^%?h!VLsKhd=s6 z&}IDk+pf8DbHD)>4-A|N$e#if%3m-7dB||bOcQ(=)5o6!Ak4bd|F!n+1m-;a?d%)a zQItDxF5>67KgJ&}LdIKi*Vi67a^%3SUAro`Y+1kl%@r$Fyir~vj3*qdf{=LT$tRzj z2J4Rn3)Zh+zuO%)V?g2-RX5^yczc`iMzyquhz?Cjml|4mM|&6%46 zJELLvld{103p5FHz~Mfvv|^gamobg}*#IJcWjOz~o>uXbF?w4}0*IR%=nySt;|;I^90BLJz!x`9c;fy z{oe&^{++=gVv`^P#pW-N3v_BH{K+cTwkjIa$sYqS8Goz#2Kuw+5$|XDze-E|dB|b* zyhW=w9jWiV@sD>qnvSksx^&5sB^86)4mPwl*6k&i-`qbq;Kad_e;(g*M1|kzDB`zg z=ZVIay8Rm#cWj!o4DPmd_nhwSJ#+SKU;m+X3_J;cle^{=tQ~;mubjW`ZXj?*k%B+3 zr{eev;sR{|E&jX}bBHA~ZVI;i{eO1jz9MuHLilS_`#)HK^l!EBqw_E0kB!BaL*Vq_ z;g8W@18p`KXj}@-nU)R=pYJU$0A4aLiVF*4h0M+=7j&^j^o@i`uYpp-@*5Zzi zO%>Q{;=1sk<68bEcjcYZ@dp4F%~0{@@l-s2L0ljkdd8opVxGTb#_C{!we9bdkibnwRbe_Z^H3&f<5C=1RoE8&qCvN zM_iGy+8qSH)`laSfWMAT?0Yi)VEIA9l{WxCw)_Ru`1S7vA|WJs#zp~($X@^gtp=QG zrC!c9d-?+jTBEE$xHJNwZ2lGatLq;e=(PaU@@H+p?1m95+)&%t*4EzMdg8d~0=eNv z1Ro243zYPYjvd?I*bd&WwWYBZ=3mO6ji1W@+58Owe*E&6+kf%Hp8$%RKmU_3g~4VB zBVIl@b3mgvd*~DdFe!gKfj{N|eel=bPWU_4)QSLho)TRk#~((g8Gqf!(D@;L&5cJY zEdCt)lJbY{kMXDSe;yV%5_nKN{P}l*ga@HozG6y?V0Ht<8{M`@kA3mi2mDzM&^Lb- zCz@IbfE`UIj(6iEM#Hq=&(#V0@Yv2%Er?%BOLJ3G?dG8TUAct(vE{FlKR1BpaC|6ud+$tTy7tKHqZcU7OVJA#oOaDFy^jVHEH{y6ui@PBOjoy6(aUj91# zjl+WYpgsTM*a|}^4z^>I@aNYBV*T!For+28ZnLj037Mie@cKV5e+j}Lae%(~8wS`4 zD^D~vkr?Uz+j43omSQ`g3$U!4K!& z=BCER#uL{1L*TEPt@-)=7slTh;^*a$-1Qat6IgJQz>HGE*rm^(+KLGFfR~K4gWqH|( z<;z!;uY|Dat&JNt7R-C~g;~HJhMh}TkHGnL&`Uou`!+Qm!~K_zP9DGU<68cR`?Ht7 
zw*TY!>w)#JHMY*kc?LTI0HJnCwX9-nTVnRnB_~THF~Z*(#$OlS|7ar{#Qgl5*x3&c zW05OBIe%}rSc60+;CABw;QFr_9U#oXcuS$P=>*w{VWZ7H)4|{0)YQD~3Y%!Ct2=ez zz(&R%@^{4oup#NZ#uMQD@B$F= zgDJQZMi!PKj63$34*vFCzI>sj1s)232atz`hCcWZ!fzma1mWY4KQ6<0mxU`7z)Q^o zeZ8l9y1T%`cLIeifZ*|<{89HOEPoyTc982ofj`EAY_Kf=1^%A?Z~ntmPdy{AmXWQ; zQ(o5MjrBVk{CNIO$oX5E1JE&JLVyxHu*J z-FW!$;d>C?hwuReo`p=%}?(AsTw`{DVqc{2d zYxzHV^=EIt5cdcCVR@K}34c8ze-;b!!KOwBz#n|=YhU|McKFXM{1-d?S1$a;Up(=I zs#+hdD&R~~Qr)21BKkiCe~G?x@MRisfd2TyE4{>);~kwnX9mt)xHxj*;-ztTQhjoK zW-~HbAo_Xf` z-~av(e(=K|{_xpn;j1-2`q7Vn{F9&jGf;cr69AG`dc__5_L#cxIf zyX%Jj55CC{$*$2*U8s?44u3!RO9+4USAX@_fBo106T;WO{`GHsj=>k+vkC(N7%klh3~*e|3ZSZNhRuiUtOkK+&D zEl6JS29Gph=sS6`_Tf#UP-{zX-V{PQwXY%}FO%1H04!xOeKl(=|0eJq;@_%&oVLyFf`M(2l|0fJE`9{BY zXlM!=k-+bRmm@CDo`4yE&qAOxFh2!<_q*C^>1!KQjgogdgfl8jNevSJw}&iEMgO<6 zAHGfv{GFx%dgouj55A5~p1#Ll18Qw8S-GLUzO(0S|M`ofmw~@`9=!J+@qgsSmTmz1 zF!a+vrZLzkIlgV@HgJ*U<>hV{X%UIIk)378Qoa+zsZ*!mPR)QH{uqE3e^;xkUH-4L zZU5%i+A6A+ktPa?S5MBNFo{;f37P)2B32Zh!=s@H6ZY(s}}gvnZp~^XCkxs zjWhrDHSFG9vwL?{)p7_HfQy#f^kMT^vu6GJm$NwjnETW7S3B$1vt|KHUT<#i?Y(gQYX9*=`*u|yKCrL; zL`@UyNJHp9wQYV8xv>Ti@&1pwKiBfZnt#FRtwS_J6cX&(H=E`-8$gt4yRIp$f zX90hL10;Je0HCl5g8*6{korF?Eg5=DvMQEa!yD!GNm2p7H&$=LdmjmSbwuIv$^!?~ z{8gX3z&Su`{v}1pKcyQR5AE7jfp@JApK9qm-P(2f?CEoZJzZ@LCl8mcUbA-Xx^?T1 z9N4{W>$YvU`aNc?KV1G#yar6>0f$`t!POsxx*a>Nyz9>aep)u;rM;_p@~n8p&Z?^F zx|RbwRxV$%cfP|)Dge})!y6@} zNt5yl-`G^Sc|GNiKm_m4Sp#46t!cpph`9bp>^ygYbAXh;c@BRi+lL=mZ<}adw=g@g zV&#rK2hZO5}l+(nC4uEe)n6<1eVua!K17s9=J_rCh-)~(yOZ@-?vTXkzK z{vP)1ED`xT^4g-gGiH?7{8=MM>7)GN*3Ach`llN=Dk>O*hY#0oB>-;PxN<&oMNrf` zRE&gYajQ=OAZ$akMX>DwIsRnTa@5PrtW|B7q(<)<$WjUxEZ>HH1Ah0^696SE{%*l`s}mMjvoE|^Ur?^;R^^~LioGC`#TDt$lv?D+Xem#*RCl<)M}IR z_fHW1_{Tq9rwlIHv8%SG20h@Ww>GR=;LzY1EJlLGA6o*myC6~zh*R*l9seiigjI~6 z%2tCLJyppPUSGKp@vGWI_@nH#w%z^rfB*N7$nMMN=pM(pkyt;x&52$C!r_ADX`1 z!!^QS{_-R61S|l!al@Od7Ags3m^@4Fb+{ZL>_WQ$v{%93{x&N~KRp;|&EbuncH9a1 zMN8Iip!ik2wRREldljvB{_X!lQ1jO}46jrqTQK;H*3vgOEY614#;{PbqmTK&Lx>IX 
z2l#z<$L0^O`EmCbJP>Y%;9~*sbO1gj@E&Xy-@b5x0EqZiZQXHlDON(m{PpYBuYl+Z zmVLVAAoQNCrUC^G63(NdEtfP%FUOP@n^3)xze@jiY6xGc2wN}%$X}ui_Qv3~2`rRs?|t9$e>{J8DSyQMQT}iS zwD`lPQ!M-*A~3gxFW?5uabRuEoUC~{^X5t6$fN+xpDSxHgwr%t9 zhfl#^tNc?~WCaW}F!6ve|9)lxNPjZs2AP3xLR*x-)olGyyO{Df`_x?N^Dui-2e>^SRw?BhHaP#J#obs$WgggRa7DSf-QtPPyBlKDP-QeAyH3Qeax@&jM9>Sk? z0u~$~PQl0@0r1Z7Fiw{z_XB`HIpOdB%zCAg+*<7DJb!rU|Nix>a}uS+i2~=>#~t^_ zwqNWu3i?0BA2>glegA;&5Apl^-~H})UtO*z49FDLUaf1(BiLb>f$z{INbNCPgCKw_RxDfk`jQQml@%51H*MNj z{@VQ4UN3!h(bA=_FNObTXmb4Dsu^^2(Ek-ISu}go-Yr`$!h;Lo{{;Lfe_ws|6B)@P$*~(RGRxB<-_L`dh^rt`lv%?>8fH(!K_*-@E;xN4j zH_(rFrOH>7mBCA{R&9R+P%J1|x~c8p!RqRrJFDMXxOnNJ_1hVLM~-l_ujJ|vDIxBU z;dlQ&EG7E-)~(CW$w5b0c3}Vh1MuHIYx*TqFXIm`{7Asn$Gv-Z2kMR;J-olFq5h1E zKj~wr0)LyW8^ORIu7PX!;UXB?I=+3|wyj&XY^lP3Hf`GU)?07wSTdua7Ivj;;Qu?f zZ`)c~3Caw?l?etmCV=0fy(eqh+R&TAz3~q?`oR0y_yK>=&=+5fkG-`K7%WaKTv#L! z$SR!89=DpUtYF! z;X+_9x3TGuuoz?f!Sri8KmZW=J5|wgaZSm>#q-(a9e(zo>mL||8(zZ~E^OJpvwBy} z@!HyB$B);adMmN`?YFCU?tqcNajM#~r4m9vxW7k_?%dwP`19-j2!BY^=;+Clm6e+k zCyyULR(tfw5%hfoKKzIHKSm#r2c8zL4m4_~+lUzVXe2AC1HKE^I7mk;yi!I9j-VQpH9 zEUVqB7@Q>JAG`ST?fwuy2r$8(sjb-7H_+SG*ibiMaYq=u zasB#rD+`i`Kwi6cy6MdD$mP=y?myV4;!oTI#1Hwi9UuaT9*|DKj6Zx$+Mb1r7nU#|X!B?Np<&QFXFb)4<(Keq4wgZnmr{6jGI_dv)MAo(g1UK`*QptTF=0Eqk%0O?ur_>I=D zf$L)~aP;x~9cVaIJG?78=M^xDtyez;?Wn^qv(rHvb4jfd!IFiFj*t5`=p z`bQ69c7n$UKlVgIef^bx`2FuGe-?l+0ozxA4uArG37CW#fg*oot^ooeG&k2M`1_Rd zhwcye{xo|p_Ts~yeJ%V5f0r(;=snZdJ8b=$F1S7j*RH`Y!NQk@#>Vhx4&c&sc)06q zfB!(;-gm(NJ&-v-7l7C;1|T|X-UAZ;5J2RQV-2JA^SJ~8KFj|V7M5nsoY6Kg;NUm$ zxy$)!@Owmd3^vh0)eJTOXb-$3qBrU&N~vPLCltQ=`pdA`hihIJfQ|>`=3tROc<24X z*B389A1Lt0;RggN`GX|`tUoyXB#U1J@rh9& z4!z1{xaHs9-`5YfPT)2OZd?zvz}4ySlD+S|BjL|pfjBd;>k?1^u(sB^6X&6i=g$ov zO@+Sc3Tm#Zg7%~O_t9|_k)9(n~1KhT4hlD@8)@DJ$AG*I=HAgJ`!n!}? 
z@8XIr1MmWdGxlA&fdRPsA3O)&?!`3#-q*Vbl){%#CMMsY{HYzF10ZvNY!{mIfVKO6 zO96B{KlFcVi2swoNBJvjYg+;L-)80RWs@(=zn|Om!M5L5U)_hVBYZ%1lg7s2$%+nm zyE%E<6_y_?SpEyNSK*B3nsJZ>V;M{#d%{Qfp^g@00j@IoP)n2+TuSWBOR|H zcyAs(x(Pl4`#-pyHEOPyD)Oio(AcY6Ki&D(co?Y)Qo&%+;afX-bIdl!(+z~mCpo`S*uQ2=c@ zoCxJlh9CGl7@4LE7gv)3S_}ID^r@XKTWW?H8;=};C-|4)bz9+%jg{NB!_(aG%>l~W zMhMjT;bk>ww#gb}90Duq{RnTw-*fTSXQza(SS)#&00(`$jG_IN~mo`W7B^42+H4kz#sg9*D(IVT>T+_$lryU<*>rIJb_Lb0L5-MV!a-loH6hA3>CAdn}^#>b%^T=>!emjIv|{6$mm6Uv|E{n(9PeDFYUf5O(Yv-d*0-<>;eU%WV6Gn*`Q;cFUKNSMUP?b|>n z-W9gOojXwWomqSC-n$R}PjG|@IEc<`Q-0#;ej_j#Dy>6kKyOQ2S8DZ6vG|f=m#+sp8;E~oSdYW zfX)mIyD-R~q0hO;BWRMn|Li} zW(~HSh44myU`N$z<>VFS0pU8B>;k&>0j)*IzX<$Q!1*m$x3&yVbC2NP*wTZ1)>OgR z2xd5V59g6vH(~x|{NV#A;@%&5_6ivk@#FXZ?%v%4dyF-+0lDip5JHE*oA`J#JOpE3 zDiRa%f5;y?Kyr7FJmiJYy8y&~*=u0eHsH~{-~sr(*04rN-*`Je1#V&V!FB3q zN$>;T2Xk+o3qHsH!3gkGyWtFMwE2^jbd1rpT6yIPxda6MoNYj_1LXN@L+)5e#vhIk z&)|Cmzng~-%ki^kV0!o0wE^R-KaNd2hj`pnSw3Y z?(DfeG`O2Q{c?{`ijEDv+XI+!;VZk>(J_*6^A4i82l?B__)`LCuR-J<@aSIaUK~3! z>l4Wn_!IGy^M^mt^$6!)&$S=ibb~8DVDHmYrx<81$XGJ{jLgXMmjp2n_Cl9jJ!03Z z((V=&0fNpxw0H%GGw`4P{CDhc8w3x|ALjyrKiEZ}{*Mec`~&yN4jpvi=Y9BvTY=yX z4Dwe~Mu;ObDn$xja7FKfT)%z;eItaMc+GfkZw=#*tUzP{;)Q^72`B;Z5qTaJ2^9Sw z)uy4SXb)4#g`db@6PP4gL?vqb?BEugD+o{bVZ; zZuUj~?%W>OJn-K8?>&3~Wa7f)F1-!};^6-@;C11xTay6eB;oHqIzSnKcme9X5s=;q z7WSd(tzi3kR6JW2Ea-qg*jbhbYyPDV0aE@5ewTqiYvIe|ht7{oyW)`Ykck#JipJ>T zPpsBs6wTqgWc7XX)oVx~T?GHrKjDoyTmvJ49Di-XGtlr_1m|89+L z6uC2T9v+h)o47pQdh%+IoIhs`LY*>p4*4UCU}q6>1fPdS2YBg39lVRC(R!CPzDT(9 z)a!}rhOX}JGw^McuD-KpdQXF2zjou+?Yr;XJ?pB>$%q`02sbYP8*xAUE^FJ0!Rt`lKzpqUcd`2?g8g@749`$ zyD~X+=1l*>Zpl3qo00UtfcI5v&7{J`arm zzMXjCS3fJQ?1yI`;pxXKJ-f?2%!(Cm!{rs~>pZw-GAAEG{Q%7O? 
z6}YC$r~45>vPYoXl$hKLzUeJ z5pRSr4|t^g=Jn5So;%y!{#sEU{NV5I5&C!e?&)nMwupuBXF7a0T|Q0uv*-iw2g_;z8x|Z^UKwKPHp=+3+mstwcP0KB&VZe_^kkUP{ki%2b@n_e znS}rJ&r1GA34a8^TPXVEB;FihVaw@<)>YE+tb&Dw8+Kin;P>H&SFRi^XZ(@9Idc0K zr(dc21Mi2SXMcNZd*@}~59eF}khs6=obS71eHX5G!LIuHQ*TrFX3csU@UsF?*wpl% z1E8}8VF0$_i_-BIvRA!uV4&&h{gH<4Z!TLxU+9(CX7Tr+Z-<1xcR%~&!}spry?O20 z`}Lf}_!w#voqjKWLf0KQ_ac4uxV6d#jfv>`q&2w`*n4zg5?7a1SH=>7P~e zFYt#B5CJ5A^CEv$-Hg9e@Bn$~h7Ik#gO?_6-=Bmj`(7(foH*bgd~oIMWxyZV5%%f+ zV3%d02`<035cdb{*#ufMFJ%=<2qY#{7QxpC!1U#?Z0{8yQ z%b&+!o2|&aMtE<3EBp*Y)7v}C$tzkJfA`OE{1vnjCE%ame)`e-?}7uo_E5tgEPJW* zv*05rAxDfU&u1Eq%2u&WrI8Zo=_gAFS+eTJ4f|HypH%z-einfDE&kxaXFF8%erWl> z6YTk+vg$p9eT~OzkLA))#g`I4|9tiy+%tZM-uXzn_d?jZJ$C%~+S$t)f9U?;Ke+z} zYY{sAVm8L#^G|>Co8S1lo2X#NbBD(-!?Ud;BNr~fmkfH39(wG7~czU{2b>m~}UI=+4 z1D-z+TTN6D{)h)8(=W`wbzXN!7F0 za#w$@*_XTe!zT<*B;dO@Jbz^Vg#{-K%mJP}d2*A?=LyaGK6nw&Zq1t2tE!J5ZLMpx z=HI)6H5Pw~#QrWTz@mpaz8d7{0%3vCz^s$ z%^&e`o~F2j6k6qKaYf5o&wu^JR;Kj-R?Z2h_LyM5w`C;s~jgg^9q7|7Q1 z>-Y`arSPhqpNI`ab42*F&TqvY#@{;w@ctpm*fm%V@a`iQf6c%ihK~_G@Rzc}#kP!+ zNScf(fGz1{>PcnzYY69W-ux%RA56loDl70V5r$7IToyPiF0O34Z~4EDXm%N2k8W7zjtQj0-edsL}Be~H8n+y7Mv-%7Q$ z_?%n;-n#y2g$*F_f0Vz@&f13kWVV*_r(>)*Jx06ZAtLGM(mLd?wJfn^-MV%%1-trn z3b*PpxF=?sud?@Di@(kdc1Z<)Pwj<8t~^#WAF9{?zK zf9~$nV-X?(1KY6vJJyo0GjTV3MHknaAtn5!h?gCktq-E8{B>YuS{670k zTCE~c>HnU1;;COy{s2H5KgHGG?c2wrVk$^Izm}X7lTSKo4Ufc68?Lphrc1UYvh)LV@X_S`LwyH6b^@lb8di`God0OMM&)mymiC|8WhQBAiW$`yV zs~-S#F8+ksm%6{(V?nryD(|l)>N@gCjGI3v4)C`X0)Q<%fAIW;oVxChjV;TpaM?Ms$GhK2q|B z_!0g*CB=GO)uzWNAm;8+5Tm=S*j=|qqLn{WhrfN~$5hwx#>5!m@AE+JFByN}ji?jM zb=)7hiwlpLvaM%8@YdtxPgGY_4~}?^ii^fWh3K zFaFR8&h72JGI?nd2$Z_N$IKrmpjTVbh|i&Z_@$daI_oA}{D~z!HQQi&ERYa^F8!bPva`41@AwW54GqCP zx`_##eQ%8qaXF_fYWqJbSr?G>^2e9-*32aKS|BBLql`bh z+Th02yX59?FFQy4B>erho=@@-_s^e%D*SbU?`A{Tv`Kt`OOiJoNj!f{T`3E^@ykaa z|K^Jd;sELNJHy4_HL!8DP!YKVKAKje6ft%xVT`OSe+FDU>B%5}6Z`N_fcifPe_VCG zqQ6cD7gJTI$RAZ#dDf^d8XMs;tS{&SM8+Syu?xNxBGkf43W?SM+GJA}F{fx$RZOZ4 zWwZd4od~8S>o4(t9SZ*HasC}wRjo>o6@RFV>O_gU#Q4RdkA5rg*UjeNqV0Ws$RGSH 
z2=4*?RV!`SSBF$3)hCk-6m>~Q234!lbV!6Y34eza{EZ`jm8#OJ^my?HDydEs)2XrH zvrj)K3t-Fr71{h10)G%@%*vltDAwbnTCsqAltRgLP=qSzx}-J={Oy~-4M1W39fwyj zPS~gIHxf7g{G9g}pFScB5XxT>~9WSckK8pDbZ*^jSY`Jaq~y6`ictkm$zU2@J9EpqM~e9 zEk3Gsm0*YAOR^dw{|wQktem|3BFZ2Brlps^O299;`zwwie;Rd5Ai!vT@#srf05kVj z)D86Es-q~oXiNL`k0v^ICGzLF2AvjkC9=ab8HE5-;5cv3%7RM`e3+uS5bDZ zOG2xpE7dwolM(m}4vNzyd0#O#M1SXH7w+t7@91bbRD|=dl)o?4Eh;VXg>=##kUxQ55sl^wV54K;7^`WCCCT*ovSLxE!TFgaqXKT>)!?h~DtU9{u$ z*`AIT`jtvAe+^%JaoJtS$8lWwldEfiQgY*qiK5q6uUff^5J>pzn^lmXJ-4c9=t^(P z3a@aiq}Ku|X)>~krER8eRQ!?3PbQIDw6$YsthMD(eqIs!zZRH*Yd}8uY1Mp4NKAb3 zu_Oy&f;ZjR+ldi zTdlUu;6{d+v}6=UX~j$Cvkzy*z#N8Z({V~$oQ4(*WjJi58nH5qJM7*T-wkaUfEbl_?$8$cQW2KBQXX# znfZtNE)F*A!Cy1qw5;-tH){6mffqsTu09379p9Ar^2^JzP9{ssbpAXvaG-eAWyZ!Y zzO1-1F?wkfUe5{P*4_ILM>?t(l_qR_-MX=o%R6Jzim)~n<-AvbT| z>pxbtbWtf+T2zlSV02210%GnqBQv_r#r_MVXDps58uJSlEIhEk6@S+XUvqZ(!ubpE zqTLl=e)+a5udl-x@FxmLWpn-&RA9*qfxn*KzTuI{tMpAZH*U8d+E})j*~zE0B+D@s zMi*k@c}8b+k5R+8Cx8tgPX5=lunV#0YwY2?qv<^SM} zxN{-KMN5`#JUjqDBMCnu`R=>7Cr5^Wzu_;xe2vfOHy#iEIE`!soQR`^lV-r2{JyD}$Y+q~n+kEm0?jyWa0h;io`snK_rNH1Sox~b&?!4H1A{?_n` zLx*cuT>kP)zT>{*G2zb<&>lxV!sOu7?D(`bN)U6-Gh(Atv8@XqacQ(*drQsHqsLDi zIZ?fdmfXB~Gs_d`GdBD&0eRK9I2DCU;G@&jC`HV1f&3Y;xIOkPf&bJRnSL^V4V74B z`Rbgw*63uD#b!>`v&!jcXQp^YLi4o^=f0Kgf}XyEGdZ>e=tpG&Nb7Fn*N4oL0c04g3}_PveKCGr##P0 zG^B|IZM9RaQ|$OFQvHdOoK)aazZSo{3!&ah-HeABBtnVnvIH9!wV=-=Nx1-e=j-bVaJ$4QUTL#w0prt z>^8YHJE}D@#1t;jDr?szL{c_N^&?X(saj~vI!JOdvLczi|M!w)WNZ;+0%n%c=1+Di z={L$+4KJXSz6{@gehK7opBJZ4rW-lLyr(i1Mvu6T|C1=MH@e5kI?n9*7h!Xqn806V z8wJGNr!pnR2-S5B6Fba&{#+V~{AILJM9g(6Q)G0t8eUxnoa-|V{0SPl71A4dH9RFB zx6zH5%LTgn%l9_Il7fE16ieFYTXl6vEXil&;_={*8ySxXybPIT^iaf~%G4S8R>KR( zF~>(^!k?p&M4?PIN)U5SWh#x%pSMh18z-&N&Lu`-!ynT~CW1X`nPrqB#?Es}jcF6G z^rdt>mpoN<;{FR4$X^zBEJQQVC`T-9ELe_7P|Z1@R_9oVg*V2DKjIn$UHAjkh(8wM z)6%HyZ}{O3Uy@=kh{hj;6);x(p+>wYu6U*yHT?}c+^JBM7IkCBpNqK6KGg7+0t>l(97W;ii9_8ikUzsVk}z zw^U(_BY$d*fWHhf0t@QiJ}#rCOJUTFC4WkdP@&8*B4WzkK0c$eOW50>#+dS_kbx?c 
zDMpHzqPLb&b+xNQfncrltjSBNdSt09?~=k8TmGc_S|Z2=oOVXWpY&87t1&tOGn+rj z$k-IgG|DWajhO6I91PyXA8yJb{T7WPVjdSLoay5z=@M6eGD2^i zk{Qk4|1crr6?nn~qkvJsC}0#Y3K#{90!9I&fKk9GU=%P47zK<1MggOMQNSo*6fg=H z1&jhl0i%FXz$jo8FbWt2i~>dhqkvJsC}0#Y3K#{90!9I&fKk9GU=%P47zK<1MggOM zQNSo*6fg=H1&jhl0i%FXz$jo8FbWt2i~>dhqkvJsC}0#Y3K#{90!9I&fKk9GU=%P4 z7zK<1MggOMQNSo*6fg=H1&jhl0i%FXz$jo8FbWt2i~>dhqkvJsC}0#Y3K#{90!9I& zfKk9GU=%P47zK<1MggOMQNSo*6fg=H1&jhl0i%FXz$jo8FbWt2i~>dhqkvJsC}0#Y z3K#{90!9I&fKk9GU=%P47zK<1MggOMQNSo*6fg=H1&jhl0i%FXz$jo8FbWt2i~>dh zqkvJsC}0#Y3K#{90!9I&fKk9GU=%P47zK<1MggOMQNSo*6fg=H1&jhl0i%FXz$jo8 zFbWt2i~>dhqkvJsC}0#Y3K#{90!9I&fKk9GU=%P47zK<1MggOMQNSo*6fg=H1&jhl z0i%FXz$jo8FbWt2i~>dhqkvJsC}0#Y3K#{90!9I&fKk9GU=%P47zK<1MggOMQNSo* z6fg=H1&jhl0i%FXz$jo8FbWt2i~>dhqkvJsC}0#Y3K#{90!9I&fKk9GU=%P47zK<1 zMggOMQNSo*6fg=H1&jhl0i%FXz$jo8FbWt2i~>dhqkvJsC}0#Y3K#{90!9I&fKk9G zU=%P47zK<1MggOMQNSo*6fg=H1&jhl0i%FXz$jo8FbWt2i~>dhqkvJsC}0#Y3K#{9 z0!9I&fKk9GU=%P47zK<1MggOMQNSo*6fg=H1&jhl0i%FXz$jo8FbWt2i~>dhqkvJs zC}0#Y3K#{90!9I&fKk9GU=%P47zK<1MggOMQNSo*6fg=H1&jhl0i%FXz$jo8FbWt2 zi~>dhqkvJsC}0#Y3K#{90!9I&fKk9GU=%P47zK<1MggOMQNSo*6fg=H1&jhl0i%FX zz$jo8FbWt2i~>dhqkvJsC}0#Y3K#{90!9I&fKk9GU=%P47zK<1MggOMQNSo*6fg=H z1&jhl0i%FXz$jo8FbWt2i~>dhqkvJsC}0#Y3K#{90!9I&fKk9GU=%P47zK<1MggOM zQNSo*6fg=H1&jhl0i%FXz$jo8FbWt2i~>dhqkvJsC}0#Y3j7CC;J=%Y@d{)zvOk++ zLWV2wGs9nok6>nZ-|&~&{#)d!R0yC1p`0!`Bz_})5k^(=^ z|Ilg!C&ar|@zXe=0mhX{PK*GTc+vHmn{>1Tlus?Iv~^u#YR$PyXF7jbFUk000JnGQ8pt{(I>G_7td zm?fgFWi)?3<@xgipn#oo5RsOFs$ruIg;-SbXRpnf&~- z$e-K=+LWO{_JKR1HM%oLacmDeRA$G$5zZR2RjW>}B5y0oCsF|CoSB!Z}a7N9XcbFjAglIe&C z>y12j-1zglK>ob5`2%-cYjkH$pOTb&P3^d@ZTP zeUGjpqpWpB*0d+f`VuTHhWrUGP*fuuf|$Y`X^mXI)S^)^A@(4O8bj70b|lPEEcxSH zpr{EVrUX!H6zd6p6mpG-4*guB56;Rvh>XTW}rW}oM)TdEZDa#TA|Mzl4 zF3`(gqy$Dt4UT$*)rGGrW&C2{{{X-wK2TVPs9d0ozbIW`_+j>xH2h7A$5x+1@~prw zdnIv!KsRah&)A)69jE>`&T8;r6h0wx|A&1H-W_&k;XmUs{}Kiz)bU9ifnL&3KyNBX zpwbx35PXOgC9)Oi!IQZY2wa#e8!{Rr{*uhW+(KBQm5x80ge65nB?TVh^cHZPL(s-t 
z#X}s%iN9o5fewK(p|pr;jk4O*X+g(0efcGQ4A_{YhJZ=&;x8G2bPf(EQS{jZj&d5Z ztIsJ=W9n-BTH?l^k4ac1O-_!=0_j?#rkvaqXpke%EbkLb4bmJd6aF)H{3UaNFDbRk zfe_B9ETu678RkgyYWhS?0a{C1SJDhIAr9QxRxUB=>UR zjXHt#HvQm6Y}I!~ZPj%{UCN&&uKYUN0Bk7))|kZq49wrX&jiE~hl=YBT7ev`$D2T8}q>hV%D|Zvx4w3y8T1 z3~%(x8X!eAwqiO~D5Qu-f=O}^pg+>Ak}5Gtnm15l#`C8q&~|}T6c=LRJR9B^NO5;& z#n8Hvtx~R3--qr-10Ty<3S5n+Kt}WTOM;9q7f4UsjhNs9!y7%S21t+(tS6PL(PQ(G zz!eV}l{FHwD!y18_|v;Ux)P@6>_v=of#Hq3ph4rZah0SA)k^G1l7!-+4kQhPs>f9* z9{eSDf%LQ`h&e7WywRm;phL;H%97l5%JOK^+{J?*ODYduiMK=y_{(_(gLDPV66xw! z+<)P4qq1-beAI{0E35w$$+*grWED#CVKzx}>I;RmR0^Gq4Sxy%b0~t=QwKa(e4LCf zpTNh%8~Iw4aoNav(^SRzoOT{Qk;ZLP$cVSb7heonm00YT)8dO9Av;0WAJqRTTwsm` zAid5L&z(O4$oNAs9^NR{BR`UjttU-h)J}EL;4W_G3mb7)2*+Wo63fMbKN~=a548Cs z*LmW(3r9Ns0A3-lc%U_kS>;EP!F4Ce3#wCH<<%K|T3q$vq0f>@vc-!(FMxRZI0fTp z%AjB+X$X?RbteUGm9HRhVNU_RHY@JQ6D}cU{CU5lQbo!$Rz1lWG%;r zKNo<^1&TuXqDTrED;eA<_CRwa8Rlt$Uyh_>n$rko%4q&(2m zQ(~6%E4p?woIjBTH-In+i%R*YNF+PQ@tmXzu~xm&l~;4<9cWGFDKmL{6{T0sc>V-X zya0;BB=9GcgnXh=Bs&G8*aPj6d#JUUr^@K=6dXs`bmsFXfZ_pAge(wZLdEWqdSloD za*cJ^Y9cM?>JuJDSbrS&;~CQf=%I*L(qrTq3vc9`NE){X8?XA4=AMcSc8{D$l+DJ1 zKLHdMfdG!9S5S(avXYWMjZW*qVurtvPvuQru^Ai{Y;beE{P_fhtBi|U^>Qg()Q^#M zEW9ioPY@|A8v@aKG{jYJ?a%TG>V?$%$!=q+TblD~3f~NzS3x=wzFQF$Xty zEZWY-iNExLqCpI?*9?i)s44fjO>XL(<7j&pEB;bpAyYw#76(?FgPSMDvPAQl(fmC> z%Oir$7v+XG=H_`?;u69exk{#0)I*(}5OFs%oWEHf0>iVQFPWz?=Y=*#JerYJXXECH zMAihYDC7Ba6BwR_WF^%ALdlTEh(|N3>O9mr2~jm+)e{5$L>CyIg`_2wkYUMi#)wBV zlIk4RB(X?Z;3|m&e*%EvS@2O(0U0eB#>kDtw24Znvy&q4WGwjO0kl|<3zf1_&KToQ zItgoy@@_ufR2w!)B=VkRG=DG5V!_cxJKPZ%Wdz#Q1~xkVo<1=Nc6L(az07R>W;6aA z4;Y>Wza_n1jw>0^82NZcP@xT*HxfbZt|~H|KQDmcS@2!b z?oE#)FLo>?4gHG&f3^!G@`Pt0SVmA%qQ?iU7&3KMd2ub3JJp-X~l|a zQ!y&5OuxbK=a+SnKYA6Y93O-%4Mv1aS#2stO{M8IsrY*_9nHqb|KVH=ldxK)bb+Gd zFE6}NeIn_#DEUhVfHC9G-VBruQFs=#CG+y7tyziTjk;RXXjAh?Es-`IH~!dG46a7J zLWO5RUs7ky4{h}Je@ar4@dx(k`Vl|=Y!@g2Fgy!MN@~pc5R~JkH7ZM`*WiP{=m?A< ze;OC49q!bOO31LXi~{z}SRpj|Ssk?Z(WM*Z^_ z0e~6J-_N+)Z{8@mAt$bb!?WPCq{PcPC4(BL5<0G}0Q{v6U}p38>Z@MtxD-a9EpB)g 
ze3$e<#+M9cw7WgEks$n~3}B}7rzg-2pumEAR3kFF8561vWOR3MijD*Emnwjn&z~7J`%%A#;@sV4Nb}+|2~zFU4hW4EWQ#K(-PlDu-twSV?}V;VJ3c=mukoj)U@- zk_(Ilf4*EGn~V8R!5M`I#aoiAIJ|L+cyl)sm_KO&cdd%!c<|@X1@h!sae%`XCOjyz zl6J-6jdstbHX_9-M53==$A!PJE|3FI3JMc3xRC)UE$Pe1+MeRi5U5Fu`093S_>;T9 zO@}ZXK71JdZEfoyp_7KL_O7n3J@ayH0>gu%C`lm;Z_Hyi_LNnm#UO=AmN;{E1yvR` zuI*@P!T)zOH^YA&?aj^Y5R!HEld6I@JG+ZTN_Z$R}Ou|a7@DDkh z5$jc!Ols7XRo9xbMTVLzZRVWKy}iAstK+z$VU!b#U zjfyVGe32?m*S5;0s!L3_@Zhgcr z7=T(9QZZ`E7C_LzAT|2IgUh3-US;A+E24;>z#nQ0LBgLNz!>sJ2|U;-J3Nl@~sxFpJ@RRY^-VodBq1A!Vb7BQo?>wJH}#%`QmvY{;yx3Va;OYO&L1&Z#!gy!XE&bXTu;FBikmh zU}f}a6geVgostrx!=GT(v7xX@PDr1ms0Wh~KP`U%AmIxJIswU$7y#CLU7O(#>;5DF&KLvmp&!5KyD)A%xe;|~KKM_D~xN*p|pae0B z^}0%mMt7?YIx0F;=}t+%F-1)lejfkl;;&1>pB%s#@F%)J3qOhbx#D{rg=s z^?x|%bj+OL7h-Dsz~Tcyp)&5k0)5{{Pqwn6j zGIDX~99%c|o*5iIu(PjkepYraIKbl41q%VbDsuIQ+ka#PfWb3_KYqN}KsukJqoWJ# z!JdARWcYP>`9lvlfx7|iYZCJp6eo&`it@9vvepd@tQxs=@xr;`3&RjDTpGLFym#CD z;@qY6y%U`shYGT@oWUc*?HmfRS5g2F$rdUT4s@;3zX_K8sqbTW=9y=}m3$Y%cfRxO zZ-4t+5T1VeZyMwu9$|d{c0DQSHQFg5F;idYzyxhVC zCokQ9^!~kT6W{^|2m1QYH8cZ%tF!Vz+(hyG1to}KGPh8YkjfN*^9ih@6p3+@Q{3rHYWx&U@8gu_q= zgg?>!S^QD=XZt_O-_!mZaR8i~XYtqCwYav|000n z)w15*sH3ksX2V) z$g%Z>8wh|7fAoo<%lP%TU32H=fCDTZ7&sM>KLse1zhDIN#i4B}*y>w;gO~ZLHf%F2A{daKMR!CI39W z<%kNu(NV;2&(0H#Ep_`hEbiDeXBpgW>+U(-+k58h*}ncm>lkA}Mvy>+Kf)i*Sh|OT)V%nt@YIY9u7W?QjfzB zd|Uvk@f#iO=%_u>*xuG!f9S2n9UYr0u-C+O;XlW<{7vr4JEh|f04$oJ;?Lu$c>aR8 zKn5V=&r>naUozu77UtSv4h!u0Nc*j@vu3fj{e4mgDD!_KZ1I7gefxd=*jVAZ1NCj~9Zmb2aQvKL&v+EU#{=NA(74?ZS7fYq2f?qk;m9W7ucH(D zo{T?OevokG4Zx2re*raq{d<8(2uYr?QGg=y7l1&k0jFB2m$S{D{y>7(C@T;yjR2UG zzq1~ z50C9U)q?o7v@|z0)ou>T-<3r$9cmN3^Vzqq560-{`v8>A&;i1I; zwUR3{{`zlK-ym~<1ivoT^&g#ossF32YihOtgk?x0!_QiP*dspG{tq@EpL}vX*=^dr zdsp=-yCWFs0q1Aq*LY$J<&SfJ3jfEZ-$|T)?d7k--#9FY58CrDj;%0+;$S;Q34eZF zAlC1$)~T4Z?l$}Cl8`Bi1F!${@|Pg|5eMjtzhQu_u<}GxGd@WOQ!s3&ov3913Im?z z`4?_&Jo76E?tO5$znPOY_oW#xyzs&{r$0xx6Z~-QZEk98Y&>DDKLq}&*_xlFE{{!%ze|4c1R+g8oSiXEk`AP_z-rBffW5K*vUzi2# 
zVc5BZ^$46_2fg$ovu{)5G2DOY=;ZMmKd$AExIcUOYx_TrzaCisT4U>soM*5j01#@I zRLd&Hwk2jCU2?KS5+nSrVf=OB{f{=XLCnv;iJkrMFc!H2l=Js?i#14O0&XY%53c{3 z(E-97jJFgzn@*6O7&hAMGadZ>O-;?)uCR%Qy1G*b4s2xXA%9ma0J{#fHNjJ3ZMgQp z%Rg{|$2KnU$Dio_Z2oZj1=qizjXnQz{5dR0NMOdWE)e06R_nMxnb~GaYqWDk5W)Eu z_)C!e=dP24KV-wr-?;&@1*7NBy8a{lZ9DELhQ<;xdZ zTHv7ocmR25Xy}6vA^ZlyM-V>#_~SC1cUibX0ld^a(ARsqr@IS0d?!%Y0tg-t${%%q z!t&SQZwI;l6Zm5+$OhX2P~h+B|K>kD_0%)+Y8lyjJmqCA-dMk*!H?(fgq*)s{pYCz zr1*K(AI$$z_ecHT#v|71qZ964o$ftz=Ip@PhKp0e-;IY4AHE0SeFz^w;Q4a{2wxVj zhHJn}7vWE1E?ydkC)Fp%N6t?Pf7gLOC4k@nc@Ky$E2y5E zzkTG|-P^ZrT%DX4yL93FVE>uZUG1%V-f;6b(ek!r{bBKUk*q%K?H9`5{w}=htK^T2 zsB(1B!4z5q{_5$czx6G6`29QIfydy#_q}JHf#>3X@Pi-z@Q2Sn3lAgz=tn>PG2EPm z$CH2h+;h*tqslM*#a}9`r5GvAq*awwYg84|q^tN_D@?%1A6x#9zU=$M*L!WR*X#A}*LS_^8HZ(dCwRmFl0e}|BBMb80Rn^~ zN+5}-fItytB!O~3IR`)jhVjf02F%!FkL_`~|Hj_usp{&k>aOk+bfnQaTQ!4Dbw6Fz z)t`Ewr$SfvscmR#>*(qo92uXUTUcIMeYkihH<2#FO2JhyWD;aO+F!T)ePT4hz#jm3 zy7SKLz2&9(ndym9#$QW=g}(_Yf2{jQ@ngeZirs#Oc_IJMX9efJo@4t&|LLhh|pAcUEC)0VvO?PJ%kLI@= zQ}PFT5(oU`?XSpRRWCkG*wbmtA1pzIptZl0zslN%=9`^eeaPSJ;@#CpA1vO%H%fPQ zUWHJZlamV%5ho?V+z3vQya7`sxH8ZmO9N-^{2%K9ery0p_?s!rxHEfK;IF-{xuGOs zY`ldHt?>FB`O}(TulaLZP1FJYGl2~K9{>hxPS64V3-16;9`O6$ch1;Tk8ARm9C14!POJ2O*w#>5}Azc-2##+u05XR>~=xERJ! 
zS5AiMOaDkC0PFuW|3^n3`sxGC|6P*%KcRxj6aChzp?lDX1pYI4IpX5<2ABZ|YoVYC z#uk`wg1`TL$870q8|+R>-s#|*(KeOTAn|`CWN0e-zk+UfoErG+pa5FiU%(F@$0n=q z@z;PF8)J{0s;OyhzunzGJUR*d-Cuq5i1P34aVgjlY?~LX-b%ZMvAXw<#n0FgZnJOiE$a;lp%; z{-g{neg*=I7=FzCX~SO)zb$nffA}IDY=0vYLk{9kCol+Mc1Bt`WG5OKf2JNF1JLLK zB7s^T5cqS-7Wi{A`);(q5}CzA{!)-X!vQk>!opw;dibtg@uZF-4jw$dpT;8M_od#P zs|5af;Q{Q)nT4hMD-RznjX?V=^2a=&1;CzO8v01+?}ul}uGbXi=O-NjB=IC~yio}I z(ArOr>C;m5^vn?d$B#hR2-s+VnFl1V44;8eSZuVvUA09;#YIKg*$EId0QV>8^x^44 zLqgvAj}VSO=Kh@ds|a~3Bm@{NO+OeN$pD1Pwg-?HiT^VkAZvjcfW!fU2Q*yZkAF<~ zv(J|BXK!|2BpFR!?n52l`$eoV|JRS>Uzz`Nb@#*5{JI0A{Dr~eUYLaz;O#M~1!?gv zZ4>V{q!gUV&8r-nnq64BxAO4eJ&r%x{=%#b4TL|z1DXMZxjZtP2NQY&_*{2g9dd^| z&G-R-9yGujfP_EtzN*3^9Dg*y!k5CU17qVbQojTJ37C>@s6SuCE)qW@0X^#8IR2RX zW5ZvG|EoB2h_HvI(xU8)BXJslSlPA&QX{s%beO>pff;|`07(n1@rN!@7@%b*NW?5MjAqQb?)wHa$^l3;{6|Uf2QGw*8YOiJ0217 zmWD0zSDKTZm6^JqR12+78x}NU2=FI3Kr#md01A^Z2%y#pr2Y?6OR64sX&K9{z8mHF zN%jJM?;lOadmk=%bwpG`=A}#a{1x^MaSl*xe@RmECoZk-%DHnHc-N}5x}mkBv905F zM_+GyTT|`z(xjuujvr4=JyCY4DECZmE{=Y$YU2-+{}cOw?mXati9hK5L8!{lpMKz( z1ALOS<(>0KdGfS)MnQIVVO7JW{38kRdm`Wd`Rk!Y!(>&^$ZFr&IEEkP4@V!(Q&ZaH zi@y2*og4-jf82(9KuuklnUHyCsK#JvX$bjdSGKm_m4IR+2<7B}DkL>zy(3i^gP2T1wb zVel85H@K?3ZQ@4izA#tfk^GX&x0gTpV4%77YN4fuk6maS@9o7VSZjVY_owsMO{=py zmmmfGJm~3Zv|;-T0a{?>ucok&^alA;Z1}+`-)Q}#_0E+y5(3vOj z7j^tt6rxt)mcQRa_~MH%<|u;)^UqZj7o!JEKYc1?k3oZ_vKR>(e{2ZM?t(}?AU46@ zGyI>N6P7W4+Md;Wqa`cZjzdS%5Wnnn!XIU?vFYyL{`R+z$n4ALXo=z6NUR&)=0tmd zu(>B8d5_3nLH`4uzjNoHI-y$cE|-)&dGZ81TOSV(4-WyvgEP3eq~S(GLvG&rj*h9( zYuB#96rdKgIhgKjq!>cjzJ$bsJGX5M7a>FdA<{B;e&D;3EU46f0NKY8jv7`!%yh1mQq=KroB zHpm~~_iS0`5BvN$`wJcjCqr;q0IUwcRRWJ-vUq7|hyaNAWuM8v9*>#Ou=_;niA0Dd zLR+|`P2O~LT!OHS4JBzR8GtLdUVl9%Gi%ZvfBFbS2e7>S8GK{R00MuDqW^m^kpG`L ze{V*g$=$PM3k)vcp>r)T{@6hrf8hPTApHII^Upt@u1m)+Q39;NMgUWf9gfox6l#o- z0Q`w9FgFRK-w4q$gwvkLP>aX=r%$gfiM)J2LY*6>i-CR8h`V=`_o$BiucYH6_*hH92;Q40b&!3{1E_` z2M4jaynYb?1j-42{}u9XCb_lP(%N4d|DSKYx6Kt7 z*7vvQ{t&-kfA-mDuO@2Mhk2LkSvAyq3!{{_6?Sj 
z`!up;C*q8}hZJ_x4I{0i(KyT;i-_2poPzkJ#3Os@H*Wm#k6(TX{1Nx30Z903l=7!J z!0x;LtZU%Yhj{E5lcQShgB_ki^g2=RcL z11u}U0z(VDOot#9S8)u20463Ljz4tpRAy#I#))(HMdOsNtdAt{AFo3f`LCA16N$YK`@-FGA}px zOioTtHvUOZPd|P7bpF9Dkrgm2T@3#hA5P0i z=YwS|erjfMC)@JGsOQf|A|m2;CnUx1+Xw9JtgHV524jprXnu7E2mm5~)fo-L$71&# z*v-0k_}Sgp-O~#|1Z^~}t4*Y(QEs}<#CW$60|eE5g>KSm#r2c8yshpqU`XZ!80 zzI@=%*#QbI@YAPdM{nI4y?q<{9&{3>q5@}O<`)*UUIqWYj;qmI(z;=pbpPPs&@enK zLuU-|K$rrhRYiJ#m)N6AfZxOieA}zHzUcl4elOs(&qqhokilqJn?T$XsXxb_3_aeD}Rb)dr+Jb$$R z$C}{3Ab%Jd8XC{1^90hz;;6`Xa4uz_ucr&&k)P*sW#NnpyQ8apKE{cSCBh%;{CVvD z5I+dez}~9J$nEOsY^tlR>e09(49?HZ&1r`qc?jg}Y)AdA!I8<1)s@u?cKnHRfV>C9 zSwLe94E&L4K*k>xh5oIH-jWGRx<`$dc(7+n;gi$`!7rkmOuP{9hx}pBk9Pf7-;XrD zfFHOEzno-+n%zXoemuU>OxZN_!W*j&u$$el5+7-X-u_H$4G@k9P}2Z#Wo2c%6f z;}2hxRm#YBxnZgHv-xHBPZSd>Z`@`0kG^Uh496>KH z&o_?rceVD;k!F|unM0ok9udi#fraxE;{z?XJG-x5SXo`s`QsPC=+A+W9w2!X3Ht`v z1Jq^#4FHip0wBFA9>39=W6(e50!JUu-=*5y*!E)Mi~diePj7oaqkTWa`{9fgPFjI% zchXM57!T2Nz+`Pkr;MGbNB?L+Oy6NK!jCp%i@OS4Vhx4xno~IM{Z(ySu0A{C)6$t1<^@0uYPE07PfadqBb;0*L%^tYNffH@8E8 zPxF6KQE{PfZ)xi3G4PvsZgPGO_&p^v2I;g?9V#0D)GJ;RaW*So zTO2`yr%l90y%VZ;d}0)cL$5LkxBR=iySm}l3ET$3iR+#Q=$#H8Jb(Ybgg<=*Vzj`f zE}#HlMTK@J&O#s0pBX;N-@&uzGNYrn6%|$J-fsf<1NEJqA5wiC!nJE@X{S#!SAst) zVjFxnI0TTI;C;PepcEcLnV5Q?@@MY= z4FH(~WV6tm2dud8DFx8%{Lud$BmPeUALTEpsVNcezlH2P&l+E7f1m60!L;A2S1a&1 z!ecU~=47=i3_n=V{1<3%iy!r>Cc;wBI|OTUc0H4mp1h{hx(D;sA}iAo?sIYk^4@ zP;Y|4|4{&SIgAM9Plg}(JE)oZq2Z&Xf{w#{0A00{lT$oUS65aBEBFs%zbz_1Ei*R{ zR&&FX1C+Nk2-Nvuw;D7{*XkInK#N*Gd^h6nxmfel_xPT%m~xT;2ovq#0SSN~ah*WJ z1%3&EjzV7PuP7K9>8s0x+!E{RFT#hn{5=Bx;0s>N`17Urhxj3XL&XU&!kC;urwjpD z#3#k!_r-y@d6*=@`xLNLVDUaoA_9N+%>0oj(G38xC^~<77f=AOyd3yL4`^Vg#|VEG z{IIVF0cx<~@#8dFky}DSj<&a-IddiiZ`0wLAqrbM1hTShd>rz@fiDfv1pw9H8!>!c zAi)+Ds22$Ov0}v0i2T{6q(-}ZNlH(RnUhoK|Ex{0I1Gk1_%~!64E$k0_czjey%@@v zSnrmZq5NsykKOpig$IKB6Q-VxxfkO7mY2^C4-Xb^B?Db}Ok zZu$zqyu;f#p;lKlLdWZvG!XLxWfDeG86xmd_??z7$v*Xjtvt~|B z(Jr9T0>dl}@~1gKZ5T`@Vl@7U7wo$9KAh@sc^NJ_aPoi7{4x9(e;j`Ljo($g^TrK+ 
zvGJu%@ZIw;)mXe0*B;EHmqTxdUT=O5_E5h^_`}H<3?U`=?yuaI@n<+du?r{z5Z40| z01fQ)SS^fy4e&^e(MR};PB@k3dJmp!D1?5W7ABwJq#wECYsHVux{!$%_|9Hu4X@_R zw91yV;JeW?u>Ez`%&BSS0ihpEW&us}fZ8DBPXd1#aD97Hk0;?}mI(fh4L!(baW>SA zV1|uH*p4hNK>N%1!v!ef+#gwcg$#=L@$-Lo@0P$EWARo%Zf+hSGzeV4<;k!JM(-*T z6Y+n@A38vCcaJRcLg-BZV!8A&uxT2w{5&{81W>0=kBrqGv!!pmnQsDf7=6%BedY!~ z@O{wsR+->4{2$Z+pS2lIuZ=o?vXq9=e`n2{o+e#D;Ln%_v^qeZzb5341$X>m{qPJv zBKR$omdf$dTVQ(k*E9iRj6m)#=PnQS6>UY2x1@PQ7?)t%yt;ZHJthuSsP98M7ZOU~ zS*HSxKc4|4>!A@q9Xl-o=+z$Q`7>M}#^n6LZzMf+@_q>4)2CHcptS`uraylA(daQ^ zempB8NTvbp93Tur7=I|Yr~Eab|096|@FDOA4P`O%M>_n_=m9FXd(jJd(gbU;Etf0} z^cIoTFZT$g=-AM^twM_nkL=E&VW&e&`glA-SpFMMxH-6h*_`~iexGg>#s_S8yyV@ z8uQR%4-i}6FTea1`)-5a-t)(~K;RE%5vczol?{L3KG~JaCj6|6U$_wn&cGml#Yu!X z(xOtN;00IoKFHkMJo-im3)pA8cfOeMM@Aqr0I?%rbO9v*J|*i>kwDS^QEh6Yqf3}d zCj3PH>S_B+ej)COh99o$7l$4|Up0C@J%}}`@1_xd8Abk_5i|4Ydy%m8nKLsm5lvkn ze*FEDu`yOBVrB{s?}Pz@Iko5=aNZzyCenh{G{363Fq_B&>mk*CH7AqTv49$k^lrzE|zus>mNB@FBWC{D=KN ztp)yYX6Eb>34a6@aRAI(U-W;A3q_>I2jpRsi^E=Yc|ab;4#y!l6j`3=hh_3(6O-eO z*Js-0{25~q>Xfl?$R8O58-tKCTn~*7@Xoa=co$8b_AYCDk#KAEAy;8-+s&J|;Axb$ zuG_adJHW5c&Mz+Ay??iZqhrgSJvsFwBb2{S7=3^rOe?~I7s&kCv(YL@V5v`@v2`+9 z&&rkDY7_wUy-3D6vSw#F7l;5-0$h+V#Q(&6aGE~{E$Dn`;+2_!;HeYxtV5|IU8R9#>fyC*u%CL z=tBSrdH5^{xXlq*?cNTr*}J?g1VU0$S68k5AlL~&x*i$ji1D2cEj37Sp7KN zel9r?@YsLg06ZUBiMRg-+uA;T{_N=|zx?Inj~1U83e!j6>L)w85kch7)b>6bt)@p? 
z>yxYQbuxO5m|u#-s}r3yJ7+A%Vg%aqSB>)nIH&T_N8_z65SojR9ZO9;aWcjgpOuz< zxvIPjZeo^REJ;W_eE96yZa9+Q$4|ciz-Nyz{P4Lq+t$}`f6M*-ZS4SjcOPwO348=V z;{FW!kibrSaWjS!$X`gvYw!!NKmKuIT^)TNs_Z_9cq4>)z_R9rx#tUgw{JG@jot-6 z_**nWzn2d>a$|K7qu^^n!lzF^{@~%grMcAz+Oaak z*4rrKPd}&J$ciiR*Kh^wx}_JxjONd*xjD8Tl{CVC_@ga8F`Gycfj9J&3A&9Bt`f%n7EezCc+xpflw!!{QHB<^pH z^L@+Ovv8ez&ehaZpQZ4HgnSq9(*jUfU;oGe&=`X-0Gsed>G%uTDX(AZsh?RHsm(ii z_#l0umn&D}Z?!95!ry~upM3ae<2ZR#JCK#o;JUepU`m!w!KJS4Nk4HN@F6L zK50&-oosWhMqdzHm-1%M&2x(p{_sb;_80g=2Z#WY@4U!g_D#lLH7p>HJ9VnLv-i%_ z(#jMx+4mZ;;lv7m{CN88Vc?I<2z%)MV3uW~9=cyS#Qgz#I)PftOBn_42caV*Bn6ys zX>v69K8`*{AvnYOdi-4oSg+jx_x_WUU&ElAwaAV-cyE6r{0u|=*@9&9idM$oN*~8x zWD`*W{(So6qYoZ{1Dt*6z#j~Isq@p|BPbz9j48|KIvQ=yis!U7QUWdIWIKFJnLR(R z--`Q#9e;qI2H=XuA1r*EPteYPg{N{NH_V;dYvBsavb+JtgFz8_p@ZK1yKJo|g+fom&$HG^n z0)No-K6oIhJ_r85-*6&ZqH8eP^GAG~l z03Q?8TU&8amL76iS^InKG`xT9c5iKceZz@8d-m?#6ZcPIoyQ`66PLDbJ*jv8jNTua z`ZM9TbnR_c-79oi6`*S7U563zQhv7-~AqV|B$5aYBdLV z_o<1$8^9ihj}bobm$Jk$x{Q*@IvLjhwxp12-<5>FhS0yT@O#1^G{UAVGw?1Eh9?;& z3v9;3WY(`}{;#F-O70PQ$eVH!P9cAVTeog62LSD-{&4OE0{A%Kj|6nS~@4S(M_4^Iw(|04kE{NYn? 
z^s!5``_uXJ7g`?WBl1VDDF%Xmbz+1%z~$wSYjys}J2)tRNZlpE-`f)B>5wgcw{6e4 z79*w=uRz#xV{|Y(6&Ei?IE}Hv_=7!C{>Jcr0P-hse~@N>8F|YmEVLxAC!+b<@b|TE zlu-V*hL-F2;h;pg@oTuh&BdRcfY<|Ey|?^JH~h){pMAX${_M`V1|wnDsD5+Ip+J;+ ze;0|gzm3^g4$7bD1*n8S08sA!%*ChMcKjiL=>N8E4K1@y{aM`KCgacMD_r~R z(B<{4|L0M=>92$A+F$Sta_K*P^2-la?>?Haw7+!JvCS=i@@&cZ?9S=CQR7d3PG*!> zEBi{wT_7uxG_AeS4SyLFzh}>++1l*1^?zUc+Bbei`2zrT{A_xEOG}mhV#=F*-jZAt zlTTV}wNKFjK766`N5>!fsDtn~SRh9uLri*34~^2aj%Rf+lJSSu{#yNC z23f7~?3sC3ED_Au<-p(9{z2n!YiKtBXmtLB)|a}!r7>@~@z=3$Nz^sulNhs}69@Qd zh5%p#&mXM6aOO{vt>Za;HyVgZ&grpH@`d4wWRSpO4n~8&7JMks)czuWuw3YweDuMx zH+?$s2Qzxi0|J2h*h72nmm7S*t-Eo5G38A@z@J53PCki|IKT(b^$sBMe>{JHAM)pA zw){y=0_!ayImRm^kC>%Eo*N}H_*hay;W2gS03STHA{8>_p`I@p_AESVnxj^0+&1uErnl%!w{FNH~ zT_8WEn#vnpV}!rwUb#Pa{DC*3PH?B;{>WWiSZ2zmo&mwduaZAeT~R&Q;x&qiK?nHY z6C3`vT_XI!%4m{{n9`K-`WX3KAugWNccZ9+DO=Bt9Dn2%h>Sn4dVekt_|pi?1puvs zU*`T6C%kDl{dFw1za(%q!*&^;f)O3y!;dGl{vT<6w~+Ri^!g}&*#3H%t#LL#rZn^C zIVjeYLRxcaJvAEqWlQ*jCvm_7dguN;;t!qR_Rh}fsXJ3Zpw#_+#r$ysTD298xDNHh zmuCKGtLrlHC#JOItb=W_zz#nc{Y#NiB8&gyi|b@G_@fWk2>ve;k56gMnMrK5z@D`mW&G*cdT(5Nk<9#^XV-|IguhQ~_+1{vJ?BqC75=)w z_roBhr;7`?B*(7DE}lQ8u9OAd`10w;zkHEF93X9ex0v{w1sj(O6_MM)N7vOTMT}jF zFGiM@zXC4q_F|B~i3|7@p#D$7AD5j+(YMxvi;c=t? z#_mFiy2SY6=|`Um{M}^jZ**Q)7xD)`3&MLq&$5-C*rNj3rL?~occ9p(G-R+lt8_o? 
zgmV)9uGsK5j{IfXrMBB2T>ODbb{C5Ksj>Filjmdrthv8voxdpH55ks^h>$2T9}lt> z6X;uONVy&qp$bk#vNsC+U6{ZLK%xB|hgUI9=$GwjB-r@#0pFUvyMNLi5ar^Eb~$M~9hm@gQ4M3byHcN|uA)KYb`tX!x#(Xv!b{ zrlpm?Ou)~3_ZJg{{5hx_3ju2L#nYEC0A}tl`XV%`Yx@-=pa#y{9<;5M;*z`hmf)h`AfC z1|VDD)z2aDp7AJ+zb#v~MxAL{d^~vLTy)q@lZ0AI(_U?Jos7Vr_n;U>l9v_t`cU2- zVNnI`%`GhrSE8}~mGbw}{*1Pk;N;J)+KBM6v3s5Ye}JF%81!`l!f#iTir2iw8QpIluH zl-T(f6VZo`rW{Ej1QPzbLLwu=wrAH5Om{XUT7_dNtrpm`PDYlo^qgxq+I&gn7vtI) zeWqn#tg+!r#I9)ce+|$A7lVB8)2b1$!!_~ZWtL4ropuK_e{V~!jL9ceml<1Myo{b2 z9~r(g)^hRUr7NZ76{kwil}1H`A2@vMXmWzD**c!ndm}?kn$jOeX~s+=lX36ftQ!Lp z3pYwacLH!p_?`7z=;>L64~7D3T*yN8sXI%@=07EGd6q*{7sAwkBm>x&B8mY zS08;i(S1G^x;FF(t86SKd~TSLyBO;^{bKa$Vs@8y4focT;IEmdAI^OL{o;}mco9@l zVKw}Ae7)=C%Sl-w-R%o>{wy?bpjg#q#<~|TGo~j-?~KChIUy|GU3oatQn)|PrQ>VX zjin@?8)W1VvtEjyjMj|puQV<>De-X1-pJUvD)_BT_;nWyqeH#$1IzW9FJCsAW$|P? z;Q2EPD9C4LEPDB}c6wqA-orXWUK6sgbg%nrcKrT0F14s0XTa!&7zM=4=k%M=bS?Tf zkY2HPp=gYV+_Ue}#YX&HD}2q_?pinc4Q3vUy*R6ha17bpCBNU&G_V$(0A5N)*XERaN@}ewLN!6h6j7QZ`WQ+)U7Nc zOUZ0J4f;Mk;+K?W#Dwegr;)!FlmCM&;;w}l_a8i*R@wtUBMCnu`QX9Q)W`tvH~8}9 zUj9H&N67@_L--ZSjhT4B{?hD*%mh z{?@{R8&K9sBcl?mv0&nl)Qt5K*&a}rMXjHk(P&@Z^#TR+wQ8g7@!e>fWo&G4@dwR> z!?gyQ(V@J)v*4H#zrpJwplzl>#-HtV2BKk|Sa8gCqjd^8{^Iz17-ls$pV3@GVcW?U z#jcYv)=9rL7=;TnHiD2p2La=p0NixO*fmtQg9z@jT_24MuI!y*GWGtt$j*?I%6X^`LmM&6|&W6(-{e3c9*iQ#&yuhKu(ASDStKsQp5tqND;Fs z?K&H6&g#2>0(HhlK=b#GaUFV$DI^tey^VS)xQKpEZl4}?G&00&T%eX#&r67`*(lYI zG_fRW;bhu|n~RYp3GDg5cO*5Viy#v)u#7r?vP)TiqwK7{3n-;8z~`Ue0XfXq#VHi% zMh-FSrEG-JBCg^8B+5G*&Gn>h2X_05u$eAQ;4iR^0%GP%*$|^2)iqTU+YEgEOd5&& z1+-B_%ycOmWHg=CcXb(Xrq5vDPteG$kh76j!;PLx@py7{cBojfewZJk;5o6c6p~iI+Fz%i8SeLv}dE)#F7sxk@+ZLh$ zXp|$iZY*eyNl?w$pjKmBi1}^|CjN+P5Om=;P$Pa@h;K@xZFzmKZr@W<>;=*I4PONe zD*jL-UKCS2(2Ne{^|iX!LeaXY8)W>MhzsmZ2maQ;0-$Rzi=z=vYM;_aW6<%p+exCp zGOj~)6LEZ}D9-N!1|NUB9W>e=ne^JkU8xK2r}1(uOR%zB+)wb4)N20DKh8F<73+{hzlxs2%1iS))jOwX-p9Z@6s{Db?-*#1j0j@xR^S50Q7}x*?JbwznfCez|`BN@1kO2$^ z{uF_MOJH#DryAivB``?%Q!X$-0Sp@clnV?<0)vM?%>`~(A&?aKX&~SK{i#`LZ~^9B 
zYI{>F5M2C0BfP2AR)sSw5M=zBFHdc6as`5pza5)ga8*1(f#BnBhaym@;my4fjQlAA zH#c*9>4K9#v3K_?r)u?KHvndep{F%L+z@OQ;+0<9%34H!cUQXoCWK{LN*%Sx{{sb>) z=1(wgHuX1QdEUgZapejM{&+8E;g2_NoUVa+CIa7v5*Q@>8IVc%Q!db>Z-W8f1`-%F z{4vO6{26q5R1yC(4>IAmL5*)HPO)L#UHDjz*`1>uEDr`FftqCU0{Q#BG~Ie%T!y z{Js75jvX3*Z-+WS7V2d5i=IAYB3AZS;Q-Agux$HqRURe@3jP>+Ze1Yo=Rl{2lKK5G zb9MXu1a2}kpWkNNO)kEfgOWd6$dEr98`4ca5C{|!1doJMwbr+xk{S%C8gj7jj*__G4Y z@n<#qS6DA{lh(Eu>DP9s^z2SM*LFHn>Oki&^c@+03_vqvJbyC(XcA|GHszJQK_~ax z|3PW}ShT?B&l>*gxQC6?A3ON4!2w>;{Z_Of;ZN!UtpIY| z=~2H)>1{@&rZJIim3y0`$H&{A(PM+hr*d;(VDrb^AAZUjVdeo@Pfv$Vh`5nxoy@eV zF?0J$Ux!oMpUgXppNbaH{QW!6pCzp{K;LQP8kZMZ+7U5FBbSLE zqMow=C*u$H?%aIJ^@w{{8u`(|#-G&%^4A^4Z}6e(Z@W*g7~r(l>BinP}Bqwvjx!6DCXn)mX8%}Nv4&+vVVhj_%4K&GC29; zT_7(JLClUoMfg>4C(!ch99$9YoXSvOF;AYW8warb%VbpOoTEJlej=f<63q0Cw^om zdadC2Dg9b?32gp;@e3<-{2m@aT#LXXW^sXz#s?gG3{%%$}) z8btiLX@j|eutX~-{;(02B=KpN*Hyf}2~5}E?a55Zeb@{p{@nEf4FY9CIU?q0l;vi> z&)Yg)zvCrk^zvkq9K75WT>QBskhZ~IO632;y{zSRINbi4ygHeEHlA97jXw_>VLNHU z!|g1v-_huhPHyt5lKpYE1eE$qO{@l61cWku^hv1vhHkE_Hn`-v!u?}UT zCNIwBHZQja1R;OUF3_Ga;(p9f*duNUCdq~u<&hpL$r5)-kM(M2!1L!!pzZ>xC?>?jcGh>JSBjetwy9dv zZY`B*ug9C_lX|@^KaP*|5tp|C;2pACTF6hUp(0nZg5 zC$-D(;N!j<`CM${vYNH-vrEn&sUO4dq;cKkW5k-{n{WD|=`l2opFmpq~UUQ>Ge13k2u^21FJQ@S}UxcIXI zh?kE|Fn+EX6wG8Df~0cIySzRtpMlqjEeZJCw7BIM-*yBUf0l6wQH9XwR-c_a9S-r` z=#VLAlGL!Vi#yJXD5m$~aHm6!qUbTF!@aNt1%E~ZZ0QAxQaBM7zSD^@-;H7kY`64^ zl|*b4ZID4yB!7_|9Q+9Y3XQN>L+&WDClFHlXcS9eza^_=y3an%23aIU^VgY!gg+5L zjzCc-_Y~QANRrYgqgVomElDktT@Fb$#w012-%cDX{Mi903grPsHeQaV^u;Kaz+p>L z&A7`U#Ri!q1@qgPgN8p7fXoGoLU}}y6f&05d!twahb>7pZwmavB?WW14DU<<&EFQ` zTJbI%7wC--;RqaJ&>-%nTg(QZR2RwfQC{_T)Y7+PpQo?bfQ6zf}MzI8rTjr{EJb0sw=0d@?A5R_l z{0X2~02Cqf3NayLb4q8UuN7pfYhSbRYdTYzaMO?Z2LpdRW6l6tDB_j076F@N$2;dlcc}tOzR+7>~qj7fcVv0W>SLJP7vMn4H zta5i+`STDIE;BBw<;(5iqMjI8!2;});cvJ-J?76V2@?MYOc*Q(1X5ZIBS#O5IvTZ% z*|FwNlGePFZ95v-L7VgwX@jEa&>0cG7I@3Xc`N^)iT8h?ieBCCzKa?mhK5$U(b#3g 
z9E~Jj<5*J+lbUuJn~p}~uuT{9e;75Gx8iA@ugP^a(mdm)2@KOR7h1-VdbHp&@e{7D;ON29!$U+rEUHc7o&yd0m>JEN&KHpQkDJAIG;%NOYUZPf|XwDFw-2n{x8=oAd$y+ z7Q9QzLuN_ov2h)0bj?{Z+N_^z&f1zmQ1HhH#LM%Y1#eRtLk&qv4~^Ce`ReyrxzqNn zN6~_WKdlj#^XDN8UQ?udIax~2jO$UQYt2*f=jlq-*SB(kOvOn3AK;TUUF47U z0&VLDAzKF{LS~!oT8s{vuCK|Czc<(8v_a(muq}p0*j}ae0>z2HUA`ObFJyf!w*0LJ zfI-HeJ{c&jBHvkXOt~vUdNysR??$Iw*U@IrAGP@P)WOCdn~K5Fh*c=xS#VD2WR39I zXf6MS>~Y5**!@?I;Nwqsff4|HXTeQM2Xh1j+xl`e+NN4xg9rTikH8@0&%p&cR`=SB zwvb`Az8TjSGuzxf;?G|M1|@&C1iELzu2!5N+C{Uq@p8T!?aH#w`#t2(KLiFTe>RP< z?<{zdQtsunls*{OnKQfWJ?76J-N0bwk8^>X&c3tYSxT7;)KhwG^lqIrR0YimXsbF&0uWMwzvGP z$pr=le;&C&))w=hf-~|R6l+Q@W8aM%#GCmvUh^kSV9wQMJ2?3B%mwn~X>ou<7shu` zWGVHGeK+bQ+t`Q{Cm(itRO(>i&sP`70VoB9iRitN0VqxBk&&HygFk&hO`61`Vh0U> zau=9>1w(0RDg140Y9XPOhPLLmwziTTJ9Pqm2gN2Oh0J&3E_P$jHj8yJNMVxge0ysK zRTed_XlZD`|66X{fPXE`H*PdTa9hA2;!i^06+o=5?IsPAQ#4G|FhfGoHUy9~!nRuB z7uh!>=4+eMt9h?u?t8KZ+m6ACcF@-?%#fgNr}W1#0*i{80c!{uqF30mTWi zh)5@^qtT{FGGAo3&*|KDHoLsU{ThBee<-h+zwvr88NY=E2sZvW7s&8)!=DbIU45)^ zf20#*jz;;3`5JT|m*?TU*L|*Uz6tooIta)Ip<07lk#_y0;mmt+vrr6 z1Asy4|K59#$B*m(kv78O|K{chfQ~Gz*=XU2RK1q1oeQL96C}=T$gHLeJlK@wqVIcq zTlOsseE*Lq+ujZ{T?9bNUz^5Xt37}}^7LH;oAJ1xfUA=Rz|U}hX8t-TfOP(E76R~x z06Ma;cB2$PmaWvN`9C;@Cq~D>^9Y~Zv$#{zXz(W(o!GFkNiN8FmrXvn8}W1G4*(?mVgH-9 zzwp=20|qvK+qTKvAHmOozYgjFMgHb_0IiTYg2EZC)yf$Kj4b?FPm1FY7PyQZ;IJ`q zi9L+kWTfHe%->Bi3MRupoj*GO1D!t!fD}KDKh^+ieH6T51Whn$e+~aP?+hTrfP9+k z&~u}iKp~~s$ooGy&^W6t9JU4{t%otYoQU_c`aext?EhiFKA?#|8vp~IKZ^^r#gEMY zflzk*i2!odjaB9diZ@0vUsFoaXg;ffjvXC#`;C;I#%yxZ@U!?o6MtdGUeB2r?SxYRZh50E(AJF0C~sXEggi zJeUh#R#9*?E;c46Hg5MG2)pBAV__>gDk3}- zeulcKr@E-@O6j%whT4{v=9~RPBb`@H#_x?fRM|2z)O303Hk|6}x)d4~zB4k~ z6%z}BGsR*pI=$uk|YWf0zeoZaH=;Zr}cWNFc>; zM`(CleNXks@X)|Oe_u~8z&AcM+gqA;Aa3WOs@93Y_KS=^g9RHoaNcMn{u z>gw7R7P>PcD%yqLca%Fi?@Sp?zu-*FaKkNhgShzjB6tBIyvNn({|jL=PBZIe+KEeQ zSs-hf#Xp?0*XIAoMAT&Sg;Trt?%f@S{DDq8!lHV53W2}Dfx-U%{=R|X(TVBybLsnI zb{?wkn7G|`B{H1)Kdf|GXGZl4F?;;L;sJm{X55Ab3b%)gIvTm-))p1`i*O}Z+?W}w 
zsfvh-J6PH~`e1Q-WO$$t`puoUdIv8Rbam|x4ciG0FeYx#K7cQq^!{-AkJJD#c#H7I z*B7fu<8yR$bdO%yn=j%HzZNTh=m95iHlX>KYxkZQS9ElAL}+MeYEMtf$erP#zQLhE z2t#+qCU2b2-5s+tzNT}cwdG1=Sg28Xq`HkwA@=SX07SBd%!Cc6v(oPbJNz5}jNu1A z_yM?*??d>6eEZwq{?@nt9|(WncK_6G2GG;|k;>x$XECpJo>k4`^p0FICM z@bE$0;7*T?j*e_z00sP-O&3EpdH(1ZkTk)_-{fRbgbNzr=;)}Op*zxmztNEq;O`C% zqvMrV&blJQ&NL5}(?gbS6)(ML|s zfAsjlz3%eM=Z_sbdh}RglB=uxLU=@!%N4aV3+7-Le%kFfoxky+PR?y`*cs80%?h9i zKb}8$2n#}Xc6MfV>xH=U6hmDDoC_skYN}`ld`Y7scZJ;m{%F_&fdUBIm#1y`6S?5F zkUuX87=64=WwE-o%DE4z9k>J$Oc;E%2dn#9N7&TZcw4h}G;r>ELW{%k;@{CP(pU;j8` z<`6tGe#6rG68ymb8O<+gf4%0<@PN?jQvcW3Spdvg_$%n@$&cQ-V>{w!xIe}p4noEo zcAlsxD=WKn?%cV|oSYLUP9`QMzMmW`)F*7DKydxw>tFx+CK!M0*>mE=i6V2z!hpo> zFRa67c=f9U?pkW%ppo76i|zEXc7=zBhlLd}|2H+&v~7Dh%#8ZNpOgj0pI41A2OQ4R zN;9U%cx3#B*7_Q~KhFJe-M@eIoI&q_U5Mu>Bhm)xt0E_8A*Gix4rX=@OQ#N02(;{&;v$ASHbj))c*}> z?e7*25uF4XC^~;$xj-X#!k;W-$Fqt?;Lm7%Vf68DfFJ4pvF4Wyjp?`;N5#h3J&0*d zusi-zx_Y`pcM$KV`M*p}{2j<)*pB^2)5~f)=YMy%rM~=VeEh+K2QzweFV{BKRh=i@ zZ|)CPIB~G#&sR4bvBPh46!9x5xK`Itb@9}Jmh^3h;cnZ__KwcZTeoj_bzezk;7RzK zI=3zIcn=JJ<^0{e2?TD5w&Bm}sW|?;ae;cpYy4R=<`8pd`~e+&F!%Rw$RCbAerPnm zWM)Qlf8Oz@c|gKnlfD0g0Z8{54L{ocGX7XyA8y#(Fskm0x+|*ce<@AA;mh=oPHE~_|r_zzXsdKxko%jO) zqqo@cXYo`#f8Mx21|Z|lk}=PpJL3)(w(DUV3v7L){#MwK5O!|QKB)tg`M(i1_`uJu z?w)Ses*+aL%wJ)pRzY+GoLo3I7L|Ibrm4B5{$f4WpAqyHk0SWh0Qf^Vxn2-cWUL~e z;MZ7NmJa;2v|`!a@dv{X5~kk={MhgpP~*qHLqtN@WvLq*P(=Q`AW%Djk*(Cr>1IoL zV27homcX}t1VFd^Rdx6FbZP)P@~2I}6hVzeovNs7YHDt7yjCf?K(4wG!LJ0sJ+}0X zj$OT2*9_jTv7xR4+F#0_j-Q?X)A<_!{P^K7H~-=Ze*!3G{ycAlDGWM87_sufnY}bx z5BGTw0_c{%0^pB1Ko9t9ZYKO)t#3pCTdPGE$nl5KO~&8NtLXd?zZ-RB85(~Ees1|g z_s96N^M4i=I1+eJEc|)y0tpX7wtU8v7Qt)=h&P&Te%npOnYGrs?q($&ixAi=NAuK!2dU+VudtLkrP0Kzb&j^U>bK=c~l zX#WS3k6-`#2{PMMRCKPeS}zDj+QIqh_|;v@q5N^~&&L0;=64F4Uw!y%@HY+v;>&vb zi*+khp;+05QNo|6E)erKXKQ3k&Tck)R3zabe{5L&pOrrs;g2{#kN6t|*rGD8)!)FC zgwO=TblSBF2B1*k>)igrt&JbN0l_>E4)-_1L$|-P<@MKJ&o#*N0byw~2_r-dWQNr?#wiOEMG zq@PYZl@__tM#Qo{RU)}$4{I$dQ zSF2m2<}8)%2LK^=NwzFwJh#N`L6M9@{1PMl9b^2p;r)*$GC|C@zpjFASd2w_fO7uM 
zHfWVZ8sKK)|DgYO105i=!FWrdwf-8JiD9+PKG%c4zb`I6YYOQ!R8>`9x|GJ)L;j{U z0NXA#)x)Z>CLDWU_YYj))wF}2@h7@Joj;s@!SOF>qqn~te+CN@5}46f7l?33vo&0x z%&arzXw;ALLj>Dj;LkS7%52%{K7x ztw3P|AXw=wf7Ja6!(W5HeA53D_+u={Dq90k;P3Qh&qH~6mONWVwlkjcv>I=$-2TCj z=kJ=Fzm)EN>HsNzmhlJkf7Ja^|Cd&#jXql8?o~(Uty{NyZr2WP2!HbrA3l5p;R6Vd zA@KZ}0fdLe3!x8qXBfU3BX?lga&K>cPxtLxH(Oh3FB~3gY3X#|{%Zb@_Wtzg7vlba zKTHozG2yRW`jqT#G${GstTOhzC2^b6(hVjJG|wdIf0sBP__gNf1z z%=mBb!krIov!xi>n#o!1td2&zL=OAy_&Y8%z{nr;0NXYGicFPY@_)!*Y~j_~hNiZT zuHM0s@#(pR<(1Wki+6Gp=_0HYTm?fWLDr-Fb<5u;MiY$gWTxn`*cYkw(!m9-7cH#@ufkiXf*yQ_~r zSiFO8lkDT)ALLBIR5b6g5)J{u%rn?*Y)ca*RSWDIg_se zsS}Anx+fA3LVVd=-|PcB0JRaw$Dil`k~ikg%oLt6@dxeijpBr{CbIUKtY0iHhH=!D zlVSSOKhg-m`ajM8(b0##`atu4m*oCWs6+BZzg-QX5ef93KgVq8Ya8rNO5W+!1Ky+h4#B9>*rD@A21w8XIGeoT{m5ZNJ^!KRh}K{M}!D z^oaOB@?uLffL$26X&}uQOq5jS7UY79OioTVyGV^l#Er}>!y(B97^Flp3^|BDoxmW3*%@i&kez7skw2$wfj=j+??(G8ky$+CF9rEC93bN_EDYA5hws`I zPwFV*;KAejX)GdsU+T@dO5m>-9>AWQSy;Ni^6=5p2(-TN8R8-=hBt^M?vJ}pI0&kXT@{0M}NfQ<&2c|h{Y@EHh&#YX$vRa;b4 zTvU{uod7`taDReMAD%umB;>9C2;ul+?$4ROijcQLLV&^2^n=ln3_z%CdjN^iH~;69 zE#c42S!KZBKF9LfC6eq%9pC#!tTF%BkNrQH|8sTs!_)k_1El?>3|qoXO3r9GjY5Sh~0J@ZmjKF zhda&q0e>Dez#4#rKk~k+!Xg}hG{M4`!m9&g<1kXc1N{k@l5VI!U&JmFKO+G>>fSj1 znEPYHUy1*#ICF@who;h^?2IFE8h}`zwggh6PwlT$wv-|_XO%F6yJ(SYjGph_wG%wo z{^;mksZr5%{L$ZG$6s~-5E+4Vkp`IZx19`;@p)Fhb|Ow^mUi6k?Hd>z?uDO>YicMR zpPHLrzPIw=;etJXx4j1tUvEKzxBTH0AoKt!fY5nsJr9qX@|SSvMssK9(A-RS<&_KP z3QI3tsJT{L4>Qsbx~p?{N0S?C01@y1nENvgKeYB2oZj(>h_^Isk-yTM?5xbx{iL>N zecG^~8AE*N|7l5Wh;X#}ZnQa@bvNMm{?T;2_u+z9M?@uLUbB z;?nA_oI96+cdbgR8(KRW+d6J{^!2v4HPv1(O*(q)`0>=#6J?i*a?j-E;^_CPHvTaA zKd}$!&I1mZ_=DabgsS}f=?9)Uz$Zyt-Z_7iCr^uK6l7-?RyAD8Kavo?C-Uu|zaCmN zOjZSrtoEIaWB5`2aP-kUHKk3y=&KLV$zg!;$8ER=)YO%k37Ln6Y7CZ^mLPx`Cl0un zKY=Q@R+3@#oj+-|M#fUJ#-DSMq$y?BAltVq>izW0tP_+!0uj7F=NLTbTik#H5OMtB zD(D;H93bUyhrwTL-r%bCwuu|5`@&p_NAgQ9-(LRYgMsGStA&;tK6asTytfydV6FMp z+@H>0H?7Xeb@n($dl)7v8EnuJQMilUnN9m*d;ndCJKm2}v zJ|lxMSXx?fh5(qJc4W8KpkYdDO)(N)dOiQbQcAPs_;Wg|G_9MAt=C}P6S*fLJtH$K 
z^BChVGIA$`rlxLeWvu+A^bBbZ@LFziO7t$Ad*J!Y7#K>iQ8YM4p0&ZY6HC}^z z`1|+5>&YPCYs$#m+bAo?jjr* zczvggKfDQt_kj5k$R|&p+_^I{^7PrWXXWM3pFjT;!V3s5A^hrBzoGz&{C&`wC-4__ z{8$vCR^gVv-$VG~i!bIVg9r1^RTLMa2TVVGDrJvBgQcoiWv_`p3D+F-5&+p*@zb_Q zQlsT)*^Wa;(h$GwbiyBHud(Uw-~RTukI3xH=xB-I+(@h&-sVJmfUvnIA$gC;UqSx^ zp1*VFpgN&i?=F{=J$dp3J6j(Q4-XFk#Dg=qxTN7mLql%f`HqgM(QDVP!4#ktv^kjW zY@`@M*uI3sgFClv3l||oB9C0YOz7?E0%vufgq2&cv?c1~6tuq{jX4bG(-SI{IbvFUysL3XxM!s^+X~>6QM0!(k5>@Ixaz2#)gtKl?=d@Td%(! zlbJQ?jz4__q61i7{tUh`W&nY|MbZB~7|8!moxeAu&*bjevIPbg@X)yy7=P>_jz92z zUl9I&`}yaePuHd6mnZ?&U?YI3#}3Eo2nuz^DtoQ`xk1e2<=9!ZPoYGD=Y#`9qnoXW zn1iV&DSjm>$#Kx;W;87Q`Okm;3*qmtw*Ier0AHyHQ!qWqpDPLG#&$9K;LTWhogZla zkLT|$<&U^O${)6X8h^MtMZ@nQ0<$ zut0*8zqs7oTnm4=3I@;0ufifDU}%Af2ZZ+bnFb*J$(VW40x!V1D1S%U_@m+g3U}3kGj9*a%g|C^>uWAgue%? ztE&qOCE>}T+X#6C!cd4F1f)`_|0DEi{LS<3PiuiI-aA)RTtfJBY=8v^h)po^M*v(N z9K`1G`b7W`C@1{=SIE1Wu59%Gshw#_9mwwekt+DUiyt2fBfT@Ujl!`{b>La{u-tHX%4VEZD2+o-Pyrr@23^^+ormwZ4M;L;O}(S5{WKyHiuc!x236gmFiYB&VG{ zSC~NBU&`OY!aN49_0{@+z2z4#T|9qca&;8^sogzbeFQ>0pymL}%CNxD0x#1cNX1ng zgCKy3iHGA49XyqpnUQfKJv}XX@9w>a;@;aIAAcww{?Slx_`j4bw06+{MIPM0HT`@} z&M+)o0RJc8NBMj8>J{`eCnio~TO7MTKJoB=7j=Sq<*~~4mcQD=EG7S`XDj8;aDjXp z7T43V9sH+mw2MSU?MXV4axC#cEV5T$|A#;P;g1G?!~tRxY{y?p-|!&42iMb$ccqdO zlak;iS1Eb#1B{W8@##&MFBcXT6cnD`cOZWMi9E(%SsB;*N_u}t32}c6zm*jjN_2Il zrbdK^qa#eZbn)UP_;*2Teo52I_=Aog2{`(=ckgab)z$LSi`lg`w@my=m!S&$Wob8p zfj=ArS6skBFr2G0FE{s0PEJlX{z*?yKYjXi{=qGg6)-DZ4F4D8<(|pR1Z4)GXM%y% z3E;Q?{Pp6dCiJFoZ~QSwA9z0iHL~Xosbm2Zy&I?v#$ON7>qIgp!wAuAOMK`RcAB|AB)|0 zV7Jl1bGf?vx_f%zhS%WGP)=S!;kn|&dJGy&<*bI z>C@$<62_mW?vLwj4zn+|HtS9^1#zV@30l0`E0-4 z)t3+aIXgh11%CRp?C7mqqqlEE--AxVR8-(B%>2TF)~n#(*Ksv^OIkN9lkOiJ92$m) zW$26n9tcyQw5mw&?-F}-3GkcvfNy*C))(C$!S4l}_W9^&8ZsCSYZHijA~neBKHBs| zi@Qy^gTve$DblR^S;YuVD(=&_Y>nXB55&jqA?($G4qx#6(f%K6g8zd2VQ6S*JfF@J zNFR%%BHzKel!3mUE__FRp39YmGb-$kuJ-vDCpMM{f2{N8vHL^(AV33qt0E(}tEaQ6 zuC}U2QX2%nh;lOVLcAaHhdn>q^<#ZM()a>?;QkOlm@;_I7^M9g ztWmvs&5^Yk*DYgnF{2}Q#<*gTz1rH(K>@@M`O_UB0*D@vHo=TPd`()(z61MWnGe+Y 
z)4u4#@MT8~`-@zz0^DZ`9{_$&ME5&QFXFwA}9OzItJ0bw%foUj(B+2SR#)#NBg_aC`6F!&5D4MMjba=A zo>2bK{o&l7!`zEL_|RKl4L`!)ojZx0x4Jq9wO`W(*9T#C7Jdm99vT`O!=E{TuIb=l z+wJb|o~rZr!T+tw9H0q6EEWS0oi*joE(c6lODs=BR0sMjb&dv|1z7FBqwY0R;r8Uq%QXCab$`g; zaAHmmynx}BewVJN2YUa#eeiTIjsft#-Y`%K51~vu z+7*T$ENK1CiO0tX*=aaard4G$L=z-rJzy2cJ&U3V8u zb&QSSL!HypQ&ZaSoz5*REG>tezlZ+M!XI&f#$6D77Lc{TqzkAw!QlTWfVvz;gz_iD z5Bwd}O#RUCQBpz2VLpJa+R4c&9;mA;D}xpMFf9kJEk7+YHxE{G!;=G)w=@XU`C+#j zG)vd&7^^^wT0eX@;_taw^V9eEp0Sv6k^l%3?cf0kfFE(4K*I%o34xA6Ug@tW7#ZoS z%Y@t#>*_DUhqwGa0{-9&Ud;IOrT2&UA%8=~2{6K#oIs}x0a(N*#o_nGfw*~?B*FU> zuvB32K1?D4fA`G%ktfj&0I?`Ke|i^C0I<9q_(Kn9V5i3je-`|(uLl8Yu;TIKG+L2c zLPCzVx1Tw4CIoNO;hG@|TRH@?vTS@D^1*>G4bTMu)!-X3d|e>H78R%$2>P*N#L3qVLg8LJuo{hN|;{BGF&khd{7H=g3U3g4mnuIBgEG+?@cvn~p%gd1Ven`pP zdn@4o1P7?U3PIukMF0^500P!yQ2=%7j0hQjP??Osls>}Wx$bWI3c%y=D@d2|r;s4P z`?Rp+Q=j3$18;hW17E@)!_R;ZfT9%HRJZR&PY|=?)627FPEFA+pwR-uEDZ9eIY4a~ zOeSJ9{)iXsy7WGr>Tr1(E;(@Wf6n|d{1|^6e)^5yRlM`Y4SuolrA_eN^Dxy|ycO3T z%%hh>Z--uQeh&6fzeo7P$rubFCHL;H+?MfYI6$!rC<74J0}=oY?DSYIjDHRANQ}`( z_=`?BmF9X6o@*$CexDX5pW&n*x#MfakIcG|i5K|JUS|!j=FGIpmb2iy(KE39b=J(O zY32c;A53NeP4j@-AmmR1e;IInds2@l;boQx{*4Vi$Y*gj)Qw<C= zO#ouK^f9n$8nFC4I6(wZr%sQI)gH5@Z@igr0&^IB&`*8l20!q9(DqiD;4}Om)BvBg z8BVW_I)Ad1hS7g#&77VlT|nT^m+Ap>rs(F(f?6xYNMk|m`W!6ME>e&`%8Wy?umvU zuIm?v9zb6;dOkgfHLCBX5q}v){+tmr^XYq$u=JTTGcXZNT_Aran6Ach&mVdMz56#h zHZ|pP>HZH)%mq^ZJ}v_P2jo!~2;*1ekN1AE5eV1%B7e(EJy|`EK6vzS70ASa$z9qH z1mfU-8n9ouxHttcP7(fA&;iN-#15$OMnHNeSeS>Vw}SQcsCcz3XwU(FFtaQVTKh{E z0aE@5ev`nTHt^-~L+3}DU9n1e$V3Zl`NwGDPt4Y06wSUDNh^ES%q$W}2f@GpJ>H1J zF)$Ly@z*4*fri&2826&!{@cjd=6ln1Qu}s z%vxXce~Sx6q{j#3VUvr)UUYdt9>osFAvhFSp6G{V@?#T|mq(ug}geF5SI< zmtXY0WzU|R`jHXJ-zSVdzz?PsVZjSz{_NRk6;z3(K6%F0$!I+*&-Hp0$v8*W>@4R3 z5kN}dOZp`@y?_@)C;{g+1NRzcr>6#P-Rkb?>*|G9Ta8T)LMwejL??b>3`IEapDSkN2D4d&{X@;4z@daRv3~_-yY*x7^>~)(*gT_tBP?z()Wi?$4kP3GBodH)A+~ z{Dp+P2EXw7;~yv1)zSB%%I<@RH$s>PENfnvd%nV>OQTzT`Q|x5zFCg!NTK54?ekf>-yyj=Rd7+t%ASvU7SCMKJb39tOl@Q!QmWh 
z8qY!-V<0BFYjBQm2I9PpCnJpd=d#O}#q+3a68^(~So!N`{4oTtq3AHy>y_(0LyC!#NiKWbW^> zG9N?vU`}ioZe>MJK*~^`u10NeVJWvdv@3VkLYpbm1 z#qCnvl`HP8xIftV1Ner{PBiM?T#8w&3sMQ}F&5Cu}|_@Vj_VIKbP^B^^7&<8Nr_0pE0a@_64q zS2PxZBh?H!RXC%)Rn~R66KcFLU8=kCz_}N~9?5{`57bsGYZ!mb1G4EC=HEHeA-rlok`}^TFdMAIN41U$n5Kc+;qu-UT{3>qromzXktZ2pA> zClA5_?%A_vsmbRl&4*5m#IswlVE(-N-5<8?ZFc70o5wde{8d(NKIjBk^auxd>l_w8nDzW(~u>L03OUWND# zZW%p#k$dsyUH!4GKMlX@4bMIIjTaez==U(Nt>?G#7r4{mPs0C`+CVZX#-DS3vo;F; zKJJ0{4;jX;$#H-;A8GvU2lg;LK={C4nhH;LWi&(zWXuC>LnrgknvH)A;pEk;zi0fx zB&=0ggLjEAd|9JeAUSz*ZOd)P|8?x%wsI~%WTcsd#mHa%=+R?81OV+@f4KJo0el?r z$HI?UfPIg*c49Eso>Z>PUuETL*ZeUPAB(IY$1@;r#&Q&*1)`%o{t{TR!1I zOZIvqUay6}=f1U(^EbNWLl-|>lqffTJ@+@f__GO!SAchJ-S|8Qe`fz@9~Z`-t<5}) zj9qW|^;kfMIQ2<)iOs)lb=VKipY{S&#vcG^c7J;JdD_Mw0*L-^^yrcu{;fZs`x|Eb zS-!$eUp@Hr-p4mSHSx!;`ndnI@@J`*BbK}|Z;BG`lENSE!D#%U`-5O{e-Zh6?nfIp zGyWKW?&89+2&vH3})#cfBE?*cW&Mv^v%C~)iEX~f97ggd$u;? z8y)`4Z3?4#SjAVu>;grXjAiY{9Q@UA{J#3iSgj?~>i?d5?pr_O`~iS2ewM4h>(_TD z#Z;7fUNW4Nkk2@3O^?t4-h1Nm$JZb3s)O-&F~I*p?NQqd>`n6E?OjRqNF_E+K$-Xoy%w-5RII-2{d4CD{?lz%an zAz0FFaevPJjc3Q7uC6RC#MJ?x+>8$J?jtLIh#%w6S5mE4tF|yk1u?xtQH*+7wY$1T zs#QKykH1aqkEt${jg{vZe_uy(f4TSrZ^WJ81ke4kySVU}sn~i31g||y{#12U^&ll{ zGVHML${HY~TWxe>{$0}f`#_u{elq?Z?~}7a#KZGv zpz3e&dBIW$OP8t-a2bv*M3&5-P}j(UY<&9Y!RJqEm;>a~?+A^*OJI{~;UY>Iaj__g znD~P?cEPVglv+edCDTcu%`{~(bDl<9#jM&`Mh8ICiC|u?{xbj9Vc~Bd&c6e;s%`n1 z;t!RvohVb68J|4*;<3WtAu<0}tU7iK`GY?TB74Aa)fyWP)gfC+`^n@2#a_~r!PcrV zA2Oj$#@{vze*?&0t*x{zKU@5PO12Zld~R&|>dUX$0@!hX6)u0}z#oJWqsm5=tM!Da zRxRMB@+esdicp21F4>I=f13tz15lZN2jEqVgYIdEjbx3#Fz5Z`%SUVh!uhM<{NYt! 
z`GN%tX0O`6dS*pMS(u6ne2kq1XgIzxR@SO4cjox3 zstVA68=D?I)cIpqeHG2a6&0mgEg`DaN{D3qlA?yhKVx*MWctB#7&isc6`e=*p(uCi>LHt4*d zmMM}2GESZx^j*rw7$DJP^T~`GUp}{XHH3K+k-vpL|A+i76M}^Q%p`x-VFPVZ`lgFd zVF5A%Ui~}@-ZNhA@Hb+_=<*dE*FNptzpkQmf+nF;QpTw40;bPIxblk<9^5b3P)XI9q% zrRvI)!HT!%&zn1s5y<#EHfmB?>DaoKQx}f3&hiTi<}KT{f4Fn;_I2CK%f`PsW5N76Gvihp*k*L2K+IS&38S%Mp^;{M{q<%0 zPYqt(zp-Qj05_YxL8?alk!c`s@W~~EY&o;i&7Yto<=u>VZoz=g|~ z;GNZX?teDe{XrF6+VCU%vWb%LSu~?`GX6FbF-AI>Dcg^oIo`Ap|C;&I8MW`cvti># zcoEe4`bPNM@hz24pPn~$GFM*O`SZ~rf#O$J7@MCwt+_DRf3_c9&k5n$&D;0-I_lr3 zu5|I$brU7oXOWB&V*XP}$mp-wex}uPX3v^2@AXMl)qCM@Wx`)~!O(yDIQ)U-mfELJ z+jLpNq*KqIE}$Zx&A9&Q)20i9=ioi87ujn@y{m5e8`V;2RXxdo(V`d?#Pl{3 zGivAJegpX#t0#)avPn~?ZQ0z0f7c3Mb9Vmp$x z!|-Zb_CB*q{qTO-zH{LJ)+N(qYruTUF z(WZu3u2n=#C5268NWveHynFZh zP~R!wulMQG*X0Al#+n&7xA?4^4!>DxTu*qmtbjMMS z2sz|DH$H5Q2E@GcOxWmE?CK&%G>xXNYTfYRhr1hgG}JHUC6_H*CXR{pnHm0sfU;^* zoQoo5$kAbHG$Q7?K=}+r+#P$7A%6xMg?=V~4V5@$%UMht~Gk^K8kOPzgY2*!pKb9r_*vR-#QRINSBI^Gfjz)Wb3k3?+>(|D~iEp%4 zSyh!S{$QF2IM=ip1Ns}Eg}{>d4K9d))=D$RpY=S`Xeba1fz=jUr_AwJE#Je?)f|3C zy@yIV$QM-=$XFGmUmix~#JofX`3n%RItajFXROL&xC2DURc&E3@{m(vS>(?yARv|^ zBYZ6S?zc!TuA-r7ql)Om?)@@JC)6|!nH?2HUC+o=@PSOASYr^gu%x1&GR3m?3|)_b4@z8UBPuCK23GOPA4zm^ja(8Ve?1^(%#VF1c8B;{J;i*f&c`E724h&4?9@ z1;;Tds(A@+_0mc#zA;PuG1s8zA}3HIIjzKprP12o_`@B)B*$J6O-{rWFjM@YMzSbc zJZ;8+{>B~dd?+f2x*6k7BQD*^0RHk|0noL}5@>`*?ImLxGsoYQAc@jtEWmIxapF@{ zow$J6<8MlUMpIH{tWJKo`Ij?5p7z1ozdDQ>}^zIM)|YIz!gf1kt1g5tz)!Z?FOO1u(o>E+$C*2a?~|< z$zsebe@1;B5zGP(JEP#wcq-4-n4Ey==Fc!PE=4Ad(q(iJGo8vaHX4-YZk0)FaDkrm ziZSK<`DO6V(=cl3GD?W~&+}Ou6H+(r{P|>%5lgvIM$C6A&)(?kQXJ{i&z~lPD#GwH zDzlF?WwCy%Minui3slbZS(Mbo4L%vAw_eFq^Y=d}q+WsND3AiA04YEUkOHItDL@L4 z0;B*bKnjooqyQ;E3XlS%04YEUkOHItDL@L40;B*bKnjooqyQ;E3XlS%04YEUkOHIt zDL@L40;B*bKnjooqyQ;E3XlS%04YEUkOHItDL@L40;B*bKnjooqyQ;E3XlS%04YEU zkOHItDL@L40;B*bKnjooqyQ;E3XlS%04YEUkOHItDL@L40;B*bKnjooqyQ;E3XlS% z04YEUkOHItDL@L40;B*bKnjooqyQ;E3XlS%04YEUkOHItDL@L40;B*bKnjooqyQ;E z3XlS%04YEUkOHItDL@L40;B*bKnjooqyQ;E3XlS%04YEUkOHItDL@L40;B*bKnjoo 
zqyQ;E3XlS%04YEUkOHItDL@L40;B*bKnjooqyQ;E3XlS%04YEUkOHItDL@L40;B*b zKnjooqyQ;E3XlS%04YEUkOHItDL@L40;B*bKnjooqyQ;E3XlS%04YEUkOHItDL@L4 z0;B*bKnjooqyQ;E3XlS%04YEUkOHItDL@L40;B*bKnjooqyQ;E3XlS%04YEUkOHIt zDL@L40;B*bKnjooqyQ;E3XlS%04YEUkOHItDL@L40;B*bKnjooqyQ;E3XlS%04YEU zkOHItDL@L40;B*bKnjooqyQ;E3XlS%04YEUkOHItDL@L40;B*bKnjooqyQ;E3XlS% z04YEUkOHItDL@L40;B*bKnjooqyQ;E3XlS%04YEUkOHItDL@L40;B*bKnjooqyQ;E z3XlS%04YEUkOHItDL@L40;B*bKnjooqyQ;E3XlS%04YEUkOHItDL@L40;B*bKnjoo zqyQ;E3XlS%04YEUkOHItDL@L40;B*bKnjooqyQ;E3XlS%04YEUkOHItDL@L40;B*b zKnjooqyQ;E3XlS%04YEUkOHItDL@L40;B*bKnjooqyQ;E3XlS%04YEUkOHItDL@L4 z0;B*bKnjooqyQ;E3XlS%04YEUkOHItDL@L40;B*bKnjooqyQ;E3XlS%04YEUkOHIt zDL@L40;B*bKnjooqyQ;E3XlS%04YEUkOHItDL@L40;B*bKnjooqyQ;E3XlS%04YEU zkOF^Z3Vee?>J>;cvOgI^A>|7Eg!oJO2&TLH#9z9{FYUcgHGe-%yZY4sbn`cs1~BCc zq@2I8L}0oBOg(=DV5$L3KY!!`(+pr1_#*<-OJFwmqe(ce1ZIRka)BuYFf;s-3rr<} z+2PM|fnzD8Nr9iH`Tp-ub)~@t=vkB=)(T{cKbVAvwc6A7WpFrhc|O_@3P4sxj=c0hjTom{E-VB&d^DHOErH#lTJiL z$B7^^=yNzLkZ%4A^Q^!RK8LgJ)Hkj`%K7s{Ch_Mt(x?wN1yawS?&TEzbmMSSpZb%2 z{xmPA@~0VTyoZ|tS>R9cayoyCak!~J4Er+@L-G2R3I1d+=i^T{7Ejl7w#48oDuEf{ z&x6dsAGyF#-v$r9A`+Mx{sd$u{yaKE4aA@4K^lHVnuN2%p97hhKVkgyL@U%;`}ivg zfj>+0^UpuiU_+g?EH4>o^bY$ojTz{}tb(hNDgMyQ1t=D13@BG<6C3+NHm}7tnXZD63+9-m{F`S9l1%J$Nc!F73;_qb)B|+i@8iNWJ)TWJjLEAD{ zuJu4mn#&}Un<4%fe!&Emlng@xwb3s4l;;OW?}0DvFOpPFHuw|h@lQKrz@Ocil%$dC z(T6*71$h?G+qb0iY{gtW6a1<8!5nOZ4EeJf%>or6*EilpXj5v!*Ssx5Dw9|=ll)l$ z75T?IVg>mf?r+|jlV3xo(y8X}=gt}F{tx&wdpUSD-cWsx~R!=xS1j%8=qO+GS zML5z}$U5SYYYbb!+G1!!*p_k~mva8N_p`e|L75|iNUEQ?G%Adp4>i_pAM^sWY;P=@ zC84gRn?DJ^P;;=#otsKX%ShFT(S|}IYWQ@mk?PhChI*YGLkqF9v_@hkf1c*&pMS3K zXC_dA%uVPKR8P+F7BphhWS7$7r7#hjd zly{f%)g{&ZJ@4CpRx{+MVwg}KVg?r&Xv|mVB=H+LGTGYlfiPcP(#_wiud2Z5DH%ZY zfihw~7Z_;FSLX!1Up}}bT{dr7(#~HF0F_DjWtBhO1uFdM#(afNdfabjWNXs>Ndrguh7GUmxQ~KuPP;3;4hL3d|9JRQc$x%QZnWpO?c#oM|F9tk_rBz zxj-v{e*O|AFhOE)*b}TSepSTZt4`2Y9%@=yhu8o#@t33vj6cl2lEmMzcx?4~K_7go z!1+=NfjVjYYpmxw>HNRrq6QyE@iP+lf7r+9-4SON|1$&Y4*{W%IWfqu1u_`^xqUm{*k0qrEBHl`Ji 
zk<1c*;U{5}P=SaA8cns?^P-M%;f^=-F=Ate8X{(8i@#h1ia9uF0fs2UoanIued4AFt>vsM=K&eyFP96nX@ZDZ0Sq+SY7ESeFwBV_8rUgj z^qMQpvCPh0SFR(n$X~Dvv_i%Zvk@3*%q6nL?u~O1W0t-s*}OYED@WIqaxoIwKwM8~YlUzMIU~m&O zm5x+fjg|`NNVJOlM`xBlqYLy9Gt9G+(XUgW(SLZ9oPg0y%oL?R{$m5W;XgQ9PP+Mf zP277keyaE1UU`MDVqKDSpXGSSV@sw3Bj%-=zh59S!2r7J5T=WcnBoHC8zXA29Bv#~f2LH% zZ_J$|6^|J;wGp!_xmf!73r8S7XFp<+3yg1+1&tavjjJI~sn%f6k);%ml`u3As~)XT zHuwv170AxpfSBh3;~O&r726}fP@h_X+;xMGZx&@0p%j8i8olO_I)1ditb!pR1M z5`-8O4G=ic7*NhN83tF+a*que(aMJ`Wj{=-50mzTqgXP*pKyRC06`4&fT&5{phOWi z!m)oJIFd^W8_{M-rU6WyaMxE+xq3ALOJ z{=5m;w+>Ns%0-c#Kqwj0sB|D8A&spT4JcF`lhz+hCe?68_)`IteV`&#Zi;Lkl9h~Q zR5}opFpM(G3Mx|+o1tTgwVM_GtNM}k;Tn4^eedzq}uWz@l#yUdD!X2vh36lvnmIeM4N9H3?7RyIbpiySbS2DCwZX#z~9qgQk7vTW4M=X_O ztC`?WVL=B_G5Rs`#Pr)BN0{RqMGfIUxr0g8&xCJXJCgXb@8yh;s1UME@bi}sMFK#n zB`NAOD&jajgEy%IcXS9cUB> z4bx8q54vXHFVPNG!!8MqyAzkIzD`l6C;0WK5&l_hFN=@@mUB_(gGPT8ONCa0485U5PylcmI?l3038<0Lgj5VGbZ>mPQrmk zb2p!DF7X%U+7FL;kzN0(*MNb;oq*9qpj&NZ+E#+yMTP?^vqm-xo*w0gTT==#oA!Czp(7OnyM~uaWo*@Bc=71UkaUXCXpK4KlT4 z6l4ChBwvG_zvSCW_x=m`KOcY^3#w#^8CAx#Y9koc?hTI=@fXSec?7cIi_b!|k}_nz zlA(>mgE+Mp#9vhY#3YP5#%CdFNpGoPC>heIj{NXQS@|nO_tL)pU^B4m0?qt|WFay| zu9q{F3}-CFkmg$_@fY3y2?CA&5AbltBt9tSl1|0R8*McfBj3hfLE8Rh*s>$MN-t z;sQv{`gg(b$k%A0vHi8Yqj|p11c@7$;RKv zLNuEp|A&4M?uXb_DikO|{7sB+w4X>}Emr;t0bs`XbMJ>3hbTS^fh8xF8C!EQ;~Rr& zEuhWLAGajhbk_J2TQPWJ*e_Ik7J^F#S<7M@{rxXWPA>kyo?Jh&$Div04FJYxAxFso za~TBdcm*1*r3z~Z!C!I&W{|%C7Z^C)`5CQ{VYTs$g~iNTdno>rA~2KuSqaR|f^AfS zAezXlwRpMsMq6JBoF9_EWC+YCf0ju&J_}(=n!TJ;GKR6hoY|@m&0i9ifm!8Ga)FZ0 z@mUC0(&Pf&l97#4$BP*a&tC!nrkcN>h+7}VPxskqZ}8%?5VEAf%XuZE8ohoOH5P%t z`~gfif4};bkvx$p2y}-pJ`15s`XG}_Ml-tIE^Z_We|ZC#cK(71)B#jj&_^{Pqt2L8 zZ6u@K!6Kz2@s}@v>E|z$3v`AqJ_}Jws*q_VBN&Uso8C+`{_l#a?@UM?^b{DpFXVlI|H6=xJ56n{yn;`qiQ@uoKunLlFzy;e&) z8~lZHfiihc9N=(;i4TgYq+4-(quaCMMvOSc$PCr%tne4t1xf%KK@lQGHwplaB|{lS z+eQA2fts;MsBUM5KeG#5x(&nj?c3qsw)PGdI(ayF;NZc78^=v>35*Ylr6h+ezHy?s zv1hHKAO<5$rp%W|*KlP~?0c4Y~RV(}=$1`HR){;4mL1pc==4~;dX3Bec%(5d#j&wQU@L{Izc^(Fx 
z(4MO|nc`2MggJhKze8N1A%(w79)NbK;!^}<+pEoG9NzqK{3QOEyc&N4xd6--f2s>~ z@bmcN0IK{60P_JQ2(hwBLDoQHaF>Fr&C6!;b^bgDKbb$2SLbh_#a~q9U@x=ApX34s zemVGa0nBUA?1d)jJm3Yd`7w z?+^%tUs28fT@nBWvXGB4pzI_F0Wbv`1Ip!T3IdPY5I==K)E0t)zhD3}$R8(gYp0Pv z0MN(Zr44Km639Y+Mk`}jt<_jo7HG^9vY_I*0A%=?_&comKepAd*;-vD{ok(;z^$DT z_`A#i3}hisqYW}vt-fp<0oDJ zu{px$|1Mu<00y#F_OW?%s? z)0_}k(!h}@ho6CEbz=}92Af_ySK3g8$U^)A`2zqMe|Y`P=U@2O<^j{q-kO@%j$~UIS|USpZBue?Avz#gFa(flxO7Q~))H69`Haqgt<4QZ;J)$wnI< z0r@Ilxl)#j9Q<>=8KOW?A{mv6^^%HF=T9?j6({oyTtng2To0P6m) zv%_=^m<9f17bx)K{HH8Cz2N`qCxK zXHA_mcj5bsSFc&KVbiurq`xZG%ngDk>*ef#5>%$&;(A z=Is&;!o!z-5120geyE0$H@)x%1`of^UFiQ|AK*a8g2mO--k632a{R`XlulUE)6>*< z=JcuFlP8X!IC;8nVCX{EhK18AOV{o_)!*GZZ(@nF9Vmt>5R~Xf0hzI+FdF%DjS3)P z65>=se#%w8GQMK|uBP6TdpDF%esk9LrqdVBodM~(kA2j2sH?ko%iiv`($bO%W#tu> z_&L7ycc*^UqPpL-!CYF|rD=l5w)3c88ce?lF$y2>&`p*xvuCD?9);IQ@J<>LJ z0{4H22Oq9LP~sSQy_S-~%bCp%Kn@_)i$aFw5N6FXS82#q_$#ZNx3le1-`JPX7*a!fFA2I&q@e%{+ zefIbFPjv@-_(gKz*Wu?6J>Vej1{_#WIc4hP%8H7LvXYXL(#6Q%*|Voko;i)-Oy59X z!-kdB<)s@N&z){>o$buOY*^TE8wMy(BO}{Zt%JSI%)7)1ng7}U7bTpq)#=NZ0DQH) za^CLacTex#J8?q!)TRqxeEG@Eiv!>SkM|twKG}@?y-_j|#I2k>WolKqLm%IMb^sh0 z=#k-rxW}Cv>+kOyz5put9nemOC1w8j7m!WC$lv+%>&q%(0H1}sMxK=5XoIJBcvGoy*9CO z?!NPn`uFXfP*PTT;PP*N_vFFdYZv>Fz!RsMx6JGAURgQ`@B@h_)*TYsJGVa=fddd! z0$u8)2#m>a1yIG$#Gj0xcmIvKKSto(32bRrHh$c={XLE48#ZiMyJl%^U0vNd3|&i? 
zmTx|fG97@!R33u(13J}%l~kZnIntj9EX#}tV8QsCBssvBCskH1Z*OmF+q8R5<-9Fd zuU@`%;ozc$?=Dy{fBu45vn#v1*NrbLudFPeunaa}Z2S$JJ|ejd2|F*^H(UYK@RRw2 zA7McNj%w>VH&uVYF?2P+y-*g0hSraRFKPLtiKXSOz#k7IATa*ULAuMrpPvmW=e5^f z4GOPlK`J>3_9~QQnm_-i{hud?!8pro&_t;LLWu zSH745=<&y&2s)2He_JL)-7LN``&wtX3d)Q&YUV`JRva; zLgkOX`OR+*gZ0PMsqek_-g-SO!+^xSQQy=q&AI5>JNH$OpHx2a)v^4vCXOFJzO;0` z@P9)??PJD{FE5|4x2I>W=l;U@zW`7Gzt{k>PUzHm>3~+zcQQN+$)5|L$KO%#f0aJ} zSKobn&BU=|YY;yUU~Oqh$@q!v$E{^$V<$|muCA)xE1Uv9?9ic(rmZVCEMxxz_pyOT z*DvEEzHH%)g){!)8DcoKuMbw_{($ZpSZmqS+_r0L!`7usVHfc2C2hUuF5f=6t7i7= z6=Pm`6~TjrE(;QW=mEUOUGUh){=u>epQe~}3EDreQ&ePBSVj6W<{ zQ2qpf4u5a~XydP^yJQ^mevbdEb;KWs9F~rKW8Tv3`;J`s_nRFpAI_gX{jIm&syV)L zYg1eE-VgZIkM}PQI0Kyh^X!%*HvIbg5x)=C>}hV@yLs`O9ZSc|=zt%YAMQGGEWjXxs`4t|j)VKMGbFKmzrvp8J&3^Xc7=9AF*>oU&2mEAo(-KZS~Hp}Y# zE!^!44_^V#sXuqFeBq9L?FTwqHn*q%x-%X}@Yw+P574;V5iN3V=jsN;ueE8%QsA$n zGYo&Q{9xh2xcPuzqZmCue}}gQ@Mj><<1fM_>>wd}0iPyFTytcjsX)9u1|a&s+3mOi z)3wHQ{a3lS=Xm!Kj$bf;9KVkBFl6P6cQ&=RA2`s~uv>M3(r_b!&ji4!R{HwS?OxM- z0P$;W-U;(B=g;|e!H32Fx%{o)Bk?EvAC!Z^?!xbI4*vY08v@wbsJcLjKa37D{toSG0Q}n8TKDg7-c{r9=i!%= zzX67`%H!SD;N$$_$|HSk= z#N3~OKfLQE@u#t%4z_C4_zQg%2vF$Nsulap3PyTa#h7Hm;{W=*+aD0?J}w+!1pXRY zTK2bc0Koh>rK#Dp>3Bd10yI=V)Qe|>9Osn$oUR;!PJ`?zm z{2%WBtp>N(&Ja=jAMgiHqr&=)<9ASBf2?igPJ+8X?*HE3!ChZRC)~a2>Vn%>Jx80) z6okXW*ja?3pvCdr$S8IDYKtk%Juvnl{Zi=ktF- z{GDU&&)t4e_#0v3&mqtoU^T|Z*W>Rm|MIW?>aYI#umAdgLin4%`I~Ql`#azH&i{?! 
zyWjmDhVOs>Zz24Eg&+R#FaNI+wKgfmDwfhpG`>-(CS(Hr-vW34*}~sE;Ey@L@cdOS z-`&x9_-M}wc>etKnX>}}gXf0^`c4*$KLMcN4;>)*KhB>FU>&}uK_R-*zw-JfeV+e-uU>=y=!Mz&f*VY&BNUf@Q-b6g;;+)fB*i?KnExRxSzcy=V7hCU7e`{56N^rIjB-QWGM|MmBO|M&m!5C8Cw|MNpN*#%)Y{9p=xL_B*oY=*!V$uEBig0j82XxBK#qjU4; z0l5Dn0(klDeT=^gDu1)i4QTwe2Jq*&KWF>Jaev(Z1@RX+K4^3t;4g8S{k{a?-?;$d zj|&|DaqYnX{J{%>{5ijvw=^CEXK+bNh1P7x|5dX4zgGV0Pn=>7(9R!w3fkchcUr3I zcQylmA9bHZ|99ouEr~z&Zb9~z1$d?j!yXK)R;-w%0U4G^1Q+;an|-j`x9C0u$pPX8 zpu7V45`wq}We z0i1w!{_tA|n~=_^R9Hs`3}|EoGXD5Q9y&nqfXJWf0l@(Z515NT{vH8a%Q?GbD;P0f z_&J28ee!hw`3qOBe|+cu z{cC;P|4IBg03KtZn+3dx-@A8jtgw9$oYd`+c-aEX3))^y>Q{W_oq}k1mgXM zdGpq;T(SGy(50)_ZvlU|4E*s4_-IZ5PqM&w^iK8m_BJ;+pMXUX4?2JM?%%vGPryb8 z$N_8t|HmER<+l%B?(Ar9ZE4=OW5*e|nuQG+{2?>k5^332@0=C;m^6vQ^$VFlG5u=G zj}c=2eQC}*hds{U`nqLHW>v8P!@;%^$c+wv;tCMfAkGXd0qCwl*cAMO=fVHktL6OV zXw`xf?~=tBJ{ug@#2ItpNBaxG7qjcXlU?ro`!2+!(0%H(fX)~y3Wm#^Ho_3_=iS0(-w_lFQlGcY{wiov1EVN3_G7rzjB@y7rJ z{tj;1v_QtItZaSjfsXFeA75(pOu9zONEq9Z^1W=9VpX{u)o>%S>PkhB-jT|FNR%pDDHNN7v!) zHh6BNvHegN{CBYHF{b%m~puA!5Axl(jG{-lZ8}sLuO`Ta&TlXI4 zk3m#kR<;0s=)0j6Z-8k2uO9eg4v=lZaQ;d?{-)Ir+`$1ld-lwxh0{tZXU$!`aqGUD z4?j8C-n3JhyV&?H*mP*%_;Ka}o%vTxz$$=^E8&(cT;o9iPp5l8 zJ(zz5f9882umWKKPMOADZuQ1g{!+hFH*em;@atdy9|$)fG`uwy@78T|_UbpZxy+fpDp1DPyo=)sBXB9Ka=uW>vc|_{Yg5;g{l0hqnY_YhZT~ z%r60j2bB0T6X;fKGRHUi&%l^7ecsZV+GRBh9RA>k`x79vw|B$y9|)k%--*-U0C5kx zVb1J%l@r+pgv8(4fm?il&eV*Cuxi!u!GoQR?R$1Fd29VNcsmq?H>bTmmA$=-y}{2_k-{_p?(yWd@EW(>Z)bp5{dFatIY_Xm8&5a0hrAyzBqq0o+}MN_sEi zz5Ukxt*t9pebCi4)Zft1;9UK|B_Lk`cX!tz#SluT&7AetgfU~rs}LfQbGL1Gt{HFN zzI_M7-Me>Zk9zUdMVmN(p#X$VG63hDeQW^m`i&b7e_I{@_gQDHAZx^kpG}xiyNo40 z_t)sCEsnzNKX8A8j|G0e`|WRk`%V}%|F-Z~*^TTl2Z+>7U-bT)rQ;<2>Id2U+tK%d0l zi=%3nZNGQ#238!#0X~xW^8mccJs>ox^Y`U3&;7kLYR<}4<3_wRYE+p!`-<%s{_r0_ z$oPZZQQ!}}AGklp-*5iy-~MfAXvq@12!D4zc)*1V=FfoDupbqdgwc4n_q&<{d-?L2lK-n-bKd9wnESi25&R!_e-Ds92S4Ob!tdTaa9!7Wfx+(X z-N4$IF(u>1j~fFq2yfKX)XX0}`fY&W`t@r(T*bgH)Nb8k{0Rs6){FC2uG|GgnO|yfb^iy4$Y#B0c%xj49xp3ec=CwfPsv6)xX#{$;Z-!w>nx^{D3l 
z5Wl;3VFtZ=Xyf=fC1V(QFzJpPUjos$R<2yRkoiBsAJTW_3g?eI!H=B@xO~yN^&6VN z{~>=l9iRl@yt7|(0PlmZ8XWxB`R4$@Yn;D-89l0eGu~T-7s8y|Q~A=b#!RlRs1)xJ zMF8=xixU+7UK%+P{NIOXp~lDD`MLOU{@_yc>A=AI$Y52~j5)KXS5;^Px+DO9{0dN= zfn@;svk3em@GrjQmh>Vr5MHhu^UQ9R7YS+f4%1_071QQ2{NA39?6vH_@`wNUkN=eU z0|1e~V{N|czsmZPXTbron{cae1?L3Z!iQG_R;@bmpsx?E>-x^j9XoFH=ymH5zem6y z_`lrlPvCdw4mh*!?uBLJ8F=9Q##GOrJEwl_y84+o|2ppP>NWIHj6a7y?gMY%Ik|Gn z_D$<*dwZGxv-0OUKmj25zf-UXK@a$VFF`ne0N|ea^XJaRXZUBXUcI8OZta_Iy*<6^ zwUM+9Fl*Ux+7t?6$rS|#|~u|vWym-}P<-3F$P9b1SH zq9dHWWy|Kx@ZTon4**2phu#nQbLjh+;|HmOAMV<*ZS(T|>&^uOh<$np3nRhbS6|N) zH{u@fH8Ajp4)EIb9b4gA6Z-)7wRu>+qI^U}=Yh8U&HEbn?%2L{%l4MDI60g;3HUW~ z*+<~@-+RYk;|13qj6bB$k00=d_&t6M^!LBN7{sh7pUl40&iHxbj~64_p9CA@gt2lM z#}mwtdZk6o#X@ALpT1UQa~PvrzZ@wmtA1zZ?ANDFM)sO7!BXt^KK?j>$Y0~mEvFV# zO?z{S3}8=pcMtBt!8&Bcsx@oZ@7%R>*RF=$`x<9g&R@5-e$DDtD_7JlUtYHYUcd(L zFzW{Q2mbH+M!}zb@!`(C96#jmW9Z-YHOEP?%BP2*Uk@j>|j@ZoIVa9I}ZN2 z4JB@Vd=nNs*BV<|+q#bQoZQoG=g-}PRsh`nLI2Tn=X&8yFMJbben0NQT)TFeUsm+V zZ%o)(K4R(dlP7z7PoIHbYJp(={o>bs=i0gKc#T-Pg}w30;qPls-*12O8(e#^i$Cyw z-2EYbU&DW}Wzg4v431`gP|2@X2F`zo4r>oP7}{2<+PK=FOWQ?W|dOwC6~B^S-@3SFc{fiI*XG<;vyD z&Vg_gkOzM8@}EY(>!j!Y27y1^cv10lCSMOf(Co`E&+JA9Czdf5 zq|sNS$by<9rE(hcDJ!+3Moq1jO{#=VTwrfW(*+RVcP@W0{~~|R42(-)1n_f7=}pt# zoK_`#VBe5LAddos@UZ&&%F10nQ(xaTl=3?Be-eI%y%&r70|J38U~A*e8&}%So<7>y zf0?aK0YM({0C;Hc+SMzUFI~DYbRJ%Vc?5QGmfg8?7x}ZUK$rt`F9Gpuf)@*X8yc?w zAv}Ew{4oIGvKE3Q3Pg9-$!VEBg};`TUU&8#1pYXFKIg|KU&s3~{MdT!^l7#u9e@eC zDs}jkR1Ra1IS6e0PAKgt37-*m_wdHX`}_9&8+`K^e+qz}2Xp|G`P*0tldvGrfEYMJ z2>bWHYvB*@llepcBe_3ZpyEaev59o)l0K*S8g~xFl_`n7XHUOv$zLp3!22qq(#ZoWI%K*Iz?>9EW)dqa+`~W&G z5Z1uXCd{Ay^qC~~M(}-A@IFcA0_Tx@$EaujQn|*fbJRu z*TC)~Se}CC_wjY`1L^xHqTK(P=;LqN*xL2c>~dHmmv4mkC?IHONuQ36?b~7YiTO92(h z#>!fL5zPn$4+t*Mor3uy1U;ZK2Y(K-3Z1u3L+}=TxOWMBMCI=j7+APC+p*nQdx%>f z+SW6}54WD(y%%To!7u)Rzb;tp%v#gm5BooCi7aQzNF9!yOzzbr4RS5p*4S#h$V$O=P zQMfE&hN{Ch6eYS9>Aj$j;A z0pwHgZ-4iH@MaLKf;|@~=3wLx12EWkq^TB~s%>dm1|L!RyAS-qmdOUepELVLc7Ndi 
z&YYe#^WsHVU^uUi?#D}{)2HDY@XA$K_u|G3+Z+VPckkXY=Ks3x>JzZH26h+0ViS<@ zr!7K|KLpUqAD7Adm(j=h`{>ipmNLr9M~oV|_weBr_}D$nttBP!Pg&Vg2us+5Wmp<6 z{MfZ0zxd<#ioW=wVTphP2iwX*d}9d2tb7Lbh>cAz@N_pO6%Y!fBd81 z52fkaGZUJc)!b4GA8mdB5d#bxOzRKe@8Lu4|D>%y{mz@`{>+OH@Pez(oavo43pl%S zO)<{6b6y(kITp zsE-^R{Fwh+)!Dg>@%I6|c?1LeaBgEW^mu3pZu#P_pSbIR8!rxjV(ANhd5;SpK*$E$ z24H-nQ?a02|BPR;zho^Us9XZVE{wAZb{wEL1%D=q+OY{MTeD;P4EPA*Z#OtgJ=|yb zq5l*3iN&w?;DNgUc4l99?~iW)vMKoH2e8#RYcyO+UcbV&pxG@D-2Y>@L(u=>Z4q>U z?4J#zZr+0Vm-#ev4>xz+i&9b2YSF&H+oOZ zd=u~ZUS|#wx20jqy>a9AZMXr9+tk1wbBfol1D6LK|Cc9#^MDTYfbxC-e#a2HT^n8W zAR^PJ@>lNghxncUlGEq+eqTLuF8%iI1(|&u$zzb9F*ITcb_oy++6KfsAqs!E|L2BK z{7D4f1O5Pq4agtg`uqHIfaSyqZ2_Fi{oUAjw*hQu>xUxfKff*Jm+Sd${*)$*8&MZ6+d?U$Gsn{emQ>(KjhDJ z?FXCA48J~vj|B}fk)5zn<}U|gzVC$6C9S*Sdezw7iV8$fUW2$xU~w-__J0b0_{Re= zpcCLTEnj@#`U9^2+`y0O>jVB^hGYH@@Z$)6e(~bAt-zmm1t?FyrWFYAwDJ1Mp7om% zIefq#rfcL1H<$4y2n!rOHX*~3WTVUwP(U?*o(`P16ej7hf25M%_pYmndnp6vp%jp+aQ z)X^9|`c?jbIu|lVAoACLZU}xeEb}L>0`DPy4m1NoB(;A7bi3|<)y3|_u`VIA;y^e8T@9ZsFQAf5+g?pOuT zeH1}l0|I}{14{h4${>G(1NgrI_J5f{y#E27kN=DQ&%y8fLqC4}y6=(e`CQaw0^Ljz zNyZorF_l2k6ZbQ8mz)D@mtEwm5b-RG2cXAa`$pwAaQwBD9)afuhR$ESeC@`^y&8Xx zcf|RZxj+xU&px|&aVz*gAAjK7q>DfI;TOEnL;rW}%6g34U{`v0t;ZG`d;x;%UcUQ+ zM+}`hbGB~)z7@0gi6w~U0r4u-dnKjk0d)Wce~(XZs;{Zpw&TN{J9q7F*b9GMthr_9 zTjdM(Y}&kK+nzmp_U>ue-Pm#aHuUb}yZ1l4ew>lV{~0lL>hcxm`#6CQh4bS}58QXb ziFfZ(9|_!qX`dgK%^&1oFlGnzvWtV=-S0U0A3@%Xc*Y2LK6X}9`3>PoJ zLrxcm;AN)AdQKcWe)9C$b0>QDR90?dKh5PYBRGEi5WMB(;6Yq}h+DtncOT&Y)^7FV z$8LWZXJ5o_Ei`%H0=${;Dv-w388F}CybBoeJr~>vgrDu=^H}hP!b67+b{>3x6o%Qe zH#awX0OFOWwg|QYi2N}J*z?w}{^b|H9KYM~eh=FhG7%YJrLd>R!q;DZ@#+0L-S_Se zeIoORw>!?C@9IJXAM#y4cj1f958;ELOO0t}D4@~Qs@tZ?m`InT7Ah(bL1!NjreN*@ z|M0}o*~ec$+aP63o$kPJVD^If@K%EvlPjk$UA$)79{#7NTUN}RIb%kB{bd7w;QlVG z-735v+kC-k(!2ZS?*GA}mId@_Cr=*g`skzX?h`oE!oCZm4+%s+$ebUekKuRw4E}r> z!u~}Jz)|?h`d$KZF441DlIFk5d3}Cv51Mt zFdHxU`{L1qPw#zvg7#x7ASgp`lrxG*xTQe9WijzOCb`rj@THUe=6?b5k(rmxYJBdtZdA`jfXE{&s@1s9wBy_mSgghprC|UA!=Ks|_bk?#b*{e;UT3p!HNG^stijRce)nRJzY?Yd{B!X!cYv2K-QVZu 
z5Ahqq-!(bTZ{I$TGpx)@X|Vztt*x@ND{hzSE?*HY@b^z(F^5v?^KT7aEJG0PPxF6k zdha;=ZRZZ7tZYsF#-7gRT{|{S;G@dki~U0BM|hL=W9imM;NpYvcYOEmcSp~3`NJ6) z!evGupL+pXh9BJh1?;~4?f)r2iuqZwX&d|*hBkQY8h&MPwCmuO>4Lv|J>dU1Vw(=S z_+7mB^~2AY1H9|!Z)gbLQTf2R@@s4?S11eO58aV+u#(8jXs?(7s5GP`@@iG7Nlw&V z`GT)QkUtCuWGlpL*lG;{|#U;(G`j;3s!)UB7($obLaCJq%wwfg5gL+}#9bQM2r1Z2UE{%_{VON15uKotvf7RduJt zM%j)ft({#*k2kgK-|*hlsqoIB>hG&V|32b3ID7Qi58aDD+twd?Kj5$7x#zy|BKLn1 zf0w6EpZ?A}@8Ao!)drGDx%{!ye80n)e{Xj#t%N7@q;u!drmwgIy!ExlAEyubdwduC zrGdXn<%h0%%>^>%0k+=5MACKe&gQn0SFS!r=Lg}OR_i(7k59pxfhDU}A9vOt{cU^e zR?g+ej5L$*0q2hcXqtUB_lNwkfUh;M3R z7azlpKb!R20p9q`z~6;~IsKn?z!5(Pim+~i+u|7WrYPYq<>0S|ZP|kZG`l}n5G7*e z4*=x+F#uiu{9Av_{SB}A*Gj`=b_ICz(?NIsb^IU9yB>d=kwh&x6q!e~Kt_i@bDP4b z^;0B8mqc3mjA;zPAKWniN?(>JP3s9ef6skuEc1Wh0ef8h_;Uxot3P-JU!oIh$jV;Q z&j!*$TjQur1_uZ?;qE`t_`}OSm%odF{FxkCaGS0*YHz)hF_a8xG|LdLOA>#Bxct>X z+lRN~0l+BkuQGtY=f3CgH@c)70A!oTHuv|8_%o>MN(&L+uubL;@Rr6OTXhKjF6Q9R zSgqHpq0P8P4>3cVp^bhOr0-}Xqr_haJhe73pq}eF#^2W(mFh9RR1kl#qbEEd0Eql? z{0#2z+OzS02FAR>HX570f~OGfK6V$tZ2pz_gU5f6zY1Nb9MwwZZ7kuRJnfX5)d-)_a z7JSAgcnabEBf%fTZ-nRnB>sFQ)taNU8NQ@kE5z*uqZ!q6(W(t?$>V{F$Rc${GfE&yfx_}&XX zZU0RDpDdtXTS;9sK7pqY9z1mxz%GCAC+2Y1Me%=;s@2Z~qQ%LQ?tlpTge& z@;Bhs7B2Qu5iluO6DogPUF&J1y3E)N58^-J3lJZFM=!#A;()&~@-*a@G}MhY`B>$U zJdL)BS+%i@4uDE0r9`g&B7be0k-yGCTz_c%J$drDR;n#jG*kScGWN4}jZYqZ!TEFW zD|h{08NP4w(&iB(;Emi;MQ%wc&y5z&Xg?WfGp3Od==R+g39gp$x18|@mtGowPw-VJ zz6x_?WQ#vgH|T^NWAj%J9N0J?ID_$r?;B@?~Ht^-QdlqXO3 zy$fIFmi6-yz*QJF&6`;^zGV64V@KNR{KAQnehcIj$S5i{wwb@tLbmy2;J2Xb51l;U z(z+D>`X}eFwY3KS1l~lrFa#o>Jh^P?XJB@!`Fq)LYC=Avx~}o!lfid~2KvtQ4_>%* z1zy^E{N$;tJ+&2O6JEzdVpp4NNr9NLLrEBo6}uX#Co}E!&F!a$2HTfTK;Y`w8>Afm z7=cfpwi{&2nU!w-3<7%csroS`Tw~{xr^w&get19PrAyat-oAII_u!K1$z}Wqzie(v zLzAxmgM*SR=^8u&>GK-0oRdX5|53y%9;AMS%y*=iN?JROGW7*FJrE`oj5v z{=p%9Ejqp^^J3@rh3~vE)hn*5=jF3lQH%;=`l%#l)W1#YnW`s>p1!o=^*Nij9J>U6 z*9!l}#=w~q@DiadO;2MjKPG2@KUF|gK5f)o7IVX?>4*__uBc>r_S^Zo_~5(J}z`T8~jNE`dLV=(T|vPDv26pvU3YKb^4M+ 
zeb?bnSlzyR@6J&72dmewt@+n~9SY5yd|*cS^8_@=msXlh4jQGzwucJEQ#OXf(U4>G-Lc(&ohmN0BV?bxY83kDjWULC(FAt;TR5Hk4fPmFO01i82RUX3~AVRKc3!~AW5r1|8 z0kISr0Tg?03u&~&8>Aoh)?V_@ia(nSsE}2oVP|BB*-oXP#sX;c=f$5zK#o{ij2tmb zZwqX+v>IQ)Eb{k?;iQB@Mga?LH1vnfO03<;MuC{c1v)2Vsy2a=M*Z?S8DmFNz5aN` zFftxusW-Y5CC!4XkqHUcXlgaS(bN>=rJO&%Mv_8lH%k2ZPo)?}|2J4x*GE*K(RVb@ zNIieLMzRRLQA?LmK}cRX*Cv%1%pN&5(cB^c;g$r29#K4_!AnL6mkJfm(j?d zIM1RQ3npOoD@AxrxnNDaqVmMPSZbD+N}@i!$%qI4MxFx*U>_!LzqE@1Zfn-ZYWlvEk3lOJyWhr^%p-F#L?l>?2KCtlz3pMa<^{l{0-7B{gw_ zPe$pjS2ET7{SOMMSKv7cqyQ;E3XlS%04YEUkOHItDL@L40;B*bKnjooqyQ;E3XlS% z04YEUkOHItDL@L40;B*bKnjooqyQ;E3XlS%04YEUkOHItDL@L40;B*bKnjooqyQ;E z3XlS%04YEUkOHItDL@L40;B*bKnjooqyQ;E3XlS%04YEUkOHItDL@L40;B*bKnjoo zqyQ;E3XlS%04YEUkOHItDL@L40;B*bKnjooqyQ;E3XlS%04YEUkOHItDL@L40;B*b zKnjooqyQ;E3XlS%04YEUkOHItDL@L40;B*bKnjooqyQ;E3XlS%04YEUkOHItDL@L4 z0;B*bKnjooqyQ;E3XlS%04YEUkOHItDL@L40;B*bKnjooqyQ;E3XlS%04YEUkOHIt zDL@L40;B*bKnjooqyQ;E3XlS%04YEUkOHItDL@L40;B*bKnjooqyQ;E3XlS%04YEU zkOHItDL@L40;B*bKnjooqyQ;E3XlS%04YEUkOHItDL@L40;B*bKnjooqyQ;E3XlS% z04YEUkOHItDL@L40;B*bKnjooqyQ;E3XlS%04YEUkOHItDL@L40;B*bKnjooqyQ;E z3XlS%04YEUkOHItDL@L40;B*bKnjooqyQ;E3XlS%04YEUkOHItDL@L40;B*bKnjoo zqyQ;E3XlS%04YEUkOHItDL@L40;B*bKnjooqyQ;E3XlS%04YEUkOHItDL@L40;B*b zKnjooqyQ;E3XlS%04YEUkOHItDL@L40;B*bKnjooqyQ;E3XlS%04YEUkOHItDL@L4 z0;B*bKnjooqyQ;E3XlS%04YEUkOHItDL@L40;B*bKnjooqyQ;E3XlS%04YEUkOHIt zDL@L40;B*bKnjooqyQ;E3XlS%04YEUkOHItDL@L40;B*bKnjooqyQ;E3XlS%04YEU zkOHItDL@L40;B*bKnjooqyQ;E3XlS%04YEUkOHItDL@L40;B*bKnjooqyQ;E3XlS% z04YEUkOHItDL@L40;B*bKnjooqyQ;E3XlS%04YEUkOHItDL@L40;B*bKnjooqyQ;E z3XlS%04YEUkOHItDL@L40;B*bKnjooqyQ;E3XlS%z@M1{-=L6s1=5V{PsUJ4xdJ~S z{!%`I>Fz%9m+tXPd+$@t-%r!7KJ`D{{EejnOt}In=Wi?#m~H@5&mRGpY5>#EAGyFZ z1DFN=h`{s`m<|4D5>6|D8R3syU`heZ41eSTQ%PWU_;Xy~SPE%U;HPQ6|NB#2X>b90 z7Nv)^0@>mZCgEYNHg!%{AY=ULr$^~wu0ZDa8#l}ar|u~VWRJgbM4&Rl!#j~h{)oWg z&79o3Z1P7gP#)vq9M33!cdTe)bpo%IfXynINa2y{-mEj&C99$X+|3F;ifynbbZKiSLq_>+yr(>0wfG5Cr~U`F`!AT#hsE-=)$!Go`e1ZIXm0hx(E 
zkIqm7@#lGvhF_5;;q36|KxXDo82>!c3U$^#{)$52&(i$-^UpNcP-iX6O9mRf!~RTT z2Kq3o;A&)wKlE||iUk@2$`#tg#=g*v&)7=Pn}_zO1X7@)$Ly`hITir{h# zXX16iA2S@DU{;p+dl^GXkT`+Hpn?UpX=7f{w#=1lJOT36E$K*{_&1jL4Jq(o44lV z*N~}ns`>l5b4I%V1OCij&f(8&G|5wxJi~AorCr{Lq4j{Qq&t>w{8GE&cqx5&=M!zv1En&10S5AO}`u zZap|Ru);{j)bsZW1QUTSYO)+|rijSKXBM}Nbus#5X*y?iVtq?He~2Fh6M^n5?1?3? zlRiD;GmF_qyRaRywHhivv94u=zmNb*+_}-jq>Nk@HAH=ExwD>Sr#E3S;L(jdj}xy#OuS8;fR1sB7uwPr@(M9ISHZrV`RJQZ-_< zp^%6gK3!|1y0wF$UMI)ULhLN9k(kM!r}_EkpDX;C2~;3+6M6*I)AI_N_J+Nr#Ebi4 z&6IZbD&Lm{aP+@{u zAv+O#SjPA>tV5*p4|H*XMyZknqTwNc#-xKIHZ7GbNHk)1jE859KZ6UD&bw5K`=WtH z-_dzwB1b6%B|}aG8s!7?iYLK@+JPi$j9G`;kvOSL@+Z4MMHEEL3Sgj7sV0v^%z^e5 z6J78bikl(25&JWn{K+m*mWCl_BQVe?*AhQoTsVr+T6`^d$*l5cCeT65OkkifU!7U` zVgOO(HuBXayZp(Muy`KOMa(n_2O9I$8OtYzMshXf-KBhWNi~1Z`}UvJ4Ed=TCX|Pm z!372y^VK;?{6>yUwzhmA%vYCm^Y`kjDsXyA1`vIqjF`^_1{(9#IYIB24=zcU&0ChV z^OplaWfFc_UEZo>g1=}k&`O}6zeEX4kQf~H1gnc*74i3~ z6ZDmbnpV~!HULfhCFug=53{c%@i#0UTYX;82cIf%zLY|sP8$Cj>$y%k|L?e{!G}@& zjKuvP_Az>Q#F@qa%)t6XKHh?f(2hznalJBg@`X~knCv&3KcN!TP*AYy?=Q*HLVsAF8X z;|+a`*qEV)h*{a) zpq$whX^;~?*xVKl>CjMvU_{-%2;eLokt?>{nrqa?tqqS6kA<8f(dTcxeA!7wiJ9kTJw; z1O^&&iEOcZ<6Oj;r7uc0?+(w((KV%9j6^p1vlGZUG79A&W_E#rMq_oMGINY>vgS}d zG8`1p3&YXTva-sbWfB$`nILm*k&V$LSC0-D+yqUfBh^-;r2;wg_>lA48A08zqV005RMd^?K*nn>M4~~|TZvI{q_uh=3>ixG@Ug4`)mn2;%*MN~o zM*ZMO>43pa&J?Lf{)25D@gEsAC*}OT7J)#JMnO!6EWR{+uB~_y@ zrM4J7<*SfqM6XCMk$V1u36xwQp1FpY>gD1abx|XwEkmniJ5_4Ap_7q%WIEW`lIg&R zd8y{_7l=$SfbKek>7pa1xWM?vh?*;h8wb{(DV6aXbLU9KV@6GF#H>m#mVW-i5y;Qk zkC@~F;~Ql`qsC3+YRFTnHP~}xDaB(Y3=PDpM=O*K{z6;@vhy||=DEQ5MorU5NyE4r zv$RT$$JjHq(lL^Tx?|MhE0P8NU<%GX39~adA|?ooZ}h1eA;U1PrYuv9hU4%sLv~)f zf2`?(R0=g`hQAyxkcsG9e+cB5RV4J!_(r`ZW7;&b{ybw*$dr$CkC8KZ+%?6F_-h8@8kwj#9W{%jGTh9a#D>48r4J1Dbv{c^2|j= zE*vhR>{BnU7~>@L3iSr#)XB03)wQfO&RmiAyWS|?5r{&)acAIJ$6o+2j?NlN-Mat)9r1iehMWM!7b zCTLjH&YLLwIReNj<$@vyFGoxIF>(!%B?R4!S+XKaV-qwiY9~$@{wx3*E|62o6-7eG z^pf6IXT=@#FL$hn=?${Dh*?_94krM0hAoPWF~v7qWP_ed;WM(tOUyV<4am8;>ot~ z=f&s&6kAt&C>ygwZH6# 
z?HHM2+HH`doBbR08r*+U2NPXCc?6?Nm*;Ao|YQ*gO=$hf(K0# z@E6qvCpmkUKq%?c$hUpjq@3}K^6;A$_x{fcpnn#^mUM&+OZqb|8{(LTNZ=z|GR&co zM-zcR&I9^q!9__2$Y@DF#>hJ``ZdV-i~89TfHG-?VX0Bv%qN?x z_;WM&!(&#E`5!+Aly-L%j3NS!YCRjDENSAiEB;*bx5^Hne-_-9w0k+Gq(`HefG3+P z;V=3Pi|g@s#VR|1{#kHc(&pvZlHQEb@6gC=RQ$R3f2&LaHR1iU;Gv`iGQOl2W8}0% zzXoUiqJOUV-hToAX9Li}0xwzAj67qOYCRbF<}Htu;?I-+GYKT!=br^{C0WR9C0!er z2XTBW6n|d%(+6SH(LW1bOPWg!K}nZJzUP-m%8|cVG%w!QA7lhJT%edgmn?Xu$nbKa zlJ1PL=+a2*RQ!4Oe>#Ce{|9(zec~S!aY?P>=o_7CT#9@r{$g_Om)!RCay*W%KR6dC z8h=qZ3ZR38{*AO^hiW4+I#wBbgW}IE>pXvS7UvUr9JT%DS+|MpDP!rb)A0}yp-f2vy}8;Tq54Ar}4&Lgv;Or;7{%X^_4JD z**^>3O0r80TS?bOD;P_Z_R3#GE-)eZbL9f{v6%hjoRNP}>?N6s{Tr8vH|uFU^Cv7| zt<@o&82q_&fh>7i9N;j7@ehiqq*1YdqtUXZjR>7svo81VtCoyHN*F zSkjeIKlc)U`an%s#8tBshCi_jEIfnZ?Af#MZ%1c02|YCQcJ=o5o>-e^5a=HihmsUB z|HgFv#-3vpF);{X5@o)+>L67XHLmGyZ^!?-+uGp&-Cb>MT@b=HFoF0J5O@X<>+S8M z;qp8U3p89Ip>j0>NCsg?t?-NN&xrLpmJDl@m36K)Vv7hhQQoVoiw6b<`n7QWJW=;D z4Rc!P4A+{3;?Ft=Q~Y%P`lv$l9Di3$0G*}sPZ5yqTx~ex^5&1?$M8qwweU9^4#33X zk9UC@ekOkuK%PGxz(_!mA(oLTvr3KfCdsOe$R_f2_B;(gmOqr&%HM3Oy{MjptxPoj z7#FDH7luCrz=%4{xp1b8F{#nH&e&RXK4hPO-vtHtbvh{Px#^2TBWDz1|Au^*QV_2=DF(X52j1V$e@o)eV{6zenxA;G@)o{wOx`gz9Zz6zI zJs$X5BmhcTh|uT+8L8IENceL$$^eSg=12@NQ2d1a^-%z|+D>XHDMkpAz@H(C6c z`5zf0Z2oU?kpL)VA!4JLBduB|7f8(lnB;s|t%3@|WLazA(yy*AxqyM4{}E-+pNE|; z0wCqDSL3h88Ng@VzH4AP9`^#cI$;3(O!sHyub%=)_y4X!0R9j_DGQMsg#hZ+3XPio zgJbNpa6p7h*o%kDk<|$JU6k;L0P6VZ{5b=dko+NjVfkA?01-dJ9{`wcz#!-y!zN$> zr)gS{mK1QLm%~qKS*uY7h`?qo9xg4YLeE0{r2GMZgg=~r)A1Mnb@G7m&EM+PBKJq| zlknG1Js{8DH5NcS3sO)RqrGDpqn$CS(e!_C7+Sec=(iK#RNY&e|=;XOqPEJ ze@+0#JAVQIDSixp`T(rWQSgQl48dglHT~Z;Ie=17+#9VNaV4!rjz5dhkq-w$c;fgY z9IAPl;%DPelNRTH7;p|~;m-lUxaZI20v+)q`+p#m6MsB_7KbAR#fy=z*HV%94gZA3&&rtfIl&S3BVuk0yX>u?vLdUJfPTxCd4%XewQv{u&zJM=75R7AL{~j{3w5Ahx76R7_#91nIx78kDa8YBp+(2tTz$o@OACujiwX<& z735~;1T)G?YR@*D2xO#K_I{%25LJ3lc! 
zd@g@$PHK6>@a$02?)0@}JI3r00Vw{BCPsp4O{0iE!z*)qxJsDqbra*Nd^I^^YfV#Y zd)1*p&f9s#CmP#2I=Wzsq3?WeM|)@czEJ3Ra!P7iW>#=rHVCdOzHVJkc7CnSAUu3& z_ki)m-_v|Ze%lP2FqruD)S~}~eSog+-TQJjZrX?hQvB8?r=%Si9BiJN7#|%Q86FxQ z8K0V+U+6zxurZi&v|)5Q)SjQ7tZfJCT_pv@yHSTsSW-6%`7?|hAZ`-+rMUcLt9&gr zYgcXa*hs_iz`D10oNXRon3(|SLYFS|_w|RyPB(-)Qc{xBG6Gpa{Jx{yS%(i*!}bgA z#7wkbKsSgF|DFObz=b!N9{m}F<+#mkv?&cAsnr8{mYM&DarWB&AK8ey+;wu_`nTR% zpM(6RrzEdUNhuo~JVy8%9~&7N9h;cGJlkG*5csQXoSGcy*qKKCAL2o~OA3k~Bdyn= zr0(U!W(^<>kY~T+GCTv}x4#uvDahsc%LwMzbX=WkIFT91*-|rl{qFUJsfp3yq0q&F z3&UflD?*_Fe5=6$uFKi55#TE!vp?MaBRv2NULgFj{iS!L`8hp3y}{`0n< z^ni1?8_=~oxPHUBU{+REMsjj;%0A?8a&mNJVjRQ7)a+FK@v@vi%8ACA@y_;KZTuzO zLb_YfK@l1W*-q7J*gKhNlPDqI`Sy!1zWA$O{py7mR;*a@{PWK}_sd`Y;upVo_Sv8R z{AWM==}&+1lOO;1M?d<}GtWHz^bdddgCBhV`@eCj)0WQw_o$Kw9$JBqG)#}3t3H;MzNT?t zcJ}?k?V0Ay5a<`^Zd>md8UBq>u~Ca4624+W2=Yhp1O5;{!r!yNAB<)?e~4cs{JoJL z+}U*b%ju?uwB(Fn*W&O0_~_xi8&{@~z~RxB)A^xLSxP402NI{3^y%7bw?7Dhvk-U! z4eFQ(^vQAskjGENAB&%P|BbjmLg3CcvNX#`UAwkzurYA_`0=CVg+(PLB{LZM3kw6M zE~89ca2U^nj6a~0KbT8Njogv;gtRO%B7g>ehWi8mN8KOt_ha;bru*~Y|1uc|_-bY_ zSlZdy(Q&daFPMM&`t`-D3%z>_-rc=>*RI_=a)Y7JvDAz}Fc?THh7A}e{$|Gq7`MS- zXGW)%D}WaKSpMKgSP+1tqLQAIIqy*n4GnNFl!W>D%C+z%4P>UL1loZ=8j>Io{$?QE z@4%m(4JPM}H(r;8*DQf4nF)IpT+(xb@@I2@-0;is_dWOHZzcoa8lAt6OZmazyFkg* zR7cPZp-?p}K45R5h{aF8{fiE8cD5~dtf30N?zFqko;9IkVx7woz^9&i=9%K{+sUkl z`)BJr+qds6*t-|lD*<-~S5~tpu#W&}@<*Qtx{N=6JGN#`Dy~7c4|?TK2Fe@W2xK7x z=Gmk*?#B4cx4GG$HvHo3j~RZA>7OV5OaPP9HUH#7-2$^k9 z+f!3rU48o4v16sBMSJ$_-LYfGJ9*h$e?lT3LhzZVo_cB-tUoqv*t2I(r8N{|K;kx4 zGF)z=w+p+rlzK(RO9ZyzyrMX!3Uv{9n-K|0+U5<>_nI97OymfJG_E z$*Jk&$E_I|Ytq)`?1g@v#SxaB~{*v#Uck=lc~Z)L51?R5kX7P=%b{GkU7WbGUj z@HY$n*&ov8f3KJf(3k`i!k;Gs&B{r&&OT6bR>B{aeCD5U?W-?-?fmh>uSfoL05$$# z0_eowU?_Pl@qU{BE7HVYiyWq`*_2;+wrSwnf86eFeSg>1t(!M*J~&iX)!fn2@E)E0 znEzsj6TrzoPi{Hlgx~Zu;`d(pxt8{ZQ~TcTE?m8>8-8SdzJFlg!o`c1LT3uJOV>YG z{^pOZ&dhFf=C7|0*B?n@{v3wG2msXhbEp_y90U_rE~XiO`|NKay!6sbNlCxK@atdG z{TO{iMq3l>n=-4oI>IODA|xDt`JutkAq_t|{_6bcosf41oE|**{-Q9%;vDxUkNT`$YH-9WW 
z8}J*Xy%)gc2N{27ssF=~p(e?RKOqYmex3$lz2CJ~I6u8iTwMtKb@#a850)P!EUeuH_%-UiXXo#Hm4rV5fhK<*24M{ey%q2&g2XU;Hi`=P z%Oe1y|I6*f4VeCN(flviFgO$%p!mu8qxf}KLYD>h)iif@c6D{s*YPfp>25^ui2%64 zk-q7fy7HDT#IL=j2F72?pZ4v74+sBe@KAt*xc@ zpvIqxUs(QTariCQyH@jm7XDtf_LhD)`Q(i(ejWy4QVCbBoj}gapH*9bXh&FWQBAw! zZ+K95fUfyFSl`+~0PJq9uj?}h;bp;}r4dH5w!E<&2LiyawWX%mD}M_MqsX7O{i4tR z5MSs@^nd5k|GjE;MjQziV*~*5)!G2$%@(MnM$560BwhKx{LrxO0O=-+b^ceO&Hq^c zSJco74|1Ug?Cxkh)nWi>^fiqvvHyd;M+kd5Iy!J4=-AOlt~uw?`Qg}$__frRQvMkC z=ivYJ;dh?6KLLMu*Nx%N!UEsfywSp+>scT`VXc<0*k5Y6eyxCC z^Eqw(;ekKm{`BRq=Krv`jX#bBYiBb?8-A__VG)0vZ?&D3#Y$>q4~#G_@PF7JQ+ECa z$qK|he+L^{06@b9Hl3u)5Tm!XNNoJyQ%^nr{OXM~ zN1Ls!}Dhhzmo*NM!n<7`j-?(|7YdT>~8&d?o;bq-T}fBP~pQbwZK(pBn)PZ946w< zXdD%Vh&ldB_^Sww3=a|qDCbX`{}KN7*HidlV~6n916>Qtkfq@7Id~qR9o|JXO2Q|f zLih~A!-o$)$1wWpi@Nt)v7+QE0dTTy@KXQzzFzo(^#Fu818l1k@mHbmxoe+z|HI&q z4!@0BhZ6jfbpDdML-<23)BnLyTK_QsFj39O0Qg;R_bdOEsf>-fIV*d-8P`i{pHcW= z{2%WB9Rau3Nf6=uAMgiHqr&=);@4|ke;jS6PJ+5W>i_mvQ`gts19z|b`{DN0;Kk;N zrQz?vgO4D54B-<941WZ`FIWJ9KLl`c0wx`k6BFZOql3dkmo5(Uc6T+O+%{wLe=`1N zi2F0PUpW4fMEq$4njOr?NdKDr{o_CW(?9*wx4-@E{{i8j|M{Q4^PPYBmw)+xFnssB z-^1|z@BaY84@r3X>3{sclKI-06sed=b5Z|Bt{RsK^nbgJ{bvXM@_|3%0NwK!EUoMA zIe&3*7@j{LpO~DTox40gJ2kRY{OJJd{GkH`|3~>_0W87Sv}{Sqo4j)K#`VR8%d^uH z<0FHYE}ZY_YJShg-&~!HKkEMUcg+*{zUwpM*aqE<1gti;X+mKNru0d5Hj>} z0NB${!|#25^rIjD_$NR4$xna!)1Uq9=Rf~BJQfeX&49Td%n+Y{{`sd@NccnS^-~Bw z{Tl_9NwdiKYxuuj2mcpr7)A#e8gS$f9zlh`jKA3xb-Mo zE&R1h_|x24E$aZn(T0ElZ30^kpym-46m zV&388GC1WW9V&Fp*8E?P-2ZjtuVQ$VI6!Cq$Wzc7f4I|;v0ER}!r!OyDyZPbW2M=ycQUAyArvZ40gb)ch ziEn6VXsjzMF9RO)^02EAEUSyuam&cagHZl%dHE422c;)0{MlC^0N@y11p|M?0WNMi z&+*q+U3I*8V`o`OF`hmnHL$MqOmXoxk|6)U-|a|!Gda1^7=9<{>O+6}#gV_2E0b5g zl2-B-{{=Nrr3$vb?;hv9Ss5^w1#h0d=S5Q+cu3l2EL4pIi4g&${8=2J&L2zx=^W7TfP_EiY6*YN=I~7t;^Zw8|3*&oo7uSjrT%Zk$^Qih zN2mkrCje6Z;MX?VPnijHxw!=e$M&xeY6Rl_hWz}aWryl!=C5AA`2q0vfq*|c0ACCX z;0OtHM{jg&Y^VOk%l7rEGHaM>IZ9Cyd`vu^O$^37m z-xz=2g%}e;qvMPN)chZ@XJxSvLjN2ne!u_H@G!i1q80vRTxWaL?EK=jTOWLQ@7{HW zKhFIjgvN`^mJ}Bqe0L-GAljb} 
zENI4+;Q#1GAKB*9__Iv`%~kLZH2xf_HO?yLMLVkxqqvA57wIcHh`uifuZ{?8-+ubE zGk=XE_%ajNf*}r2^M9l$`DcAm=fz`qyA7TjY3%IlhyQx}M~2V$b~c}@%iXp6-FFKL z_Ut)bS$6198Lob7`QcZ`?~*nGy93b5Un9)^ARIZ;ejg`*x&vgl0GU0&FTVT|0o?u` zj~+?Av;D-G z{vF1>fk%J3)_M@X@`S;(v(@hr01MyUWS}4%4s|$J68?k^kgh?DMKIll`N1!#|AX>^ z&W9|$Vxif;QG7CcZpMb~2a8JfQ2q!+fsBmZ@I&9@?RW#k;{PguKjHw%77XPt#pG{e z#q3?|pvlRJlLZ@-gFAK}IZ@Sg`}2=RI-6^_v5WNY?vs79Lqo&`YU8gy0P_GgmccDs znBzeJPiOUj$X`==Ia!}@5beBwAI4vuKk+>fSb-1#*KZ^*x7xIUzSM8@_U#Wa{O))E z6T&SB^_$n=-MTXxe_wVK^8D47Zb=5#Y9#z!1pMY6{ptUOaJ99NFqn0?y8aji@W9?3 zIR*^&elkdSNx#!!EKRY4_HfnM+{*Us9`2EL!{Kp^vc(sKvxTUbNsS<|Z!UOyE=BJ_)wY zGDuj3H`~5TH=(&*XbK?NhQ>9B2H^Ki{|8mrs%8A2$ZVX|vr%v+YgR_q)_waCzmfyU zpT=HC$6x>Y*Z=P(x%+G5FMkO5LkAeD0P5b(%?kj541Y%!?y>wGI|h9UU3>f1`_4@3?XIX_8ptkRwymg`Bu)3%sHv?Vh1-AN{^q{X@%!T+{_uz2FDwuSx0M__N&wuG_m)nCrMCd-ok6_>~^m zz9|#^UPs4Y{@4HdU%=mA9rz1|kR9Rxk-DvW_rIO8mf^2rj*P!kQ`G-y{N29A^7jxQ zLcy(In33VL0qCE=O+5fwZj6u96)^Bua_Gppt&~3ecm1A%Jv-oY$IY8Jaqp%X|EYlI zs48xMNCMEu@V8=RQSsUP_itgvZXDp341Xqo*Qp1DQ(F1^;*#n9URjw}c6e>lD=SxK z7^APg{X!r90|*I!usaI;f%gOVNBH~wzyJHc&(9w?fRpfdcYy~i*u85TtcLBV7$mI1 zyS?AFI6!(IjxK`XP8^*BB7e{R!#}Jwin^f4;vuC)rA9k&Tu#QO?FAIS6NLr2$XonoIC(<=hk(Zgg~~(jLs(f835`I zP`?qZ-3KuV{D*J7Y!uZZY_(SZ2Zyj`Yf<+`wiatVBV*I9cL{#S-`R`owY6RU@BjX@ z&L28J^nW7$Dn=*AaSH}s3J8C7JCK#P|3F?Qc`F-w!JH%04=m#^_A5-^ar`BtFToG_ z!}X}e{ULt$?!gFpz3)V7Uh--}9t^r`QhBRn0P(Ji7C8Q1dHH4VfA3F1jo(n`XW&QqgQ@1@+1dTbV0QMlyxgtXSr!5f5`aHC z1LQ|w7C`ze0-Xf@!`~Yv%?JyGnQO#Mvyo*+TpG>8tg(!&jXMc`Tec#5t!>x-^S}P< zzghkOK;-XIhi(2BtQeUD2S{$h9mW-$7H|t6UJaOic=+Me6wK?UCU&k_yK2?3V~F3E zz#sU(@a|8?@9tf2W}#3)Mk)ahoZsr4T|4tCjvlMnj^nT9{;uCZA4T}n=%YUH&fSr+ z(`Qc}D;gUk{?Cy=!vX35g8v(ZMF@Jphja--`2zsY?b@|-CqBc!{m79+B_&7S-n?aN z_8Tu39W2SFPAK2c~cf1RU2L52|wcz(V!0+*6SoeH0 zLl}gq^`?zMl+@@v69DaTD>4L=H86b=#u$Q)TfyX3$oKx$ENRA!Y6boejxx+<2{Y!- zXdJ~yGBVb0%mw`3ejC{WhX7CH={s@0}fT>HD3J^kc zgt@0rpE?EqokacsK=gg+{g6M6z7HvWkm`NEw))Je(zatWasaVSpTojP=kKep^Yt5X z59t~h_(KPH<7Rag%r&tMa9^8-(nEoyte&opww9*GhU&9br_Z)d;@~hk0{Asj*^_Yo 
zx91XUyx`h{@Q3u-@dN%4zi++)`lt8r12MA#>&TZ{>p!#qaWZ20lVGd0u!aq5X@dAs zv$P&Fu+TI3Pup2pvm2vPKO4!&$a!ab?pqtzA$u)XVJY?}8-El)1K z0UQj425}D#)**)ummjUHsjaE4t*>ip%nj~3cC@1W$lC8eb$hu{Tl@D8&OxIgfJ zH&5vNIWInp(U;`h3-R;}AFVq|?yEr(|+0xW7c>Vef9C!(W*RCxtY6rqmKpyzT#j8DC zgO_Jp?%un9X~5(UpTozq;?l=hfshfH0}y&&n*kC4cTM9y;O~B?@kd3`{;f%RX5>X%c00I7J@CV~B@~4f!xCBN3KV_6Yx$*6d*}4y$nr8^4QGgI0 zR<8^OYi*{fsT(NeP2&F;`~-V14(<;K1hRmw6Sr?&>zEwB*fYII)~0|U4R`=Nw0GnB zwZ*Gf7v?X+YcL037pM5{-FwKN;|hd0Kw}DsUlW`x&~0d(0YZ5E82BRq!n76wBML-o zO4u|@AID#7>zFb6&H;ZEKb!L-gRkcO2!3R}Ha<>vq$QZ3tKz$#ONBAY%rda?JHefY zOW2ICyN5S6_BS>CJACs9e;k0O2h;#$`8yGWL0BhHfEYMJ2yJceI`9YhvHYR`VcZ`s zP;sM#@JE)fcK3&_?ZoZL$s-po^!JZxe@z$uqB91H+{`TewG!MK!iQq|FNFq&Hp5^ zv69XP0J+Y$B;-aJMG;lZv?7oJ=%wv%Y=qeceC_N2YAz7gz}hCvU;gq5BlZdKecA9n zN#X(>`J0@S@dx9t=Kc^r&+ZRb&=ZrBJ1)Rqr5l)m+1VV-wa}};&o1z-NFlmyeG&Gp z2M0qJM`22~^3L5mp-Tuh5^mjtb_?LHDY_2+%toKdpAA09z+d{NS5=h-0@-`_4rp^f zTz`Be8hfF2_wJE0DU20{`Jph^}6}zIfy2?F%bmMQrDfEI~{F@k?P$0Yv};e+b~e zdEgHh8{|zdU9hu}Z_@qgmYAXo*PE>ItXkv|N;;M72K z5uB>1wY3;Nyz=(|_=7Ez<2rxZ=Y(xA;=#B=*S-?cK{pYsBsdluq7Qsh{9YDkY!*S921Ni&= zbL#(?tv~CXH`D!z7a!mS4^K>t?brdFUAuM#d>suKxpj+-&+u^zo>PG^kHhzSD?b4L z$Ma{r3c+-M_7Ru|aMvX82LOJi4+hL9j=!i68#Mfg|2y2%Q%v}K58gb20e(1lq6Jz! zKM%Kjao113>wz0D8h`rI7yR-$13rL|bhZxz6GqhTlUP}K_1SIkA>*$OoTW89Ao!vG)A7?6zvhDn#sXLyeT}_8x&cUr;M?!P zR^yIUFqOP{jch@aTOhdqM{b9p|HIoN=m5z-$5-C|0LEY9|F{*1WdsfbAUuyi02J^y zQ)}Z-r;qx-Kp?n|@Mlc^;3Y9%>YL8St~1?ufpxF3^aY3f@Z33`1+72D1o}6+13Ox0 zEh?G^7f5G-ngcYqVHgMa=RY(2O}FDis2g_g%EikJ!ymc_4ngGa^BdP0{+PWNc<$;W zeD(?l$uRqWz~A`D_Eq5ikT@g|h{GEobl@e950@$6192mY3&Yw2=>ipr~sQD7_d1&qr#&v+3-1R{A$j%E)S3du| z!2+M@|DXqWE=dYIh6b-jgFjKVrZIYFEh<{j7QwItadLqF{ogEqRrcP6W5e6u#yh?@i9^I~X&7>E-MVuJZUEypHLypV z;*FcYWv}M{A_OoW=s*w1?g!v^456z%VW0;QmOh@pfW{x~h<8Fb{&4@#2(I{J2)qyc0S?EJKf3kz>8Ak8@UUe89M1jS zI&o`sX!{NvTR*s~J?00)H4u9Xo({cv4ep8HLWG15k;Rp_!2k8b#8O{@kUc=J9xz|K z8OQTSxvSmH0LbG<=6}@t!RnXtNAN@bL~}pbbSC&sA$%lQAk(v>Y-IThgP83*!8OU! 
zTz$SOY;IN-BFL^mj3uysFOKzp9Dn%712Lczpd&3^eBk;6=6^<@M@>xue=x!k{|ER{ z1V6oUTha z1R#24{yqr53eMN=26OyrZm%{F=mNJ#QWpFK{89Wae*yH_mma_#{Aa?4-NyGQ*V)lC z^88uOX)#9gtOP+qps@z|)1SyLAlZoiua7m2;bWTT52!OBBLpIU(=+q%n_-qeeHM5h z@%#Mq%Cn!D3y>?6KQVsh(+6bqrFVakzgss#m7xb8J-B}to^v6A&I9R_2;gJM4h+r= zXXh3d7mfjc7cb(%TH{o^3u1ae;*NO$jYkpmIUw*yJRrlLp$zgjH;ez9CI4p`#QPuM z`RKps|1|tAe{RQ*&ilSJJfDFYOQ4adN1|i&h8Rzv-V)=}b(6FMi;Az%Rfzs9j0vF0 zU*`$#H*oZ|lqP{Qv-6j)EZ(^F;h2R#%{$`wOI)Cd-zT43xl#rG&&D4(H)ir@Jp6(a zJ@kJ!u2o{>7MbbcT#qa?=mG@Sy>#~lkC-2wn4Fr0Z$)mCz67y&K%9k|ucS0RpcO!! zzi-A*RvbKdruzMwn%cVh2Keh@Ev+@11G~?iJaziaxpU_l&ehj7cHg-Jt^4ragHLV_ z5%TCiNgFnl9=be53H)4lest-9`z|=}HWb=OfNo_v)aYa*5*4HUtW33bW0aa?o};+< z>L2JTgc*Xt1?v2D8keg@`r#hKl?8an>B>C3%=FUW@TH-V@yVItv2(%T8S>Ly`Z9u{ zp+0!aOK&f(KlEF_`tLr#{~fKe<410P2uEMU?kJqHYXRQOcO6J0>kJrgaohzA>7EO2 z1j5gD@p&wGLt$TEZ%^<3l^Ak!Pqnm|0OHKkvIuqr5cwkxaB%aR|N5JkQtLGD_jzXl z5s?s<0(*KSeEr2|A3wMox_@u}BbGnB-EsMHe?KDlIo*Vw2- zCgRWN?id+;Af`80_=rJ8XWAr8C-oiqa!5^8O8T>iAKOnIAR98n=H=$1>kSsfF^udkalLbxiQ2cHR z-^%Czj(I=jbI-N5wmEu0xJ%KEe;zx3*NPP@UO3a***{u$v~XwMMsmw6W4+Gb{oefo z{_cJK`KJ%=+`fKw@#8`hKy-iL|1S3v{tWuyp2t@J9|qbVrjA@JrbaP;a*ATo-jc;d zG7Cf(2tM$!lm9dM`R%Yy!xQY&L848kH2fuoL^Iu$r)B=rYx}-8XeC{u5M|YRI+$YcY%L;1dBP8 z+B*Io#K|%Q-ThhoU-{TwjlZ+hVPs^KSDYB^X{oI~nMQk+ychfVlnZ#1_Cw~@hjj5l z_#3LLdw11#gFhUBAuJO5=-3O;68zxqFJSkb@B9xPB%hyIC(pp2Vd#LzuHjb(7yElp zZ`Jv`KM4MhB6hOZ!0*cauRs5UIKX>${^sZL9hDEYnO|dLfTK*tAG#y%U@j4n(Yay* zAa^1z5z$$>n%G3iwa@4}1o^|z#abbLkgV1aK2{m|7~bBv4;fNgDIRMI8z&gPDhYuCR)=LccNQtJ%xM~7gG zfh4n!3~B3+>5hhyvYqspm&GK!NBN@wibh|H`$PUnz}K2Q`|M^2H5kOTM=O`-FLYDDy;FZf1^=_kBI-XI+rB; zb>s$13JRS0Ljb2m4bC|-_5q#$O?TomgETns_xJBzo&*24iU26sdA8zb@E0w#T-9UY zk6aP_Cf^Yi%>my2{2&h?@qay#7nJblTy0Qx&IpV_D699A;4_R-VwOmR=X5%0F8V*< z4}y?Cbbk=O{PLinwZY)#Lj#?i_?YF1GSlwB-?#q$1m$m4^7|t92U9u>WblR2myExz zkcsL+PW(9us5!vf56uM#*?)1#pJTPiUkr>g7N7?u=Oo6cH_0V`UkTlxi5wSm;_q9} zoS4f4|F>#Yay0-bUwkY({+y(z4)E3|0{#|y!}>qR4oCbTaKai1YV%`^n4%~*DGYxH z$(B7hK(YHX1mPl%`~iTJKLVh^pMC3(xWDB!{yNgIj?4gWe>`W5zncGpao6PU6q0BO 
z8b#tBjge8~Pkc_@XlbXO)SINIxlf-)7yQ8u^RKMS67JO2gfoBN`ujD+|A7Y_H1MO( z9oS}n@Cv@D4y-OKd&PP+kk+*o_S!mdfN&G;!6OTQIPEj|yCUUJ)*ni2PCf1n%#~lktB7#>~zZ8k@g@rx5OaV=REl_{;DIkN+TlSyrLK zRLhv7SCU1{6DZIGra^XieHz{I*JKqynwIhhd&HJS$1rP$ujNzB^XKwwW`+Ktdu>kU2hAC4a z6doPm{m=CUu#`VbNlVRqwXrZ-`Qt9dqme6XX)GjNU5uQ+BK$StH(wuQs&Wwx@Bave zf48Rp96Zi24--HZKf3pVPuo9H|Hlew*OpNijgR0dgolr{1+c*%{E0c-b>aM6WIfVahLodgx~uYcsy(Es+5$n zBjx4g!=t04aF1?o9KGM{>?z$@F6HMdOyBBP$7GpN*4Xm(7oR-P_@nqGEg*J6{)|pD z>tbSTwM;gRMs5@#W;DpV(S+C5R?GPECD7lI|a`lRoC&dQC(tefd}y)(FKT&zl&Gk zJ#oO_YIYbhN($;m8+$BwNQ6eGib=J;j2eJkBbh|F_9A~Br;xv%Ib45O_`kR$>G+dH&|+IR6LqJ!%$TrX)Af_>)|!X?pbe*JJ@g z_=9CH`oF7v5Xs2cR$Nu{UR5Eydf7buNyU7E0?n0{^mREI=~)_oMFAl3BrZKU{)&)4 z?^P!pf7#g*5j11-mtP=%y8A=^t`hz-GtYG1{b-_XS1>KWSL3EyzJ#9gUsA6j>YqNE zv?dk))(P={MH!C#&3SKW0`ezOHyZ+K^v##J0M^}K0N+hb_)7_#>HPSE!G_&gYcqK* zwSs)6o{Eui-8$KKDH5YZqIKenNlwiysVXWedgq;V!XLUn&i^%o`zzK3asQc+{5f_T zXiKDT{_1fP;I}d}nY`{9@dN&nl9B^wJ3qY|YTTWXVUbWTY00L^7#V>-@1ST63|G>* zV)4Z!r(~Tv-__mOUYbS5UmJgOj~-nXiX+cVO#YnICE9{<8{rS%H%fPI!f%q=_TzJ$cd3ruUL4_ z$c+wUi!TO#3tHJXa=Ep=5dQin<*&W{ApQxwbeI?dk&hlNICa2;3h*z)PxP9mT z-Lc*SIqNd$5q8;(l7dqj{tpg{W=RtKewjM~`dfXJ@8nfIkS=ZhdfTsD9t3^~M2K zxyv!m!n4MSO*hik(fS2`6jSa%vOp(8Gt1NEPanGMs zKu$hKzP#Fm8SPxT&F%l+n;;V`+$y(TW&%DbX~t z*TPO_>2)zGpmF`%TXPG~O(TAH;JvsvE{~5+OwC<>e4Rbcbv!ZnV+6Fb5VuA8 zS+N@dHyl55>eQLDC(A2Jw$qZuEL0xi3B#W*pisW}Gzt+jU7+Ynjh^>x)FrZyzoaUy zVo|kWN{Wl^1}Dk2#+y;nUjHmeOX4>;CIUKEnqd4n zUS~WSV#I>9+NIVh;rPqRkx01wjFQHJGm=?jWXzW77lF~?QWB6qiGVpW0GFLHJA&?( zh!C#Y*l6@O#GkW(fLI(E0Tk!f#?t5vuS`E|t#ir0DgK;fK!qGNT6RW)nA4@i)EEPe z{)EIy=HLRgixE{D zg_1)3BDxr1L*sh=@tUA#OvK{eXiyYw77UFSP^W!7GFjVG3#|Mi7|=-CM3)5xlj+{mh8EBU02R>UkW(9&MEwNWg|>1VD#^pmq^)$@!`*60wA|M8bx zt$B6E#ha0MHQFeO77L~{TFg{Pa8CBDwvw)m>?xRdg79a`AW$e?jWJ_Eppl7$z-T&N z|3>o!Tr6Su(=`$)WB?dnMj?OtbuOtfW&-BCwgitU$LtiZ@jQ_)7N2FN;?O8YEM_cd ze#w-Ki%~0=WhVR^6Nx|K8cb0vG1SPCWhR%U(XqY$yW4+BioL0rCFWPagyIi1vgpDl z3!e0a|0J_e~NR4n(=aN2+3CG`hnMCnrj6ru3ar{%1 
z6LkR-kH7U2jn>DNF(>-nO~0HHg~FXvR#YZ#j)O4~`E%9?_=_VWu;AR8FJW*rx1dMO~1U+L=B+@9pj0R$&OL@XZf%4R?5{U&a z(3GAt#yNj>8O-Yx^jdrw8N}?@`J|0eQa9fDv&q0B7UxD5G25j)d84gKex#3o{wy-^ zA}l{6H~KI`7HKzcDrYL{IOon#vf~3I$h)QOa#6qB``twGa(c3r(B?`Z-WWn5)zm&{OOR1_%rEr)e(Q2 z2U+l2qCq%u_|qU0^QRmCHqml*)i(Z?guoZ$`T6G;EwG`^mMk+VHJXS0jm8A@VOGr5 zNGSf$%SjZI8YSgoZ6bXi>yw13-+{lF1DIg^t(Ea7H-_n;*qP0)hjT1}%Mo0O*#vt` zaCj865{bW8F(k{xks4(MV`{VXc}&kGuH4cBOHy1WI=KnNAHh#fU~=*@B+$~^F(2~m z;8bV|kcYCfN!UERwJES8#i@10~@WA528X)NT};W^w}W3SX}e>OACKO0?q4{A-1&qv0ZNx#^!2q4jc1I zT*d?yMls7}BjkrA7=MCw2y^{X7bi6`l|&)x9s)QSbFjx#GbLjZ_1GNY;fcndzy&hb zox;RzQK`{(bOad@z~KI@5uC(zQTMHJJTme4v${a`(oxRA($7H8i4EQ^ZY*RF}Bs?>XE4 zGoQge6+?tF5fivTsWDQWqlw?kFGshwNFa<IGHzQJ2k`sYHPcHCP3uTOg77HX9W5m%Y_x$pxE@D*@foz}Zq9fmYJ! z*Vvk?r6d22>owRg@}CiP|A%e#-W>hP{C_6E`a_4%#vGr)5@#mV43-)y<(vSv}gu>|7JgA25)CF2hVVS5RGIi9;XwgoH~;q_#e;y#jz#Gm^? zSR|AbF{x2hn{%GmK91e-f;M_QnV<%bS&7A8I0E%C*rQ0%KHOt3$KY`1EAn(Q=W5(q z5{*AsgRoef)Kqak$(0%<<-{gWogDRp#ck4ay_`-j+KdN#{n6a}@-e^nlXY`)3RY)_uS7a>__x#BTWLzL#xdk!a%lS82MfH?+ z=vqs*R;4A^b<$IdLyn8T%doWN6opzg&k|pWXjl$ z;d7YcKBJ;@_^gU87XSRYBamLR9WllQ`ZuzIdX08U@GUVS?<4cK^t!V^S&9oG|=_ae+ic+xkOCj#%}C_0zx6T9Ys> z>REf9uqb4*M;gblnKW*gd`9dwzWlP!s`z5RSr$L!@Yx8m{($R2{|7<137D$oN+}{P zkQatcL0LA*$E8O8P;pAsx3)ZSQ9Tz9*Q4aqTAVZbNmwhip3qO7q)a05CnPYH0tgo? z2StV$6qN`pHA>1ECPC*~vy5Y1Ml9uBmU2GKQXeLr5B6e72>x^jC;||~Ko5wTL=1`- zVLcox=v-@-huY|&Yz3YR+X}F?X>r>z{xTAbKRbaai^g9Vh0@X_2lzKiDm6@k{$;a_ z<2*&M22TZ?4`v&q#hnlKWJx&wtg}E)4H;pnX)?zA8@UEJCj?z=WI2~w8jH~oCK9DH zCkB7!0Bl=_a5{yf$eBPW>C?zHK$2kk))JKzS{jq3J(!Hvi4%lB9zfOyazcft$jL*J zlD>>w17rz7FOw`;nI*9a8Wy$lCJKL!0CGyXpvb|?(UN|QTmxhYK{sQTtjN;X1PzPY zi4%rD3xI|Tvy09iuN&&y%- zsS4)+xCWT)ch3=_CY!x$~cN|JT}B?m8=$=<4HekobT+ghcCea ze2YjbnrBT2{x}w_0CGk$DP7*_fE?b(N zzep(30c6g^M75JLIZRYLmV-gSot72d)+G{uu>(bd7-BE!5~)#A?n#>ze=gjw#s5K{NxR#>@k!ILJ)!2hTm^JzY+s4x_=hJm2?IWO8PYNZC^GiXZ)f({HDdd|FZ(u8l+|2#(m=$FH$Ik(!-5mv^h(M!S&&DT9n)vLBKiB-NvIFR! 
z1-B*bUXCg0(I_V1$>vJ zc>gSTC~1L=FX_b?IW5tz!I{74pDVuiU%>y_0JN~cOBOXF&zPlJ4@SOu%Oj=u^W^_b z0!jDzXTe)Z7BX8&*T&^R9N!AXpI83$K^S%P&w|&I=2AmY(xs8_`Q?#vh z8G#KKDCW;43!W)5yqu_{J7X-mG}1a1f8PC{PN2~L0UlbP_y zn4J41w|%`FkE81k&IO9bUlfi4=pdngBdyq>+DMF!RmR?+_;brT&mWxyI`$7j76T(f z=2&ecMoFcyH97J3ax6}pK>iQ?AlwgeRw-7X$oNb5Z*;zp*jgOO~7aDf5<{j(6Jq{N&7!Lh%jM#oaIHMrm}dITmQe-alc?e55oj*wxs{*1B3%&~S? z{6&kvgyheW!0;?M^@{7#4(zVeF#uBBy@)wZ{ObGs5xj=m^Ws6Yr5Op@xSi2Hu!&cS6f>b zgs=@vApQgdo&m&qd;4g(JWs;{4Od90T#W#dLD*3%{381^V!e(f!y08}ookKQB0^1+ z_v-56fq{X3Eu243)V)l@oEAF6wI-qXvkt-(Kb^lms?a>g-&GSpXQ}*C1Y|o`8_u}A z`J?zT{1JIA{LO{~FtPaKU7&`a$sYxf=T8SP5>RA_Wn{{%Qlq>{vT7r;iF}P;m-gtqE2%zoGD{WYILqMwicZa*(cz40Y+bT{5_9D zu#GtH|7sng8{HK=>84_`j<1wXp(_PZHOPo zA8HFhz@Hqz1muqrSk)us4*<0Bcl9_~gh*M4%;?A%R_kcY$dDQ%giKaE9DoEr5r5|` z{*P=moN}x#A^qQ*2w+u@2mTfbfKnDBG&(^>s&z6F{+x|6fFiXy5mtJcW{QnLUiIUiQ5pn@=2)*86< ztE)>cU|{EeMA`G_VW*1#Ncrp4`0H^7@L9L-8d#3Uy#TIG7yv)h{aN|zrvTFZzpD^{ zKLk+9LgYpvfO@q;qvrqM7&|Q-5aAN`;^A^+H3EJYCHx_PI(|BT&HyGPe~4dL{uU5G z#E z{Dps=JYanDw|ce6{So{m{Pj~0$n$rN1<=ld6com2?^woYXH04|{U030SFFR}`jamn zKIK?3!B5U#A6W&H<)6Wy6M*r~p8!CLAH$zM0Bds;ykP`GFd2VM|94FepcEAMMk_~L zNvo0L&ti1s!@&@qIQ|HSYF?)J+4$3>#rYowoC8|;a{w^z`Lnq|NBqeC9|+~d9}l3# z;YdO8V&v!*1rX>RD(Gk*F{oLJQ=x)ttC05l|RPlfQKxbyPU8IaZP~V zrHdG>>ko7JOZ;Ci0BH4pJ>8-?U?T9xx%UnwXajSNTjl8n*LpJ^2HAW9Xpg)Uw=3izOqDQ{1@d8 zK6KpsRcLzMPGlHX?x(zqQFJ zX$J-eo2MqmN5@8nhlWSSr)K9D`i~cE45l1y7@ZEa=cgxY+ktvlNkQ>$)FBg=)Qv*^ z3?m1Kn}mKTEop zrcwWgc+l>Wg5t+W>vbrpdpWUL14skp+3&av&p`O?Z^cy#aykAog84NaSEm|IWCn7! 
z)Qn!gdwpSQVsv;YbaCLq@Yv~!P$&T3YH)z-ayD!P_)5s^54ZnF4*-J~2!CvU=^bf) zPESv7FgknrMZ)3NZRZa?;2iD-bnOnV->@#2m6er|oSdAp5BZy%937b$$1pK9J5_(Y zEGLk1qH$)tvprWEe@VBH?iO@VghoQPQ?(lQPG;I9O2~J<{o;!+{_0o1df|l?D^@)J z{BzI!@|VB(#V?+H_UAwU+0TCZ)1Un0$3OnjkAC#bGfzML!yo?O2jBnxZ=CA1FKHE3m}JIm*rwelI4$n0m%@I{9V3WnGu8mI4diVmb^B(b8v9?^wiYo z@c1MR(_`nVk7cE=X&jiHeZO#frg<|2`USe%);mUqem{1NHIwk^rvRnb=@e}dK;%DA}Bkqq7xHF9`%`#Hgu5BA^3>-gx z{AhV$QAtV342J%~!oaD^C{q_4#`7TK59s6%=2B84cceWbElZ3DpuwNv{=olH_lNxb z82z8={yg} zndvEkcHobOBnX7R8A$g#@MmX($$8_A*Ja@~OJGW7!d?ZJ^qipl+1wvD{Brz#&;9tD z$pE-U=da^ZelYkhP%<^u5i~<6R1J#{*jp%K@zZbrq63_rZOa{NsDiIM?XI(DP3V|d z=W+z_si&TKrg-~yGV9^~+4|1*?RyLM?gjQrz@5RB)$9rEBLJHG(I~U$ zj}064?AcRk4aFFcxJ?z!oy?evuDz!zCp9yW{`wkvS?Q^%sVOOyy8oM>?_9kmH4sQ^ z7#!Sbx<5DmPXY?y=NmxM2rG4FT5?wOodnNkzRfN^jM<-f_3=G#{22h6{9Oe97qt1m ziqKGb`kFNd5kCrGQA%=hYC8FGYevSJv~@W-*+mVyQ=o_S^>sH_l^ri8{{#1NoJK2) zX^$5dY%AFI(Wg1HfBf+4kv|4-?eq? z=FOWA4wY3kceFITM`u6gzu4gfaPrTSTaGy4H$9E`y;pv&rM=nF8(7^$(W6`D3dyvm2fH>+8eyN0OL7hv6^+05$#`Dn=Iv!Niq|X~y3^ z`&$Svz4TI2(r+;Q`qy+nM&FRp*2Maz%qp&q@CmvI3CCZ4XfSk0!;g-?I)8d6P?G&D9gH8ZJV5A4|^${03?71#tO6#@|`$|8QieNpj*($byESr$JcncdZppkm+Xq zaPCuTExKfX0YN z5qvTL{uEAbG{h2_sX0=Q__a4z7Xp9XJ#P4eGiG0dKgq5}T%2!QDSayxMYroUV?{|hz@4uu9NescaOe%+POWr2M)&7GZH zT^;pxybENy8xedW0B&%kZ+fP#yrm2AYj3H6@t5+aeY@bp!T%ZjRi0z`)BPWmgU;^9 z?{FCY?4U#dpaB?0?0hh0k4F39zH<=3u>8RS#BhL8|3~@j>LUErwss(ZJ&n8zWcb7A zGUKnWwjS{7=xA?iYpFe`@n_-}mcLmXe#`Z))%>4@zgMljr5{c{c_WLThe4QB!c}W0 zkTdgV)z%-{5msAN)9&~i9@HJ6YyJ+_w{{Q!yIbq)`piLiS@36RgpsT*Z*0eb0Pt&V zsVVl#-@?Ku@~3UT=<`3s7rGMt-+AaxjVM+ z$lZyd@WB3kd-oLVO6R{-`n7}iYlYC#QcL+`@eAYs7I5@6wqGp# zb%*drB4qp}y=(TIt+S&8prBf&Vw?Yi!`(H>JSy@S!yk-EWc^FQ;QU_$8GsQ$F@Hzv zwN4@faMu{FKVbgX1_5e?AsF^_TkFYA%#!f;4m^Frg+dY<8yiobK3%nCn?c`-6{S}x zfW4<%06!dk34t&Gx7P08jO*X<{29aVB*Cvy?|8EQCB@PIS@|=&TR)!r)cTfpfN!}8 z_>%w(Ty;jmV8+N{BJPaFQBjDP)RhQ9|7K7#Nugijza{1E`ZU;zaF5WvX^m~>1|OpK3>4h|1p zx;W6=-PL?@+linSt z1pi0*V*xC|*R*U&$(y`#^TzeXh0C+k6XPR;moA*|>1uw@#@}3>j6drB^yM$bFNy5i 
z!(BHqe}q8Cu0j+727sRM``-`$>dYT;yz;X;SxJ+ej6U*R@fQpY!*9930c!kJS_UD3 z|I4nZZEo-E?Y}fMHZ{9&_4=(3@7}*LS+;{dgq4rGAK)K5I%2W@c<#Z2+foO}0N6%e z6SSpz^2)87Hx{qV1Ak*0e@!Re0sjZj%o|~DuKuXy(qa77hhNS8iTFE>{0-X1U(#Q~ zg{t0@41WjzkEih0|0CfKvDZ%_`1EfSR3^PkyX;YY-|xv&`mTO=>N1V5l6dvj5E zCM!IDCV*{M5WvMPO@zM%p1&P4vljl^CH!gbPuqUc+#mISGXA9fgGS8(MwdVB7xNA$ zm%%A7=}@6#w&wqWTCe-*={!~r_t9iV;u1^ni^y2#V__}73sI#TlY zH8j8kFf=kgeR<*9%@6NBcyMEi`agz04Zuqzgh;?id_zM+V_jK!8St2whh2qWSzV-# zTSi76gz|UG%a1@gC_Q1}&%Ocy0LSPm82BR&aB<6dj=#R@s^i5QJIhLn@$?y~fpw*4 zii@|A1o;R4Zb#~y$;p+*@H;_QANtcTj{L1$nY{9qw34^*w?(wK@P~7b0gXQSlT`~~ z%gp|bju#>{xa2QCGzJcEaPSg!fI5FE@FZ|5bi`KLEt@yLyGiSytsD28pRNV|hDPD{ z*H^CJ{P52G`!|t40w4TC{oe%&Am#6p2_W?45DCLHG(k95QE}wR&Yfh)Mj*?}n;ILN z&`u8>(f|m#;_piepvE7(Cvr=l>Hh{QD~}(qti%WLi??mlsAJ&E$Vgtf@|C0)S^l)M z>&ssue}@iXF$igAOA6l70ff$W29OvLK=1xfRxRN#%vpuRK&G_4PKEw|_TIZaiX=_< zo-c6z*lYH^_F8jhcXrI~IrHxBbThj!~S(uQ)A2P!&k%rnl<*LX>R1}5lr!0SJ^J^G>gsAQB+sXL~dz8O| z?5wkiv7}b8J}n8PL8ts#7E39z4AY|un88i9L5n z+ytPjXKFGedRx}s7>ZvA$Dca-5b;-aF(EY*)ZVr0ZQ_3Dep!d1p)e+GN3D>S!^H(!~FEfED7-E2m|B;S3;$W8$=bAV5uJwpH!uJGuQwWA4z*Dsg$w4F)6bna|y^nY&K z7S^ut_qa1NM&*yRzcBmK-rm?)OTRPluOhX=D2{*oaccU8H`G7j;hd z^$`nH+F!K+<^in9gNmKyb{EqZU;Gl%Dx}K8JMnJab%noY&FMUU<>x*O1J=rH`0EDzre1#Y8%XmF z>4d?U^QD#f6u`5m65})&%=M&UlIhj--~WaAKP{_0MIwjPvkLOs2F+I>+;Su(JtH$K zd$`~Lyd4VCp#vZ6CvR_~tQKh&1H!0M}BzyH4#a`)H7 zUrHbFhX&Y_2h<%(N{$8s8U8NJK4kgJ&xbmNs$E;XQ3_6jcl{qfhP$g!$j}_#+iz-Y zYRWx-rK@XZyt1-V>HR?$kdAbwD_zk-w*Rbo+ZdG&%SD zu8_AwLnF1;SDk*L5B~v#gg=-a1^&SO!2Sq-U;gS>znYmjdloz4CsM%y(~hUc!)VxC zqZ$cb`rqTHO#5ptR;67lo(DzSOc%Te^1GVWA`d2{QTz(i(~^+Arlvpq;g5f0`5QF1 zzpkDkd^SWiKv&Yaa|w+9#a)~>`5&>r)k5$;YJZQBKLtPJkHK$!9jt442pH_?sQ}h? 
z>tWc1^|y8*&9U&WNS?$!CU^C6SS<#0$Z$!Ki&e3S-BS4 zHpm}@UyF=9n0kWXclqckWUsMt>G!|?gUTNoAo`z(zr4ZGVVr`2mjc3H-Hwh)K65rX zioBH#T`=y#_#;F6i}ecKcWi%2>r3!M{%|~Mus_7_;X`OampTi>lf!lp@}SY(6&?nQ zhjVjt(}@46{2_gdi@`J z^7micL!+n-rV8>3L-3u=BEew07x zYQCDBJcA6z#>OWn9f^%G5U5qQm;8NlMa!u#usG;jwIw~QZPFJ!4C=cXmm*^h93%LB zcm&yNXk7g5?|%1tmOlUx`Mc9>>i@a&21daE$xXQPID%6WPT|9=0b|eiKOP%{e%;u} zv7NiNZ_m$1{GI`S;D7GzPsQ)S12D6mp0vnt0v?#(j=0og$$6LZ^AfQARqStR89j>d zr_e_|@cx5=+-oI8`I$pQ#Q!Y$(+p4r5d3ct1|jHxkLeJE@&^D`rKTP`hR^UPT)2>v zoqg%h;SZ0*zV}vUMp|l03cjK)6+YElFTh}kB0E^$_7C=*O{nj@+=@g4f6(?C@cRPr zd-V#&JufB*gV41;c)*2{YL%x0;N|!uUeBp7X~hcs5BAc+dJcWbv!J#YzYrO@??4jZ zcjyqZm)@}W%{RXV{B-`X0fwaUw|DPey?^I&f&UE+694OMf|pw*9!pM!o3p8@=i`CI zsHpht?n{^Q@-F7(r5rqbQ(r!2>Am5 z(fiQ-kUxdKdlWxd>$p)~dj0CT#{3C;0P&ojz`#i5@A>B`>W#R^bPNpqp#d(hlomr@ z6VCzeYtwWtCpsjiy|uZqzP6^iw50f2Ny8{M4ub=LUk#N#1p9v{@4&GjY3qQLzM@lidux};&DZs0#nM`gpF1hw+ng~-Ub zqX|hL9N3HO)z8CF>>Cq*6hP#!rtI3_@z?{0_OSr=_Vo1P92|^8a?W48R8Ur4R$g9N zQCpMbO3lBNck#mc+??!l=dyF)1#IvRvmUTN@V}Kpl|Spjht~R1{E)wU_wL>uhsR@3$bG&OhK>K&-+vF1;kgXRFNxH8^7 zF);*hdZCjrspB{cv%I`OyA@;X8xzW+L(=;O28M=)N8p!QAlUf0`s==l7AiaT5qDoB zZ@g0Y`<&AE)t6u5*n@Qb!2PKGA%363e=ub*R*4M4jZrW`QWRVN?6QwRvH!sV==rlp zVwRl+qxQsp7`HD5*gIJ__cioG2!FqY#2MfpkwD}RQ&UsRl|w9nYCXUw%re8H>Vy5g zce>#n`S8qOabqKyOxFMD?q0#p{mKgQKYa37;m@x3hpjKd1c~(R*4EZ;mu2L3_ugu$ zudVJ~T3W`2mms*fxUit?2zvo};EN0M?XA7jll2cCuHU((^M}vj<56+wqm4jF3(Nrs zwXgI534p2NI1l*67Yct=6y@Ktkhjl|j_X8f@WBG*5Ah4hEg#VBZwmOsi5DI}rSaAA z1I?a39jQPD_e2sFnA+zTkp+GWv*tGFOJ3$U{G#Znkx?#~#0B=w*3E(dUu*n9`-}W3 zEiev&5x`FwrHc+6IuNUR;Mfd9AYB9q;bHXxm#f?q8XH?iDOZU9G587QUM%bn2n4c# zt-`g{#pcoB?)LEoGByPSX~G@gp}pm$#fAC#*_mm04dyMF#mRc`;34v7IRYUDsC5DH zYl58xIt`6IKuE7%0e=KQ=+;7FM1jS!Y<8WckK?bQVMuFzr+`0-pUM14%W(}ln2 zjEN#QF$sUI1gD1Zp_r~aJ-vO0VJ^m!KVpEM0Eqk{fXE+98!j{a@$jksw=Xqy_wJb1 zmNB*S2OYrYw)g@4@D(8Vp{DRSZY8Z)o5}_Nxym=J*az(>@~L3vETSU;y@)e4HPG9D zubmk{#R6drtW3iE@sFP{Vi$t<#lrg}i3M8nH#%y^AGE)U{ULr{+aHdgM@B~zZ^K`u zyEOs5vnl9np{u~pF7T~LJ#^Z-8|JNhdwaSEp-UEe|H1v9I|w!sZk&Vm6u{ImIu8EK 
zM4!%|2|n0>zx3TxT$~#n9ee83Ev4^=Kg3C0Dl4i=@`V&2J`+0Y1B+ zG4%e>{wROv`ugAxGEvxIlDfASy8rM<0+`66 z;22mN1hY+W>KGjdKW4s<>LT?&5qlcHga99;oPL3I;z_6nG5oyPg`3_Noi?z_PGS+J?N+DR5bkA3#LYmAi4<}clUhV?x1i=HU+Uk zS0?R569T~j!2-1=m<~eF0l7B#Q;=2Fx%dErcj`vtMRqKrM%TOWq0 zXM!J2J!^9>%IJf3{(!$O80;io93O}IA2P*DZV}_s1pcOTc6JW_!=Dh%&o3=3udLk; zg%PotKQaW-0mLtb)&&#+2>c;{zwm-T9Bhy`y|hlwfD2-RQ3(F%4S#hWlAIG6io+5T z*njd&`uPiR`-I*kWjQn8&2FGsIWx}B{8yd3U!(09y0e>)Ma#`h1X??x6Kk&bi;lzZw zIT&CluZ|waF4FKY^Z^%_VBCunGh}iQjBkDY4)MRPhsFk29|LQHV099Z@Mjo=Ab$v; zC4W>V{a->K{KMs}OVpG`GM!fT^Rv9{~87+88jO*#4qEY*O$e{&&8; zJ&W*n1>QV@34Swyz53V-U*7ks%&gAX7im2Cx}^Pp0&O1bgV zdBNtKnTR0Q1%z1`WfZI!pxy+3!id^dgoRx!Es2K@JN_!bERE?A!4LgU#ZMjl>JJ`h z17M}~)#m=_1R!aG*RH@+W8!w`O0F!DDQI#F1n2+A?GW@oye)zTNdCDTx^@@ZU*dn< z2*l6=y8#fMM<4(S_?sv<@u$*9{VzJ&wU_Xxb^hQbG0)UVXKmJ*PQ1XlR~!0*AwRCF z;#pAYQ%s=qpl4wF>!_KTGhl(V2dEgJJ`KYd;2-|L@HgIs525Zqo|=T0>4rbF2M$5x z@5%BK!yhyE0?%E2gwI}KBk5-T5BM7%NZ1bchr}U)Kpfrxp$#uFJ{+ci2jW!VVtXh& z5(E5k4KOi41Ayi(AnSnY^MEG);QLul|EQbxP1WgB`BPe7l|TF&HP7&#hhl%wt^?fU zt_QM5W?rDX^5jXi0Y2UTpays@$r@G+P2L7I{zS#;y3-2(r0$R z=g*X`Uv)LeY~qNm13v~mM$C_=1PIzq1LB$^ zQpoa0dVnGT(Utl8Ap9seMY$Wy@u%2cd31Cu*dAFk;3wdZ;y3*i=ra#JfIaw6hYzcb zuTieD{TJlr3zcB7dvPJq10FK63 zFWAvT|65)xz=c)P)5E?V8EDV}2#$N{>>Z;0$nzsA*;q2}`eDujm zA0dza6S9B*xt!@SO5hXK{OHgF=UuSjtxh+Q0M*J=s8-3^f?tB>qcX*sgMKJByW_qW$|4!e)@aRPUP?gJdo%}SHzKo!+ zuM^(#($RtA5B1is`nwPCze~kt{K)MOVe5<7U4lcl&cd7dmVh)e&Vcq7+g-qr&bi=3 zApC3>pT~kX6n1uYw0E2d#gvqEwZ2{l5PP16L9iu&$R9Dl-ox+z*H3>IUZJ?(la@3h zA|Y%y%;}Nz`P0une)OPc{o%|Bs#Ka(i$~+)6!PEkO`%=-_#NRlXASFx7fo5)$I$^YRu1_<{Y+UMg1Ik4(N`GpXNw)8_wRP)ibe+Q2|(*X`RqJ^k3y!n_Ni z4+%scB<4rxBlz7P!JjWfYCJ^%4E-*=PVkL45*r%usCo-*n1|+{1|&^z%KXCeCnHT2 z*OT`~?T(C$QTh9%?GzD_U>2_O_t~?@AFtnAU7VZwM8qFDAn=DzG>~~0VtzF>Dr6%5 zwCdiVpaaCz(-nSSJPUIC!Ae%v0urd-iL>Sp@FN7`Z8emAW+qXWCic?EJ=!#n74M2Y z5Pd0M#ZPN}NxL^YTdeU%dS@Dcmi7k()?ICFZfzsz0pTu18~%Chl+J}cS^1yN-^b)zKQoi0e-UEy2JS2Tm4R8rojzT0t8a8>WoBk>cIIv~Hk`=c#~)Mv z@WCi6`{M^6Xn-sI4fPH6=O};Reu@R+{SRz;@6tDBVC#GDUQuY=`I76`vtyz|L%)mW 
zXH0LtnFK$5Z^AqOkluu}&A1D!=Z?aYQxoH3@H(5?3;5FD$Vx?*M?I%?{=$e7@Xy=} zYJdy#k7~{QA$~LXyC%i_YHM}QurPhiu(W|e%cGL3+uT#iURYEu@SB$~m_w}{_vFD;6o75oECZ|=U0H)-!Nw?1qKAB4ZYii#84 z6Eyy?1%|Xh=%Z~fKuhq0yT5?lcfRv?0;as#6jO8^{tQDiJa!GgGU)E=xOPP4Z@m}% zk0Msoq2V{T{`r$nhygw{^EWet@2GsN^!#dSqB+X!_(MD5cIH-m3R)LT0OStDt@w0Q zt|WFPd+{?m4nh7fwX#uI&LE>T1d!Sv@W;HmC^maYeQ>ojGV)w*Wncf$FwD-3Pfd>Z z{v%iOu}~e5j&482+<7y!zD(~A>~H0oYx{PEKZ+imawfwLjla9(iIShQEiQLlZ&%(@ z1xQJ`P}WpI{tPgDFCUK31{Uh?YKM~GC%&CvfFC`)yRvY9!svg%9;VM;!VS029u`4Z zEm`@rK@3_v!_M5Q4?%1GNGr=7#<1eUQ4J+AtJ%#1j~~PHSXTVikjW}^z;2>?d;3); zcWm~cI#E`7wyC|VyRWXH@$$+2`{A8Kao^*s{tV(bm9%~54XyKMHT8$?2mDoj>sx=Z zjrt$M-@=h2M~)sn`eR7^0sOP3@kcK6Oq#{qh4?!2?RqILc=_Y6swvh;rX?4&f^grMal8`^NKSWeCpJ zn{r_f7`=ui0lu#beKrv3#~6{e?3CRuRB@sXCj#HE45867>5idC1>$q5bK|aZ6VGee0hJr;@?{wr>wB1pw^_ zA6t$;E9t2Lu6`omZ??m&|5;Wz;s=QnR$HMlCqbVn^7ARV;V*+s*@FR!?N1YgTd?F0 z0Hpj805$&1Q-8$%wpROVNyA>!16=!fN^5@=|ATf{=kF?#Xh;f0;u_tcpu(T{m}<~) zp6aT4O6ofI=`iR4e{jS6xp7#+9oo2J&EL2FawqXWaKK&-KlS1AT z8?Oe|Rc(c}wigT#Zo)l!Y2XjLeHwprw)}~V%>I~K)Z4SwFGi5l!=P9OXQ#yQH-*Dr z18B8)J01Y^#{OJ3{C(>m75=t|^#Fim^4QA$UK4);bv0>W!BlN~sR76I-E);))(5FWnJ2Ee5KW%z@~e~`Zzqfl;&Wy1c;$s*$d#CobrEC0wb*K568ZcULSu(bvbR#!TJ|ZKmBO^b8P@(;1BU5 z{F!rd1@oG6A^+v{8%2IPB{%%RvvBBuyfp6C-0%k-F=~P*bo--IFU;HvKJxe4{15A% zy+{KKreGaBh4AQ^${)cmME5_2KT}S=WN(h?c}})eShmjSU68*PL$MwQDSz2`j?n%> z4-l91gs{$4MCEP#BK;hL@x3XcZ3{zM%B+wy0~ zX(*X5b|ZpD{9F&DtMe>20DN& zest~ypSFLk{>KVv)|OEhgD>GJgvYOx0kFm&{E0c7b>aNat747E)wDMTaNV<@88IfO z*FpWDCjVp4$R=pwFHO4+SfKnFJdxc&xa=W+2;o2eJv^MXYy0lqxfd>8yx2cDI0*OX zriRh|CMU0|X1ST4Z!>kPULCVZUu1*zpFjQNk-{IvFJu<66Y{53nqC&O#1_kJri;D@ zMTlu9pe?m2i*9Pqb;ZHBa* zg0eBj?#u1sW6-K#Qmmt(0w8yi%!<47MgE$vB7f~uIQ}s3_wwb7Or|uC7K4gERL1(M zwcyKVpHco4{Gv7gi^TU$&R-1)fj4q91!*~%ZQA0-1+6azj_EK+2-MEqv|wK>;qM&b z54v6k{$Ap%P)r57+Yns*fx31VtOVt@ma-XSBlf&)yw+suWQL6C~&&AIUQY2dNmd~NCInSw<;P+1lr?fL1{?-Zc zzsyKW{-(S?(;(!}M%`FQsL_jOH~?1dFB;!XP59d#eZA%5yS>%NV|GRHS}Ga&jq2J4 z1^4c?`!4wsw2^49_+rAsqq2)LGc%7K-9z|8`{Vqt4(u;W6~yz;pybc8+CW<(eckg{ 
zwSZq}R2X^PGvWvQg@lAfm$ZC3-&1ouGSVQSn$xhEu5M5e`12kVxe^W1CP7B4c-x!?yS*c63gkIuq0e`QujF~)G4_5xf z>MEeb?tA&F_5^&H8wyep!1I`jQW7G=!_Hm3bE`SqES#Ft9Dywx6jTcq9@F=r1=-?@ zf!~4_bPh~6G^N8||D^miHD%zRz{5X+2SmPnxnOagwwnW*zjp+erphN&R}0R)oH{Wx zIW{srH9NluFKz7`7+mVjjERi+0C#a*tbcQ=hzU=~A3Hg0bu z-NP)KmQ!#@&HrF0f9BK?6BO&0pjYR+ue|e4ciq|D(J_hX7cSuMC{t2XFXz|cU$DKF z`RY}^`P$uY33UEUHN${nR#yu)zI;_SGdVFf0sKK)T)n&6S9$v2K5Yl1a#{cvHYvy; zWy!%Kv&)xB~w{MZ`={P6GP3m$p(s`gxY<@K(KdGfl}FF92Yq0xyU934T$k4G8&peP z7~FU0NK$&$IO2C7-iy0DJv=xvHZ}cfiQUiR<>26t5zx#+zy{5TF_+@kAWOD;0rwv{ z+c~xZf5Phi!}SLax_|pXx$QP(V0b=^4_&KN-?1_N;FJHKN z^?FIs#k}kUnlp=q%8TWo;ZGG%C|_U(g^1}ED7sRu=DiN85}DgyQj}7#s8}~S#aYy? z?`+t^$(vB{JL*-?PofdONnZ=VpqM{5)$I!?8>Eq53Ff6>;*Zpf`4ZJFpsu=T{@jW| z>+{_xP_SIHHp&*~LCYdzV}px7Xq;@WHPC`K=j)sW+no3f-VgyT3k@>b4QVU9lUZ z(Vr22)&c@z0TcvKtj~5MgVylc>4#@)o%7F%KPwqfAxn+6T#z7UbtxMf+yIUKy!f*a zkRlc+L5i5g*=}&q;;7CA3`YLm6+8C=}>HhClPAY(~)h z4QACf5oH@R?M*iXJby-wSP@LM7Fa;ktxRgx_8jk8* z-5}!s6i7&{$fFwMp@Do5*(8~swt<(LG}>La**(+%OFrFP=g!BfF6?Tc z1#QmPX?6QT(T1oSWc;zX1^luNe?G7P=vpgd8-#;e=X4khI{xAw0gbD>t5D~LH+t(MgJS99^Z-YU~pM?xmp+E^z#4OHM30n1b?NA_CTRN-zoK|O~ zs4G4ti(t_5C)8IFK`h{w3#$AHFXgoc{U=~x^CzeojUtgoffdvc6J5${4hoc~wn`)x zSfDOF7YuOz%rfZLDX6u;3Nnb9uk&>W{iJT7^JkKQMJ&LBEMlfhdHq4tDLIio@cA>y zz>Bc;f?VsvG+E@l`5=#&$pX16eH}Rs;@V#fceZ-YfadT2ktyI6_?Apk0jYpgKq?>= zkP1izqyka_sen{KDj*e*3P=T{0#X5~fK)&#AQg}bNCl(=kP1izqyka_sen{KDj*e*3P=T{0#X5~fK)&#AQg}bNCl(=kP1izqyka_sen{KDj*e*3P=T{0#X5~fK)&#AQg}bNCl(< zQUR%eR6r^q6_5%@1*8H}0jYpgKq?>=kP1izqyka_sen{KDj*e*3P=T{0#X5~fK)&# zAQg}bNCl(=kP1izqyka_sen{KDj*e*3P=T{ z0#X5~fK)&#AQg}bNCl(=kP1izqyka_sen{K zDj*e*3P=T{0#X5~fK)&#AQg}bNCl(=kP1iz zqyka_sen{KDj*e*3P=T{0#X5~fK)&#AQg}bNCl(=kP1izqyka_sen{KDj*e*3P=T{0#X5~fK)&#AQg}bNCl(=kP1izqyka_sen{KDj*e*3P=T{0#X5~fK)&#AQg}bNCl(=kP1izqyka_sen{KDj*e*3P=T{0#X5~fK)&#AQg}b zNCl(=kP1izqyka_sen{KDj*e*3P=T{0#X5~ zfK)&#AQg}bNCl(=kP1izqyka_sen{KDj*e* z3P=T{0#X5~fK)&#AQg}bNCl(=kP1izqyka_ zsen{KDj*e*3P=T{0#X5~fK)&#AQg}bNCl(= 
zkP1izqyka_sen{KDj*e*3P=T{0#X5~fK)&#AQg}bNCl(=kP1izqyka_sen{KDj*e*3P=T{0#X5~fK)&#AQg}bNCl(=_%l=BFJuaM1p*n_pX`t+z!mt3Kd|}RDJw9*6$o(tc1i*R8^D0)PXZXw00usP(gFh+z+m7{5*WAy1_yt# z5e`%WgM>e6fdL9&(C{ZMFdzvG9{v;y+$mEaDe&)seE;{~jY@+B7&poFEv-Os@du6Y zmR4JyoKb-wV z^A@JfJFlSNk99c{e{68`bPdcS5%@Nhz#!pIhfKhqv_Ox(4LW?ANMO+Lr$Q#;Pp8wP zium)~$$;M`HNwHep8}bfKQ;L0iIzuKZQ^fJ2>g#ge*XDC46vcjhE4jaZBXCs&vZ#Z zA8y)kHG+yibU7QvY=buWHafH^747CCEcn}S0E3LbU3UE02i;WA#)b8&hhuDln4 z->@^XJfg<|f1O%@^QYoxVSz>hHR!kne`)DmIkc)Vk9DPcSE5IITOZ83h(`x=zf&hLY7q!KW7WEZ2 z#%zO3AwDj8T)P%KiVknF>xOR@or`zdf{;I>1u}x{hQzG$i?%_2S0C2=!32MJf0XF3 z4E{u%tOX^1tOatSAYzsP+6K8|d{}W<(WY$bDVPGAyFon-$DhH;A7_D#HUu#%0&Rm# zA-*hbE*%?xG+(qJd4rWdmOx!31u-#!wn4oNUzRo=h|NBNFKS@k;N_2Pgw@BfG{i)W zux-#+oiV@P*zjLTzMhiLvII1LZy-5TcZhn7{&UgFXxGNBrJ;%bzg&hM=#y z1U7#^|2Yqwag7BKJ&;ArWP!FpUv>5?e!d~&&)h!C66pN70g!8i-{JW)S|G=tG3cw% z{;c#ZRFvL1A>}{X8!z2pdYEhQ}$zV&WqBtbG#7n{XfzK zyK98GHrT+Q=5iwbw4h^w^^DG|*_0CsHEnwx8}y5Fx)dz@dDRFTNu%N!w<_ySl{M${ zBO!k056;%X>6x$5+bu!EpF0cWN5V$dP&VT>ZQVAg?d8dWgDSSjvfE<2gU$Mhd!1R& z>U1fH`18gBO>)`shmEi)gToD8SMkPXFkFSV2Qy@Muo_JKd2WFsp==Se4T?&$-sWu` zZ`}TZbM*3Hf)c#k6kPndBTyTMc&U<~clWZEH(+<`3-an<*2Q=l2{!&bYlMaBgog_^ z(~Y)4n|$JsSC#DdoyF&*ZuRDHY98mGLC2pv3&e4VSgvqL;s|WD4O-^133yY@e%;sR zoM_04qq!~1?G8c6pIZyG5Csvl1kg5UQK0>1FRIzEyV{=>M)kw7%lcM*$$eSw_6SD) z+*qI$WCSrsL3bis#PxdI#3hS!;WpFLp_|;C8kf((N^tULL!h;hDg22o&^G8mcO{FP z+VyYP;c42Pgu^2BY{AN(RU>S}ks1)B7d6+VyA5`!myX*qn#Hle49uu=#sWdHO*7VR#Vm-FI1v%r3|9 z=Vj1FU-u!K%JpB!{b|_kVR;^QnLLXHIDhYXLm-_+VKCz*Vgk76MUdas+cnGDWdl$s zyZY8!hE+ajTJ!dt4EvkPQ#KfObFdol{Mi%8S|F%nK#XV1dC-CGdKRm?<+m&Q^BX;1 z(+}rxNq8uSWwF@;n!leSGWG!K;}E=RBVwG(IS;aedb=#FSo21`SaZySEqZpROQM4~ zEsMz)`22ZDAh~8UVvGek4>Edsy(p?z!8Wd3LEOzIF1w=@!2ukVW5^T?{CQ-7SlJE zvaE)jehP|w%V}9$zMls2`^U-n11{%1nuN1};mTH9{)h!~x^OFyms{snY=hjM7AvB< zHEpxVt89kdxgCtzxuBC3V}Zs4Iw_On2@3vH07V`ct^nu;F|a@z4V?#V3e`e_$~A4$ zcJ^$+kl(XRHhUS$!&RHTy$A&te*yqO3*vzAX-kn8n)!QILFF1Zd3{K>0IxHfGO(p7 z%cgytZ3r^{%;ON01p#y~Peg>xKF)(Sg{mPz_41q49liR*BD~6Ay)%C{WX*bKuOdOm 
zpV0z|8Y+KwgoQ`gG3GqzVRZ^B=0J(Z%5Q0T6WTeOt z=5u-zG#tfl)l}7XJ8xFZbW(mb<-q5UW5LMZd+%}82Z)fgxh7lvfb{!2|Ms-MArOy)S%#xW5Ea@ z7c^s}i)oI*7B%cVsFvXIr(!V5hsW7AXh+GP^%PJDLxn1TM1&bYA1G3@GDl(-m4h-{ zm__xB0`pU8Wd->8v-u4V690p0*ID2JBt+;z1`u`8HmDS=?rZ!cd5s&@b=#o2(-!?i z@Zi)0{Q32SL#*9p>ql7s168!@767ORMe=DmMM08P7{HB~)mhLo*H#D!xKlRy_w0g+ zKb?j5-Xkh0A5<}4K(Rp#3ACva+n`Op*Bz4ld0@W=|ARWSt#0SR*GE;psYPr4{Ct`L&R?jBK<6yj=iFlx-s3pv=i2mJ zagsj|n%{4|8H$M)-8l>Ha#{ljIUNT1bKf#+*7*6k`Hcv8|1$#UoCUWzEg{34&VpNp zIOarb;3HEq%&xvK1_OVb13G8HgPazS(VR|#zE`0COR(e5??(#?{#XDN7Q{mN927IA z@+WMBZG+;|e7##Gf1dRHaG!0Y|Htl=PY=d z)9i9gPA`LE0$%S{8~*%%VgcR$Zrg4K&^Zep=QOz-o720X|0~q@5+#40``>n*Kt*`x zEO?RA02!ato1pKs_`d{e{``Nez@C2r|1$w-V1bv+Z$X|hL$O{2`P17PYmz^&{7)y4 zRG)JeyvxZ#X3FVtaBC39pM~VlTmIBW7>T8*jKkxldB~a*pfQM2i&Os69R0{Tg(5l4E*lxw& zh8+86p4RnN+>eewI13cDzbG69&_Y7zL0YgyvAzT?i`@8t&rH1nQLPM9`NTs0)vo08w<3pZr_5IkYTaTf*XsO zW$7OA=Pv?-l0QoV-Lqg-D|Qg=p~YIbob#a7xoq%u5Bc*CfkDcjMI-E-1y6E{T~5jA zAh^MtSrzXwfBxtO1}lGz1u{B2XTh_aA`8@VdL3j=d@~n4=g$uS1~h*^QEz<+KecD0 z^~!V3f`>T;E~n@8HmINXrY?EGpYH$$Hh=HGFC-&FNiGJMGO~ z@PXwX#4O_7x@v z`EB`oXGivJOl<#;D0}lJ%yba|DSsUbf9=)){;Q|&8rX{a{RFH|m;gWB{*3%}Q2^=u z-#jGX4*|4g!S|pLK($z5Q1L(5#{ogUWwGNA@awkW4*^v1Q~9$7Fev#${M_<4ivS{i zgg*dqj|PLF?zEVQ1rLJ)j`W;@pw6E#C^$>MZL;AH@w4R*03`fj|C_eI@UN8v1~z{? 
zc8Khc;Ag{M7j;0MzeN^6Gi0`)FhMf`Owf#xZII>9d`9LY`E!dUfCN8#{yNDhm<<0k z{;U8Dbp8YYQv4YH)CO4Tqu>oAXo5-mtNY)gJ%F~Lcpfwo$mKK!IsVK+f8u5;%ZfjW zpNT(3TI~N}!aksZKMMc@ocO z-GrF!25}j0DfqeLuS39}7{DOlkGDVtKY{(R{DA{f|Lg2*ZZ0S&E3dh6V@@t;Z%N|BoU|QSjJF#O|;ogM+6}ok&YPm2p1r(uLgg%>vsTa#@8*5KQM&NLmsTEsLybLl0n{zTjGk;vrJN({yi@6#P7C8Xl-MuH`WPah5 z4-Q2|e{g7DVqRY1wUWx}+Nv9smx~K>k}E6Ehet%Bz)?hIlsh^pDJi*3?*O*9i2w`^ z{xkwL{MtLpE~duC?%f+3w{JhBeQ~j|uoe>?86F0IhI)5FZ*Ny)eO+Bs`_1lKeS;$t z(=(m<@gGDUsAwM>ZYzr1y$jLM>q!8L^Ppa^pjbU9;*U8cFZ~aI($Coa635?rVUh9W zwN1^%=c4x=NzJ}e+t}RF+TPyTd9$;*wWaxVPftPE?(m4H7}wrd5L{J!@7}oBlya3p zc=*!nfPux|5BU^!NKXeb>G-vmqyNJ^Kx^Ca({Tq59zX&qe!CFB*52OcvEku?p@D(E z{-M!{>AA_OoP_;*4pw(e_1`Ss6Gr&cS+E7g`=DB|FsB+6@}~zkmOsv2-wTgES>7ows_2n|gYRckd31h>VVL;rAWojya!G3ezt*6Eo6u z8*LCD{=Et=z@_(@8vTDEZN+J3?VKX;kykR_uAI$`h9;+R7T>_NbT><*f@N)t959_kCE^w{m}Jc5Gy@zrUyZR`}BDDWt}^1X?%Qa>jKE(*J`*JvdZ#Dzks9( zM*gO!3nE?60LR2cM}+MPJKWoQv3YE4taos996rXz#wS{D+&CH;o_uR?awtBY=K>(( zH?V8Yg8&&@ka5|4!JEe4dwX0*o93R6R2N2sMY=kczWnOtXAhRq0tfo~D1XOyM*)5y z@t*8XReR<32LW&r5>KE;9kT$Hu~h-&@e}dK;-}BQ5c?wp9*clc8IC}9?Mm-Ek%E3dK;<0OvwUnwcrs1z*zWDChzre>g`F9UTHG4f&fa zjE+tl7V~F9gWK@l&+XE?hQwtxg3bj!SlLki7yx&w{59W6ak);ACZo`$r=FgxF!+GE zg-jMd_4Y3s;N)au(ol7AW_C8Mu96ZRI%cV5D+2iKZ~xOjWhEq#z83pW?2)4h2?=Q@ zPXc?{moFEiHV6vlVjQX#N`x7u7CRWx4*pw#vl9lpFDZ8z?iZyA#sQD>RQw` zSMl}snz-<&y?cJXonF?S@bK{6VFjxH&CImy*cl!j9g)~;;%`#(KjbgOM1l<{z@Jk9 z$(O)*EPdS;Pa537_7~2BG`{eU&R@6C|MGfzFYej7GXwFX0A}tE3k%;ve%u-vxieyK zTwH8MwQ34%70F_FcBEL!7PY4K_CKYEQA?uTK4D4|^gZw*!TO;yE} z#p&rV3;5yLrlE<&`~77ZNgu@Qc=vq(Zx;-7Nn-dz2ZSzQH4J}A)zkJD{ZHYqvy=Fr zh(AjLkw32pGx~Ci>zeDUuVCkgO<3imfhYgGzTTh}e&gea-<6A1^^H|m zPiMBJ?}%?}>*&1Mb?esc?(RE1#c9f=s~;?XGx6u_`OYWu7EU$fr*bEWk3_U0(~(e_v6 zPpyRH>tK5D@W-v{nwr|Wy5t>s6VWG%s#}_BuHIzngJtSDDuS;IKx_QQ$J^S@Y(}BOXc2D?&;Ri{xyY>Qp>hMXH1FR?y!gfJp%&+Sl ze_b&C74VnWGoTru2mVL-%cyK&u|jjYWc;=CW>D}Lw#A6_xw#F zel=>+kKQ4KST# zvFiWP_LuUPS>4d60N7k}wO#{It82Qj$^Hj(kC0B{bIF)4ovUHb=VoO+JaLBZ2l&-h 
zo;yhTe~kTE_@CPR&dd&)_&cZjA9;`$n1q_=s*opv4;uLM*aG1I#$x$`O<|8uiQCcF z6~+G){w@mrFQuoyzqh9gTVOBvt88d!ghvUX35Myk$}$x|w!UF`WBG%78~^mPpBbmY zaXl*H-FJVwZQHiETf&%_LY=lh)M-JoJQAK(6oPJyq@&{-b z`0GUjwD!NeN~MwrfVBNhC;kUbFg#67Es)H_Yzlw>aQX5jHl>qPQ&V#dAE&%>jG{;R zTc7}zU#my_Nb3s*hzM3t|8viu-uz95wY~hXU~Yd2Av}LD08tE3?0=UO{#gGzQwgmv(uXZD-qM1)g<;61@HaR!3vZ$t zgfukt>8GDTdJO3aq^FRcC57k!#x4*5I~#lNblvRi=!DzafFX{58$9+ujX&D_iv2Id z%%94FsIo->e%HJEmH#C!*Q29~vnsr)CHMzFGXJ9V|1OaB*W`c5UmqGE#jnFS{^0m) zZ5_h-mouf*`r6vz?iJh`x!v8{T{p5h{4Ia<(Z`TJf%GXP1wf5I7Qk3|Lk2kD=*Ywv zKCL`72*26r?Y@1hv#qtRD1HLwpS|!uGWyV_UvTsr5^@fj4VZrp5%H%$Ca!EHs2kvS z|N5{0=5PMyZ~yl1F#XT}`JL~4=kNcX_~3WH`;VBu_r33Z|NGzn0ZBjn;a~rcFspJY zKUPsfm7}0lCcEvt|7rYt)W z{GkCV{1q4~;ZhSd38njo{ui59Uf0;t(RHV9cyw}herfgIgZ1Un+(h~iRtn60VB_4p zG2`DQq5rA;Jp}--t4(k$c|}lO#^~(o^3wb)%U^BbQG@?YO;uhpTsrWl;y0t%Uk$AW ziXZVmJN|$Ln`&eD`x!!}SlkbP2m||n{^$Sozy3F*AN>e^+x0L1@~{8;umARM|Mue_ z|M-nJ-gxuPH-EUzhCjsK{;1Yb+nm*-swfm1gS{}tWO5eIw4k`ECxpVL%;-n;aO(iDK6CVup&!d6BpQr&w07(as z0g&>ya4AQ`U*g1M+b#OlBL7ZJHQDetK}R3z^oytbi7STLsuEg-??|&^dmlGx?;o0X@Efp5#o`#1`F{w7l_@82b%J4(MPw0P9QK5&)q$I5& z@{4|}%EBNaFoZO-ngJRC{1$55Hu^9v}tK z-2MW7Fv=xQ-{W5cYHp5AxsYGoeiQf`9-p3FTmk+bEss(EBmCWB0lY&}4^3br)z#HC zxfd_y0*}ebP*aGN$s#q}A|sO_UBFabRRx7C0Dr0h;s69kAUFbM4e&VyP~i^%EMWP& z-C0_EIqSgX%bD2(6nOACt*N*;o~+P+;174~2!rRJwdPl`KYRYl@Q=g2_0yl0oIHfT zuOyX%Kb*5n7^M9D0~(;-0t*4un_%G2u2}f7lwH_)(DFir0iM4OY=2wG6bU!}aOL&F zPpQEGyE*=L?}jIV!?i!4l63g+iG%xS<(xcts%x|y1|abIiSg;VrIma4*Vk8A{wRL8 z6$dl|2zA*C-wv4-xaR6Zq`<&PZ!vpv9P6hP#U7~tZi zeCmJ0F#fn*S5R=dxw#P@#E0kc<5}$J^+m!AHc+&k;crCcj|wgDzp|X1(9oaKR8o+g zo)k-7Yl0+L14x1hV2H{ejB>F7R{4WIAkzYC022PJizWP7huxo&5GSuT!uM}aWZ3&Z z*W2F#EB;b?@TdHm0aE{qfS-HO-$;`>NlH4LmVaiSi&hZ1U*+i+jlW5px8MJEXjtT) z_W)TXm6V*z&dN+XfHj7dZAl;vD*UNEKn_4M1kuJIDu6$D1N_grSdWfsffGF?i=fAw zqawh$;*Y_Fr$K?_`GbLC3;%QV4#3P9&Os9cRN7zoNw5ok_0K{Gb#ylC28=rT2Zu%) z;7`W2G+mvXSy)`Xd+*`HB^&;{25@MIIHQoi`A19xY-NDYpCf>=@S5DBqT>o%%EyQG zt+#HCPR(_f7vy9dKbCnmFSmS%JeM{$bMM*_`d~eR#PFw%KGdgQRQ^r^eIqaj-?Nwj 
zdhgk#Fj!J@83D{VdC;ZsXF(vX?3hsSKh*%~ET1+8#!)bFKFXnYk9k&PiCp%i|s>+j6kDoY^2ESxHUXYuUlbegfU(Np%`&00{b4Tj} zqM_M3AjcoEKj<~$Nan%?kNA5=0DQNIM~|!>Je7Who+%Ab9ru;2G zDEW}`7Xti+h7~~fucIh0r%|)N8mcwNAN=lU^_$=PW_~_hWe|SNnL_|fKXYuqQlUZ? z)L1CtPhfy_7RChN_kT?N5AvHUAGfFl3&YNX;)9vCMnxVzmXVotQsECNgtqiJbcioE zVFwU5FsTOk)2Co4f8l!k(B;JyT0bLK(sqQo5>qY~7T2ylSs7@lE4yf_;p0V}lYM=p z0T%NIPyA!5&V_rnH8t4x1GD3rV0gC=@jvJU;sAw9J1Kw05r}#dPMv}Q{+^e(4&b}3 z()02%;7z)NYj;Wd;)`GY@|VAYvW(v5=4=z!^GP9NJJW!VU8B>W8Sb$pkOKu8ae?*gi` zfd7IH2yO6>8UDlsY6XkJ&Vwe>gd1Xxo=gY)GL9?!MMdq2h=|Cnxl0BgX8!W};ocN6 zz~tnlWY->?eqi}4np;x%OWzLZ{Q3T=&i2}ts*1CR3l4m+KMv9%_#`iHqpTc1-qAGN zrwl*fr)5f_{@2q}nGHXq+qpa16&)SU!UY3ec#*s`IC`zl-;;9#+u`0seAMo{YiuGL1jw-3R31 zD+plw+Yi$Nz#roG_19m0_0{YwVK6?vpf(=>JbU_7>W6zNfu`EhNZ5||It&296abk8 z#Cd2k3kdVjN)!A&-T$BnQ?ZQyiNe}Zy$%YFWZV=Pd-O~?;+K6E`J?P*G&lb@B$dAt zy1C&J;g1+#9;_WoN{;6F%U^uJ@|T|v)e5z{x_YA&>;~q2pFDm{UIsNZG>4CuG&VLh z<(|LN)ipC-Sy^euFB>U_wEJK};^BxLJHj~#36RHNwujK#`vB5IOzZ1C@LW~&Dc}zQ zWbXm1Q!scctN|+gy%DFAR4095=>;cwyI^-zt!kRAK7vtn`efQBFc^a15>fbd80 z1OC4J@=Ng6nX_lX2jlnUA4dSwjwisl2CLmrTeyj&?RdAB-fB~C^m2n>Z4^ui{NBH5 z?Jvo0D3<5X`lzD3^nBERO$EEz5P2Z}l!9OOnS_Jrdl^m5fBX|9YyMJt;YDK901?L{ zr_LPO&G_H`OYW9K^jC+`}hd-v|$$AtJ%{vNLPY}>XsGfUx*0}v0cOaY1j90mWQ z051LF7fWA!u}mCr6#(SAX{k~36wvz1%@%OItfE=1mhrBaX{Jz|JAqN(y}TGY_UfFgI(CLF{|f6wiM$s z(0BXn%7!@ZyE1!M z%pbJB3Vz5R-it8UAL2*&dk7dUbryyvhwUKbL8H4X92Y;#&CN|C{-@%%4D>B7g8z{O z28bSr3_mP?Kfj=`a29RVgDDsdz!xt@FrN>|NR|%_eH0$RdURNzjxn#I~|754TL}F`?2^@{-DbVN2Ltz*?Z)0{2>E@8VSH3 z?Ex}(LFgF7=zs*kzxqopr@p`fp>Nfe^su%`U+^%f?`B+z+;<=a@jLe6F=Vfy;kUp2 z-R~%We}se$@Spxf_-i)%U)~VBQI;Cud7MK~68w+?zOTUF(ETy!)QydeB<lSbN^ubuKIY z%*nJ9r*P17vP_--)`q`i_~9zO+|mMbFPK&+eVY5R_`P@u2Ol3tAKVjjAZ{;vi(d!e zNAz(&K^Kq%P-%nz@xSz(`Vzy0zENM&!ulqC$+Mug7rzi0wf|_+vDAb^#P=E+e)G*Y zzt#DJ_E#Yg`RfG!Qe*b+MQ2k}-q7gK;2_*?=Z{;Dgd_aBHNguB@!I5q4*(x(DoYe`vq}7 zfDaN5*v7_Ap9Vdnqv?$~tU|s1u`{CnNibz$hmv-(X%}5#a_VI;tt`7yUskBmH?T{d z1eJaGkH|gy;uDfSIDo!aU;pc0!(5brKeRyPucqu;)A86thxV!P!6&?hvIl+z(myyn 
z+?ji!^u~?yin8+Zs>-@rmn${@Qr^W2=kbxMbJ;oPGBe?xX%FD{Y;|R&5cv}iJ~aMF z%gOO~@7~?J1b$bZoAbxFqKF7 zvmAjC17ralO_&%WuX&lGlQ0tnWEy5=Ws>$QKo%_xB!qFNpn-`GRN9o<1ENAq?)J&n8$Br4eX+$U}Tgl_QNqg?drtb z?w&4ql~z^n($X?Eyu<I%;l=(Hq#Zv?d0*fnEZtqoWixG z#pcnW?sj}kk4kpAX%Mpkt03v_H0nz_Z3?>aE)M{%y zd@6q-Z@(21a=EcX$8Tyy(Dvfc1F;ACaJCc`L;mQuwE_XMp~$zOUBPzjBKAha^mS7( zDk1{@+*8Dv+UZ~Y>Px`S1fVtqQ5=xv?}RH>Cs2SGZUP2Jt@s1{RxX#at*_PGi#GUx zmK2kQAK`CwwB~m2t*#;RpcGK3raAbz#>DvK#PHDY&=A}Z8R~#OLSJ>!gNN%^Mf|Bh zMACl>$F{)aK8WUkT<6b3-^3)gze#7$9*W+6z6RensiqZk*AMVR_6UCC zXsB=lySgeJlUAiRl??!Dy9pNTgLV}8R4{WU(THBenVM;s$->*2F!5vnP#=RRlQ4he z`Ga@fABjIgEYOC(6Jq~+3j8gXmNM?g&b^p>f22J%;kQ~iGCG=g8~!TYtqJI$f$?BB zZE9wE1|Cnq`yXWD8o-0p1rObfd|34G!Ft}h;(yo!#2Fv$DH!cRRIUXk_u<$kc)VP> z6}O;SowlIxCx8#m4EZZ7uYi9)JeID)2cOI8v>yz7;Wtb7C#J9`(EzWg!$GzF{%Q8- zXIPi`XGJWN^PoMu{yC~EdQbHv{T2%WgfWPz4PIe>p+@GQ;kn{e1kh{#pecb~+2s=2 znrL%>qN!&)`=k7wgGX{o`#2`kvovy5x}Lqb_a!{+GJ;guFO}@m2QjyK&}(Ww!teX ztMW}hcL2e=QdM=-0e^Ut6^5*(Dt{JJ&o+Y(())w{9}IR9FT%s1L-Y|Rde0ZC4Jvzj zc6N3S{@N)%!Mw1rG`j{xW~e}q2be`#~`oxmSK zFy!P-Os&NOYq946S6W0?X=l9z?J$ zizU=shrKweGMj*Nsn`TR`SQ!BUw-uhMjG8-DQ;KN)0 zWB9{cuLk^-)>ohZBU8_`^QZRyaC8BwaAagSF=1|QW@<`#b@Vv&IpKCR8IAzrI4zBn zgDY$655e?8uiU%;@anod0M*}uXk9?Q2_7p0{t!Sb{*3mg;D_z+iM#jIhoIjMjlX%b zC@1H`(9p=pUAuOXyLIVj&z^1Je1vn?>!2c*+5-;g%=7s{OwjX%()p&|jw%4%rHgbzFZ>cB9Kap3RCljY@-5^~>{ z<&Vz2e57>%wYzVWLE{MI{K&{qVj|pfTZBQbn&9;sv#V;G><`WpiXWCa{!K&DUV|b_#1@ruK_=mKHC4ws;xbZ_IDh9lhxW1eJpM;`BA=1)FnbWCDWdfBG0ipN3KT zfSLoc{8biVVHZoT$HRw_FS?Gy-_aAupAkWd-xBoyjP?glzTmSjJb$L?XELpaSb_fy zCMK?e$1Tz+Xo4a%w=lbn8@J9hO@i#Fs zm{7;#r_)FIL-ruKjw<61{2L)FSOG@-J~z+2jA+hBX;A7@OrZ0iXJGs5sF|5F*aX8E zST{hu3&@OufBy-iYy}0@&Z; zB7g?J(goybA~?b=+O?Hc_#a@nglL9dxl8;{+iuaUIm8Gm{+jQxASgz!fiJ#i0EfA&S(`mU|kt`7Al93uFk z9b$s!79MT5OKyEY8w{?8NFtx`5r1V(=Z_45wYzXuV-V5@)E>qn(=emR9|FkpN4}kd z2`&1!)PSFWKY}0dN9ki4Utmwck2bzlAhr#Eehqqzm>-XtnK`R9!EhhMWPsQOWcj%hc<^B~KgsaQT}`c801HJq;6U#0sPgE4@h z-UKUS;BUUc1{iyQkSzIYQ3s!hnFb^2|MC1OLHdgJ0NL; 
z;d#J(xHW+QGXAGBH<%C|O;kW{)M!($lr2LLeHa**4N>_+!~H~m4S!yC;~o~K-0>~;_Up~Y~k9yhj0%V z8FdWc=jeX~!28P2?gj=)LV1wC$thUCFXHGFefN^ep@BaYzv*R#KINV->G*-;g86aq z5!_hF4E*sdI1d{5&}6g$2=Z54Y|;ASqo+*kOQu!;HRNw;F&{oxR_|g@51L#^*c~L`;kXx{ zQic%lI^7Si`xqY{9nU$_*3;t)fW-gE-Qdf48JU@x7it>dCsXh);aj!E*9xNdCa0%o zW?#C{+}76C*ihf|;G>Tpee~&*XP>Uu(4hzUH)Q|*oQAP6!XWa8Q;OiJ05tGkFCKXJ-JiUBbQqm)Vq)UsDnA+3FkKsEH)2b~=si`N9#>ek<9Xfp~?|KEk{OWpReRe{^(W7~J^Vs-eQr!=q zJ%IV+V!|J`i=>C9@u#^T+xnvYtq!(zc6HzG?irvHFZcu&jx`n*=xq;$J{-0{8Mncs z$a@$*AOMDvS0=yl24JZ70ZCr~PfNcOtWU#`K?w8#N2_OFt@PF1xRxB}+5_*jNmck; zSSuFrH~Zr0XCFPhyRtC5HfG{)n({}zkD4D2JH~5NClev25x7A?5r5h-#XZTn z_(C;SWb@PoI)6{me|Vej*3*X716}c{D35_CWNdd=Y-1 zd^}72kMzrsKMg<2))yPz>Z`5Itz9s+LK7tr{ExJ}#QPNbfWQ3(wKX-D;wXI-Kjp36 zZ@!t-*r+=o2Vg7yx+og99#o3B*=ThjT1`?#wA&8|Ot z{PFs|)y28hr)K`92jL~DE2Qbw+TI$%6|E03VoHr}P|yitDxZEmrR?klwh{h#lD0EO z{xWc&1_|&3iXhF*z^jYYGdYkyp}aX^6yCNG7ngXt;MTzC%*xEn-0aL$Gr3VgesxLx zkF!7P!3VKF;O|a-eM9{@4L&2PTp`mXI-xvSpo5@XZ z*B{ccV{g8>ZJY3&@;?}yn1TVu82sj{wx$Z-8m#fRS^}dFOn_fB^b1IuTU8Bk?g@>O ze`)(WJ%jJ)U4f=tHNTn+RX75fwqjgBJdNA5b>D)P1>1flD1q837#AF#Qg-&D@<{$S z-+YYYI6%*1?C7TjQZdg|!T8=}^i?fqfr4-z~>?Sfx^Dg3GM zK^KrbbU>bzrU3r`?7jJa6~~$H?|!&1+`v&D`uWlW#JPosHa!AqE2iNk~Eh zV=$}OjSWIbJp&&HViGi<*< z7K?9G8j6&R->bPM;}8DobnU{0TK>@eL3s9Tscxu~*Iu~?Cyn3&JA{W8^!&Z@jg37$ zJ=0-HE!wGde;hw|^WpPXDAM%ps{G+$Pk^91e3thWcYu$-(($(dR%{D@mS(Me#y8@@ z7!M_<(NfnY5ejJN)a4J%HvYbRrmbJ~?&`Bg&_C4Gu^)@@LI(a``9>Y{e*oYPbbt2E z2k^I1Pe@^NP5hYztT_1mi_boKc(3odhQDjb-xjm~GtUcS-qOw6di?2E-@(S8X~HLP2l(K* zzX5jlzt=+WXKa?unPIxQEsim7iV_}g2>#%r{g+>AmtPS&A#UUk0IY`f7Xo-d4?n*5 zRW~2t|27rY)9G6``ahg=;UK-}gofu|eEQMDhfn6>+pLStgoZCK3n;xUam_&Wgp5AT_GdHsZlnE89<8`TUy0I&u5W7qFY z?r&iDdqZ=8uD04)o8UOWFJ%5!vQ^jN4=xni_|uV+qfgfyb+j&G+CvFx)Yf$e6wx>v z_rFbS{q1276v+qF#rS(3$^9Ajzuet?+u@G`i2RB837ZeNcd}XUp$eP(hDJbNTdk2h zz~A(ECxGbxME(FjJAc|{{a)ocfs}8GT2rhWXg0J_^Mw(H6l~XQl=y@FX^&jl;V+W= zv+@U5^c(_%dM4u(;t&OVHqd8GqlB0`peRPQt~efuhaIoVU$?<`k3Y06+f9*?=(o 
znE$(eo$Wt-{}-%@wf&$p>l-kZh*9NFYBr)#sw~(3?A(t%Y(Jp>Pn__ewu1D`h)*HBQ20aag8Zp9m9{;7m3)>=z!*eK z7*J%R@SV_%*BfrYIYME_Xy@+$`o9;-n(Q}gh=t@2S^W0j!NXZ&-Wof0S zSSJ3u-D3~?`9?orQm`&${#lLlpuLTsGgkLk*INF4w za)CUJriNLwv5XFY@+ke3ZMX!ZA3-}?}iyFVNaIsABhl`W>us!3! z7f(NV$u_{8zp~A)|0`RC@0+|aWC%RBFEr%U6!N@i;f&^kfj(mz8G+u|0}*qxVB!2N z$G(r_^T{%Q_}hh^K!dq5vc(^$Ydc`q*zoL&m+SG>L^sqRf6K7_NFi(dg*y0yFF1dW`{VrGSP0vW(wQ~;&YwTK z1^%LJsFn&^C?+T{CTq=DV}?KaGEfHj&3e58zVR4)|7LdRDq1zzLLm{G2YSe$92ou^9L?}umKkN zYXJTlq7N>E{8`kkgn$|yeDM`*fL;8`HshuP_!}~;Z1$1P&#xZaTv1#iYw1*!Gu>2y zj1wl>ejME0%a_OJHfDJJG zRbc)@)Kx&=xf=^%+fh3GNY^Kq8n-B?e?Wp;Q>Sf#jI38(e>~4Q6n%kQMl*r>gBd-x zV#~>6$D10LmzVMNH^AS^tNNi>bF;~xNnNHb8XKPje-3@6rEJ%G?(Gq!C8MVu>Au@? zv?f5XS5v2EfsBQvK5&T6k@1635A&_!37gKeoN8=b%Fg-V{15mOUw#GdZ<$&tL^`Yd z>D6^Wshn`|`OBpzdOAA}H?5q%V8NEH+qP}rv1wIV$=J%;wR^VK28Hu#22BnoP(o+p zo->xwZCKlE{>Dh-ttg*Rck*KI;Ui0mVJ8Cnj~;l1&s_LBpb$zSqvGYutA=q}vs2CA zFwLR4plH?gj4NNfJaD3?>qKe=H<(E8rgDYrJFyEfWCaPeoP6^`1V)G zU-y;Ho?iBvxO)$u+&;B`a#eXbD-x8=tEuVIb8%28$(lZ5nr0I*MjG#=Jtt2$?pr*z ztbEp{)vMuGl=J4*E#0uL;T-%J?kMoLI(Y24bJEUVU}gkRg6jIlJ1={hd*LtDUA@lU zlXUCu{qAGiroHn{l~Y7j&aVlveliM(1^Z0M7;M;bq%$T|zB{d`qN;lD{`2gX5$xNp zw6(%d%C|JUjCJ}qA@%&J0!s3kjTc_NJk)WeyQ}v)`>PS~rnpP>D`ro9$FD9!=I66m zzZfON)I&+ksD7K|Bb5&nJ$+Hd z%csnmZ#?+&<+LTWi`VSE(2IYUxTm}8_`bzcd^^M>{Ul3285P9j0VQXQc%-G}Q>s_& z?Yf13#}8g>cKynwcE;aLNw2VpEbu1@sDhHySUqp~vZYIxEMD+_psFBZz$VgAAft+y zbSOz0#bXgqX8CamDxh)VlA9sWv^H+bUYjU2?7RL zNUbr5m~beG8bz{m3pi<7&Dm@4s`&>GA3eU;)v$Kex-~O@``e?TnG*|Ugg;+EjeKb} zY7p}eC1IoAa2Nq6)@|5Ow`%#yRjaF^xX4q~cxL!>Wzfo(PNNnvf3|f;>P>%SqbpHx z{bfx#4eOfCQBz;V%^g;-hm-rE;bHDk@K2_ZoaLudXw>tUW4iMJr9c|_lMsw$i9a?o z!9#R&KwUQ){2Yu%^LPsd3bq^6#>t6qG&WgTnJxZcak3n1+KiU*#%IA=6TiU)5zyFZ z#`rTHXBrI!V!_&Mf9sSv{;J$NXgVbapHVkTFVE(SDhp(+wCR_J(K{$Pl0p6~0#?}o z9CXIYJf_}dU?^A6sRq}3RpC@B_vX`Gm^kfu+u*FeqCMzISao)P|h88ix| z)mShVG#dFxXpFw&jc@cjkYbtP&(%n$kOyG8j9UKOaNz{6u_XmiS|?!52k|p+-uUpB$7%<9Opwcl??hdtWgn z7FWPb@rN2Ix`@fN87f;vv@?ZhbH7jE^LZ{}MF^!qyZ=y}2 
zbQudU-AtVL6jdcIVD|W%Xwhh5s*F|1PdEQ^S`^CFr>&|@+$w`Hi~N~20{&8D1QyJr z9Vnw^NO9E7B!5PYP@(h~5i#Rv2hM055{@>iF{At$WZ(*=#mEsejMg!l&US4mFszN9 zHFr(Zh#YnGLoygM%b!+XM+CiqgU;ykr#+MbH6|xuy7|-0j7O19qjVWP#B_%;z($So z+^sT+H7?MXUNWYfzn~2MacX8QT}A=1;BgMLF(Gx+&R;+V5wVmTMZ^M!GVsR0km5+6 ze*P30WDy3RQCfY3C5w$)Hp++vT%dHM15r~E*M2b4X#JY0=I?({NWB8DP#^_J0aAbz zAO%PPQh*d71xNu>fD|AFNC8rS6d(mi0aAbzAO%PPQh*d71xNu>fD|AFNC8rS6d(mi z0aAbzAO%PPQh*d71xNu>fD|AFNC8rS6d(mi0aAbzAO%PPQh*d71xNu>fD|AFNC8rS z6d(mi0aAbzAO%PPQh*d71xNu>fD|AFNC8rS6d(mi0aAbzAO%PPQh*d71xNu>fD|AF zNC8rS6d(mi0aAbzAO%PPQh*d71xNu>fD|AFNC8rS6d(mi0aAbzAO%PPQh*d71xNu> zfD|AFNC8rS6d(mi0aAbzAO%PPQh*d71xNu>fD|AFNC8rS6d(mi0aAbzAO%PPQh*d7 z1xNu>fD|AFNC8rS6d(mi0aAbzAO%PPQh*d71xNu>fD|AFNC8rS6d(mi0aAbzAO%PP zQh*d71xNu>fD|AFNC8rS6d(mi0aAbzAO%PPQh*d71xNu>fD|AFNC8rS6d(mi0aAbz zAO%PPQh*d71xNu>fD|AFNC8rS6d(mi0aAbzAO%PPQh*d71xNu>fD|AFNC8rS6d(mi z0aAbzAO%PPQh*d71xNu>fD|AFNC8rS6d(mi0aAbzAO%PPQh*d71xNu>fD|AFNC8rS z6d(mi0aAbzAO%PPQh*d71xNu>fD|AFNC8rS6d(mi0aAbzAO%PPQh*d71xNu>fD|AF zNC8rS6d(mi0aAbzAO%PPQh*d71xNu>fD|AFNC8rS6d(mi0aAbzAO%PPQh*d71xNu> zfD|AFNC8rS6d(mi0aAbzAO%PPQh*d71xNu>fD|AFNC8rS6d(mi0aAbzAO%PPQh*d7 z1xNu>fD|AFNC8rS6d(mi0aAbzAO%PPQh*d71xNu>fD|AFNC8rS6d(mi0aAbzAO%PP zQh*d71xNu>fD|AFNC8rS6d(mi0aAbzAO%PPQh*d71xNu>fD|AFNC8rS6d(mi0aAbz zAO%PPQh*d71xNu>fD|AFNC8rS6d(mi0aAbzAO%PPQh*d71xNu>fD|AFNC8rS6d(mi z0aAbzAO%PPQh*d71xNu>fD|AFNC8rS6d(mi0aAbzAO%PPQh*d71xNu>fD|AFNC8rS z6d(mi0aAbzAO-%y6!;p2)GLr?WPdt}Ldq5RDe;%`8BF)^iNAEuU)o2XYW{wfcJ*oe z>E>@VO<>9uNI8F_iNJINn0o#Qz*Ga6e*VY>rWwF2@J9rum%wcBM~iS;3Csw8PL4sPbe(PfiAa)II;59WAA`6CxNn4uHK zmTLZfE*yv(9peT!gFXkd0_o;Yv(CEs!RKJsoyNu$NI8E&$OQg^Mw<1(raeG1A&!6JuWd0N*&G%qaAPf9SUQXptG7dKN2jO^zV(7ndWr9D^%LVuojs2%< zI(uU9^(%oH;m?Om!ymc8P~Qe0zJ4SyGyJ)b>G<>M3^fsdnS~Vm`e_l)4u1}0dj4GF zUnW|ijyk|!zYzHIG(Z3Ra|JfkS;_K~R-<3+FEpm153>rcMyB{fFK1EAYP8fVw5eYv zI>$j6_$xSo8RKt^jX%3F#{?D5>`y)P(GM<1b0q!{f-z0;1hcZn->_jeaf)ol0kbJ{ zJ^{vafM@f6 
z1I7W%(9U+hjYiwMEi=)PrJX;3OoN}pUw}Z7zko4OU86LnpX?|tkp*qW5C_DUV&jJ+yCG7k}ZkduV;m$U+>_#=8o;9BVK)X(LO%JiN@)?m|_ZMk? z{`nVz`@?9?eTa{o5<@|htimS!X)h}Bf)?Mzl|hf;c0;0 zgu)T~7HSqG8gV$<;#uQQ;{t`_9xKEHQL8afI**JzU5-rAT zLmo(+R3`ZoU7#ciB4z~8YLuGEBN20=1J&Fi1PuL~;SMAAXEynhT%e#0L(D{=)hINQ zPqcsOD12}EXu;}cl|PX{UnB=HJ%LuE-$p*M!UNIYJ>;VX*3B+|;v(!mj^!byTZFB~ ze09e9iLsGfOZg5dZ&^~!-%kS9pXChkeh5>_M@-`at;W2CP7uGLLz9F#KLqpDCEfh} z`qwgWYDxqUeV~X~zy(^3`Rbe~e)%DjWbM3VNjrZz0F)NtVKRTJ3zYa%jrj_l&3T1+ViUpE_ zG4E)Cvp#s5%3GBz_`irQaF~Lir(ux4LNPVU!+A?C5?-x<~r%P$_DEtNJ!%R!DwR) zFL;bmb27tUE*B{8gjLp1HZ{*nTa8|+Fp(HjY^XBUkRK!4A9&=-e$}|K4DlDm1qS4@ z@rR3WphC=y$WvU{3d&JL?M$g2Bbg=s!n;77P*%jOMqO*>yr^?rxbm7YM(j+}Ld2|W z@t2E0Zyyp-rG$$|oaF)(Hy==>&dkk(wPcOIaEq{3opIx|**w!~wA9m^B29AQh4o`{ zbE9B$(myh(>92hP5RZc%AaWwws7Q{ zv@XzU^yQ2mv(0Xjw&)}CYgoqN7m63lEPr|z=wlRgb2b~>e>er=32m%pncU<|#@<62 zEaM1O-%=>u{JrTseW3qP9t0ddT%;)IVlkQc=gGln=D$WGKaABva3yoj}nA zf;tLfGGpe!bI*8SdQYj1kg>r%HxHX9RE-=1P(KJ$0Gq2h#rBQ7ebEZ-|MpDyuj8+0wGQwZ( zMVKAA7BQE=_{M;$5i&IMs>{-~s40iVG}%LS)g$EK#Ok}j&5Ts13l zmtq5vD+elwtvT^PnRpo)<1e@mL0J&M-0~C`Tgt>YS{ijtn(39Z+=7ulv580(%!TFA zu#~xQBunP_Q(Yia!{yIL*tlXFWATk4r$aNvURKB^3RcV;h!9DrvuyC^odB|p2wQKx ze?*~jQDi0%YQ{8*{oCTg+$vF9t$wj7noNA3@ovZGsB+(pzi`j zsX|eth0LuP-6-~NjccZv_#4!hr-#V%>0E}EuSQEnJc^`TC*c`&jKw$Vy3*!p`O}u{ z-%QQ8k{dv({ofGfgiHpIyFf{*uoMLe6lz8^N&~PZH1j+dR^M@&aoG+dnl0u0DJ&=e zB7u?|;VDwc#)~sTkQyClH!5W<$>2m=3iq2SII1L1NmI|C3`!6{X`TdxAS#(uiWIY4 zGm25^#g+_AwXJY}*#d(~)G4Q*KZymEzc=5MrYRCdQp0LZyHV;iLPDAA2sH~=R2dT{ z6YMMt{7H^1K%gjAfFi3=WGqlKv{CFLXI!4_oW=X0fIK3WO0w5X@F%gL0w@`S82Mrb zZII)d;~U);LVr3A#`y?ctw05dKl3G^Ap#Yy{KdsV0P~>8tt#{+M4e8B93kqCD4n0q zDyzXS$>n!kM*JU4yU&6QAS1#z3V^s#tI=uLEvtUAx@xAIwi?|+gY*-@gQ02oOLV|P z%-&`2TSWf{s(90_0nj(<u4Ia$e$E=&CREWG(9Q_1<@ zisb`}1u-PhG9^}{rQSfBh`$i-SMh%^XV&SCZyabE`n`WMez+(n7*g&3UK$XU|}8P<$v95loU1F^uzF3AW*^N(hM zKgk2eXCXvQ17y5r9Ao|yn0yO1{u1vk6a0w)IxOgg%G;=C%;itJ2wRQ%VGe9A@fYUY z56isH&i~|dKx5 z-&eY?zkvS>0I0AaOO}{XW=v@|f>9pc;7AdFk^G-eAe+AUEJUj*LKdhQ+Bi6flSe`P 
zMdi<3gi*)%EJUs8Z#6VELmK6o9~>zoe}x!c+V>x91@>H^p1+VRM5f5|a=Mz~jD?ue zeA^`cqWeFWK&}4+Je)a+4~o8~({S=eQ;YqPZ{n{Yeg7)#To1-_eE%W2K;8O_!f^l% z62>?3h7HZ;V>C8dcmwelmUWpweimq)AB3y`Mug1RY(7RyqlGn@_#0Y?ZZqWn&=11> z5VJ~!0>#E(NqnREKniOy@>d7|Gsd5HKSVo4@ma9eEGgCY=48e<+FC83&CDOSB-(Y> z_;WAC;Emy+Q1Mx?*R)wnV;h6x@0Xlh{DD2Wab%A_&jo4#jL$-jnig{@1mk>JjmBDq zHH6?VIRZ1tpTz}Qr#nBR5i)Exp0Ti)8CwsA{DL;LAH8|_bCd=^61)Ob0+W>lj;?tYC$;4gmw)6L&oZ)wSM zGhG6`>5I=o=$Zk@#G28J-mv>O5{19K0Zco8b^=uZB^K0KO~|M+CN&$$s1C57(vkSf z7r^xM7s>@XQx~6wC^cosl$sHY{luHvO*H=U+y-ZWKf4Qbcfw5N_$)-LDQ-0aHA5R! zF#0JSmA|}PU?%trHHC)b8~cejwVTNNX)CC$8q(R|FPsY$$#dcW zhbK&YP;@oDhT|K(k@asxi&KouP@~QYe{o%)0H78WSH$Q>7eH;zP)4`!e*TPsnzl-) zVP}Rvy$f8j4a4^B+u`4)Bga@c&clhLCr+HG8&m8N7#|cvO%7RnV~KlX&)7sk3|g3U znZrgd<;tSQyN)$B;{T5wJ`DdIJ9_x=Q3yE)m?8c&1a1SwPMm1w;c71r*Lb+j!unAN zAX|iuTHzNto)OzM*34 z%<*&iYvu~|O8njM0W?b$pCTaJ+-xr6;O39xC-BGQRru@91z@)LlU<;LpU)o$Q0C7C zFdtBCh?S1FS*=F1c)Ft157 zR~~O;%xW~ZSy+q9hwT&aI|Hk)xc;8RB{;yJ1wcHC%<<>Bz^&}uPdNW;27&M^s`$Sf zE`U}R@-bTKj)!1@$!fIJ%hO~7kJ}JGi9gg9f`&gkfEnbE6S(!bmOlV6z~7AxY!hN- zAwQ#$F>KaoEG@Mf^MuS+Jr{rsKOKK375~RBHEcFEmr4Kk76Q2Scm)1#G61bCHpq(OTJYc%{8#PMj{uq81{!VcZDD!tq1TYAh6%@f3BtS3*F|ry({(>V4_LO9f z5kQ8Yoxf(b3ufCtk3SQDY3EM^AjePO&s~6>a}>N`1WPbme|`UV%MPFwl<-ECK&hr` zl=ur8lf*61mWe-(Uw}VHT0H;5faib;e+B?k&tJd=8u4S-|3D}ce=>lI!?A)A#VEI{ z)Rc`1f2y(6#E3pWfS5Hy-^Ne}zg+yC(D0`RFa!L_F3`bG)V z{`QyozY_qU>i>=()13omfj`j&y7+PaR<5e9F2hg`|BYv{@$lG5@z|nimo6=SZ^6QO z?=4-mX6@>gOKNJDtgD((HGbZQyAK>bu%NUUx5L7$SV4(s6dDfJ6pTUsBx5xEh7J{u zCPY*MaQN7g;zjG~HoiS&eA(MmCeB*3rf$pjz55UB+qrkc*7Yl@_wHRauDBEh9?xV( zxyxqHo?X4mKLI>`Ob1{#`11(#@H>8D*N5|}Dkn^+teQ9p!o;e|N=TKLm5wWdKSMos zeQWEf!v`B08jqhmd;0vP%UxG{o7c^FyL9rN<5${`Z7Ll*2GQ{6NdroJqu;QmS>LGR zPZ&}}{|7+%BhDR@_}Sa_(-IL3sEw=mFEk-}mKEG{q0^VDRxfz8n2NTn9LMZ2sb^ z$?r@?0y%zT5Wu6YtxZ?j+b_0Vym)rcS%$!v6&i)fU7fx<1DPsKjELcH_ zZgd;g)^v?p{(NI$`IEfso8!tB?LO3YcK?RbswuN}9%{eVb-C^0g_f4nrtDKU5L7RbkpLAZ@)dU3i&G;TQp|uxQT~a>zl4zZokmh1_WMeyVBj; zwspmfO5pEQ_nG6{9RG)Sa93dkC61A|YpCgZIlb9&UgNoo`X){rgde)$AKmZ|ZuomA 
z{NUA*`WAJ$q5~W`zI?&%qc<<_-!Q&x;=B9K_dUFQ?aJj#7cR7%J$?3K`{Dg9Eu}?c z;hd#>Le-?n0ADRT`@_qBYzBbAGmJlRzTAoQKRY`+CwY@S_#(ORI~L>*Jzx)B4LCZ# zV&bF;73JmSrA0+WW6N8Szm5)YeV5xIT<+{Sx20}IMe&sS(>=$VHka}Bm(2^CZru!F zUVI~~Yic%J!wivc{o?08|M|~;_S2vK^!3+Yf9B&X#eKFDot@Q#7@;^~0tsSFW^P>ga@zD_1(Zj_%w!y>wjl=}X;hGiJzK0AzB8 zn2K)%$i#-!cBfKmYm*bAOD$ zImNIm!yU+&F-uxo=gfyNXYRyFwY9Zf5H8lwC|h_9W$M9woD^*QfeVz%$_cAcE*eN# z>oOw(_${sb1OLaCWbXfB@#n1X4Dw;)D=L;Bfv{;$b;aDRcW&Rje(l723m43vKX2ar zS+nQ1w5%OhT2@g}R=f-@z?k^!Za*!!4FNkp3a(&q0;u38@&`Y{f&duR)SlQ_wUJ}! zX@J*4S?KLuKL);}W#i!#5c$Jv1Z8DIAS^`wy6eiy7Pjm83!ouoy!mTe7_J1VstUSf z43XgcG4}`l?=^qb{=D6Xe);|GcOvrV0qFC0R_p)Pw6uO$ zGJ5n<#E%15Gq$K`TnYPeYia4|;t5q%l}q=#PJtI|Za&tqb>)U-?0*|Du;}__e8!h8 zoUw4m4+jv#Nw6)@1$5WI+o6328~1J9yLHJDxC;302aRoAw;o>DwRHB|<)enb1>lW= zjV=oUf9L_>6mUOmf7#UY^%wo0!(VeV^M5-2j07TokqDFk!*Tc-a*T~p&hk@^5&2U$ zzijuR+5LX!+Y$D^tS83_mXU?8I*Rpk?bLj4e$Y1ZeQR6G;f%~gx)93LA>n=P0gG>70|Ay%KGjPKBi{b)3 zh@3$f8s>Kla~S{67(a}5!zed+&!;)>C99Q^qD z>+HYQf2O1ixN3H29Td-;Wk;eMXCq?=o&V7!H;6MRr#;>#U*s)z( z8jdtI)o=S?*|B3wmf`{6&VhwJR{na|l~k?+{ysGFck(1`N!01Ff#UEN#Ra;}yUiM0 zplf!EOP^MwR5G7LIQ}YtzZQ2@Wvi^p-@-l4^zbvK>NQh`3rKPpyv;&}(Kj%b!Jxp2I;$00#jvPJOxOb22 z0)^>D1P27bNk;lQyY_r|@Cf2}xMAn}E`QFq3qB0~&*N|XY#V?0B$37c=|B|+ z7(Bo*668ZLM>GbD$Id|jW&SFdV-nZjiv6wUThE5)@90tF@4elJnh?O_^|A{T_`~QR zoc4haQqtFyDmZ{~Z24)cU`QyVf0qtDH)?PUEWB4`fT{0yqf6V=P+h4Z-(DNs; zU;`z%diJbY)pIZ``QZJ7dtLxWHFs9`X&`|hmMF(j1e{1$Slf(ez>+cfg|6mD*r>VIMWLIMPg}?v3VZ&N6 zEMcL(zJAMA2pi{c^f-SvIe@#j97O!s>I)7K5!}Q5UvB>VAuSTTb}jVS=_~Xy|DTseYgVS zEiIT^*oO2Af0uf%!JDWqL1=6H?6coM_#DC)5S~JKHhYKH0)mL?Rb6ZFTKX(1aIsaSD z*582tL;lXA1LXLfQ1>4ae@BnD;q{mIcW~Et>^R)Lf?FeJ&bFRyxZFSd-TC z;WG#hfF6G$fR*ru4Df&*m%Fau)5>j^;5QqsXV08&K6bQW(~K^-{v5&ovE7Gv`2}~s zLxwDeWdp8157F`GK&GE;$LKr2Z~gUO|IOe0&ENj*|HSaW{?|9Z`OUxkJLU(!^{sDX z_|A8}^WE=$_j@dS|NDRa{}h?p<@}hejH*~hQzcuz?EgIeI;HbJ;13)iT!RkJU&Zo0 z$KdB*t>@b>UunM#H?ZJQ&F+qi1IeH30G~Y*07U+7t{GL`ajoz6%^ST}yRTesyU=>> z^vUB#8#V^`>)Er$z31qB;{9LO|2gij-qG9Vueo`Vfj<@mGQTp#yaITdz#Q+D7gqoYOz_f0b)?Hyl24;?%kG 
z?H%3MZrr~6;L(#i9V=(?hp^_twGUjJn+mi4y{+|sE`Lt}z-{glT*+P$v}S3?wYzt2 z-?%37cc5;%;{SSj_O4Y99r$zc>vh~;J)Z`SAM<}U{(uF`v*uL*vor)8{VW@4s%`x%cp+kMDHg8>LU5-VI^p^5x6n zN5r#d!)q!rfS&kZU^0&e0)M&%7y)D_fC7M=zng1U==ht})qU(V|7x*+dwLoz{B`l& zhkN-YEPwim5po>|_$%~sY>kB#76F8x{xAUHw`m#xzxSGzKj#;J}&A2F3(l;ktSe$j85EHp9#hp=VVbAT#<@S8N| z09_B5i$C_-22-bb7>!M84b1=L=5Ngf_$4cz0dfF?>o4F3yIl74J^nSIrl!idtJm#6 zeiHa=@4R~LRv+;9(VZ*Y|1thfivXTup@j!S~xN#46w-J={u5 zt0An$uz%k^XyhjF=Q=>#fZz@Ucfg_p{E`Fc@CN{{7x_EWykqNzWs^5-sHtV3z=O{V z8@Fzq!4mu*_`@Beis1QYZ~1lHpPj#5_{ZUf{_>aG7fr$6SF%aLAI=#A8aaRe8y%p( z0&4;Emtf$})-3#3%4UvlG#-f7AoF(u*WV-T5{b0`s94hqKcxl-cvj+X>{xgbc%1hK zRAx_|y5OCOd~z1O^WLeB-LL_H*H3g_y?(pz!NVs{`b7RXerFsHr~(Lcd7gy}JRHEV zX3gr^bLQZZ&4~Q)!w(^|o|khbpA#(dC+>j7Gr(sYK;(}(z*}qAasSs2`;RjX>(_5+ zYB~%L;=}X!Geqq8@s+|AY@leQz~5z;KQ6S!|Lt0_V#J7F^00kTkxm+ zo&)6muNZ#r#eXBs=4AHlsSDS=Ke2*O5WC|rckbGiEB16YwmKJa%R`Qy$H9w&AJ zUxCkx06G&X0eF!Iyru`Ctqtx^oJTZyq4Vcs@W%jj`MY`R*4niv;M!l~!Gj;J-g2R{ zr@OPW15PN8!X4qm2M@egJ=Y;`Y9*W&BDnzV>sDH0=d!Q&piW_0AyPb?-bAl@OwW2|7UJCRIdh{_>c_7(AndQ!E?=+ z0|&kk8f4EOT-<8#e-*74;mR0ZgJurUS%2Xt!4>eUe-T2MqYswtgk8slOKq1A!Jmvf z(zv<1_vWp;_a8ideA~ibWB}XRm}k`TcjF^r0X8|nFTX?pE8#V{n>NjN*m6GJJ$Ur= z>5iW3XLqk(v2^~Nnh(~j+}*~WOS{thV9PZAU_F8)@aOJ6+^1h${uTj!m*EFm6Ja2ZU2Ve;52+hd)EJE`J6W z==7?G$!gSB3FZQR6*Fp<&f*JgdD+aFTen<1W#SJ`<6r@XOE3&T*Z);80-?^N6~|Al zhdZZ@jfW1_A89^)r1{i|GZ!y(pE%O6Z};qZ^A{{w2)|^UzkcP46)RWb_Sf@&j{9@) zJ9o}I1w@Bt^?(w8%>BVxBkp8YuMWlEGX~)BO)`2c_0D@scJ4mB``D2>*riv!^MBxW z$&7Z+--{t%b0scl5t%uXU6PwnoIPAH~qs zQV475d^jQA(1<61m?4CE;LpDV!}%NM+ox=})yL=O^2UXuiYjK!{jhH9fqP%{T|CmT z>%+hd&)?MCef~UKfc5;r6aN_YuY`NH_4Rn}2hL7fg5ljh%>ThjAZ}2kFq-qH?m*m| zaPB1-;P1=r6_fGZR!i2bSqg8`y>#zB3%~o_KmYST{|khB5T;JWyLICn{_cKUH`U<} z07U+FPag-YRq_$Tzs`?8RQ~$@^rt`l@sEG}S6~n!oHBLo&h;D611@=g@tjHHjf=2H z!mr?7$G7+ngq;D(r-1HNz#pLpgf;k8fj>QgUc)+bd}Dw#ZASU@MN0s`rSl#B#*Z&4 zE-tRAzt1)wLH^cUfO}KS0ajPfuC6G-%MT)do37t>_*;TMhFZ1iLQnJY14s7l`C#h$ z$!|}pf-nU>*~{BFEA!`{Xly_4Y(L+h|H 
zo2R;H4FUdEELv2K>0KUw&btrT!&hLy_4fi?9svFjzkmJLKm6ej*RC-JXUtfCU>yMX z!Q%Jky*q&u7?>@Ogpqi!Lje#j0kErpcpaKu1%&I+&Jz3`-~T}qfo28&r!#xKMmB1C zQnN}cr@y}h@vHp+`Qz*@ZEE`eAh`T3@SPjR7=O$Gu7T8)+0|t-f9q~N68T%V4yF}m z_wL=DJHXw*b>A;O|D3%Hs;%uhK3;P8aAV`jRU1#8>h0XScW)5CTIVNyW8axMYijYR zQR5^C8IW_}Y7e8e^$~=}7@j<7f#<5q-UI#+K=B^1dkF@IB04~azZV_zzS01A7XWnp zpTpnH2irz?{0$j3v36M{CMSFN$xk0}{w^~9IDWw2?|=V$@U6Wcd;orM#>92=5x|A> zXTrV)r(KyX%wl0A-tFbL+T0tx(k9s31#<$w^H1LT%c?8Q%KVvo)z#(WasLf8oFk)j z@{IQ!{A%By`40NMrHxJh_1_T8{LO8J7m0BPh&WDr@BJxb1^+i`?Tr9`fZsizKfL;e z&jG;2ID9Ms(Evh#laD@n^5jWpd%L#*u3fQu)ieh`_?Wn8;i5_KIjgU)5BJUu_|KXx z5FR{u@DKyy$N78wq~-P3C)6x+_>%y{&YequIsiMs|8W3s|Ms`HfA_mP%mdy90A>E} zJly)0%ikMsy|Z%l+e2_`yV~c^k>4$Z{*UqJ;m7#{2m8C<^>7BO=FR~C=g!a(=uUGv zCGWBuh)*nY)PNSGQ#0M8;m@fBxrx z{+G*NA2`5s@So!UDq7kv1All41|49cIbT_vhk={5kkB z{s6xx=)A6+2L@YO_5f?6Miq@2H)a&XAiPtvbm_d2Bi{x7AlzZ$HirB6A3S6P`Tzod zRV!Dm3d0}X1r}D|j=3*5fVXblzKvIjnh`)afV-T(vNpID13#O)^Q4;bH*!>QWkvbh zlPB|&10TP7M{$9_ljqR?vBeixU+(@8zu&=P|FV1k```zwDre1{TRm%P1v)`-(mWD4 zf9?tldtmn>47(AGn_$NS@-_IcUK#3Dbs}Q3s{ex`qFJpP-YB-B#(}>13mASMOrHwu z6(2tQ@Bi^1|H1fk05tKpzpbOao!^ARKb>7({r(5l&8_Vjbh}%qB~|BM&Dhza^*ti|6Kg;0DZS^f&XIx93c8YWccy! 
zx7Mw%tGkA7HN+(t55N~MI%dF0AoGC03Y-CU?ZI=vKE~hBw?>vv!$%n5g)prb=DzjT zs0kCx=897?$H3q4;cqN~E%YJAADsJ%_;LQ=loNX843(J!XPN+(X9i}=lXcMh_5=+M9Y+rR%i=kLED z-~#;L|IPSo3i`h_ZSY1}?f_TeH3TQX4=LdL3jDP_yaFe6SFT*1J$lT@k?Yo>`+LUy zUpV)N_yI@{AGWkCEFCwlh9sX|LxrM=@50_wEgdabK|DV{r zb@RrBXWMh|7hHkC|G5CNEeP`WyMOKj0Ozt7(aoDXcg~zy@J6heGvKXQB}2z7U$*4^ zMGF_ahnt>7yWH#F-u9PmKPq-?IC2E8yBj^n{7gUQnf)w37S8CEk|U+#CrzI{XWq;y%r>a;2EtwQ{OzrGzi^zIM66M8An{kix(de`ZE2ieIc3U37e4re zw@|jguRtzbYHx2|xq8RWoxAt!+P!<<-i8Ae74z1uUGw4URrpBN^4b;4Yii)0X$#=@ z>~3FQ9rCB&e0cn^l~dyH!GrtvJJ0Ulv~1aus=a&m?Ao<+#}47l45~#hxhN_ zyVrN(P-D}v)2$bK)^PreI}qjoMF2ZycD1qBy!7ykFkS1}WthIcZhl??vUuU*%ppV0 z!;8h+FT)FVS+4uzKKPu^lxJsuoA74|4t~I25I?^7zF_W;T`zd{?CH~v%Z$Mi{%nGQ zB8MXp4l&NgAZCbh2MsYxh;yruAx2{vOUGAKzFUQRjpBxe>;L+%un}|lg9RAQ|L_FR z^?=A9hQ`Jt8$|;7Ybn`WbaZrF?zjwZC~rM??#!u^)fE-{4?8z=-4D2`jHma{`k4Pi z{FJjl(f#T1gA2j;?p-;vf7Z&gEvMjBTKigW-@bzjFY|!#d6Uxzq#}8D?%ck0^UUG1 z@UrjIPaZ$nV&RWHg%DVQ@hYHm28bT8Yd60R=c$g!=C-FCaA3m1r z@yDGXUU~7h9*+0J)z`rf_`~mOd;5FumAq-Ch@F0lY)EmeDrJZXD@B}avA7h^*|1a^ z661?2$|{h(0}a=H|A#-oS%}M@vj96g5C=e)KNNP+5>Pp42I|Xj~#D4*Rp%_ z<0p@iKjRK041mZV^ML68Pz)hloO!@Y$?&=S4S8eekRclmpYZYP>C>#exb;Bnfj+!i zii#nB{M)(*0kTk(pV8K^ja|fEMa)kJjN^-o;mPZ0lxr%-WJ61fFgej zDk^;fHHcvrFxX+@5Af^TutQvZP1jz$%?GTc7(DzKe;pn5XIf96YGV&d0flb34nNoE z>g?`nZ)*~h!clHM#Oeq_=svh4r>4v`GsUP5n>@oa0(V@Z(?EU+eVDP5YGT8v2S4n?Y6%sK;qmF{#GoS=c@!PB&!-mAUvcmm%4U>B|dJP4=Yp_|L^ zZhHLa$(kpQ|HCstyyD|M1>-%4Dy_imKAgA&ckXs>#ohE=ofmQV)4&HKL;iN{-UI)> zJ7Tpx-zkgc%`5De7{#g-Yh;OvBn@q12WhMK& z`L|dLAnZW`YjB_N3pI8P8lEejhX6+A50(-*E8DQ0uO{BLKi#EgoBQMZEr&;MTFwA! 
zc%vA--1)XPc>WKZUi)R>6aNGMakaa*_r?t7|B%1H0?Zs>H~@h^4xo|00Db@;=Wov0 zbNhflc+m@N$o8|xCYCQ>ap_`5$Cf4V04gJF$KC^{@tcERn`6heZ--rV?Q)UB#qe-6 zatG2I@ylg2SIlLxbqc*&s*A>onwMWXr!fkE(n+AW2KS+?&NqGeWCU;HzJ1eU@P{{9 zVavM1<@a>EWX%n*;nY2j~NcSA}dI5cy*OhUSma$Nb;I>o=N#KZf9tMJF*-3>h+P_ALJN zGvja3;+hq!@z(|H>7Dl>?1LNOd-q<3z%SIl&nFqDz&PFUjR>}}Sw_9t9HCd|Y64zM z#U=QQ-~ayU@Bi=ub|J6{!Cf%F3?l%@_;VOM+E4>s)f_ss3_hazKY>5I^{U|KtiJyB zKX&PvpZvLJf4IAVPdJQulM%!IIoWGgmX@~9nH2QfH+>3#*2e}_ntfkr#E8b zgNKhdKgk84`&$t26i{A*uj~T;5I_@us{3>B!}WK;{Ri$t&~J>Gaq{G*6)WByF`~2- z-lfd$)-CzqgAd@bvi383DA72wk!`+@(veiYu^m z4#+RV{F$x6zjf|}c>IAjhxW{bww4__R0AJ2{u;nxs&?S-i!bin*}k3K_Z9i$*IqvM zP5`~TZ=6AO2eRt&<+fR~;FjAh*yOqaUaxWO?mY%5J|l(y!B*uSoI*YtvGHMmKmQak zxCg>|z>}(1!4bf8 zxYzxhy$9R+GQBu{K70TnNRPqAQU2t%I7X*oxj8=-zu{obr8R&c`agdU;$Mbw&H+6S zDDt;=6E^nY_H8rZL*;EJN=)H@>dmD$p3DyD#{BZyS{B?C* zn%N-Z=hMgeL-rt4On3Gl_%}lOZ~|2Po(HeIT=qO4r@@&|J%RC!;lNI+*P5DMT!LW_ z>^ngJ6j0a&|Hr3-vWM`zZ{plJ>>1S%{7DGnbyOf_!*&lp`Pl;*Kl2U*_-ns7b0)aI zTeko-_?0do#{^aqsk!DY!EhfW-~jOy zP~>l;2OmT%{B797mS5)|r=Z85bNvPJgEe*AdAI-*SbdrMgOfK1^5tiM>*U=_=V#8s z1@=CB&Wf$n_z$eL0N%|Tx7g+vcO!lG0mZsHhd<^3y=ySaDWJv!N&t53dV$xX75+GO zmuAkda{*NG3-ZVDyLv~5- z;m>{n2Je6LU2L7X8F&LoSz&OQ@YKvlPaN$0n{u~r;3;5T9jw1=*>g4SH9&s__B|k8 z7WZ$&`5ut1!SFobI=D4~01E!kW$x0)@I(G!=OKGP*m!XK_U{+w zRGu`!fYGFkQ4mYA*K&rSyaYP{G6FwwMTSqw{~cj>h}_u^B^U;renvB9X@oh<mo`FS7Ew|~E8vPCZ!nCxVX!Cpn^AKU_Zx&2UMGz z;U#O$Cz$t>@dN&@t=)>=&)fXwTz!GRn>Vf>y#{aQyA7nVy)Ue|xb^~ucm@dc0eNt0 ziH`$zHa9n)z<{s$83C^^g7-N4dl2Ou5JzWPf;)Di|3d&joIHH^Plr!$N6**Qb?Y&{ z|A2?_oqPLTD}3TpfGe(^I@JOM_PO){e%$#z`?4P3gAklg z!QT|fsO#0IqfkQ<1=DBDm{2ig z@q25w?ZKB{Z99ChcIM3K)7PxIfr~E&*ZbkK2XOs(E8`E>MRtbf@#lFzarH&_clXk< z=2K_SoNc+tFTCIrSh&}?d6VDvaOlHr3$$?td=z^R!`lqN5$u)8Kluq@=$`|!a{(Nd ze<#?#48t}da1Pk9|JuvG^9?(ReK)V&yAt5< zD(8>;KJNT*+tFF?dNLhi9)Sfi>iF~e)EnL5YT*oLl=y=LAjlSBb|a4YK$k!H1>_(C z;}3UgPoJ_ceQ$5IKak05dLQuBxno&T<>a!p>vZ^i`pGr!|JZpM^5@}aTzzrj-M{&0 z)6r9KX@v()AoxGF@-pA&&;v)P>H(zH|{+8lK4Fcc?6hY|ig;y83 
zBbmIe%XxD`2fS^gs%qBa^`|d(^!D}kUcc7c)5LC6uwPwr|0lUW^X7xOKj81&!Gnhm zF8AQWb3a^wg_RfiV^8Cs88Kq+s;%3$)|8DH@tXF1@vpsB-3WL6AxR8@461~`WM{F{^jfM)n0r@Zyzk> zuJfy3>I%mo^Ac(V@l^A?bbdx-!`80^C(s*$8i_fi+S*&rBl&;+^C!5E0|bNoc>vyF zl1uzeTzlE|e=GP8VoOUu+*H?o{NU~#n~I$&#uu}{_RBMDlfjo?&;2Q@ufO>~_t$rJ z&z=P%;SLB32p6-w`9e^{sDy^M7io3ZMDC_1li)AB<=^eE7oob7xK- z-T1z<{@#AD&Ec+|Fhgi8(e1eiN_)%F8>T>kFug!fZde zxb1!P2&`VqJmQL|WEa-xO`WD!D}UeuHC4Q`J>%~{J$`(% z*JnjV)vC^qUH(3Rm#>{Y-*9OChDDPm!8?a4mHA%G_?ta)^iFhty46>4e|>eYyz;fz zIe!j*Z2!A&ItF;*`>U@iT_ln8`NQL!x!&RLTHp2=2wxx^;12Ni$0q(>ymzO^011R&yy%}sNAaL&o?hpCHfG-yN@sFos*oA@T7V2M_zlw_W8!;R` zy{kt2St>tJ{I}u7)6YJ7bno_)Zk_+zv}x0U&1|{W@n>U9{u#F4AB)8|Dh)+S#_!c! zlko?Cb-H%pLM?yj{vbSiwp2IN$!o9NgOf&ZfgQp_3wr)u`Nqbco}THjq!#Vex<8Je zyZP|>D->z^c2)lHuqQy!9X`wZiaWr^U+MT;04uhIKTEULKI0qlV2p>7(`cz{lL!Sg zbn5a4W*dKBKGW8(dUy5NBj_LM>e!D(cp(FSuY9A9`9A<~2f9D|<^%ZKs3)YbxhDQh z0#+P+{>5h>J-pZVT*KcrmY>`HN>{UncEbvP?^}{A(DmSMzu|f3vZ3 z9e+#tgKA%X$*)_P2QFGn{JrvnI_Ccz06qTntM6dr&oto^xC4Cf+}{AZ``>FJ_%k-k z=FBkN+!n`}H$@4LHw1rh(f-RXwac#voe($j2LM*X`U?R(pobsd`>LA{@PC^M>*@5Z z8~q>7xp0tPbV9@PFFyU~;lro?288p6ZqM_7*R|(=7LFW~p-NYIqdGVOW9Q+*s|A+U?yS#ovM9loX@{MYS9{|{b{ITnICigcm{Jo($Kv!Gs ztW9tn;1@D~E7_{+@CO$PZT#s-$SFvokL3Og`(N&EzU}bG0Yv^p{DjR1+&kH<_fUn+eM2LludUX|9pGyZ>Vtawj<7TYWW~4|wEn z!2BO-De9nT8#I0ek6}Ih%H05)`3u&RnvT?G=$c}y5SQvnhZ)T%AB)m#XrrCK8hjS> zc|`XYf$x`M}k8fcX<=MxHWE33G}`bbud! 
z#M*vPn)MABOT?)1Cp8<*HN-;lhb(^k@8IFAF>j3>yK*)BMXd|)Qy<&|^`QIf?jET9 zuP3E@d|rkak&NH~KY1?m$Go1$-wil9bZbjB-Awnhz(yTnE(_4LI||{`9gx=Oaipsm z#whT23};?utsC)sQR5FGR4f92qCezu`0Ezm`yTwE@v%$B295oYgZp1U@Z_v^=Sh!fG_`}7?eAu4x;ESi9ykr|-&R^MP z*Z-BR!uL(y7%~K&+ZP)0Y6^K?v~WiA!9brejf_BV?16~6S+H>amSf+?@%dz#Km6@N zPoTkE8QJ0w)U_S3YixM-#Y=Vq$oMN0`P+wK#I*5cis5^PF_wbP=#jq2B{YMYH!e=i09Z(3RkWumSkGA)A zH`R_s;A+_$q^5KJO6Ja6So8AbI*n{Ov(n9A5&4lw?gGF-^0H7$c2$(w>v28}}_9TUI`6)9TgmE6REE z>XvR;*KiJg40jaxTOB<1+&O9IFEBF#C_#07aJdA?@79K_kQ=WZPVU) zr^+d!D(BaPSU(vB#DaY$WDGWJIno&uD&L(}R8dvEcmH{I%Lw*uSK3mjT z@{!61ik`lxVsiE7t(R}$E0y8DtL+y$E_ZIZ`ts#urEI8V2KbW&l;u-q%{Lx=`EuHl z+Qn=3Ug*WYOWf1lb$s9ADZU+Il75n>vMm*Be@+s9T_IBOEzvBn5HM@T0 zQaj`CrleQcL>Bmy1XMxEX{?^NeA&{aOBOG9KTuT=F<=vED3DP_OgfY#jpDJ0C$s#x z1QpOYamw`BH3xg|^xc2(2>!~!?W^r=m)jbC`?6Oo7doB|{saMoETq;LL`*o8M2#ZZ zxdogwt>)}Cc-8!ahmRiL>uOlLYTcTdzy0me(9DSiGs2%Qphmv58a0Udhmx?-Z#ayA z6YDl?s9Uvs<*L=yP+a6GYCJRixiV4&2=*Yt>!BiZE7Bm*jB)M(Hd8DgeGDX6gk z8p+sssf_Yx5RfC579&T@Fxmne4ZX$}&?8Qcq?*6s^0Ba#tB_W}LK`*XVXG4BH?q-1 z%-{l@gV8mcKuxWFc^!;)py}pMGc!J7sW*BQCC!4Tkq!yZsOvSpQP&mZrJTQ@MuI|V zHwyd(52Zhh!EdmrZh$DOF;JRkq@F)jBTPWjZ}r~Mp2bO&4D(mh$${m8Lv3n1ZztA`TGyIbFo)9&-)aOhIsnd zUv{+q(Rl~&N78BxP?Qu4zBEqES4h*R*lVC>XrtJL5YGsIz6=_L(rPRi3mT1lBs50f z@y0j$9Z0dv@aJlzQ^*4_T}CZ`?s4|3v0wsL4ey7?lnd6yaG59e#geo9R0@rH#0tiO zeE)$CT^9%m_`1~8UcSPG6DgNRtljUr-!Lm7BuU`TPKPd|T(46+D=&nT@v z!ji?tEgNOT0xnQG(t)U{h-*I>X|#ULRP*;gD5PG2S16DIqyQ;E3XlS%04YEUkOHIt zDL@L40;B*bKnjooqyQ;E3XlS%04YEUkOHItDL@L40;B*bKnjooqyQ;E3XlS%04YEU zkOHItDL@L40;B*bKnjooqyQ;E3XlS%04YEUkOHItDL@L40;B*bKnjooqyQ;E3XlS% z04YEUkOHItDL@L40;B*bKnjooqyQ;E3XlS%04YEUkOHItDL@L40;B*bKnjooqyQ;E z3XlS%04YEUkOHItDL@L40;B*bKnjooqyQ;E3XlS%04YEUkOHItDL@L40;B*bKnjoo zqyQ;E3XlS%04YEUkOHItDL@L40;B*bKnjooqyQ;E3XlS%04YEUkOHItDL@L40;B*b zKnjooqyQ;E3XlS%04YEUkOHItDL@L40;B*bKnjooqyQ;E3XlS%04YEUkOHItDL@L4 z0;B*bKnjooqyQ;E3XlS%04YEUkOHItDL@L40;B*bKnjooqyQ;E3XlS%04YEUkOHIt zDL@L40;B*bKnjooqyQ;E3XlS%04YEUkOHItDL@L40;B*bKnjooqyQ;E3XlS%04YEU 
zkOHItDL@L40;B*bKnjooqyQ;E3XlS%04YEUkOHItDL@L40;B*bKnjooqyQ;E3XlS% z04YEUkOHItDL@L40;B*bKnjooqyQ;E3XlS%04YEUkOHItDL@L40;B*bKnjooqyQ;E z3XlS%04YEUkOHItDL@L40;B*bKnjooqyQ;E3XlS%04YEUkOHItDL@L40;B*bKnjoo zqyQ;E3XlS%04YEUkOHItDL@L40;B*bKnjooqyQ;E3XlS%04YEUkOHItDL@L40;B*b zKnjooqyQ;E3XlS%04YEUkOHItDL@L40;B*bKnjooqyQ;E3XlS%04YEUkOHItDL@L4 z0;B*bKnjooqyQ;E3XlS%04YEUkOHItDL@L40;B*bKnjooqyQ;E3XlS%04YEUkOHIt zDL@L40;B*bKnjooqyQ;E3XlS%04YEUkOHItDL@L40;B*bKnjooqyQ;E3XlS%04YEU zkOHItDL@L40;B*bKnjooqyQ;E3XlS%04YEUkOHItDL@L40;B*bKnjooqyQ;E3XlSS zVG4YWLh2PrGqOJ&MIq%1{FL}h`3$Ce_{3kj=P&J}Pc?r(OS}3s{&e#Lq)CCFrTPBv&s3$s1*ln+9@GkCi$7R|2esNXI8}j+@uwahr3bkJnd5KFAQzm5 zCn=CU{>Bi2(hLvoL>BoY0tYv9;^?x;AGttrjt6r*qx_Kz9L&%OV@oxEKNk+fjgE1H zn?aw0S%Gx(r&(uR{NQsi>rP|i3Z$IBAY=l6K_kujU{fIV{Hb0};!iaWHuY&d>E}=J zax#C4k>-1_DUb#JBrm7(Cm9Eu`h##hLoxK%`1+N=jPU0} zrs0oVV5o0{4_`kLm>K?D$aMVqbcULUzsy1ke*LrvXNNxrGChB;@h=mtP)8l$uU`oK zd77Vp{<#7h>a1k>NvqK>_7@t{(1%$CS0hvWp_j8LW;I&s724D<6P@EA4Ez-wz>M)X z#>SuBm}7zpXZEKa`sfFjqd5|P2*H@9c!F73<8Rn7n>a-_*(U`B4@$0J-9>1kHHPzmY={Sw&s*_3ni~w@}2Ac~EPI#_a zOVl3h-k6hUzpSxT^Y;r!4|(~;l)=k6{0%Y}sG08MC)(Jergux0v6+~Lbo1wUGA(~P z$UOdZ#+VA`UrjgN`L`Oj1zqp9ys+(tE0=Qq7=S_i(EmXQNWioCzX9WbWoT!+-$tYD z-Ikf?$kNUqK&HXZ;V(d-$X~#isIE~O(@%Dk7V;^p@72Ko2!hD=*WHh{gen8|J@1#$)I@XLe#^OFe&p zUw8st?1az6l#JhyX}06H7^bGF*HHOkItW!J?fh~4^e&KHhC$RkB?RugsAuf=2+lNi zYMk+cjmX$zXhTv%OF4foeg+q)66irEMUv7oT04BIv5tAtqqP{Sw7IirmQbDLE|eYq zv}b_|fD(3oBDYM*mvCnrT6UwFPtTgq0H9r`x~7NNS^12}ultKMKmYs-!Tn)0=RU+o zPKlu)N>*W${lB=bfzgHFhauO&XyO`=<{=}*G z2@`XjI1JIx6~`MY?pG~DEYuKk70MWYIv40mV0Cd;qu)jnvG6oNZ$jaSeG4@U5{)<< zZSkz}r*VP8agP<^fvD9OD4j<}9x#OOHIGh0R}DWJW6A9C7jS{Xv5&PeW;F_pXf;aB(6qAct3XL5$e*J41I5i~#h(1t6 zEZ_pI#(Z^76uhzSfs5zP8FP96H zcfu-bD4Uw+rL9J*pb6(UrE?jxd7$bJ3X(3`(w)o3Mptlc+s8Yhk zBhGRGiklB8QfKC7!dkM%U${kBtIoJ_+H9U_HCpQFO_3%!@xuBsxw%ogbL+(a%p8BY zTp;d4^m2`ZptnG-)o83`F+`c>M9W&n#7z-;%h^`W0vY5lrwcTQf`}Obv>FW!+Os1} zbD~Aq&B2{MLQEMqQ_4R(JX`!Qx1iO{y0o 
zkxl+A1ezV0!=K&-T8%MucdF!=-DJ%%hiMx`%qIP6S>?~P2wOOEO!_*i$+ZuXIC^pqhfOCP$elF1F5KEQpyL~b25}-F_Nk0&rYD|0zn-GF`2RW#u&Q$ zRZMd$=R4))%?zeu)!1`nN!4Q|G#$ihN2!zr z{zADxcHA1od>0tssAw7~shL-8meQ!Uj5$-O9V4k}J4P#kDjDG~_ae-WT#J}XV0>dh z)d(4ydDUgZ&ON@*!!HA*E7 z3EkWV@(gvIY$z@jP^(MEI0>~uwZk}VvO1aI&jnED1IIZ4=0FTwphd&@MoXifNi(^D zEU$35h*CdXB}*x#Jxp3kM=)iJKMjDO1@nOLX{9KFW=R*-Os<+0xl6Hu$dv;X#MYd6 zpiI1sjPV!ThoCG7U~YMei!Ej18!e5xCe8H9S#H5dpV&mC3g*J{XjsZzIFcoE{HZRG zsp0ZxBWzr;jj{N~kkg@=VlOLX69p^g4Md0})LAz8^G*QSMue@m-an#HxhOId2sL9G z#r|z^VQ!VEtyaI-l<{CPseUuUpA4Yr10|tyQ)KdxtY$2uGyq#dGs`T?R;OQVnt>(O zZ&vs-0w^gJf+B;L<2B9$C=?z0nm4WqEw+M(n99ejBXVB zx5hQoO#BV%%hN;T`E)Kr%U7eNA|6Fju9NVLI>zD~bzN!mwESsH_HU+UT*(a})&6gY zazZ8p$X%c$RalCG1PV1H8l?f)5}J7)46E-r&A4ob5zUrz{uCAz0FgjRj_?#IWaGsd zAxMpmvm2GNmSk|EErt8d6dYBOr=+RpPX;9jpfpbcLJ*ZqDn*J}t{KIs^kPc}rrK7x zzifd)CF+#Z&!5DC%HNxBO4AgHBB^1urrjuY8X=*~b%dIQE2@kMlL>Z~1^y&Q79da* zD?pLeC^8nP8QLgzkuxq&cFyAcP(U6LOC{NBCis(BPyv*TL5zGcgEq)<&GC(H3!y(9 z2jhH%u2!Ie#Gm;R&=7$NSN`JSAb@#Lj29Ocq8wEhzsMY8+?3PtOSzR^LOLf3J-QC}HzO#qo{BC4ruVjQB>O zkwFzT^Ox{2Q_kOr0DR zYX-cWSTmY2`3cRxMdB~K{~PHO=m;O5g$Okj$mE()jQP`&d<$m&lJ6_s*I&T@1pri7 zkR?mZC^M!s8^I_KZ*ZiDzexViCy-5Fd={eB6d?=L3~d}7#L1%|{-W~dF2bl|d={eC z^tT$Cnjwwy%ny##|Iglgw#RXt`QH8l&x^gZ&x_r2&hGI#Xl);RZAtcu-a|_kMG_f< zL7*5!A`v731|$L`K>}a^13(Zt2ZH1v=R^_&m_#brk|M9H*R3oqy0 zXw{Yor#s{?JOsule-?wVcNU!F6ni-(rw3z%IkPI>F@NEh2F5CXj0TJJpXEI7<5@N#-iw?@6)>l$;xU+4fvH-B5V z3dvIwRRXo{^Ui|foF>TFobHTTv)4D`hQE*jjCTI)2{Z!8v0&`gFd2=EamBha8XLGy z$FBGb6~O4{&yfpMy3RWbZgTRF8FIQXt`l#@YTWS`;xae}__KF`>PncX?41R7IoYL# zDW_wj5sY;@cFSK#E-)tebL0Zmv6%hjoRN1>%sH8Yy&KnwH)A!f`4eU^mTGYv8~izQ zfh>7S9N^G|@eYb8r&h3cqt>$ZjRCz?mx2wCCggzSjd;0tPYvYnM0=&fxr1+0LB)7ybDzD)A^$S z^8Bd)h60KmVkxP1R$HTelkAEO$tLo3<}?LAmOqr&$lrXsIjfq2t&BDP7#FDG7lc0z zz>qr4I&-QWW41=?G9znI`LO#0{BFSL%Z|T;I0T#cvjGsVBIfwhTwooU`!Vys0T2kk zq6YtWM+MN9g;0z(c~c?Sz+`K*$rqx@4m@f@{5bwlTL=RF>;a5H{wRTUeM0^KKofs= zYRDqQmW9xamW*Msmd2D6TVsfj*<}v~Ai+<>-=M+&k*$XFmc_-S|J#ZH*7dpI?=At* 
zmW2?FR*;cmt&D^}Yoi@Nq1r5oAqI+{kiP*6ptAh!r(0b%0LG;M+q#v-kD33GF~a2k z?%pK;+OiO`QOuE2td$F-=1^hskl&WSx3-nvz`)M`h_Zu&u+v2Vr2O?O{PkG__#>z9 z8rX>Ey#cOH7yv)r{TcbYP64F*e|I1Ne+Zy03!xi@0IJ0bjf($+b370fS{6J00Kc0y z{2_oUeky;~0LCPLh+k0tmJvY2kMIWoCTlPV`cAWnSa4_*aHQuH7p z;%Cbr07&@5`8OSZ;a@8c7~TAB+a_{<1V0=8u2T=l^S8nRXok!d6vk*KfH9gevNf{& znOkJ8DV!;m022J{`5PdsV6yzv__G2q+W8XzNbzI%QwLyWj)FIgU;bd| z#ktW)AeYl<tCC0q1}Q{wx5Ddj3o<&=Nng{|7=@@y7#b za5%Q0xH0nO8glYR1Aj(iiWMW`bQ5B>3F0y~EBFQDuV28Q7{D0dk9UCzeggN$@&_J} z`oDpJuCD6p`iAB!S1w*WnCSDrw=XAuC*HK>yW18NS4Pf}8FO+*BYy^CiaP*paWfgk z`~iNqZelR5KlJ4<@qhgQpwa*J^@`?zvA`ee0#*Ddf0ZZl^L-f7;J;K7ONGZyk`m(g zj*lNXe5mli;gS;4RaLbYE;Tl_T)Wa(Q&(M)-`IE}F)0NF zP9-v<+`hcLy!?870@&9p0x&lC(+Je?>+7#Sbs#e%Jv}2cYX^j^%!~{;O7o>8#>1bX zPN*IpzTVN++S=JScynlEd}?lSY2Zxudnr4w_RUQ8o=ZuHLp1b$5`f~}s240KRyT_H zW18gB{{c{Xi-X5F{@#gC$!=)r?5aEN%i4RO{A^1{S9ece-@w4&Kvz$9*O5S=IzAyW zDK*WXo&kcZil?V%W)w8248p^gW)B!${C$@X@w@e~3xkedUjzDo*azt8J$NK@=dPVd zAjK~Z0qhwb?wXmL9Ge&$8yTILo?E;&00PG+X6BbB>MF7`fWPbWH~KCr z{txk>uEG`+FGgCfMNZYriOq`hN~JF9udv%7{6G!gSHt(z@V82M^~djuOBAKD4)ER7 zv_lO&cc+?aQhizPUmIC_d~bPXYJ7AwaC7M9*kngjAdnKD0CSeK^voSQ0lsoF`@`)& z(gVQY4ZGJG9Q;c#&ZE^_uxZ54eE40X+x(Sv%7GX=!OG@$vBqX~W3h z^fb7>sYwV^vkSK_)MopWb~g_#^mU#0(ean`3+ZlA4`G^jBgt!3th0g%BH#S`pZ)A- zKmF+&Z@lsP>#x7|+E0G+;~)R{@BZ#bKm6ejk;(6W|9js9`d)qYyWjoJcfS4YZ+Qc7 zTUy$|E8|bc!2czuz1MN~)6YMAa2s9VNZ{7+c-w`lKp-nV8N~IcXYI)FDfB7Z&y>LV z`C%45i0j;GvDw*~jSC=$UytEp$RW!g{Q{C982MXVtWNR60GyWQONx(+-!nXXs%vIu zW_Wyh7CvTXX6JgYT-leBm_IZ=Kariya{-X?3G9(~BS6L$WQ+$dSmW=f$RCH_kA9@! 
zNBGn5^T^*j$^Lzvx4)Qbs!fVd@%P{R`gdRb`h!(;fny^hl)r-ssem6yoLoMjYOmb> zAOOxo;0e^IVwO#PoH{*>{ZK>k*$ z-+#Ot!nv#Y{(`#Idv|Xy_a81ibnxJT0|#^S3Ic)Ci77t6-me4Jp=;hVetWb3#BZ6>g`{2fb;Vmc@s@_rRC+cyDnYQp<`m*8xg=)UU~J^ zvYZ?;*JA(4*t;($C#SHe2-qvHsi}r5t1t3hA^__A(Iqz4{jv>&pi9%r+F!VE4;LZxor#6@moHzwaOTXJe^CjgDoA{g8{2%hS$wYz;D8Qdr0LhoYSeJfmizhY0_^Z$UwAF`r`2F^` zT=J&@sPlJI=>Mt$!>5wBZ!baoD1fC2@$ret%3OiSXW#OyMXT>>ztTdc|2NQ zlJ{QPwzszecyX}MC4u1&Js?a0n_&4%x}J`|=>HV{1_p@#6Y*zBAoAymKn^f$hn~R4 zNE`Vi{YWt)f5ycxS$zmrzu)>B7wcbAli~p#@E3@WBi>K(f2E4}amZo9_FV^MfpX5T`yzMet<-XpP_O zY;SM_zS3`Dj8*s{1skRx`)n`@~h_Ne1(@=x_f%t&$sgc zYGWQn@Z|vb1E^eUh#@jpf3gwr>ukMT4E*)>IpGhMA0#Ztr2~HI@>f}Zpf;-iv*k|& zipHM{0u>}whTu~KiDq_f6lL(1MgR=v|NMc#a9~IQ(3U@CqFxPM<~vf~+TGpL)7f~H zcY#cIBZ4mlz#W$K&CXpt)z*#pb+leNs`96NyWqpZ|7rYH=h^XxPZHVqKM^Plf6fPC z3WEk1M$CLLW|v0u>7G*%K%PH8aZK#^>u(w!8NTV9zn&iC?{GtV7XsMV%)3B_Ka6fL z{stNv5x>sPj*hm55`{kCF0E=&GK zhll9^OgCAq=6`hjrTmpPwRb20b~T@G(*V@^nnu>y|H0lPgd%({8N=!0&1`!H8{6QC zGxUCdUu)y>U1a{pxIYX3rw+eM%i|{gj_dx9Jje@7LeFzu$dkYu4g5KFflz_5SiWGB z*>RJ&6GJ^v{GY<#DWU%>2#k&n2d?7??1I0>_Vx~Vln{ns*iLJ#R{>=E8{(1W5AJQe z`tzR~x54o^HR6{e;f;TpqTh` zN}z~84trCrEK0UU_QVk52LFfrkqx?TX8*{c+3RO*(&5jQ@lF)qBp=*^E)ZpTj}em|4dGJ=pF#K>!n3?hI)E8>34j9~!?&&v4)hPeZEe61*T3zK{h!7k z9e&0BZl;a3n805$$t05jkX z8Q=k@r{-qxY2}G=_|3-f%^O1ly*;hxvgcs`*@gcjs}F7a1y{eDHXVmy1NNUciTG0> z6L+>^)E(fR|IbbMbo#Vaxs)F(E1}Ah(JGT&I`989{${!PAMghb5cZ&*^XET)wHJQ= zH9RsoH8VK{H?ZJQ&H3rEm&u>%0G~Z$07U-oR&C3lUS7L*_s-Jd{LIwE=i> zvnKu)u2!k{9F zpNij-;{KXxH&FbD|Fh!{Sg`3fhQE^kauxpbzijwJ?Coo{w%X>j&L+0->yJST{|Eey z69;I?A3TBzf#dILYiG~E(5+GY{nwq<2aiAeXmuLjC_OaP0HN~u@#F9#;=DX~O(h1{ zD8UBS;{KH7hk~Eb|D~pG*+VuZX%CTK z^jmcn8VP}6=8wF#!K%^_jFv?T4aENi=dWrMe#we6KnkFF{001Al}nz!$G-;D)s<0j z@=R0TAn-RiySTiv2K;@vIz#;*;ctiq@D>RH8o-S-H8nL?o;p>aRGuW5L^MX4)6;Kpu!&jSk3ZxW8iXKP1(+xn$mIt3Ox8+ z*jZPXO%CWk@P|9L#l!Q@+VHEmKYRY_@sGo8e)G*sMZ59$m84VfhjW$zjUM^4D;9n% zWoPzow7d|Zf#RWr zVlzEY>3m8s%OAS}W@muUD1gWxaeynQ&rtt23G0s=t<}{vU0ogUAU-^gpUq-NuP+65 
zuz{j?8UChJ{;1Fb|5sm8v1QAfG+e4KFV4##uQfputN|oO1kk1@({f#u~lf1k=g=dat`Dq7{I}QZ} zrzi3-hzH(=egvfjhz-Z7qlM3l#G9WWel5p@`*A9e$Ok zUo`&aao>K&FSf*|B)jsg*M5;=r}nw z2|QQ!erRDK6iA*w7^}1JfBxYy*crn;XyO2s@fUs)?1x|dvk*cb9V@#6tB%p}iK%w@ zlX2ah=jWI1uH1k4=*g3NHvG8;aAJaZMj?N9K4b=9D+l<+7YJYmye9YDxq}K@%E$X{ zJwrp&3%74JR9BQ7++TXEs-{&zM(6vxTdy_b z9XNRCP$B%1@nCgjMMY&LE`K%ur?@`_zgxGoDIhvDTMx+bN8BIG8gV6a@}wjFo)G}w zKF6a+j&>a`zS7Xq(A!-LtMtrW{{Xj3vL`8js~=o?pYpc}_}da+4b#8=b5#``n)_>} zT66rt?~d;O>5qTBbEjBk5Pr>BK>#d1x_^h#+d@)Q+ZIpbFUf80d`wXbwyo8@QCyiR zH8o|={*uzNB85Ms5XRDjFd?q##0elKFrgXv)3;zKe~G$%cg@Ng?VqW$h1=r&xdo?c z>slUswl>z?T7Sya!w1g|%#Vza0a(l*Jn@gAsS@tlHaFwk51bu01jD<1i2s90ATCh2 zu$}T}T!E-J;nXb{;O~n|{+;-4tKzDv5_psD_=ATe{Pws1_>ceiPY@nJ*s}-k)+H+Z z-T$a|kHQ}Si2OC|O9a+3Xp7-r<;NdJ{?`8Z$3Onz4}bVQFo+QD-gEj&bq#vJ;-g3Q z??|;Agf&b`U2XsU7wrFOS?wtjc}P90Ag`^_M4B)mZC_C_;8$``;V(5cIVmZrwD}=f ze3<#G8ijjP!~y2#=jHp8ar=Sg@7(Qs3V+4;W2h4+Mi&P9TDq@YJ+`NM=X*OcA?$`v z^71yy%E5#Eos%QV@&kTarUdH$0)fVI_!-^y1fSpMOJw1Kg)ThFQq>AmKofcJgWjUJ zw}1Y6^6d|u48Z6BzxWcu^XJdMn#um^s}~S{1L3z2D1i5M{*>`|zlx%w zG(4`?_*33}KpwsV1CGC=uss0$A%4IA{qKJFyX9rVV0LzO%NYRh*pb5r-cP3lntDs? 
z?9(CeCxA%ze^7&Ut*(uxqRa^?8T*bFBYx$_kUz>^Nmti@LQwfTq&qi^5&nn+tb(K6 zdHFt`zcVWzu>74l1KkR}d;k8G%iwPC&g-Y2l9xeEOx(uDOFB9_J1b9|y?%XZwz09% zj9+lB_Wd+ z(%JRz{{g|8zk*?Skr;J=h~wVFM|USM{%^hyp49nM6jx6||3~=K@T2^JgZ=Gq7bt_71^WTOf@~3i6h8RZC4ZlO&WyjoDKcQM z)k-5gMYcx%RO3-f%C3SUieGJUVHRPp^WXmM-~L_YZw(yaE%?vi|NMc;Dc}#cV9){j z^Nt_SVfj0|^uXZ%uy3LJYQg_e_ow2A{3-Ym{s6zH=)7h|fWbiEDzLU~TYOw%+%||o z*i~9ma^T%}-v|C6tdejK!^4M<9uop}0D-^E$`dD?@P~JS84oyJ@SFm;vU2Yp?h*|k zfN%l#DSy5R*o%Rm&8~{GW>7D$uvG0{)K#aDeCo zk>Mu|ThCP2)-I!4b+84a0r>p+bT&)^i3bE$Uv1nway0e(mU-&f#o;_(bj>Sku9^0vpl`|g=D=>DEj|L4s8A$|bTx@7shAO83I8kG^pr_AAZ_HsA>iy6D zqQF6%$*GsYjIwN^9#^Q*6WEv&qjC=ak&>L1os;+8PV{?iZGZR!>_rLqLl=nrHP>J0 zJeaY2ca{nte8O8O1Mn-5(ecU2fy$GYuUu)kTHnxct+BPm?>}(nbk(VoC-9M~Ag_5@pqnsr)nprHZEc>;D?k=4 zjOA?FGy*RcpPYgh?2=UV$9?d5kSI@Pf9LRL2?~C|pBXZGv3d{wMJ5F>+RqF9u{IMe=q>U z{0}F9ss}{=Fm!fypJfT8ucah?F+Dv!H9ZAyC?CFc>&ErLe80b`L%Er&e!#9WPVb+s z5&wty8D@X1`xD~_8^I4A%-m?ot-Kky4zJR>Hhk~iDh|BF1H$KBN*|Dl)-tjW+5tn$^fjaKokH~{!rMWox68ts6LRo0q$P}adK?G-`~O| z(*wlsbL#(Gxj#CqBex-7^A~n-Y9HKN>6)In*@utmu>qZ=N*=s27@qI%?Hj%oXgL4m z=@aD7as}c9K;(~jK=gko1{20|TC^h`K9#>szu3HKQ%y&|j^DzXVC==E2VxKO;ch7^ zhWycQ>s18EhN94nb_Lt9i`W|x(~nKY)TAW%b5BV}TNeN6pS}kCOaN+25XA$s{2lUV z=mZK7!z5sE+KNBGZ>{DsJNjDfy=aRM7)dc`_!0i5r<-pK4_%)i4@v=rYPbzQ*O;4~ zpPQVRoS1+cA`|^EM;K{3_ra5==SBRfKSa`h3dfGX6Mb{@ zIR54xJGR^R?ull6-=rGW%u_$W57{I5&7wnv8`w?PiZN(iYSY;Opmv%dVsEsgD5Qd! 
zmiQ38q@&G?u#<(iGhyS&0HD4GQ8r=zo#zkUdA~P%FL8l3{2dbezt4fc)ytO|@5k=F zm}h@vJT>8WzjkVRI`;fjP@WZHv*IUaO@B~+o0TvyQ{f6Eu!!zfDc-R{M9#Hg@51QU#!9h zpR4M&A1r*~H%pJ_7H}rf4zH-gMYaC^Y4+!5*q8WcMJ$7NqdmLfsnzdGZkngxVj+OA z1~HAnYs@dy$R0F2S9|~gbj=?OB`_aZ;rKLOB#Q!0GrU95ZKxY5~e-uDV{!I7*e3ZZaH*Z}7{@_I~ zupnz9k4+puUNJs4J$<1V9zZ38U2bd{!fy_KZF+kzU4m70`Ei!RBk*uEatG2|;up+l zoiUiju2QJgCOd2Nm%jKynZ_6Zlqu@Yhc93FfJ0CfOySIEu-B7X!x$NUlci2p0ReP;mpBM5FP8pPn=wCSz9T>A7g;jidOX~jwW zbpd&L2VZA!4Q_-tHcmmH8}&zNCu0{_cDHvUf^AtWq2Ai;qE=-$0rygI2>$HruRs6# zch6xJ0*eq_1=DR920+4}!eCEpDO6S3-d+YDZv7v_AKrR3;HQkf`u-o;dZv>R#85kMTbrEzm`?ZMM0;Pkefef0Rr`KQ4ERDTPi zO#%5Kc%~lsLjbM#GrB(oKOBD#J$$4-1pSLG*@J`UDk|RJvLz)2-la_L))m8BG~lta zrY2GbxAaif9=~dARN=tRwnW9d(E(zX0NS+(T{7F;l9EMs1m@;|bQ|VRWDNd|awkON z5435&ngeB(wYQhThaG>d;4qDL;P11~R#z`wBKLh+{^;JzN7@8XyZc5NG_F8SOifMX z=E5zv6B}}dyu6s(Tn1z!v_$8 z^cYke`A=%|VpIy|i_?$17i`X1QVIy7|I^nX`ZkO*2h=!i`npD%Zb1_iVYr3eZA`eg ziXZ6zR^e{h+M4SBI0vY`i$i-GhrS3F0toz308RYO&5h@@^7!fWQT~uU2>yM_`UC$) z$QpKl5x+0ZJ1`J=F>b;x`ca<91;f2Z%e-EPwcG zBrQ1OPalSHeh-)s{>Y#wo&oC5zKBQP2lrdkq7`z@L ziG0FG_SpwIe`E=)-G#GSgOE9(_AnONhM7kG5I~+k^6ebY>DIrc2K)s45&VEZN*^=$ z0(%O6bnv|fV%zW+R-9RHi!+j8w1H>sH%imcIK8V=xS96sNzsf(#i5h>( z{tMyv_JN57??#qCQ?b0!^qt^pH8fY9uhMgj#xy`sAA*%N@SpyK12E12Az1R) ztu8(hGYv*E|Ks^njPxsg57r;r1n^g%+`fIW&cL7j1`OW+SQ{J8IS;%6q~v69nef!i zhffvk^qX?`?%))#wid?U)8x4tbq`PxG?1RbN)}k6W1Rwqfyqs==~5s!k@|g5tpWUKXw0wjI_w# zY9J@@;YUxO!hN|1xaL(B9?GK#_*eoBYildZcWy7&K6vy5?g1mCo&o$4{T~7FvGTLK zv2hYm9^`L+0V4QCoL->sUQ#(U@TcOpxT?^n-18+9Kk&HV{5bdsZY*R5{&*I=8x8#N z#hQ$~Ih0z-rInW6MgrL(7(F2RK%Kwtv)nWIz#kXp=9ic5tgJnFG-2isT_D8|os6~s zLH_FMEJj~^^pqKW$<_*>hWy=IIRl?-_aEX+4~AR_I2|P5;kp-}QU(+7x;P52`w(W$rQXxc&MfBLbWeFzqq)x{PfAL-rnAh_O`$W zAAS7cN5A^)*{_~9)1?Rbchim?740)KghAvFw-mvr0@!rk2lO>H2|Anga;DA>kI`H! 
z3q3!Ik|r=_?@iJRDJ#4EJ0y^7LxTgPWAH!!v&tXm`>?|6ckV#%-&?tJdued+#;xJe zTf<|M@Hy7#_g_LEL4J-452y|dz)RK!`ib}B@dN&rPuHRM(-yx$M_=IY?w#8`%kXBt zdq5gl`@)EeV=rKcGeDpZ$b+dRJ`OlLFfh=M0bld81zukS?{U`GAci?0w$5q@p1y+q z4*@*2^X<3aczfR@dcL{2l_&WA0~%6OPb>S8Prm%@(@#En^638RgC^AqpZFAD#p3nr z0U&Tqr4R6<&hOb5%>W++bAK}aCPGG0tvVf%Y7$dJT3T7z9nJ-kS)e6TD%_9pMkh zMKVLv_|v=}JNlygyFcDLaQ)_un}IR9@q$la;acPFU3%L?p%0fWP{s}LQRF=g?-2mE zd=vk~>rZ|H80vFCG8e#R>34$lZ5Xl$fjQuG)AEb8k=830@-zL(@J^cp3V(MW)Cu@o ze*XEdKYH?T?e6k}854htlt1eGsPn^R$859e$wY{01V+dx;!mqnY*d>ogwvUk;|~r1 zK{5!F8*#)3s{HXUAO{f$f4EZn{B!c9Us}r78?v)%=@A{Br_16qcKS}A5#jgA$II0J zk$D;Nr{QNg`r^RbbiSvn=Q?by&_D?U|3^k%;`;X zbm$(C1F#2wT~ts2FZz4^^*7%5#y9p>U%Yhwe9dwF%~z`byQlL9_icVH)1Hk(rr$qGtx{LKnGFL4I{f{U7K4 ztQQ}|{Q-Zs+S=ONj%)DY+z$s}X5>Zw$kX^YwrnXlQFpPf)VF2JYsC3c`1F5Ydo8~c z?)pR6zyGz@UVmM;FtP89&n>_LV+MY6)zW+o-x{p(cmEQsJ}>}&KA0DfaQnXM0B?Ur zt>j-i{w^-zJ9^h(C|8|dbBQV(flNCvMi5V9nsywT(XwFMuLLDfYl1Q2u}S6SE6OAJ zfBMtMxQ+t^&HQNqt`f;P{<2O_ss67*eOtzI9SE#92 zYWA@*{#K^sR8%&Oj7>~VPtW38^a4Bp8Q={T7Z>(zzakoa+1VdX{vg0J)PDFGy276d zA4~zsLkHwZX$l~|rzl(K^Y|7xJKC%XP*8BX9&5@5|UFi9QY z>b*}|toXxa?*|`%RV$fBR1poyku_>vC#cnyKX8G9ELxIRV|mxu*r!0C6@PQeUg@)E z^c|>uedo10{r=1ovmdGa9fOyz-5hCcudXTDu>;;Ylws)aBZR-aceh_b_a_>C4eoEP z_8Z^$v)3tq3Vvk$du<B8fARr1 zOf!Fe?uV|%f+J)Mfg)oI!*X1Xe+^-5W#uCre<2wCAK?#&VB?$FNbOnGGphe-fOka zN%(`mI$d5^DC7^_AB1PmN<>ZFd3xpnOd7!jPBRZJi23`*pPyY=Sl9#WP|4%=b^;>_-1kTT6Z{LQk^b?;C$!OZ*=Icp2TF{o(`oJ1Zt6vZ+@5 zSqbQG`|`72efapn+Lr?UmXW^;*8b1BUkLM-bs~S>jO5EC>`RhpQ!M=&daKpCy_CN^ ztX#xj34KuQi!bQDm36~eixq$0c(s=JKLtRIKk?|h(fG6K@O0_`AAPAWfYtTyvIG7s zizR(#(cRSM#TYV0VQ#Mj{$SJoi!X%3FAE(Lx8x51%!lz80(d|TKf3lcE8zZUR+c+Y%JtH(jan!j)Sc|O4p01P01WdF{}{kd~6rwe>$g+EvB&z3*f(NhRa zvUGni^kP`O8d=iBr2~H^bs71D#;>0J`lAm%vE&c&3&x-8I`ua3J;pL5xD?K8%<0@H zl)+1rg78P(-;MBY*RJMtC_7An4Bh+510&A$5WWwb56w z_<%?LUYh?yEm<85+M11D!DCpDzfu>#*8G`saz(qU({WC=RETr=xXW~B{26l^%GUHXq?5xi7|n<=4RUQ{ zz7vA>YRxq{M>ttA+Vj_f{_nYAOxBCFh&kjBS^S&7hKIA_wk9N0o`k=sH3~oV!8OnV 
zy1)7Pm$m-Yq*Sj@OJF0M5gg#hU-JABucz^M2PTJVZZ4;qsD4J+C_+qS0jgG8!F;L> z5*js*L^+)p8UA{)=XuuV5x?i9dJ`PQT=2*GLmK}!J}!uVX}mbWY+Q#FT>t8UUax-4 z#TaAR^Upu|oGyUr@Vlv!(^StfP`5 zdXh&_piQczi;{O&3Rruf&`CD*bbj*R`-pI zLH=yi&47R!wLSj|7QiZgzVo=~0RA?;<;&}y{dBSSygwSRDdH1?8LbHvU(EId|AoQczOK&W zX+Apsn)rLMC~k^vYHadnr7qDHjh$Zte+qpmDP+}q>%A>0$=mmK&)*O9l$r?Ea*EW9 zkTJ5hb zUA5PrmR>tJw$#yGoCGToSbr?QD|`yz?|?!`fdv1H7mF6{v`vm`{@xN?nhJ_gUCmhe z{6$Ou!raXC{Nf#WL*2sqnH(jPHGg{LRnIE-aDP#65WY z^xpNRotbHAwPCr`q!C=VQ{ zEvY%vdJBFG*8}{WG+%r0lxXMA)H4hyW_5Mr>WhVeCHPBqi?_*pl2-0NobSE3ch{~= z2ft5pv52)@?Hr!|;>x3#~6aP5;uP zoY5;mjp+r(cf zf1&8<VGQuc3&_i7=r!Hg_Tt6f;_@R^ zjiXEWcZnC~=lZT4*{$0l#_7jd*2!oM)Y1`83+QUa5z+XAIw>UX5HPQN;7fbAUj^nYxA0wcdg{U=}5o0bTtVWjX-~#U0 zTY7UDUN!&d@dr;H%(b3Aai%KgH^1p|%p7}SjPR!mD3C8&jRM5HtqG!f^AOx21i6d%R*y}Kg;WkMni;Huq}4I zb&5IuGSxe1A|*GTQPfH;&CVBPM97$7r(Xz0?V|Wd4Dx3qV5S{_8_t*!LU-GU5Ukk9 zXvAZu9mOJl)&c@zQDg*AtXmsNqcyyC`eAFWb85u#k=W$VN(NNOQlkxLB#2pEN<@tj z(1^!QJBm^MECi&8MT?OlX3^RR8!c+}E}%vnABk%I-sZ1`9H|Nk1&p*&&>k`>k$PPl zRm3b@pmH&yV#APAs9#7IBW!4N^C##T9kHl4Y7~W=1x+In5}Hv|t9PTQDoBfR{>&ON z3Prn-;m>?2>tQs1gIRS=MA;fmXVZkJ=g+7SD}t%lqRYr3X1vaIF@{mVnB~u)5sz3@ z8+pVGm$FVqL#^J`jbZ*cjf@J}8(CFMIbXKXh?v0z8rsXYHjFtr{q*&RS~***nx=iS zMvHj#kH6$<^{dk^UXMhp(L_DwueT@TbclP$*iB5o1B1 zk&cAGs5@ToM!f*8i^Fr0E{l9kU#Z0*VPy?0W;rThsTs7R>j*qPvncmC+SB~ zXcQwBF%}fRq)W!dsFh3754;;=i9g~RbWsd3)X0#eA8tsaWqZAMxA&YBdtEU@%&UMg z#UE;9(1ksWHls~@y>@pf6h%bc7~_w{E$ZVo{Dr^*plhv+tr04<&gsz@bNpr5Nfcei z2y{0Q$2&!tVHYs=_{*}#w>qAeH9VJ0&X~?%AfF3UaB#C0!BA~f}YVR5@{4&Mh!91rMzUL zKzZs`iNpdIs7ucoqntmp4El8ndM&z)3}WW%eA&h@sT=M5nPgxQi*h52nCVhpzR}bq zFVaUpe+C(N5jLKY8-17|i?o|J@`#ySAa|uNBd0-J`-|aPtLKbr{{CMXqF#Y-$RHJv z3P=T{0#X5~fK)&#AQg}bNCl(=kP1izqyka_ zsen{KDj*e*3P=T{0#X5~fK)&#AQg}bNCl(= zkP1izqyka_sen{KDj*e*3P=T{0#X5~fK)&#AQg}bNCl(=kP1izqyka_sen{KDj*e*3P=T{0#X5~fK)&#AQg}bNCl(=kP1izqyka_sen{KDj*e*3P=T{0#X5~fK)&#AQg}bNCl(< zQUR%eR6r^q6_5%@1*8H}0jYpgKq?>=kP1izqyka_sen{KDj*e*3P=T{0#X5~fK)&# zAQg}bNCl(=kP1izqyka_sen{KDj*e*3P=T{ 
z0#X5~fK)&#AQg}bNCl(=kP1izqyka_sen{K zDj*e*3P=T{0#X5~fK)&#AQg}bNCl(=kP1iz zqyka_sen{KDj*e*3P=T{0#X5~fK)&#AQg}bNCl(=kP1izqyka_sen{KDj*e*3P=T{0#X5~fK)&#AQg}bNCl(=kP1izqyka_sen{KDj*e*3P=T{0#X5~fK)&#AQg}bNCl(=kP1izqyka_sen{KDj*e*3P=T{0#X5~fK)&#AQg}b zNCl(=kP1izqyka_sen{KDj*e*3P=T{0#X5~ zfK)&#AQg}bNCl(=kP1izqyka_sen{KDj*e* z3P=T{0#X5~fK)&#AQg}bNCl(=kP1izqyka_ zsen{KDj*e*3P=T{0#X5~fK)&#AQg}bNCl(= zkP1izqyka_sen{KDj*e*3P=T{0#bqhW(xe73{kH@G$Z@QHW{K^fj1<7QQm{m-F(Sk zboXDhw?3-*`)RbRFWVp8{B4&V8088?Ie*(Ffzb_M)bl3+jA{U*pFimWqZz!DaI}1!9lCI7uMa!yEe`7WtC|Zfxe* z*2N}&(gm`8ypiKE%Aa(B8yPyLZBfnN&zK8Qqj73b6XfScRv^0h6O6Mee(<@Gb(d|U z3Pd@7X2=-+%tqPk8%=?z=g;WnIR1>rji$bAPxSL=@NztV2BYlnjix{>@W**MBY&K6 zqp80E+p`(N`n4-2_+!1Ci9go3e!50yO$5GmB``+#(;*Y^CtaYUZ-WlsIuaN&{Hc(M z_|xfh)DizZPcq=QPJ?jl@TWi~=1(>L_e9IlRh#%*7Xp74&Cfso%m5qeY)H}%ZH@YA z|4m~8`Y%C~jwf?$@W;77IIv=j^0(n!psB|_b*0x#rrNC& zyjEeIFRJMzyf@|1@{_I<~=Fxm`-Q8y4OCDUg})L;ggNY5a+d z;g;5Md7}PyoH^tZMYY?cgFx+WT&A9Fn>}jmjvd1IW zBFieB8p74IXy=c*KPwk#Bv6Bni-b!{cjeHY#yr-2?yf{#5R zS)c(x4m`yKmUC^%bx{-DrR`e zYTWc`Yc!o5h6KIasccabKM+KhN$N6$0q(n0P2@*?f>8$Tcch^IAShnVENnZwr&^o zcsbQ=b42II8h-*8$Xs>;6E{U|ji$3hNbu6vN*?F6hER#)tj=3wd1mbKXL5newI|py zW@}^$2{r1td&MV;su!Ob`BYI`+@-}Je?}L`2$BGSIpm|ZM*h@Lj>13%Uws$~^jZYp z5HCkD$sgweSyd1*O8{++Y$+kdy>_)BpV|gP@_HYt&G7sgoBVMukkN)9W<{W_ktrmU z=z8-J>H0#^g5-@={=@_-h=~cbHHIuSDIn4~tbh5T>Jq#Bi3VW}G0`AwYYbIq%pW#7 z!dFtLO$u3-sOIk{=KVh|h24!NO6iCRT%fHnWTC@|-)7th43(TPQ;G>$mT2cM2!Pxm{MK6>Vn!EeYYbKB@OJx#DJ@i4 z;-j9wU@p)(fd>AJc!nnRh9M@b4GB#X%pC*#h06t+_%n^rp~xN9e%~mCg(3*e8w>oo zbb*FZSkutVUszpW7@5H)hp{;CMQPf3j>HCk!3gBmA;xK-s-1|xaJoS6-E7J!`P&eW zt^VYfdN8eh`2?Oo18S5&L!x@9;u-p<2_x?bVfTODhz?zoSn+=VA`hJDh$B#iOZ`M4 z)wt=?)@VB0F2Or+#+(Vp1nkvxfsqEsc#)@YMYY;x7fVLw^iCMDHXZMG#jGR7Q#!CWBT z4-qSq2tjOtR9mBEE^C9UZVv0o)@@QOE@}%}R?rh-kiVcV&_WbM%o0FbqeX%C$u7D% ztf$(y3axr|Y+O&(W5LS`c1A4n7sLfxK}HbsWDF*>YTFzUj{ zpX&C=8E?j=s5KO#^4zw`aK5NWNHd(}3dq!m5lXc{q-0{@z3?>;cr*At;WKBF@WsH+s-r%OdPpL89vnTp3z! 
z&k?RT<-l`cddBGI&mn>2lFf)Q!>o6sE~opLs9yzXT)_f+kR&dgrI?ufillp~7>l6AigN916Lx_jPMtH5GGeHL`)^nyV0bo ziv&TxijqVn3eLmR1j(VV6e3b6G&Uyq0|)2~Knuj^ARK<9Rf$}>sAtV-To&elJyT6) z)0DVs^7<(#a8HK?o!cgu8U^0LRVHq zMP4NtPUlWAX6KAv4vYmFEA&z($rDTbi3v=kJJV#8wUNn=Kk$!UbK2z7OoGleC21!+ zix~1d%Vcwwp*%daIopLPw)hk6Mkg`=x)5d^PZe~oG0Amn*aBQx}XOPE>BXD%{ktUHifDtLHF`W>WQvCu?SZgtWV}!LyoLZc4dhT{*(!za2AMi zsQlRxmY8J6n0KSY?htgbmgKOAh@GZ2go%Wyve@I#=mJ@xf>C5mAmsFDWb3z!Gkt4_ z+T~goo1r~;7_NF_gg=rxznzzinL5&5}QlJ9F!kP22y&epOtb`aySH)QO+c@s21GLZ;?) zZ?t8OGl~J^PI)N@f_8PUK|clE3`s$= zMf?OoyTVszbn|DJknsR2n}MuWPAM`I$mDcsWb3z$3;KB@;Fl<9m+f`9GDbOn1{Mqe z5&~H{oKs{V8#iZ|z?KPd#sX1;(fMISJKA|O3};5RRZ-6$-`xTLSvhS%;f)54yc^m2 zgTzf;9pwCVF`F6{R;L{O{PCa|0epwM@!^Uhu3%$Md!tt{41Mjjn6TzEvdE}4l9f+XkJNIb^HZT; zfL~a@7BS-gpwD#{3;-ezx>2Qr`Ls1Ed70C>D9OvEFbB3q=DdvUYg5|UZ5OR7n@@?NRN9%@PQq=oDBY@sn2%6IpGR*1CxM7H68e$C}SeWy|5{(7^ zI1lKZ1qV4TAfq|G7{k(sVJ^guzc5#o6chfB8-f)U#6pE^6f>ssCme)rjpAm$+*HY* zler(B^E#RT@pC}i?hb=dM4(ozYvao$O! zVNORzweEHP^njW$i=$@841Z!5SbPz~rAwFK->&Xn68dQ9@9FRFuZ>I62=oq$MNSHt zcVn`8W6!dPh!})0i4xy>w}dK-8rS!BcH;kgJ38RM-ky$*9tc4j7(@IC2)qc0_4g0Z zu((9SG7Yy$sNRMEl0n!~EBqpRGh(@xIfEMQ@>-V~vPFcNDDACnWkW+l*Of3hNYq`V zVL=Jq!CDhj{22#fil55g099y-O|mOCB%8?BnbQ>fSpHC6BY*Sl=B#Q8wldcE zV_cw$Ul9H@07L3D>&&TkjM*Bk%Z#i=<-_h1@Vfz{FFXDY;t*`&&jvueikRb1bAfea z?#Imk20$SEiW>ai9Th-Z7D6%Fgp8W|ut}fCN7ge}e}9N46TyTNW3S{%pKf*802q`0Z|hbT zKW6?%#t4)DyL*=aXv;##MlnZ9u~sgSnnQ)jLw;NS-r81v0|Pt%BgzgA!cG?fkn-2B z@YiPz;E$ZXYhWXu_XfB+VF3Je_h;nqIt7sK|J{KA{2_q0EQD?p0;m=%G%EfN&hbD{ zXj$y|1N?5<@P`1Z_^JF^0~nM1A$~#mTSfp8Kf)gXn5@Ad=sV3OV!@$Nz>%I)VAS~& z8U?KkTapcbh@UNg03hKH=ihYvg@3I)V081hZJWsb5&UfUyG}hI&)*6Qpcyh-P#B|` z0LEy>$kxd6XKsk&7Oc65@9ty=}Z{AGtGx4WLi}ODWI0rQFX8~Z;^Jj8_ zmiUqVKM=}_KOR7X!?6X$jgc?ckdrqW_%j+U__HF%9hVlRDEI~AuV28Q7{D0dk9UCz zeggN$@&_J}_`iXHuC6m@Pc%1QfpEScKE=0VS9acx1bC$o-`%#LxH58%%$Soi8u>FA zUjl!C->sV%jO!15`Ahs?KLBX-e|^28IbbaC$GSijKgwU_iTr#YhBWvumBdovv6G~P z`0e9Orw$e#J#wVgg*@5snFTiMpvRgsz$2WT)Z!xj{mMy6nMPR3~F 
zk2AXDZ!>c>4x*d`d^<5YrRdDL^Sk!=e7iDp_a81Ut*SY5?o928sv~7JmHCa0M-r1# zP~cP|Gs^AD%gf8J*C&8|y&?c(gFlTx4Zptr`cnroGt$#DGP8C-$jZ#ffTJ{DN@6_x z8R~@W;o;Vfj`r4$-oYC;MkiMa?9UlR|InWy_1lV)6m-4-Bjbt+I!&i<&GQZ0f&YL2l~2u`Uctqfy(%V#H7?T ze|iQ8t}33Mo|#e5pfU&#Uz$B&bn*9HKE&_V!!8Uuetiw-|6w1Xr}yBI%$>V-B7qdY zxcG#mq{`vp(=$_(6BA>jqod1p+s&4-5@Y)&v5z2?_B@DZVs6 ze&12NB<9mjkwLMwJ8Z7sZ|4cmYM&DarVmo zAK8dn>^XNN>%I4~GLgUJg!s6Ggv4s(ZyNZUoEV>&n3$ehoNKN+l$Dlnt!sXCpe-X& z;ZN;uTTr|hsUx$nHnD*4cr{MH9lF>6zEF8Z*^)~dghLu0AD$o{o(c>=>cHy2H}tGFSR4}&)M18 z9a?8^yht$odd>Wy2VB72fS!Z?tR3n8w6wI8_;?sAx`&4k%uG*Dj89EZPEJit&&*F> ztUa8blF;0}aI3R5Es>1Bq+dvPi+TvtycYe$App-IVU0v*V5R z#i@xo96@!$m$0M~=R?aBT_En~l#XsX=~Gr{W*e*e3# ze*MAf^33%3*vQDlWF7E#AR!g-1BsK%2UP8q+aCnLc?iQ?r)ktNk=dCI3m}i5h(8uT zegB2HKSJRCBv_T<3M4MhH$2>wdf>o;{Qa3QUCx?=FyFi{HT5p2w6Ji{jDj(RIktln z{3COw%cyN$VnhHV#-B326Ug66_4|)^W4M~;KX7sN-rd{F{f7$=9z1y9KyGec0W3fg zQ+$5EFR2VRV66C?pB!QcWU$kt;0!jV2Q=Wv@dv-cf>2&wTHb#)^DM0F=DXn-_=9h&i9dR(#lP>o^Gmz%wjpq(z=lOgA)@>-0B+a# zyXE)$58;?GQwuRQ1Ohd%_<+5IQWih;_AffX`T36AiRQY}@^ac+mqg=lPfzzo1n`wt zUVXJJCx^_n*ncwi?#s!^DJ&`~hfrNz4OdoQv`eVlqXeMyQhcXOE z-0rH@Ze)$|>3z+aiK*$yzkHWoR&ruuVnRZ->i?FOy0>jl^ra>xUbEnDQT2cE1}bbo zLH@h~2*uhF=)f4k@fRc@gD?D}^LMjA5wBZ!baoD1fC2@$retRI}3EST* zD8A5gedP}idfTrY*t>Vno;@Wam36IMZB1u!^34Y9Ycjx-e_q~f&Pw*qyA%B8CxEPqR9wxwpIHv@l4U()dxS7$i>E|QzX z2j3L)Cms$J{@l1grEUch6O=^i>;%CDLO{w}w4_w=-%Z|4Ej20M!2%K`8QP`TC+ zLu9W0WFz9&*?PGc`0MTC_`53R50)P!EXSp{8u=sA=>E@!KLIE@e=Z1AxKJ6g08lfF zFtRm@GI&cP00#4a{=o2XU`PSbmOo{(UJYI5J94G98vyKVyvn;krn?crmjd7pOZsN# zuAXY{M*KQjuN+nRQ@&mB5sbg;RKRcel+qi-|LOdZ2Z?O?GkGeOKj(umtzKgYBW6Ar zvrD7-bk8XWV0^N={?*qXUFzlwel~#vbirRw5At`op}h+M>}%#-Aj2OSP{5z_T8(fTa)yQ+pWBK5D+r8&10)kb=lqp4wYPVu1hzNUs{pe74e`kG z2lqB!{rS(0+u(TYOOAW{&DSC9)*5u>1cP5|YinciNwWTM!yj>f4*27G)W{!WH1p?_ zKr=&74cqsoS{)nN6GM#i{_i^MKl@34bhGn6Kk`Q%pmY9;o7xxvyIRiCWr)_>N+d%3 zz4Gd7uWj2&LVjLuUjBXz#m9~wE-EZKax6c7%NAr0LzN0(`-Qe1I`-n=3;wU+7~zk_ zFNptB++UO0Dct<;%|){QD0=fv@P8Z&CK7Z|MEp4%giYhG^IC(lt6pHppDKTO| 
z{uBo&=C6v|f1%@VaU41z7o0zR_@(Yo<&WU^Cdm)`FGl`!$OQapMjpVQI0-+d z;U9#>>Ml@Z)_AftYG;KZqKv=x{51`aDJu{$f2SL1C$aOtqm3OMorFGE1mi6&=v!EZ ztP6i9mX|wk-~I$2KA!kBgij%S_St8jLwE+^(*I@oBLEIu9KJO;(BB7lwR=@3c=gzB z9DiN#M`wTh_KV8jn`Zu07DSyb0`Qye?pOX_ak-9a6=zj=Q%eXBeq{ee;qRn){H+SW zvvq2H_^bc;J~lcsd=q|p(A(2`E_==$f74|3!9D%*Ch|8h&<(!u9zXt) zE+$10$305c(9{!GcFM z=cdQjhrcVppAkUte=2{^SODSuKr5%WViXe=2~;p9&!IX8`aozH7}N0nolyD=TS|mC-}G zBmN2k#4Q9=|5vTv_*Fg){tx-fsH$)6=L!rc#W zk6m4nS^tXo!vPop!~ysV20-L*<#g8c^4jXkoux&VzuJ8U|F^Jk^|ayA;rRmNLJldZ+B{l31BEC5tjAYbSnkn*>3dXI^}<{IFy?h^cxLk~xekf%-);1{Vh zF#b<c07^Gps9JRkv3b$}Xx-+9f} z|IybrSXD~i>EO+1nM-J}@_z>Ys=)s-4p89#7J49%r|-+kj=;d0aNyL%#=b$|Z*q2V zd1Vdw`*3xJ_&>@Y3*gOLBm`)H=a(>4o;p>DuUWu;LaIzIQsoU#$cJze!?kPI0=MR@ z_|s>A8i3>teJkJr_j3Mk;BsAc!M>WBlk~}a_{HSD%XM{SSeF)(w{MNF)`wrk{Tclq z$6tNL`&+hb1~kj65AX89?|iUV@P~7j0gWnu1VF_BQV*z%z;p=KJRpw2z@J^Q4r;Y+ zlBLm|!8w0801qnzu)-hw)H*Ry|NAL(LXT!&8f{$o_3DfbK^B=z0bEOlW)HrbuUd%jsNPH$ga-y&J|?1*4xc{HgBGu>2+KkBW-8xRm6$ zxVX*e`!JMUsy<#`R$90#EfxJA1W^}38Z{z+u+hTK0Koy086X{j!2_~G@Q;7;ou7dJ zvo2OG*tS+LMvFY=%mEb}!w%cmLs|Cr|Dnf6C~q48IzG)&LR*IBEnCc9vEA z%>EDfQym~~0pcE@vIWf!z+Zfc`+&gT`SS<$uMvqg!=DqpweaC+`^k#3!u;Igyp#0< z__QK~Uw0k$`8I9ZL?~kUQ@;g9{$Tta#UEZizuR66gGDlBuZvsb=dGX ziEqp{a)to*S4S86g0Y1+a-fc;XsEQzg#tnsN3AUeGWE zQ~%d|>Qp{iqA-L9m>L(o&!k=*j1^~autwC@b5cUE8?#JK$F&%#iN#p*hrRkc#tfqP=ydDa|F8IXP<&m9-l!FKRJ10lf>7T*>1p?*e@H0C2 zJ+0s8OJvZ(g)ThF(%L#cKGAaO6l+OIfgki9j*I*G>q&#apE>{s0r1N&pFe;86@)D> zAp8cxZy~4vJ^%m}{w^u}J?|*M;g^oUKaW3h_y{~v|K^+NDt_wnSDPckFSAts06*aG z>#x85r=_K1$6zeZF0VdA04zF`ok>vp|Ji%*|0t4k-+%vs`@?zpd|!T@bMHC#+Uxb6 z@IA-gU|SCCu2-x;0fmtSh^PPpiKIwI5(p6lLP8mYB!UDeXMs>6F1zHgKG*B)UH>8X zd8)d)tGcUuW_o6XW=@s8Om#n9p+EJWr$X1XEKQ}85XGLf8EY{55Rk3H;24lDME~e* zoJlB(2WeS%CPJZb$wodo3-q-~R1yf3xH78uEt@@BrYpcJrp> zSe&-7um;}Rq4__;-=X>!FJ3%@R6X%}^r_b}YY@BsEzC2#NalRpG7`jrI0 zSpZPvZ|IBG?G!(BfWKT-Qj(7NEn)7j#kBhX{a;51^?wvU;166Y5I#0W8Cv{X;9(n^0>dh__7p^)gV1F_Z4v?=@a-RQ{?8jl76SEBNyM&7GNb8Q z%VART+ATW}zv7b8lAUYTpx-NOp84jRZ@+cmFZUY2LzZB=khu-pcdn1)_{*r8o|o}A 
zOZmf<510VpRsft1;MM?GAVx?YKmcrp`ug-Gz>?kj_HLl`;lC?k$KERV%$}W@!MRI4 z{&(Q$(W3|MV7QBc@J9iR{vfre_#$S_93Yk`+l4?!fbcy52M7R0ztRDm{>^WuXGj-3 z3jp%`!MF4CPH=ySTvXIYOXGI!+Zq)ey*zP~?*7>HgH4fj{$3F0N8KOt2X6MOuU?Pr z+6jIzJ$>`0ye+v|t5eVk3T;N(82>^3w7mvz)2AS~&A^z07yuFv_}%X=*OStSkgd@F zqFuB)CUrE5`IwIqQ`Y8fqxeeAKLDR@!SJ!Ds9upJ0Jbrl$=CpL_ znxewOEx_Ll4bvptf^b{sj{=DP@BRl%_w3z!=>f-|V+Z^k_@e*ZvFYsfBB2S-?xOn_7=+f#?EBdUSw29~>&HsJKG-LkEbg`{n?VKLTJs z9RWs@F(BMQ&AMeVOFhZ(TM+W@q5hpKl4DX=CgJ{pb7OtHko)f;oS=5O1+em!BYaq3Th`V-=(@rNDox8I`wlks=97vE@1 z9N=CMiUoXA94x}DuDmb{qdGvacm1;E=>HHujXw=Pbbp8f=l&ReI)8WX-nr9tJUnS>V^vJd&eLgurp$Cp8vk;)+DV>8v zlSv5hR|c=4%gxWv%iFpcpYp*Y!QA|eB~hjO_w6a%wQFZSn(}&JYNiFc{6f0l)GFAw zfW8mI3lo0S{Q>j9{mYj)ULG9Wfx4!wN=??D3_}y@04$~3tvCQ-3`lxl13-8bjO#Es z3jF@B^psqLV}d)YM|HD)hKu-S)UOgFi7QrRZ_3@0y*3T~UPI#_{`ki~n)o9CQvOdJ)X~m%*p1e9 z9`@{mr3qeYGXNV{p^K>3_yYjr%exHxMxT?uSMPkS_<>;0o(&A_AP~|g!_bBX06)Wi zKT4bY{QT$mNc@)&enkU5Nrmu{9=Mc79$YWWpETkcHO2`(=8I9pjvgf?rmoD)+ORs6 zu-EYW-~ZtcJb%mqeggy|e|6O(&0AA5)~@6LJkSkOO<02K?dz{Ue&S?x_34_^r)z4? 
zoN4fQwj4SL&#Z6*3v8;|y}KCSVAh3d&d$!{_Hq1i?vL+%4fm(v2R-Zj{LtmvBgL>n z1m!@RpE#lSybM5cF+hjm&h0z%^K)}ubxkejI=ilq)OmG)pdXF&=9^!f&H|PeBzrnw6&L_V^?+WHwyg0&`-yYjQzm% zQSS%t55KR^o{_;yU*Goa=m?1u%WFsn82Ub0h1{R064=`Zf-ummLrh9mXeO~1~ByZL6>voQfJ2|ctHYL zO4j~?#S{#v=ChfZ++N@hAAV_*U#|DHjQ(`@NBD!a;JLxU+U(tzx;mN~YHPcJJJR>U zCw9N!|0sEQ;T-tJTT?fiJ9`Gl#$Xxds4ai`Q(%1`0-J)c4wwM=74nB3@Fw0f6Gi*1 zj#_(GKPS*PJk0S|SJy+kUh?mAN*|l~!n{Mr4?ABCKNu_%v3h&A(>_>eV*UV|n?Dxf zUX6jI6-wd{WThr0Bzw^B?K*q*x4(rU2;ye}5IrEyfxr1i=MSXbma#S?P4j^~f0RP( zae%_>@SDxc>Z8-3$Wq8tLp%a1CKti1rYr)>4BXAjMf&SpIGP{8mi7?K$_qi z9wu*EONxTml75uj)YL2T$0okC@1^*`;tz)(`Lg!oN@-0E(&d0jU`D5moxn!_$Rj7l z&Bi41SR)~!?kqk}f-j&HK;{C?{N;JlbOL4k5!wJkJN}-+hzU2pf*YdJ6xw&73ThTt8*XgUq~ zuP3^rW6S}6#WZfF4`v&Lzl`GIeAroa_H4hw-`I1#@73`mGY|YtU@e9M2x4<0fR;1d zi2#gF6a}l)WN>@HULEa77=N?}Him#O2j&3e{a+sVzO>BD4d?=0@b?t?1OLb2hwjho z95bZdk^_T2Z&`M z^1>P*>4Eh*2u_s9DxkIwO&4Kae}(*^3w&Z=r^l+xF>0odz9kU*--#ZrU%zu zXc!xNj^6K!hht->@p<;?j!S(5XZ9BF-3M=(hCr7Q*<0zcBZGjbamzd$d0N!H(HZO@ zm72V(7N2QS{&)|FeK4Ja&`B@@@UP#@aKBJP{yea)c#C`frgM8u_%Z%)?FAow$R{6q z?+gB~ANZ>U(z-5bn`Gdr8~kO6-mAFD3nl>r1A~L`4*)SfGCqEzBO4~e8h<)~!Wy7A z0Le07v^EcUtW#%1i2sx0NBm!P_f=d4jEchlVEMVDqY5I&I$;NDVl*xeCME5wX==gS z!LJSeY~}=nQs6L~fw+BpBCKqY4MtY(*coUT7PWpl*{oNKXB^Y|ZY%^T>DL&C(p(p; z0jSS{5kN8soFTGOsrO_cfE8!XxZ)3K#(@5BdU}RH zMi@l=I+4F*fDgBy64SQrg{AOw0bEfe#O;ItPr^^1#zh>mN_v|1uGj=38{Cn5BZAFk zWRAMeSWGzzN@^X60pO#rfA^T1g$OQ?@`vKWtB~unA-CejrX2X-rWQgJddIxW+&i6_ zk>Y3YXS4S50R5lM_afW7#-1y86p#(KKR+%)Gb^Sga9jz@TlzWJ?M3F zvhUm>{?9T4Ay0wH5Kx>1>#Hz&7Ytup+z*4wN%$K^|7YTl!l&_9cKh~D3LrcjuI=c6 zm8m7sfEjFnrtWPQgt|Hy^I>me%zWw87r&{!rx_90p0=pCHxkEcPoPtwblVfFpZU|A zvJe-vpa(V_Af1GW9uNnCzd;vh@Haf%csdJmt88dM3>jDEPvq|k`b(1m_4nXG*^@VY{YB`4(ftW7aBqKqZ+135al0`&!3SV-o*+6ukNZ?$syq+t*LQQWZ=3iN zpTbE1w2lE^5&jTBD}Teo)g^ZHQU8~7_)y;J)vIA3SW$84SVhHd_^ha}KRY%yI*RKq zWYO;xJoV70zC=-x>Avjxz7#*&mpV@M$QKcNSfjR8gePL*P2d#kE4 z;lqTNeyHoefLbGq?05XjpK$c@s`)C*T8} z;PfO2Q$B(W%(~(NeE{;A86Kn_JtagCsN$}gKXDmyp8-rGA(HQf9xCl>H 
zVYnU`9>i(0;J@(x@928Gx9lwRfuZ2rq!AkIJN48*1j2Cr5D zYmC`h;QpqjfG}8QCQpE2DUhy519`LXKkSFW1ecW&|0nQg%z=FeknP7Z@kborsdB^r ziS$wbC-O(}8+$_N)A7SyU*?6s$B$}w*GJGm?P-vZ!c0<59E}EIl57p5{~6nsS~LzW zP@jSbt1#LK5GNr#e`Nm4@*8Z{U7ir({U6^A<0>!UP)_)R5e=Jvu=%f~_hnCih3F@OX{IP4;A!^;P?)~h0-#M87_FT=%#t!!OUHZh0 z_{zI?vHP8wCR1PSpBa1(*Pac(wW~DvBR#M`02=!N5x__IrP2cPdZ)H*Y1QIp_eY<9 z5C;gWFVO!o{KlT(Y{tOvg^nNTd>@ll1_@+5e-;+p8+raL*@{LB_Cl%V?}Vcy*a zk>x*O@^Q!f^bNdQ#q9O&tSi7A%#*c!esrs|P98pt!*J`?W$3@TWUKcL1{e zSO$NLzTx3hNlEA6bt5Aq#vi89IXuTYU-$(6bo`J%pikd#0M3s-^hNJCP|G!&*rJ>P zqro!zgIF+2h58pwP!|Y05nv2xEJKqqpgn&R24EN?1P;T3?=g>u%zrsE+Hn2>kKT@R z^IyPkax?Jf$K(S#+KQR!uBjw{Od*M0n*!aZc z4fqqQ0)LGLe!!oA9~7i5ya@0S+<3@L{D~~MH=6hpv$YsSb0DSCTPvCn9tHmq0?t8z zK%KwV!-s|OABPu4hetqg{9XCo8Y_QDC&5p6_V(o$cKq2-J}7@z4&f972YxsW2Haqp zf%6YK?S+$IqDwJXd-?`Pdiw`Ucb$Qq2y6%_15mOIV~hcz3kLoufW77L_v{My*Wl}{ zn_F5hHr7;@txZYJ%ZH8RJI~fPG&D7xt$py|(W8f7K6&5XK)< z=f|eK%=y(y8k_fWmc|Z_(ON1Gz46ONy?D;7pVK408oBF=C+Rd;coO%gKRwiXnZtVC zS_E5v4z`-yyfFbUG`-x_-qGCJ)^YJl_fC(eLwiTT)vN8WDHGQFFI*Tv?`Ogf_?v+K zSML6B>kI9EaqA1Ei2zq|^9H1w7#oJQW?&Ee9V|R!-%IGji3s2a32-<5-C_8(?S=OC zwzi$o7;KX}>^H8$4S2?!#8;7>7E^nBF$)fQljn5D*Jy`@5Al$ENNDKiG8N?ZvL zExz$Jod#=tF#14S{>VDnv!_p>)52F>!P|L?*KdHZG1Zg1tziGL>SI+cEsc%!g|J~^ z!-4$*er)aw7;Ua%_=yjH<#S(ztNMI<=jBUXUBh&F1qXVt_XYj~9La+Zqz|UdKri^w z9nJ94X?PREVZvYZ`}l^y_uhLCUWKj!h|gh6t1z-z@WXu%Z;xKCFE7qslM26gNz7={ z`1|7AHiN%h#^1dcPriI`XLf4x=EDL5K*SID8|%cER6fS;oY1G?S68PshYT^jxkF@h zftdcCkXDJIakZcIl3AE#{knAUY26MHi4~gjG8>-o#*`ADDy68^NFm%vB)i_U`= zCe)@=`l5w5cEA6APIEK&b0p)XFi*}y$jyb<{C)7@haUp5?;bx?3U7cuv1T28UIzcx zX#Cx8+hOph`@c!%0B?R?XW?)BBEF>U5p#a%`kvR-VMEb&kTzL}jYiuEr86*ulg2>^ z>RVEBlbeS8@gYp=Fqr$_LNZteC;0T~9gV*&P#djk8D%lO8XI4-kuf28vj@SC~b(9l>>uJMPn4oV#67@BJ&B|Etja z-{j=|7mpv_zdbv3;??|- zH=})8geZWF`AUd!r)*1(J;IfkKcN33!R-Hr@fke<5IRu|D zK6T|9{7?zEHu%Qu;mX{{Jyw* z1U5z+S!2whu|sEcjaZ1PRN8X=`Xd1Fb0_`?fR~BtZEcmDcIyt0&uaefXk9D3|F6Ej ztgL+7s#O`QSEZSny8`hW-MBR7Bw0-s-JiAhg$0)lZ@u+*A87nh`p5+M%*KtG5b$-M 
zrXqrI$nl3ax{LiC>U$Sjff|CxSiZOU2v8FtW&ri}bL3`LbnkU#jV)A^o!;sI^=d;Y9YR@K#& zH>RL}0v9-N)UeC>dkX*@1^-6@MEB=3_r;wp8h@c8O`flrKfJ9}2*&N1Vh!j3@6OFV zIjcKBgTFR7=dtI{AzQ~X-5c>{OgF$EdNDefog_j5`BJzz;|~aw@mC0o1|$q~X^iN~ z4F{{yKOC;8z#off!G^!L-mM`14*|6AeaRLVn0zcY{_F(wq`o2!aPEHJMf`(OKZAwJi>Op?%s1!}V~227D1_l~978@=#%R@<)e z+y*u2Xfb*Xl3oxYrC0>v5H|fBiUAFd(#^7|3QsYma zk{P9~%8WKiwc5(Dw>Ns>Zy5Q5KDf@NO48Hz{Jr%ZjlZR_UHa&c&V9|3k2mH2K9bxL zlTRvrYHB@kfcIaE<3El+SSNJkPu6H&%4BALYa0xTY-Dg%C4)cD=qT)MGtECJe=lB0 zZqxph1AlLQpZdS0OXHD0vVtsKd%^zquzZFP+Or=?)Md)zr{%Wc+-n>G&cD(pz$X8P z_&M_@&sHy1R;Ej%!Jni|pN&=(43{j+SFfzaDDpQtdS1L&;{IN|kk3BEON}EZ{$NE< zBXFw(KW_2?kM53ymNa?pz@J53PCluTIKX=^^Z}p?{`@Y}sFNW)jTsint?-Ch%JkhR zmBA?3_bO5T@Q1$W|0Mk3cJUYf%sxCGNB(e4nG(3w&i$e9gYf3~GplO|OJbtBc?E3`}4*h z_#KL1K6bx+?<=~$H_4wUph;V+5ggzb_n$KUi2I9z@!#a6i9c&fA#1+T1OpzpHu4D} zE@tfBDAvf7tfW6Ds7VQAjJ?2RZQRPIJif52ODbOy|%{?E`zCZC}$DxI2&4$y(W8ytUn zyU>)Tf0@Fj)ATTKqYN>-d%JGNW^Gy7`!e4?hS$zJCDE_d) z?eE+$)<1tt`P1-ABICbyn35#o`zEhcM!`!?xr~gI+({M!Tq6QqZnhM2!FaYYR%EhnaJV~)OEUFW^8!*>;;(sQ~r_&e>Vt!d-onW zvNxybv*bh}BbHJ)$qqtgv`5S}MObC0Z0(IHzVKJfB=Pw(()jc3#_ONHAQKS6AGp2A zN$7S7e~CpGZ{O{!$xM#-EvHakX<5NI>7u^N6A}}%TUy|6 zn(F+GjL**ntog1%C|-M|~Q9QBjG>trs7Swbf(_ z<3C8k5=s>gkr7Ij7N?yIz~~^6&7Ce!@U&J{74IudT*3UGg}*VU+S=G6l0Vy)LH>xb z;rZ)2;16H;jDgHL+5x}B6|EPa^mm+D#Z7;Sa(YUcjv+FJl6rdrJn|yxcUX~w{=VIW{$rz(TShN#7gAy`i- z(=$Xy!k^u(1Z=e2qIPHWX3V!4vm(9n%Jt61)@<@)Hp(B&N%S8g1Apc;z8ps?e{yv- zP{4Te)kxm>$YB5Axwgv6s^cfNZ{JaMpeQlHvng};-cqY_MoOyY-12IXqK`~17)15iou_!}Y&E@u!b-Ut|Q?d^iX5yc# zC_H?)yy@CC_%UWl{cBglzwlmv`PiZn*D6SiC?Icac=f7!Vsv~`BSXUjJy*NB&K_FJ+3i!3iv)FrWN;89qlrH;+u#}ft*>Wo{_eA*x29+3@kbex z13i5MLq}h~o)hlc*MmslPY}?|f_G!~rkw07d?TzSDJP=E#?ufYqZu*bR)T5d@5SW* z;MO=5(70;t#@zialQZyVB=5o-`fiQ(UA^Ah@YPqYroZ7o{-bSgXk=x96EM7tR>Zhl z396BY%)3UbHk7sCU(vWZH$OYsS+cvdprH7x`eLIE{D?({{{zj~Jxk;ZSEB?m<5q%f zG&1%@z?J(8c9j$sm+an`#YxYaz@N{E;g1O@l`lMvQpEHxnzf_kQIN@?b0IS_iKN*E zVw4xg&VC%S&B+UpF|NOGf#}N}3(+t%%J~a~1&u&K815iM8gV69qLIWOX&K8M>L);5 
zJ#L9CMx%XwLj?-vYmtVXaBpNeNIoNrKj@q!_sC9#o6(`Z?pbh5iQizRmgLjDQIZ)w z6=D3@-e))(+&8!Tlnbp>#POHTo}kH;Tzp1ZE%kFwz9=n3#xy7W0x;?~B}O8UKL-KR zod8^P#uFbb>hTGSo8OZcrWCL zDI^sz)J92tNUuc7_1nl0vvGmi&B(G1LQ1KA0o{zWqT$V-q-6}m!rrJ;6l@lBjbupZ zMp>!ujk2QPT$uA`)reCl+>IQ6)>~N!qxBoit7{?3(P+7vo(OyX%o_0`SXwQ-i~?fj z`&R#Ol=149EG7#j#$W8(EO4inGmB^;goUU-WW;z5!YadVv30!AKxD;+di8CJ&h;5Rq@aylsFtxTt+PU5E9 z7$cECdyRmOG}hjWl)TRkATgEPqmcH4)?jE;=LQPkJkF)EGPg!<#=z z%jguzGzu@Hj+pFL-mp=kJawx?Vu=ehq!)}~&Yx8V<31&=7G6dUG3$N4X=9Mo4R`)5 zGVq9nxsgZAaw~7%XsMDL>BFBtlMJE=i_a+ZK3tbY>Ma^Y#4IjQxYIY0(j>0)%?PzM zQie5u|DOtBufSU>CZ= z4Pf~5r(9q-0~iVXDFVZnz{ucFb;99FV1)3eTws_27%}`Q7Z{cVMh<_P3tXl`I4SUx zaDM*#6SLCb0?en>@uF5BviO5ecu}jZDrZ(8!uT`ao;qIS3Pc=#%NM!es(OL~k;mV1 zMWE2ai~ArF`BMZgZsz#fMJ9jB1@djYnBx)3pK^hV89J_RVa?x9xf@~8T@wG z5Z?Sr`dNk_d@g3)Ro$opVa}fwGLAp1QMLMFQy}d5GkZCKKeKVMsjuo2{`{G|oXDTa zsM>q6DG&+#30}_3pI}^U>Mz3jEWxmF^@<4ocrRz+k2fxyuHjh{fp0+xj1c||$RzwJ z7wFTs!GLc835*#27-Ta33_5*ki2t4!nebbnPB?P-(;$=c$Bh3y(eml8E&MGAfji0T#^3T-CvhB&P6fonHrBxy96?>=vVhGkubrs^mP2`}pqz~m{xtNw0SNppLIBO}9h8Rk zBNKD^l4BSbgk~rhiTr(R3mNifYxJYVf_$mD!GoN=m+Z`?+a7z#$CWuU_!Hb89M~~N z`CD`@(9+_rn$m3|OX>C}+!kSaIjF5r>8trw~S^3t8p2Cr`6&)+AXd?F)IM~&A?KglAz@y*5M zjlB@{k(ccKY|!c!_WS{Uz7xo><359?r2CBQn%jMjzNEA()u;2m6yVb}5yGF;1?s)9 zt^~)O9@UKt9>JEzLTy_-bTzUq(pN*Usuu42QTJ!(0?h>K&4|?9&ikK#7R&QzLmY$)j>F6G<$5U5x2$+tII;QEFrZu^T5uytdN*2 zy!jLHgCj39Ve#JiBPTfd>w*&(8r}&ED9*Un@3M`gp@{ii9a(~~=I_TA{$v0ew{7qL z;2KAxkw-9Me$c@4x5VGF{V2!HrT$h&c73GrCvkz?ZO3tOOVrV5xjKLoZrWN)<9n$A z6ykGI-)m!gW#sW^ae>^u$2l?PXyh^pH0pEn$}f}^FTXPMXJvKq?_31(XLf;{AaM}5 zLow=T6fX_rCJfyqp59Qoyo=HGl6}*Z+hw{Ax5&%0Nuw0v(M33mruK zmf%8QpyUK8eW1F8H-EqWwZNO@fCmt_r1FSaT%e;dP@NYIFhSx@%O+r1!ks@a01BP( z#~%xbnO&fxF;Jm{+w6-bw?Jiy4SW8)xj^3uH1TJ~Gcc(a4KYEjNMM>^>ImR3ST4}Q zpQVQmMDC!r`(nu~5J70#NZ`*;7ij8*bq%fj1=R%xkr}LV5VLcim9CxZNM!KmjX+@@ zVjc!E?PUA~(*?S3W=l%N-=f&I`rfaNV43@hCqx2Gs8Iq2lyT-0?vITM`<;i8)NL$+`11!q zi(F3pVJB=!;PQmuO&r<;rn~UBFjI0Dhmpje?=Da#lp|t}MpEZu?q?&1;&S^N@~braY<#su8h<`^!ZLZ{<7H>HbB;!bbaInl 
zjU4og<#p1|`c;}^PCgkUjz4cM5TA$0m5GNSx4>CPqirgCgI~=Y)Qj!wq*?qZ&1+s> zFNi??yt+UeQ4ld(03D4s89JZ#qnU$xsdKH+Y81!Ejk89~dtTnIh(!LpxIjC|2x6{` z-bA*s8|}D|F`LTpr)hQQ8821ilevjUCVx%@+Il&SKe-EZG`bPqr`Gk-_3__rb%Sa1zh*`Xhk6(nxr;?Ce>N`Afg{5taeM-Z;Q@;o#t0)|1qI{eIqkb60OZ`3ajW_dD)E)|1e*V_UbiFJ~IA^-b&0*N{ z=S-mB0_iFqkC~ttZ?FL9XcV&X`@p7g#nW1@;z{3+{H~NafwU;|MJ|rRn!hNd!Wlqg z9)jYSDH6P#d!q~8^(4}Um7MfDfsluu+I2+8PCIa&nUOI3`SX!La?4i4xNg?H(U8;s zn5xXnvQ7quvU$lz zz<9bcO?p`enVk3o|L8WQLpt3gX-+h$8!Ms zAKFAya0OG_QDy zUFg>*X5m)?`-{cekR$tx{jx*`f7$?0ItoNN7=KQL#V0s1=HBRIb4Z$4KjmW2-{{DkV3Y$WT=GT^B=rhfJ;K`m{aloR zp+`R%fc!N&Mf^z?*Nrm9+#6Z;UOy$x45z#%3;6|-dIhh{@aE4nAQJ)976W;$e5J@r zAeYilBVWE_T++^q0l#=jy&UhuFJqYVXJWwwAR&;K!*_~IWaHut7x+9O&RHO8GWvcP z(uVfE8m22lTdT0=Pi$@pfV`ZJpom5jNA8V$`Cj6druK6Ef|xCp3aV2MfBr;J%m98W zJoxa7A|YdQN@t^6FidUjHk+WPGt~(XgP4CL@F%!H3xK>>7K$8=JY$xWJ{$QWyvD`G z_ImjOTp-qn90jw~h~Q6P!2%$Mr@s^#8B0?7X!N(WSQEpizez)Jz2eV)E%E0Z6-@ht z1T%nekzwRgu&ASvC1n?jpITD!Bs*gHhUce7zXZRaek~%z|3RA@ESLa99tBox zk#m^TozXVcVlYU!(@q7qc9F!N0m`SJqDmTunqYx|;(!=p@6aTUMu&87+NAjN!Tp;2 zAGDccbGtXbX&M&1e0%(`Qx0MXYybCtv_%OgUzFh9n6Sc95_iJAk;`OJMeX?uay7%8 zzi11A?pbh7xx&G`!nHBTtr@iJ6n{Q+zi+)CjX@OMJqzAa+5-qFT^hx@FPftT!!IH1 z{ht{?_bhl#X$u*qbZ1;N#Bmj|hYu{=bwP54y}`CDoQ&^-&jrnGuFE~TGFIRS6>tOI|+uUJ@*|2|l11<*YUKBu&JIXZkZ0RLwJ(8Pi$SjzxK5jMvfmhu{Kb{Vxhf0ko0Oy^&^YlWibI+blzG zQ2hDIy2u|L1=_X`LKXreLS~z7AV!BwLu<0*Z%HVMjUfMreh{9A*sByOP@MQ%;ofL} zBcZj}@)rsKBaA=&d5E-$+_T`Aaz&!FH0`8&qf@RSwAu4VEkP|h()eRbG5BEEDwKN` zoKrek6I~mv^qyrgt6Cql4oFGr(os?ZvI;4gRtMj(F< zF3_>L12ft}hS|C^h88p1+KVDw~?#(LPmY( zF9-mJHGdznM<3EpeQ&hUc>KF4q$lm_t|Gs^4Lj6px%7$ zS@1cf1u{ORe@4C93mfr=zkmS@cmA9SGy^EGU~bhQ8O@9d+4^NPS8#!j{o*fB0K=a@ zpIo5UbnaR3C#49PDWxCA1>((IjDP$EI1P>f{+wMPn+X$@-Lv3dN`9(gN$IoE48{T- z`^#TIE-)ha^T`FWzL@_MoRND_tSPyS-5VE(H*+z5^CwMU&ei5PGWhe&1@h!+aezY? 
z#yu#qlzPVQje5-%HX_Bz#YvxP9Vz^|b%7j!Qc#$P{u>#9(v&_KS=kHx=>j!r5}&Fa zG5pD0V8JmA$B!R}e_LA5lh8)Pg>x4!T&P%{pcCjG6q}S3GWW(6?7^OG79lZ6VUnHv zcxfS37B#Lu-`tGJp(Z=`aZK^Wix)e!(9uEE9iw4X3$5O26H)w`J7J0+ z4^L5s98h$)~D6g5nkw$A$b_Q!1Y5Z|6km2WrKOMk;8qGd& zk`rT&M*BQNYhipieFA=$p!enb-wy18E&MqEh<6cj{OK;Rij4iZ@n1U#gkMpU|GU8e zbYvkAqeI#x2o5kg8XeLFXmSFN+7Lg1Khze2gg<8hBalBzU{#xxKLF6e-;HuI32|g0 zFrzJFn60fbG11W&AY@L-y#Yw@lkwMK@_%Hhq0%&FNB%yJDY=A!AO8_$J33&civURZyP)ycW)I+h`ueVc#dzI^ z;Oe9S@H5<>nZHg7AYK2v0Ri|!03BHf+$aT*Wh*sm{tvEkK~P{xocII$E<5mt05bd- zfA#=IB!7sXSN&qXwG7%R2mTO0NB#gn z!XJ*mY5xoV+Ihh6<}W5j=KctN4*Yde4=D0C#RF)C%n=mMXeEF%S}}4o^88tAWGyL} zXKVo^_&M{}PG-Sm`ls_}2Vl7KCjpS+$MMHHU~P96|BZ$YpFz$r-Ku2}VEpTf*Iq4^hDZ zeiFYTaoeFIm21`|C$CA*-n_k_=s@|QBZn&X9@tS_zGqWS&5rnlL=-rQ$c%C)=j7yU zsx}6IZRceGMh1U6fjWL|7pnJfNl!~nO-o<73c||tv@|$MNluK9g+D_bm)YH2-_+Dt z-*mp?(xq#CgCi3+dJb=1A_0 z2mAYad#_)+cCDv>Xl!!g-0s}fDRHM;hWa||R>s9pG#F%#p!jcOkV#WAqm)0x7+U^> z+&@i7-g)Y5-=*5}#Pqefr<(^RM*2ayuC7a+?H9ZI%DcKM;^JZx5|dLr_b=8(9 zB(2P;yEc3G*2G|c&$Vk^moHxF=_{}8YKg;dby7-d`l<|ouY`>LaQTn405Euo@W;0o zYe?gBXlQ7a-q?#T;tjv^R{qcfj^b*-xvid+t5Q8FDJhAuvCvnvc6V+c^J92q!Pu{||0uC8_TN^^ZmJn4T)yO8FVwGf_jZzO5$vh^urO32^;?598d=}&(0 z;fEi7@WBV~zyIEQKmPHLfApjO{Ez?m4`lM+{_S7?^xvs^w`kQ;Nk^P!0(*-Zt$4rkA4A37mWOkjg=*OpaV`xNlu7e9-H3OUAqOo zZoM${g8;Fio|@`{r1-3!p3&aDnIabenRtRfa&H94_>9bP?->&Sk-wiHe*%902>=rO zH2!`F`~iM0`CGQalXv#|SG~0r%U~qfIrqD-Up=}zJux`YbN%Yo-o7f}Z%bSf;0F?~ zC~0TfYmYw&fFlsPg+|k<<03aesus%?U6o!x_l(<;mUMwMkpH zY}vFq9fr#*harsAZA?nK2`Y__ZndIdKEoY5!3qA6yVB37V_ITF0RLX<{t!Rxl9~T= z!JpQ@6UaYK@_0&HF`Q2EY&kZ4>*nOdh3)xUw{G3CB|AGO7bYO_iOC*MazZgIz}WFO z(sz*~ki*W1f-_j09?*oJz#sey3qnarQOSkF>4zzXx(2uwO2YVf*>d=jCMUrVFa`M| zAqoTHH_{5nz#n{DE&S0-ZT|iA(_cGMDoH2dpg=@%XXFzvuu*Mw+sF>#B-MN@!~xm-WBr&b2N^ z0KfB{ci%0}$|7Se{-3lB8?&;q^0#d(flyXf26tAyE%`VB(BO~m2pYqmza3h(ES}6k z3c9;D(T3Of1Hb3Up93hLLU01`*A4*UikU<3ZY2KCI{GsY|6rEscmE6ZBEAp*i3Q(A z{hu9w-N`8l%aeAzrQdj(XDZQ-~@$vC-ab?W^jgPm+ zEQ?Q0N{ByW!`~S5f3YSi96&+-+yV&MIuYoD5&BjM{)FM5f8#$?fB|UmcR5$) 
z|7yFs_peyCtPt^|02al?#>TH8KW2nc&4HGOjS))K|#UVwd;4a^bSwmxqh@TXLU-< zC!Ya$FwLNW;}1O`3;{QF+ws@lPWs;{34eB=5dQok&|sVJXP0r3_Qb}xT!@jhNlD0` zdGZVVkrf#_`or#*&VJwiz8~{nQkVvyH~tQEUG0ipPQ0Jy|B5v6mm`O9%U0(W96j4P z_4~Q=jVHHk*syNhy27h_s_I)BY7gV!n-AF5q=P5_yt&b!9ezVYh~MG;XBwK$RPHD` zUl5ad{``gZj?Rl0FI~QTrK>7myLI-#^EZAdCMhko4*1jBlJ>tiJH!5Wj65XX8YSmX z-W_WE`Qrk0jx_${8EgD`G5&J75tgwK!@_4QXz%3IOZGKF0rbvaclT}$KidB?{#YYy zItES;Hh)~Kt*bj*U%x5lz;JTjk=oYgy2=ioK8RCi!w`H^0NUd>G<5!a_0js)mgc%+ zyNb`BFDS$tK!iv*<;dUop%rO)fM2cF05bkk?D&&9~J zZ~WzUUG45-y(;Nt&HUw`)|!WomNp$22L4W*Z9R9cv9eJFQ19$0f^P=Ee}}^LikKq9 z)%$7?zvlWA1;F3=Hi5s>a{gfYLBhoH)Os_2L>j~YIq)Z8!Qjsi0yQoS{-j<`H_I?` zG|CdVJ4XQY=Knlh-Q8UmH2@v?(+2Bh&}7LwPS&>qfXy|hMHk35HzN2(09<8D-_Y>s z{dKK~UsL_bos2*2+XWxq_$x~S{JQsRtwH>s!5`U3OupFsd)SFri7G5=^`i!bO>dF@rThx#$Wp>bbg3m zWBn=S{|x-R@;3o{5A^9rlK=dfK@s@#-31aJIKX5C3T87vywPk6>aj2U9q78IJ3t@& zALXyGrm=+pc)p?Lbi2_BFADxlmCz4Q?XPJ<{F<5?8yl*NHUH-ie?30%7iVfgzOhXJ zn)vhC1!4(ewx*2DX5Xu1x-zgspZp*4r#V2n$YMACqx~=Cujou;lLlZ*U1fs~px)Lr zvatRSmL4%|YiXg&KnF|f`1*9zG|;}csj;D~{KFsqi2L(_KcPj<{BcGr zf4&lEWeAGl+ul;D&qn^j0OO+n>xA`Z4{489e*EV_{-^`=oxg(G1`fcMvq$JOL~m^^ z5+eS-^X~ia$7GPODJMH;(`F0>yLN8hmcMPsu1&Gg(a0W#0}Q~%qYdY1-;13u_`g%T z2!A|&Ui_ct{%TpH2;;w~F*5(y78M2lPhi19f&q$*KOZ|`OaJS8sb1M-HyH3I<1dHo zKXBkrbAWRG4hZWnwEr!rLHxi0(m~+)#u~B`vmpF^ue|&qAK)nlgu1%ADh!9$FQe#@ ze{UWn0G>M90Qf=gi*pYg{xvq7qW;f2e@6F9-5=wR;1@;G!}^PvKLauef4Wfw@I7Aw z?9s#UOKG--3zV64o*a$(RY8bo{jW2BwcXdX8Hk*}gEh2~`0?M)nx>{^LLW?m@sSp^ zElfiegulHL6U~#8U*P8B-bWA~LwNG!$x{f=ARPY{&mRG>{aE*vj`j;}@KpOebAqRL zt;PP=5B})rPh5Us{6$&$V=TxTTL$3w{Ci&cD|x;?l`2oF@y2op4t`|)MdNRuy#GDW z1>2!<1W56_V4i;n`~hva{<8A~!UyEF!P6^vG;-;3_vQNjh2gLI;lqbtK=={@#~%am zIS=5?n^WKc2L^`*@E03BJ=d>Y?Y;~@Jve`^{z&GqfBX%Q*@v+EB?|d#Z*K)(cuVYm zNfVPMlsC3Upy2@j;UE6-AOG=x{?Gpf;h!M<^FROZ|NVb3eD8bT`#yxX-+ucC5a40# zJMX}jlz;div3B`Ve(bE?t7Mnb&3V!P+40wXojO3@`0EA!Jf)}3w{=|Zy4nYu(EH&D z7HriV9=N_R{G9~;%m9M_WBfhi0fhGhO&yHhG%!9rb#rojba=3@_gdGLiydv}&K|b# 
zH!59!X8rF3#m~eanYSIa;g3LO+ir+LvJRle9|I8iV*nz5CIJ88hxYst0G&&*e20f_uf9b7puF*7}NV|z;lupI?{kpcTc zfAk|5_rVbHeF#7Nz=1!+-nmqJt7A(0WO57O$MwG*HvSJL!0?MWScBI5pM}3>*n$dy zjQ`G^qx_w&Z$8)FdF5K~;K=xm>A5=(KA#@IH%ebTg<(%=X({}OI0u0I1qN6sAxm@q z4RLI)J|Xdcj6XAgz#js*W@2VW`3vOE=s%;QHBS7|*$3NwK?^4%w;BR7Tdj77?A9=OlMe>#f*wX}|rl$JLnZ3Jr?}pv} zJdpkn3GhOb;A$VN_qBa)10b7$JU2Wb#Q%++gFtrQ7Z>k*UVmY46NJM-ZelN(P|RTX1ddRVdSv)D4D`_H*CF;z0f?4e$zqYuC;~ICJ2@fxS7KH=~CnK<SkOEY{p+66&Dv}X6nqb$Z~?wFFF2(Sv9oXFUw$zTc)vj>nE5kT;NCI`rRU~veD9uNQ|{Mlzq__H^AuabY9$*v~9Z;oB}Yux`r z*I$P9@t@iMb&;1zun{2TFCIs+tUVGFb8^<@A6mK6L(h;W4!N0Gdv>259-F*1GY|aD z^Ze2N_mak+Hvq3)r=f?2UU)vyaP69gC~J6~zac4qrV*e9AbmwCIKW#6vo4%J*V5c@ zVE?|89fKod!$X5GpgjiMfqz|lF-f!Ev-7v_-MfYSX}z!3{p$SL14ta;H8X&)vdr+a z`aj^0IY3+j#5F)|37YSKpTEF$K;W;ka;xz*BC$I7bAne!A6#qPx4SrhQ+7elzUp?| ztq9>!%l72tpa1;lgd(0l_AM~;2mS9g{Nd%xn~ep~S*)N879X$PjQ|#I-;ky;CMS^c zml6&95di5jA6eoP0OTgY!~-(^ynrn))Uig+Mk{FYGk{-4X3>spdVk5uSy@F@O%D9^ z;oEey83^Ss0h7{sO6K0C3(X`no~dtbZ)k0AX}{FdGt%E&f96D1;?}&peE22f*4#b2 zckkJA>guUe@N)`o0s?aoIs&|Mg$)5oYjQoX1V9UaF!sZ+cQ41E-5NBk!8}I*mmD#n zmzW6e8rrtKsQBplORaUKrN{QIS&ENK7=NF4?iBcg)t4icFn2nu^CwO}qL4qpZ}jCK ze*gR5-)Nu=e!BU1^TvW2UeMG9 zQ~y`DfBz;jMd1orcFypB&iuVN{xQDVsv3rtY3=Pj^ECYCzaacy2)~`1o1d>)w*>h+ zLGN$yaYHWQk2pZ^f8(dK;?WmY(<+;O`~e2w?+W1$@cZ5Ge)sj)lM{r&bvq8!m!StN z*tvakhQ@=XoeWF}f94(-0E8C-kyStgKv)O-(T{)dW7_``lC0Ts{K){-`SaT-E6Q>% zF*UQGps=X8fbo}@m;j-*b&frs)zPAn1K0b|0m2VKHf_q;l)3^J9}+a@2Uo1GzIlVT zPj5c{7;10L#Q0cy+u7PPp5pSd40t^hgf;MqugfDV4~bj1UTE&S%7%X?|JT)3QUX7t zgWuD7l9S^(v~Z#eJ6Y=MdwP1$?%&T_5)B|HTIh9l#&!fV}|t;>F9CFJD23 zehuL(2)}{A0GtB=HU5rk{Jm_-#qO8(z`uyyv3&>ZsE>+DW%#k_uRcbEUuH4>06*aG zx4-@E{~aISwF~;<%#yN01i)>1ndt;IOH=72M6oAr#u|)11Z1l)I0mE((LZ{d=TGM2 z^o${5U#j0mOIFqsNvkt=79f75+mS!YUQ5gDw}1QF-|YCihWw!eJOH?@-MlF|7N;#N ztbw<7X#S7zcc}iwixX%}^r_b}YY@BsEzC2#NalRpG7`jrI0SpZPv zZ|IBG?G!(BfWKT-Qj(7NEn)7j#kBhX{a;51^?wvU;166Y5I#0W8Cv{X;9(n^0>dh__7p^)gV1F_Z4v?=@a-RQ{?8jl76SEBNyM&7GNb8Q%VART z+ATW}zv7b8lAUYTpx-NOp84jRZ@+cmFZUY2LzZB=khu-pcdn1)_{*r8o|o}AOZmf< 
z510VpRsft1;MM?GAVx?YKmcrp`ug-Gz>?kj_HNMdgO8Q4V{a9FX3x&d;M}Di|2uH> z=+OgrFxN-Go%Zi1ps;e z;M@6mC%8XEE-LDyrE$CVZH{~)hXx%g*GE?jQ=2i+FpaV=~EEgW?;-g3;>A-{O)&`>q%)u$X4io(JopY zlR6s3e9T9QDQok#QT!@$w^R0}|N7Uzena>J0B!uAr@IffhfoKI-EK~4X;vchejZOo z)#z=FzidPY{U40)=VoVf(f?8W7=IdmJb(9rplM{VtLro*kBNz09=|*Wb6UD|O;KUt z7T|A&hG`ORLAb5+M*&3tcmIQ>d-m?V^nl~fu>*b%{80dbKMLTjTV#&jfdI||fRw)# zWpIB(@IsD(!->e=($5m$jcbV+8FUCx>%bpudqMvP{Cy1pdtc)I5WlaW%YXfPq!t-W zOUv4rlf5n#oFLz7dSgeC-3Hnk40{YmSE0>gKm_o+Z!OW2Y7stL3x6Vk=A`l}v8%+4 z?T->yuGyNmZTrrGy&H4a0el5bxBmR+zx>7E?_0uOdy8fM;pytb7kc6vbQdl-r)=7} zD&q74UA07(yO&y0P&X zUB73~p7o4Bir@6DTT@fFZxaI11(NjW0DC?-R8~=Oh469|A}>TM0uzqYE}%An^D1dP*b0qhXxYV}@Bj zWkh^58kd=3iHWN>=5Ed3wtfA2!d~OlpZ@eG#82Z7JK%4>MgJ$`?`$u=(U>^Ey&x0| z_@+2mgjrpAVHQSpfMD-IBdF4gFq2;~)O`$3L3*BLGtVT86fIQd5oo z)#HIDz&$|URgn2OG{x4fJ9cb``DXrsOi*mix}wUOgRqEk;^6+=wd*t2ZmU83o)Q1Y z;m7#1bAN~*gxj~TT!BTKc;fwvj~=b8g#RDG?ss|y;Ui&cid?IW0MP}4AH3dty81-b z-qw9boH}5A5t;`unmq{7`9lC_W~X2$KKvL7H{_F0U48OoX;ReD&UV<1)^;BD?1QBV zUTQM{8(5)>sMq)d0OQNM4E#o)lfGB)e69F_V9%Zn4D28f(kH{vh6VsX!+t+XoBaIz z=lDqcmk@qM13pQG@R1(4ltvz0FUy}a;u-_xC<=P{~utEgoK%1X9 zq4&HDKyooahvClcJM;5%b6s^!E$2GBu8-7tb%5kCFb7~>@5o5+NZshDwg?kVmtkh6 z$hZP!F~Z5HsG93NAjQxSzDa@ow2yq^n>Mtym!e}=b?-L{{K3#q$B&Ht!1Yn@2ksBQ zug{*5!Af7>_U-5hi4)}8pO2H4-IuyLni^_ryMa5>_rfQ3zu^BU zd3fO*_{LjPH=8?q2FJ!=8Rn=hfBI8keIEjwg0K#l0QeR1haT`I-ZK+L`>c*ydsaUu z&^J8H@mE*ZL%Uw`?{i8YoB6`LL&pz0UkyJPEEBPMd$-d*SZHGY0GpdX7UEuwfut2m z;tyn{CM6_$(C_U!d-k`#g&_#yX8{mBAkKlm`9|jtq~4aXHX}{*fjobdLhNyX!t3yx z&CBcz_et@a$p-#7{MhKv;{K=?Lzf2dq5C6L!SdVe;kjE=Ed#xm&H-{1JcJJdxoi%W zWu~WK>-;4AVZ;lt;lHcv0E`2VKClH4{W0l*odJy27NVb6=o=cU&SOBD;2Rz$Z(2)= zg4dFMl-$(REAq!CzO?V9_`%{2hadT}_Tx%vO%2lJfJtCRr;MGzM*qkoC&tajB=T4z zA))RpK2L%#pcFvn0?qv8dD3(OW&9D^075(dp2CO?{U67lclSr|Gx(dGt(Y4asJq;K zvGW@HbGo=?6vEUbJY*Re9)&+wLVh~b+Y4Q9_tk4TFcG%lPv3^%9l&Th4f(Gpx}#&v z0e{6bZl(`r8-%}%;^KVRS#|bozro+wbG`4?@gp-2{7qmjh5`s;b0UD2Gu(*)j7}5< ztJGw0d%#{D?MN7ZvM&$)XK`9lYYWg+sy8X)O` 
z^*IPml*lTewhm1fVP1cQ{GkhcVqmAos>?BIrjNcQ5c}VXGqoFFS5T%0*Ij5B8+(r4 z?~8|HW2f9He&fT(fHJREsi)V>!nzysH+U zX;S`p4~Ts*orBOxFaz+f-^_5oP(%Jau&sEDd;X?#drkN;{&4LDAAQItAA0W#{;wbS zs|C`!E@_)&;Hew@Wr*IZxXBA90RsbrgYXXkF+MUrexoBBCc_$kI)K6&pf>=?GGMee z4|%LpXGDnqljBGHUv>9YTm_7Z!vA3Txuc^BBF8#m2Wnz8E)FIo?W<{O!P~*F4gPHA z1cXxHFq?t6eS0FTY>^E{R_@ptXc!i?emdE#SBqyH)B0{K1S#p)7>Cka7pwuO&w>#^ zG6$4%Z;>M;N_~51%LKJ$(yvy7>otcs1XYgmU z_VNJzpUvb0rnL}&zrO54Fuxcb(TW)&6Q4f#+1k`CToZ%PIvDd|Z)41S>C_j$slBHe5!jx#sJJ&0$7)ZYQ=xR*6RV&3)10yp7qp-U zHXIB?F*(5pU~`@zIzNy5RA8z+59`-=bFy!n_!FPPNdUBt z0bddR5I`$`!^71jcJxvImvi_~-s;t>VIWvhap+h@#cuelsINaeHa0qn>n>!`?-e}t z(5JpcEgioau2D&Q+5za^Ncpn`(0#^|ltp7mAoxF_2bPTiMgC5eVrF}*sxsljgqMD( z^JhYk;x}D>oUQy3{Mf!1%leC*4@Cdh-;3$1?Fp9N(#O!~co&QU53M zNAVkbLg>@+!(CtIg}=v-YIxU2&_L~JkdVSmQcfI=24a$I4Ws`V+m>214lYoif(WZH z+6WLQAv}L%{>$#Kzj!W3xW3h>C}lM zJu`pIimeMC>cnvvZ?Wm`^!)s>YuF)b-LLNb?0erinE&=%&C13O_V!)+#EtmMyLYks zotY+6U+te6d=A&14ZpRkH25Puus;AA`vDQaNBE`E0`q#OwrpwD;%4_ppMMYs2&*s9 z|1$i>p5Sc8!0&~QAL)D_lT`)@WITTs7TghXgux#8&wsY%kM^ysC3G0!AzU+^e=w8Q34?Dz0?V}d2XTL2PEKZ48TgUqKVkB5 z$NcmSyj#WW_3o@Iz#PnzwS9hctFulXK8(X~>(=D0>6uyJ?=JACJ3w~;vi(>Fe~iB2 z;ZsRT=iqfCBO}HirqMY($2wp51pajVkUyYL-){iUk3RH8?>A7(HJjL?oB^Z3GWvs9 zFiVB{7fnzX2s;sA3}`GvlQEz@e-j2^7$XD@!-DTIkB7{EIWyXD{sE8Pj&t*0z;ALh z@aMqE?BF605fCS1LIU<8lL)sf5o89Lcl}b9J_I2;sVDXnS#gw#8q)) z8PM1aqt8O3u@i=eA&9{wRRG#UnwS}!)% zRFxw7oG+1~N_oqKS)Owl2dfr+DTYnC= zn%ulG0WUPY+|}OE+}hT0@k;kjkEcU>N5R#r?XW2m*84A97(nl5!VmbHfc{tR{&4FH z?S66V3#EwwS8?+Oq?;HUhP7s35Bwc0JY(NW=);K!;0FnCH~!sW__ghY_V%{6ozWO_ za*7)pu{<1fn#O?69`FV7M*zH<5%fUq&ule?{8|FP<0RV^)zjrE1FVPV68{Q`b$ z?h6=gu44F!4}axzUxcgre0%5ROI=;Vba@2_da(Be{sSDzgAb$+rp!Pu_|YBB@X~2` z6T@M`U-bL=;Py?jC)vDesi97&;tdrY3q^?9^$

    $rc?T& zg*SG;|9(z$Gxu{Oy-EZ4r z@TdE~N#+1=eqLwcZ~P*@r0o%Ne(3t1*VSP|(RPqFS%{5B+X^A+%$x!%ywSW&L=hqDe!AUyOX<38|x27uAg>AS0rRTU*CN53!7 zhmMeh_uq$S{^TtRn?HcSPK%!%L!+>@a0q^L)pNbJW;bk)V*T&^E1dtU(EQ)zB_`_h~ zuMVD((k}QJ)qH%M=dW^NTJwJ;n#b6@`A9|8#fDRj4ew}8i!Ww>C+Y$n2^uPv?Ed+mM4J3RDy?j5gF!}1Q&U5I_vMRiEjx7n zdWru7CwHVxqYwAs+-DB(?ug0%vDZUC()!+^+GL}}NQ3f+V%&)Ybe`hX{E;`KeOiPl zfQ!*#wM)|R^O))3!xBiLlZ>HP>zr|$yA$kyC2uNlfWa2^j!)=|Ja-Szw z!Kb4ohIqM~PadneK>Od3yX^(!?cFJ!$_rY6IZu0g?=$KEZ@;kR4_5rXxO)UPMjKgU z%%QPEXLOBNh^kcDa{c-v0Pu4s{s@4ViRx`_m7I3#4v)`j{_kjAE4=@&zP_xieA}v3 z8LL;NnVP!-@f+Q^H0C5(O%~mswfBVumkn>d^>-g={89SI1o+IxjhPVeb)Tjpf^o?4 zhc~*5{T=Fi7k0r*bMcfI>Zt>qo_l7;--{P_;d=}GeI};#2tRZ+*+OLufO>NcgK}Jo ze+}V!$rR)7oGDjye-NHMGryUQ+(<^*_~&%v^gw-G$sR535-A7Y0RTrwH!WQnd%|Yx zpUM3pe;DAU5)js5I8_aSo(eravWS^ODx(2S;x{5lLpl}eC5_^Z?To_yi~ZTNfstWZ|f)s;7d;@n%dnz#n=sI+&d#LIL?wxH#hv2$blwC{b%78jU&EH?h^1oWi7A`Wowk(IxR3FI%;i9h>noft!# zyOiY1#2U)_>5wh`8oDdhzP?`hBTvhpKbN{cYyHU?JN^(r=zoa^^y2;yzs2SMY+EL^ z0oS1K&OJ2qhuG=-O-lP;`*tBd*S<`W(1rzSvoQutksbGrrP>?4@OM_*uJPOkHR)+v z{s2Jie-XfLuf6Z$<4-PqDs_PKpBoD>r2i#ekMTF@z@J^V_Kn8ibdgfyPo9z)rLD@0 zHc7SG%CWaMdf{&v`GY>V&ZbJz)Aszm^&O4BrLkT5=#S2Q&6AHe<^MjC+!B*dDt&5d zJ#m2dUyI{Ejz3r@bmULgXkN->W`AoN42o=Ia8)IPKhNkW>}@m6KPZ1MUPx}!{*(iM zZ+)Nozokp#kw3D6EM0rS{`atah7j7bA4$|@%HpTxw&C1s90AV1(kH+s|A+WF^C!<% zFIHBjOQXS`q)eZURuv4FEX!A~ti>quH#&M=yjSA>Uc8XcKEz9nBPae~MNcDes{}u8 z@&S+Tj)aypdG5fUMO{ulsgXFqdoT0>pbP%|F4L%!Av}#47Rjyfh*`??-6)m8DA@NZ zQU36UzUcoX{NZ--7yisXJRV2>a7~#KxYf@6q3?t6=J+$KYY0nXqPlqM)w4&R-+w0I z?*h($ng2^MHMrMoxeKgix-$xhneWpdqd6^C9gO-~UuXV`;MpYr=%4%Z#vk|{ieNr= zzkKg2y1zHcpD3V7TdNTq;1~CwGX9ABi-Pgr8Jbifj>%M%yEXFWby$!XDa1AWN6P~|4RbbGVGM`X*R+d^n)j(+6090 z2Y%1suM(cWVfz2a-h02-aa?)c{uk!)?vL+2JGIBe?yGF<$gt>Vj=lM4*%l6|M~a6 zS3b6)V$CY}tEv|+_V>d*y5V7bZg6O5Ehn{@pPv8oHPXrFtBXoEHlhQx@OMk#&l?w~ zr1CFUSV?vc^Ec`c^EwpWsLG)9I(L5=f5RsK=kUjC3Hup=zaUxcX~FMRR8%aj2pWr! 
zBJ#)8RaE7NjvmetIKbby8!$ot2QSbB{>ohc2L;5Eyf*K%phhiXyhCw}ysX!KIg;yS z7=I%RFdI6aVR^274!^{C-XYhR`{)-%z@%VJ$ox6#8c!KzbBwLezy8e&X91$}S9g~2 zS5{V0=U3#H^wW3@ZF-*zIr21`DrVKjGUm}GXjI@2AJ0?xdx5_Mtg9+u-zw}yi?=19pFf1s}Ie7W(^^KYK91+c?kE#vPN<8RHHty|a3 zSpH63jZ_gYDJAlQLK)47#ij_mY^&C6tPjIqE0-knXV&-&cj6siKVu6J#vi!7n>S(F zW&G7FKlS*@rM78xmEmeCWUhFiV2`XNV|BT*rrz~`)9S$G^#uH1M=bZ(m^uEMnk@Qq za~b5%qHYrerqPS1FX0Jfu z*$({S3!gEtRYx!2S5w`6>Z{8q_D>X+zf3u=q)x{I84F3hnLz!?jH_(kaiXW^LiY^z zt2~^4W&YqbP^~&`a-?OGKa;vlTQt7=4EUQnXZ9?Czx|_Y-Y&1`KK*#$WNUz6ucS`T z0vQ>9rc=q=7&t}u%$$=^PjhT_^R}}WPaf`WVLxVb_=7cx_d{ghPc0MXIIH~W)pbDu znRW@S)0zhuycQx*#vlZy$MqZ@fy zZOQzNk-F4SKVj>sDaXnFav+t5#Ia;o`zQ*&xA z3i<)}jVE5b0RFCB9UQrN8~!B8WB98OgQu3lpOwZe@vBX=Bu7l!p(KpjihYfwlWA^V zf9%Q);IFa-ZAl>|F1oY+8VB{OSkiX$;SBI|O!uK@X zd-(CAfzCw}o9bDXpm=^sYqg0QeZ(}?CSug+=Q(pYV_Al#wllpSwJ)ixt8dw|Y#RQ# zie+22Y&vq`0{j?rMaRonmw)Nre|a;YkuWMqj4YsNJoMtlo*To1*M@K4pUAm=`|iU# zr*A;?phF26BcJKiipJ)p`}Pj7ErVOPu3zoH2tO%r9eMfkT~)I% z$7%O}0U8h8-w=#%3>i7Hb?&suehyPfmZK;}39-na_-7Je zS1>_TJ{QsdX^j<)lNN2=ef9QTchFb){l(kdRT~Pp&pOW7PgOOqsvx;PBmh4<6x< zGHzb!zjWo=_LnaoN@pGRAPf9S0;(+JHnz;1(J~$12pcFWh#0VmG!)3FA|{4Nlp2%>yuhCeQ#R=#u^wTO9NwBST3qM(z(<3eZT z5^1Z=$Ea_Nr@mRRhm(s?G48(zf$1w8OVJb>_59_-f=i$zjBrpOjf55g(Jb-DMka8E zUIM7=#RHL2G@AQcC{VE8fHXWIzLD3U`OFr7Fga69!zqpp`;n#~tA70B3R(=QLBcT#dBgZx

    k}ShT9@Su4)UTk&K;pltuo`0s>+wG6E>(-WJkmhS#Pa_SRg|BTkNFlRuLT zsE|>kk~1>IOs7&%V*xahvGa~H%AY|%2eGsm9mEX1EwIthYJ34b;^auG`Fl$~7k0!I z(h68;qozM>R$}c&Hgd!aF3>$0U9}06)asYl$!I&8ZvHeQ<0F=OqeoHFEO;8}knoJU zR^uCWO+i}9`3q_!D3o@iz+doGieU_XgGF@%L|Ki2+)hI3`BOC#MF@;qx{MNH>UkE$ zn4o}}~ zfAGe|R(YEDDH;v&^v}QSZ2hzIPTr5C)fk{CDHeQb+?cPBrcJTcK*`WXu?Zob5&nD` zGzz8FSTGhe8u>_QjK1TIZ}c0GVwvHOYot@i12A1iEr0wxi)t*GfX#0e;W6ccHSw0r z6Z>MxN&Zm^je5ii#)9jYe95GkW0jKpgZRcQ@yA?)FNzXFjg%z+uq2Jf{>C5f_$3|e zeZ`bmTmdu1A8MrNA|9sAXz6d<;m(Jmf~cD@{zTkTAGh$A2Md6%SsAMl8a0=UY0Mmd z6KoQt%UFQnX5z%Bs5x-~v&Y{Ai$)VtWo%ARPX;0;q8j}++-TY}r#-m85QM!yCV!Bg# z#YTfD|AFNC8rS6d(mi0aAbzAO%PPQh*d71xNu>fD|AF zNC8rS6d(mi0aAbzAO%PPQh*d71xNu>fD|AFNC8rS6d(mi0aAbzAO%PPQh*d71xNu> zfD|AFNC8rS6d(mi0aAbzAO%PPQh*d71xNu>fD|AFNC8rS6d(mi0aAbzAO%PPQh*d7 z1xNu>fD|AFNC8rS6d(mi0aAbzAO%PPQh*d71xNu>fD|AFNC8rS6d(mi0aAbzAO%PP zQh*d71xNu>fD|AFNC8rS6d(mi0aAbzAO%PPQh*d71xNu>fD|AFNC8rS6d(mi0aAbz zAO%PPQh*d71xNu>fD|AFNC8rS6d(mi0aAbzAO%PPQh*d71xNu>fD|AFNC8rS6d(mi z0aAbzAO%PPQh*d71xNu>fD|AFNC8rS6d(mi0aAbzAO%PPQh*d71xNu>fD|AFNC8rS z6d(mi0aAbzAO%PPQh*d71xNu>fD|AFNC8rS6d(mi0aAbzAO%PPQh*d71xNu>fD|AF zNC8rS6d(mi0aAbzAO%PPQh*d71xNu>fD|AFNC8rS6d(mi0aAbzAO%PPQh*d71xNu> zfD|AFNC8rS6d(mi0aAbzAO%PPQh*d71xNu>fD|AFNC8rS6d(mi0aAbzAO%PPQh*d7 z1xNu>fD|AFNC8rS6d(mi0aAbzAO%PPQh*d71xNu>fD|AFNC8rS6d(mi0aAbzAO%PP zQh*d71xNu>fD|AFNC8rS6d(mi0aAbzAO%PPQh*d71xNu>fD|AFNC8rS6d(mi0aAbz zAO%PPQh*d71xNu>fD|AFNC8rS6d(mi0aAbzAO%PPQh*d71xNu>fD|AFNC8rS6d(mi z0aAbzAO%PPQh*d71xNu>fD|AFNC8rS6d(mi0aAbzAO%PPQh*d71xNu>fD|AFNC8rS z6d(mi0aAbzAO%PPQh*d71xNu>fD|AFNC8rS6d(mi0aAbzAO%PPQh*d71xNu>fD|AF zNC8rS6d(mi0aAbzAO%PPQh*d71xNu>fD|AFNC8rS6d(mi0aAbzAO%PPQh*d71xNu> zfD|AFNC8rS6d(mi0aD;kOo2b6ka`8ujO-g@DWqJ1H;BKKk6^mHPyD5O{LvFH7dVbWniTkPn(zPqSXCNafSN?dC9OcV_=8Efq}8U*sS0F_ zKlSwJxWpC69Dn0WTyW~1q(Jug8&3pEBV5`CS>%rhEN$k*-er?Na)II)mvTI#{E-VR zW$1*yrJBE=2q(g$<9Xl-^jXRZq?a$;mrQfLmNeKIhr%^yAX_NvL~37HU7qz+r+UNZ3PQz zQ^vfYZRsmldY~NY%OsPUCH~%Yf(5eja*Ht;L1WZX#NpQWPU%44P+uyM)Qs@wqL&Lm 
z;I9M$RL47!fsIg!+P>x(t_uotDVRn6-ZVmn{27fAMl9j1>VPL)eU9qX){VzG>WO8} z27i+Kg98&|4u2)*0s|u+YbfIu3bbxch}(p@T&nr|DL*fjKLAh*naiKv$SW)UXK=ua z*oR=_wv?nhmTvxB$O8Bwe>%uK{&dEqr43!5ZoESm&O4*7bxT^_?a39SoIeI&5I={% zpbHfE3mRo%ig=_Q?V`1>ZQYibXwTBlA38E4e*rHi@)s~Bs%w z)0|Q^{rr_{@WT~|6Xet7sam6)wTMUhu`X5z^sU=dlj~dh`FrcFw{!%0sEJy6i98}2 zUtL^3*v06NzUADhiS;e@`~iO93FO!bpNT0MzarCc$FDI=NmHw#(!(?mDo;lE)4D)! z7WR}7xbvc(vFH&TX>8Ou;)UyxvB|K8q`H=N{v7vba)By=9&}P9DJ`SbVbFfR?|Z4q`Z3M z75-_OpMU;oxyYY^K#m!nGLHv7t;Rs^1QPsVFE97w!I*E2*{EA3lnz#&UrNjl$;Hym zpNty4DhD|&_8Xn|ARbM zqhCi7u?RGX{Edp*c7%4~WR2P#UH+``r*VP8X;%pGK-6jsZirLM8b|Ll3V$qpo><&cdha`VuWIgZ!y3P!Oa7f^aBDtwuR( zK1T^qAvd2ug>kEpI}+z8ll)08P*epGGXiKeinZhwk2};#Io<~)x!8xi8?isL$)Dr` z1#K8&CIYQSp^|)}#pa{X?d78dE1OmR^aQ$y=?SzN^A?&_P-q;AUw^*3WS2kPB}K5{2I?xj*$ zK7u%9v%p_O7pTm_o`ymG5_N$IGJ{=Cu)6qFdD_K}WP`t41WM}=^)irar{gb47Z`t- z10{*Sl6Y+ON5Aw#VC^d>$OI~=IRq++{E*|B|7QTB_z8*ozj#FFuSr(?AAl$W7dVm# z z0p%>BHl`GhahN6k!n;77P*%jOMqO>@w5VfTxcr(vMr=$|L&T(P@t2E0Zw`*AQnJ}2 z4ss!~n@=cGYvyXgTC&Dp$VpfyPi3Vp)lIV+E#>s4NQ0bsW__RB)JUyaYYJt|9Dli7 zAl?trD^m$UZ-G>+(OAlCh&0TJW;XZ9wM1wwXI(inWRSm{F3=zfB4z~8YBW@6PmVCm ziDtF;3ax%~V%$&lW4Y_fl_QJ%<#2%}$QWX=jJZTM*!^)VX3WqTG1=*klaixrVzD^! 
zZ1QI#(CFn{{`4-;YK$X%sL{4BW|e-itnz1^gw0-#Bc^qMR--Rx^q6gQ zle9(enP0-v4?k19SZ4V%xIha>zDeT(t;TrLyQM6Ho17`H9tszPfga^oaYaSC`Fq>H z|Eq}wZo$1JP!#lX@r`*6LSRS>k&ssp&HUw8EtXC>e{V-3knN%fn91@928>29%2}fx zh2u?KikhL&)fcr-N`76Hkf!8{bC`Pm>;y_K(AmWkF_RP%4FLeHMyZ;}2Zq6wligb7 z#PCOvJEa!THl=2Yahz)Y%8&{>fc`oJ#Zf7eyj*-^4BfpV+JV(1MlK-L;gyaZk*aeK zVprxDOh11i31p`nL`;}w;~RZBqsMgPs!5Y7*4T3-NyTFwXc~xBk5VWL{DpIYc;Xtw zd>0tssAwALP&2ODB&AYq9&@5nI>w=q< zl_gJnqoq=A(hRSh#4|?v#3~{cFlUx~!;Z|EBU!S+pL+qQy$VD*IDa<6DywXa#W#i= z4$TmINg=B!ST?UCLL{Navd5q50!5*6QDi0%O2#yb?c3tQ*eX$5siN4F{@`I!?Pi2O zz653-P$p1RDmO(Y56Mc#GK%e6R76!Z4~a9my2$3L817k=UA4tywzw2<+V(T!H-B%>ZcDa$K4(DW;@ zex%y}{Y;jDqvw2b0L45WMPee4dqy2&@r}ItoS&LuhNPUu3Z6mJujJZHH-E|nnGB%2 z87OKMmZBhmLdl3mv3+Y?GtQ-eU!|sB*7JyDOgVoF3kraYKv9nH6e(om=8O=;Iw3At zAgUO{9~N|=!*@f;S=e5soE}-d zMFsF}>BdJSilmCwl6GTUFqE;5TTPS*yHhR>))3z47rGw&t-OrU~m zUsa_72$_7NP=ZITMqZR>mOtI1aw0$C^``UFrC)4(c3rUhm&%GA=Uoxwb213Y`&-}zOkx0(2|f4-zZd4Qc*L133oH){EZF} z7@q}u$!d$aI<_(4(M+s5;xEMX`<>TDV~|CU&qA(}W&ojNOrzZQk~u0GepRXWe=30S zS;$$^2pN`)XDk`wgpQcu1B;L^k!Tk9lRRL27DALXK<1Q;V@zZW6RyO@U&2jQWy1eS zQ?Sc|UZ}i{dd4_^+DX`I)OYjMrV@W)uKh62>+JeZz6P`ocLGKofnK$djjxt8ee1+u zX#U0o0gTT=*pfjnCzOn6)D!S(Q!V@@-?3C*|GhpY2w;2`LYEA9Ik99kWAX!g!XC(so>J_`{_Dv-$~qZsq2CHWf6{3YL3y4N4z{{jFiEXa~2W|SFIs*PZjyH^@V z#9t);=M%_=FFp&=N{WyLN`^L;261vPh`*@(@ktnUjL$;UlKxUdQ!=Dc9{JKZGV)i5 z?xlVG!De931?u?=$wFj`JTIp!8O~UUAARNqkWBCEbdXH=1fJ zM!Jc=g0%hfuwz|{`JDBKgd`E#xUjpKum z6~Ks)8LQ35XsNWYCKG?73ejwa{2%&3xF2FxsZgNU_^Xa@G@nReEk^zd0bs`X^X`Xe zhbTS^){@mV+Sc5}_(of;1+a+I`~YakfM%W5>1Dy$&{f5{P;LH;Z*&^p}t8I6!(wegIF#mrcH zDE^WnFq8Zl3CzucX;f?wRXf!hyj*;vsV@ai56NFL1ZI>!!z3J^g)k-cUd}BU!&qR> zOx1_xFNw>*tnw$gKtbpDEQBkmbAevT$VQ>##f*mMF985k&EFgR)`#}fdp6o1y!b4H zEUEEwe#xjtzu!fTMc^-g0MpIiJMU=8u}HkB%|zob z&t-51__Mn}z7l3C$7dm0NpYzWC>h$Qf>EU7sQl&S0yDv1C>O}*V)0XQM)5%jmJ}+E zZ!8jTYBQ1f(-u%`H5_Myzi=*4B+rcl9G)=oLD7};DvodTdRE+s7N;1Ap?aMa{^Gho z0YEJ%T*T-`4xqMVC?jvX$e%G#(-sNU?ac6}cY#ZHV%W877yR4ReT;=3CmcU|{P^+B zE@y#*I9A#1CJ;ypb@&Ct;9D)Ci9X)d7D1@9H 
z%n*MX0(Syp$B*|qVdT0KZaCp43meBGfNT;rYK33qct)((STd*4R@Pi=-WDBdy0kaP zww^k5>ZBV^oM7sXIAPcg-MM;`DgM++*ujtU*Xt;BUE=SS51?79_!I%z=4x{pOPfCj zKY>3cufpF@E&#K|pX>r%{Cxf#0A>C-fcb!8L#(FOX0;maU9wf1mrdvEf@v;(B7Z2a z%HPo8U{Riez04Yaf(zvM<>1c)Ft0%~7p}E2W;L4YEUbm|VfzI9PQ&ag&c7#c3J&mR z0T9n3bNqQOa0k2g6R!VyK_L8!D*o>l2hhqwK1NH~S_l@HtVT<@JWV$692?>%@rT+% z(C}vmFoXO#1n%h3@&^D0_`9`0}Ab>l1BJg*c0cd3*PooJkR;|g%_%j=A0Oe~l62lA*ep>!|9RS_s?{R0V z%K~5~{ogz9i1-QDe{7Bj_`ln?8Gu$6@;2%@a;r7DK*yXfOfkw^`FnHhiqjax>p!OK zi4(BX#Q=2pJMQw=V+QaCVc#`Siuv9ESEmhtpYQ%u{!TgoI{SaOAOL>|pp}LEjamSC zwOXU={~%8cg7Pb3;}7sVW8n`0V_7#EV3nzwocg1E!n5v14`akKt$G@1)}aW&ZAn z00tqmf+84$1PI0;MpmQ9U$95PmXb^{0?6>Q^ViE(!EE{G@n-@s?fhu~bnp}S;}fuZ zje<9fUNYDAdG1SV4(mlK$ShCe-k z8Q@QLfi8X;_b2iP9+3IJ-rlaR4O`ZR7}fgC@FztwAI&aA^w5C7G& zSS>tuQdLntuK&Q=x$_q3gosKG@U6<~ngtuSZks%{u5NO3%dCYT&+BwZljg3vs6DC5K z(A?AnNA-0zmF4hfs4J$OJJ)gK$l;D7$4;C+ec{s8p&PgQx6GQ{P_d`8|LVzozo;08 zX!zr#0VTfCuUJ#9Z`AQ8bSWx-qaqO~@%MH`&Gfw;o!tjE)lGPR&idU)PNN4rb?QWK zPxsND-ot%;tII1Yt7_{T8k<0Hu6Sc(bJOg-oI!Z_GUx%*#ozyuL-`axOvd2j*RvP> zKkNe>JvMhy^Q6g>kU$5&@#Pg&RjbdPTYvTPrGbHq7cN}rzkF@v=8dDPX1`lsv9Ige zrIYOwD#ki!aLBBnL^pECv?aMw%b#y7EPqn%Z&%eV-h1%U=>wZ;ny1X(*Lmf}&}C4r zukZB9-c#o;ZR+dWTv1V8RZ~~rfZunNyME28-LUehkv($7m&g`!ifIA z5K3{I+3QmkK2pmAdzM-LM{xG;{vX?j8acXk(SozRT@Racacuc)Zpi2Pjv{w@vl z4-5=k85|jGUpH?;eZ~H+p$ol-nkrrX_;6c6iDPsenV}^2a(c7-ov3wO)PKWagYY*z z{521M#lv5^;rl-vqpwkyDmuV1b@hw(p19L@U{h7?gcSPn-nstzcJwxc$dQ02n;Y_!GyA52XKj?b@}8-e8wrBo}_ig8ZQe z9LC*%qjMW3Ol)kZudlBuFNe9J``o!XSFc*I#?>M?d=E4}bV~ zfA@pG{oB7qCja`c|LU*)@-M&t{qO&;?|tukfAJUpGai6r>+2Wnx%BDz1Dh(U>!)|! 
z`{wJ<9^M?hJkWo>@9eq$ODop(_0^VFgSZWi6DBs*x%9c)&klh@Ln3<^_qp?8*REYH zT>vHgj;d#a$0C2u7m!WC$lu7w#+nA0fa~k)s>;WgHy=57U=DoU2H?^U0>rNMx9wS4 zTRFYIe|TWcG?@#4OimDw;u`@nu_85|yJ7|)^7mupPr~m903gH9D`| z%sY7T+kpd{$HA50$%p^=yBA+Pxp(90mHvz8&ktPM0sPIWs0I8$;_4N>TzmKS2Lo^j z!Z~TsJnDqVwW}oypp2i6KM_BF|Ao0fM&PU}Se4-lWc>KLbLS4!&Y3f3=B#G8T%Irp zVW|Cs+S=Qo((v%yAPQ=VaBPDU{G*UFV$@oe84KYpAs#;+K#>C&yrBecd0(O2B zoIz=NKm|XEKll|EgcU26uQUJ>zeg2#$f=2M?ZyUyqt7L1CrRUDg zbOzq#5B#2$KMN>tL$Cq(OACOwV`d4tjm-ZAum04_KUif(o`0cV%=h6x<>1>K|7YUw zTwQ(D_;HAz@BTP{xCj~Qten4R_wMc6H*DCja^>;`3l_Arw0tnLNgPj%&xX+O{qKJF zyCtyxm^cx-30%pc6$27CWnD)%vL^WSp7!R-+Q#Z%j&V+_y0WseqGBWWf7h>fj~!Q8 zS6fxN-@xAp_kZOI6&6sCzqkNGwKf7n7-9YmxWB*i-Jf*%7w!5F72p8+{GFMt^M42W z&aJH;H*Oi?=K#38qP)Dan*F%7re<7KV{>EEvIE>HIN5r8k9F)=y{VP`ZxaR<-Pr1k zckBFV^QZmIE5vZ3voOHLffVp+0hl|y|4`@t9c??7E?qih%6m(?1_tkZd~y4-8SmDQ zed`?n50)8D5coq62$z5}&zbn^?Pc?CnT9_TD2%^I1o~_<{!A6ux_4~+>{5)Ct*u4= z)Wt9G$980#t3RB6oz?IE{2vk4zpODAz+C*T>pS09KAw3$*Z(bd#UGCxR*ZXh_R{SK zPu}_c!()d(n)CkqQ>RW{c7FAaj;=!ow&2CL7;voF1ke6?bytHX{H|R?{I;y!f9S~m zZHty4TRL{yv17-3Pnhzg2XZv={cTb&vi2PmOFt)a-u^sqx$I_X9adn3C?+Cj| zJhx2GpME-Y`HSKLJ&s)d^cB1OA}Mvrw+8YAMEIuIdt8!B+#| z-$3JDN0i9mo)6m)zs`=`OM$;*JraNW^!&l{gM}O88#`3~m^8lsv+$>3!RIdmfi4$5 ze_Ai+nROUhjk*Hy(inib{9i-gxpRG|TmY^7xfkmjVaVzhebmtn0Cu+RlU<-N+=$>S z0dS&`zH5W~*0y&een&b!TFm)#zg_T=i@%MvfZw^b?r1Ro=kv!NB(m}s07c|4{3Pr& z?=ggtARmG`qA{2~b_xPmUd`9P{`#YfZ@%E?0dRm3_&a(O`CGX6a2EpD(=NL}fj^9v z7=OKc(fJ{MhdcIi|L5bElfN7A?18uZsEy8_3W~&Eco)cc5CGE=D4A7&M5Ag;^f(NE z>-sKu4lsoObNE}j0fi7Sr{gGygjLE#H6mhznp> z`?fdJ4*m}gUpsUN-23o(eUF^?^wnNU`d*tw;Lmh1^9sUIT4F1o(`h(XW z3?Y7e`D^lj6$%R$P*naxyFd+p;agSVR7&QDoLoAL|C`-+0URK^01VIHvIB~i!XI;g zA^4LH4p1gsndol$C-1lUN9l;De&$FXSW~n192! 
znv-3Af_Z;({${Y}4=nt-4p7hEI%)sKnSYnIA%5Thor}O@huhdrOi}pz^G%!9ivjLp zKxl7o-+^Jvd*dAR*uS^eGXVE)KLq%}?2Bs;y!<Sb5lg zQTg*B)9~jRWdPp|6YwVi5L&d=`Yups_IR=yy<7=I-1*ne-+^-%-4%$Qzx8d-Ac@z1 zi`$MI>16c5A{cLJ!Pvqwq$vEYxpAZO=FMN@!^Z<(K==~ES6_YgHH2>oIBHT zxj6jo`TXfKNpLZ{NNH9`MT5Ygh0W8~yzkFPuMj27Y>Q>}bcp-hJ@+$*1?OtcH~*K7@t4AK)Ilx(c)Y)$xZDFan4Z@KXUm^%0T3%^xWKZ+Lj$dgavN`2&vMb$9yZ%U?ha19-pQUNe8df@RnQ{!aa8r0^gA zY2gpCx3_AxT1%RX=`Hg5*Kgf>`0;1Ix_1TND1B-#hSe)qu7n>E&j28QjsZ4G*w$Pkk7VuE_&?5{3Lx-@ z08YMf|Gvy$_rc9AgG0xTvomx486Ix4@#m~Q_|pdteFpy6BxgfF7%tZVCd;4ui+Q0L zF>39Z)hN~yCNB9upTBF;{!2sOMR0&{7ud`nK7}gtw+Hz<-FFfAyK(2PX1i&Q5F{*SxA0}foAzu^2^UORg9tasTiBpV|3i+r(FdD1W8chCA;cp%IKfwWN{NM0V2<++m*49NZu~y7kyR)t51n_t1+Q^MN z_kq99?p%>s@SQWbEKoHxuc6T*iW_V3@{cXrUk zpMM4D0m$CacLzM+`;!0b-MwSu><>0=`p|hYAAT|U!R{S9TCpuJRF8Y7e4{`8y6#W) ze-eLtR=qcR^e8~Hb>qUxb?`eM92NZGoH3v=CV#eSL$qq`lF=B=5S~ArfX}%Tu*)C( z)Vi{=+WSi;Gp0_RKY4;PItwPxJAGjBPOj zPW&&QWIV&@?*;w_c{kjrU%3DCuK!N1S~Yq!Pi@_`a>cUQ6WGNRAZ7-T z84!%+-cC@SW(7LrBestpM(8%Dmt8hVk6mE)i z9zM9f1=mdwmQ0%h^Q()|&jtRt`%{*`Z2hrn)%fu>)$lCwDD-_8T6b++xuSLX{K@sT z=>H%D2Z*IO#<=_$t9AG@nw?gyT{0L$C)ZY0S2s*+f!_?ytF3Qg_df=A@clo9zXsrs zT>&}{(BY5$CLMnDFG2|LE!)w1>MXqXWZ?4Q&aPwKM>cm~zkTv z_4qRb$QoptV(swf1kVnC zcH!`ct6JyJY*{+v!#%zDv?7Erx)#>e{p@EyV-$(}@o$03AI!fO@Q0VrZy#O?lSQ?| zVC$|us}R6t3*T>Y0n`)dkWfE5p8vzr1|r;w_15@CeSN`Hz;DvD<%?RJmg?%JPhY;{ zh=spP_%*2mg+-lp%Ct2{j(4(fcz;KC@1gGAuHMuA{X>^KJNECMUNd*zy!r4; z#<{asuUfTw_1^P)_rlL9gart!L7XeVvuF7wARA3#2G#%=;190-FsxZ4@MqeChCP_4 z2;iu#3VJm)@UEc+3zxTUKX$siedWrXA5I>Fw@WyGzdE^C;tzITwr+#9(?O3vdHGR> z`~iN$&;RN7zyJNMLk@#)&)T(Tiv!@2#qeq(J_^Bca!vK4W6i&;bg)uP;T3E7jBnI8 z7D%nFdw>422#k}D$>~VjfBy;le;!13#B?6gFRCePH3rW>lTy<-ZRyfw%UhRn{%UHf zAar*>Io{iI`<6341M~66P;1(5Tp#J} zIe1`yL+hrEli>AG5GKPX-WX>)4>fb=9`C$#o?rec{;#iZ#R~Wt9sHiQp{}k{KnoYT z@FYt|M}Pmo!L@5eOHB>@pm*W;@jrjP>ICq|C*T|aK7023`STYLM!$scErfr7zyW*+ z0J{9`a`}6HWHwH}&J6tX@kwrp%gISB}dT9wx(EJ6!+A_}kF&?Af!YPai&f&~gd)++gYX z<(HQ)UxF|&a1)*{={(ZexqZ#nlP9lVYinx@`o9(KPx>k*Pj8u8HFj*J1R-KKYZfd{ 
z`y5C435HKGdJQ3uSaGS_{)JM^zz~7`D_a5o^ zd*JYgJ0GwBz()aaIe?D_zy>iw@)-oc=Gvu8-WK4BRUfW--^C9;CcqPW6XCPv!TtNV zcIm+X)@|Rueci_xo?u}7IRK7+y>WT#DXf}1Kx|R>6oPXF2;UQMfB@j=7aoB3{^1|) z-Dgwq0{~Fu55AqhItlI%kt-|v#h8jEAI>csJ$ii2OwaxC!Gw02jJ^kH!bW75hu`$MfBKp5CzZT|SR*6)taQ)}BAX}X@@81YU zIjee9Q$JHu2IDamFB7dI(LHCfszP^1>eC*ir@s;DpVohVlOkTch z*&N{Sz7y`Ta2LX3k3R=M<^W$GvwF>%)1L|aStsD9z@Gyk@aF({_bywbpFjW~0DumE z)f?gbuE7gAu5776_Qt$Z1#eudnKa3{1UO*f4<36#{|Eg24g$`;%>5yL-@=st^5xJ0 zWU#4e`Uf*wrZ$2T6i3ZtAS!=d)4if@M65QzpNzd)RNp0@ORm^_R5M}n+<6NYE?&Ck zgV|F7zNJU*{_DT~+rRnz{gLt4+Z9-UH1u7<7kc6zbRTXw*UwzMWKu1AD;rYy?)Gc< z9R3D4f4IJc;Y0p{?hkK|AtJ8(L;ODZ1RUAj-p%lCi?NJ6m~_i4G5X%>)vMp*{5kmD zyL>3cxpl#~7v11zQ z;I|;`-9wibSJ#bgoKQC#-#6)oI(X%p7vSAPWqgNl?AW*8K5%m7rlX8M9ez0Jzj*Qd z`QUzJu(4t4>}`Imn|{9OKU0{-J4(f{fAJ2-%EG-eKP4G6^pz9|kiVcy+#`~h6m0fGbXjT?{t z5Ak#PbMZs>hbRc{kK^a@_vFdPANTE=UxN@b=Qp-_&a9d1)~{bT-SvOKANsyK5FGma zIT-z4|MKnIw=URvDU<`0X5i6m6#_JT?W{pYvqcE-w-H`LH+%m4dGqGZ!n=HMOK|r5 zNu$bEuKjTJvL#Cv&qvd|eps5h!P$Oc({JMrcy0lGABJZNevbPC=7Ia?&+&Ts>eWT4 zYyHH=I@pJBg9pI+i0hAOUP&RsYlTlC)r)#&FGYxiGkUq?NKN&`mYK8Xv`lG2zjx^H zKmGGR|Fgm$1JL2G>)PCg#zudBZD@cy!2Lkqd60P*48`2Jixw?}_2&F_(?GGwQ|V=Qx>!#e%~O(pYwt=qS6+XnyNiqr4C z`v@NkckZyf?iC=qK=6YXJNNC`y<<)Hhg)qEu(t^<0yvuA2=Vws0PjDz15e_^kCE_! 
zd=~cX`RJpSwPo8+_QKO>J;&ggeXtyXm)cx`2UuZ>=y3T104q23`S=Y#Wpl4L`3CU= z!M^$C%9TY7Lg&sfjG+&JyZ^*K1~&TX7DWr6_Ux^uA9ge|y%OP4!#?fz)b zo_%fm_O-R`-+!o~Va|s2aL)=KV1b9KR;_BqH<)z}T;@dtas53gQ5 z(6Z`u--#oK4jecK+_AYAK5_a5|L2g086SdgynE+%=gI!7BO|a4v)#y_cNf@w5D@Ny zxJwW|0W$!Mx8&f#-~JXZK@h(Hfan2n z4g7~cc>ICX3nopO)a3d=kw1q*oN<7{i}0JxGdxB3bnv_10{jX1@vFap`*XY)x-@_f z-5;Y0w%;CXd3g6u*Oh_OM*%qpJcJJd%k~hqW$xX9N9S+CA4WV55B&G_t%K{p?VlL| zM1Ra?U^{@L-Hqt40`y(Gwr3s(Qh{%9kiBWGwhUfN`ir_FM+Rj6_`=tjdma2>^GCps zeOWK#PH9^k(q+LUKclT;8`$U{MdakTYOH0qHL9xG590kK_yTGHB zMjJqA;_qv?VnhEY@R!^DG5mb~9z59m@XD3;Gv`j7yukmQE->kRl}HVYE?{RI7F1_JF;1XCmSJIWw?-2?%Ro z0YK6J%>&=pG;P}Z=mKN#_cihd{!hRU-CxedhwT0yYzD$x;BPXWItAWk2nHTUdHDME z8|V69HPm;`-MGfpXBRwlbGc*2lTVO8bb#0vA}{R$vKiQ0gWy7mO~LLyw6h8G@>}E& zUEo(fc3y1HCXA}|Id2KX`FHdF1MkCALDL#=-^B?dBTv!${rdBfk$reSd*6xEm#*wz z)4JwEc*`^dXB&~fl^!QD2#A`n%_ETKM&ldpU?)NZ}xzKALkGEUhvk3e(~YWzTp2Z1AhmAw7%2sLo#sJ4gNC3 zzu3Dla=xH(u``(fLWE<7Fl+ThP-c0*VR9JX)}7cQ)U zoh|l&QII>H0t3UN!B3klm|IS?xKpNb!PWen*5?%li3AY%+7ekYN?I)D!!KV_x` z3zw~gp9|oQA|q}w1h^BvZy#>ruwBx9&g_aqAhRJH#Wy0@LPcsce#LUhVNlYYNPGal z_}xE#DXc;y7wGVZ;=-$tJ6fQ&*270;z=yE3kfJa<&O0N#)0rC`{CxfldoQ1%|1&H; zU|9Z{8vX{+|0(=A@VWeLeEfK^10dWSK5*g$>`aXs4Vb|L(2jds z0-?PfuK94b@mIdi(igv}1N%4;IG#pS;v1P`H4|uS)OqZQ_fP#)OD@9=Etr9Q2k0z9 zWDkfJf&YLm(C2S(@bJFrP}{achY&-~mHLVNokf4C7=XX8z5@TZ%iDV4YhQln&D(#G zreJh`k_%jO`SL(Z3*K?Nb@PT8fXzn?(MON)ITctcKZ5=1Co@_eEBwiK;WPjSuK`~$ z{t&<*e}jX2R+#8>{NIc%8|J~ATHr!(^X3gZH*a1ApPM^64vvfr597WI+w^+@cRjqN zFH_6IuT23SW|NO2oebXPnv;s*MKsAdskv*Yj*6I1|JGu-l4~z zf}n%ny-mCL&L6{%Klc*Ye=+$$^naK8TUuZ%`~F>T3mP_DZr_F^HvvA-3EsPhUT{;( z1YIp4tFaG%Ym%Bh*e>f`;!xRi&Z8PjKwoC{2 zcjpce2HVW+4lry5I{VQ;-UIj_&ck4WY#W*Xllb%3z~KSp&tob4F~_%elkfjz`W*i! 
z^XK3<@)e`c!w;YOQaAp-{Gv^CeGCoM-U%8~xJk>2)#xLpsn$0}uh`h?^6TINy(NgW z3*%k^;vz)kkF9@seOEg@mnTI;|0hnvxXTMTY-0Su6%Aj1@b#}|_T_hfrRn$K<_7~8 zrcY=1u~`-;T6nPQ!GlMS9zS{r6EN;TvyJK7z~3Fhc>%pxFV!MxdEL zk2?8EPvwtWadg2$?cN2~TYUL@@6n^37jQy!r(e(enP=aJu>S2oKfMJf*vC(tJ8sNZ zK6!%E@BMph>FfS;AMeBUw!m-gw)y1z3o06!{BOD;opYORcKk39GTb9uPzV zVFq^Y#KA70y9t9c@W1}6kw0f{9GZPTS$* z$F~2Z#mC3$-8b-Vn;%>}H~lOy2kT__IX~x7XOBF*8HeHS-J5ss-G2c5Jpuka2N(}P z{ydh?AE$3{aBpqxQFz_R(2)O!X*>>(^2rxIfjZFl_RO z3I>cOWsHJYlC47jm)~$)AUufx*MR;uG`j{g^LN7s3}cMILD=x!+0ek&zk(TUxc-1! zZ@Yx`FW`4`7VsBg@c|R<=KJ^hruV^K^=Gg$c4lB)YTSdnzTjUmxT_HGkhe!}-MVpH z;EydqbO7S6xW5hPKMdooLPp~x3^zl*U{5o#C!Cz$zQeyeU}bx45dL=+zJ74Cb^z}4 zj$FTS^A`MxRf)gDK7PQTgda5IZoEkFG2BGR6#iru;u{tI9i8 zX9&0k0Rlb#y0>hRuKxtQFgiE{g5&SXpR@(}Lpm9L(!ICe{My8ydGX=!cXk6VG4R3< zm%)G=EHiNZ;VgS05lqfj%=!LHSBC~JUtPImKRk)RF9CG`YPMngYe1NSfj#YbNpniz1KZ{LD)H%10wuNl|_e+L`SIQKI8a3KQt zK>>Uk|H&Zy+V*&FZ%@zS(HLgTXgz!w+rx`aiL0iPj%48Ze~D&G3ZTQ&P#VZQ2` z=iHn{&Svrb$md^w@vBcB-M@X~#up-gaJQpuWaQ*YKoI!@e~PuD=X0Fjfu%Sis?>Pw zjZ}e*x>mh5b;d-xB(wn0)?2@Gmci~Ej6TrFAKNGU=IgIu(!y6=!P|LS-+Ldz2aOH0 z7c5=7bI;BlU0sI{cPxVk7T#aCR>F_3eF39cJ2-yw&0qc67vb7-toP)Z(|vt|&h`pk z=)tou@E_pFZhRnpuw({$!H+KLgqKdkn;5n*{zkus?-TsdkA4KNLU#eg`!LEbjBXYD z`G=oB9zN5tskLQtBmCZ_X3`Otzh56+;PW?|^Y_WKuYU8{#}Dq@y#4u7A3($p_!~Ki zFRAK#)BU=n-h*Zy9@@^cz^K zef>ET{rdHp{y9wDbo-GrL0?%~-ZZst{RYwdar}P$>l@(zIDhiu*YkePqgU>Q9`XlQ zfZL9CA3KAdj|GQ7#-BU$GWZ<+M$dy6CLCyX=o|g#;_ED5du>K%r|@$m|D`Zr9fL4? 
zHoWHV^*7#l1Im1V*M^nw2I$?Br#kn`;NLcvzfXG>`TTkQ?vy$sWCO{2yMIncN?nd?ozu zUp#c^@a9b}f4J&!2!xxy?79zppAX>Z(ao!N?A)=uu5R>e5`7+tue}EM{MlO+X1xx< zOP3-it_{PZh1cLWSN#_cw5@{2qxk&$=~==5ZFc?N&6}S-`||ToA3wNr>(c;#?)!Pa z@SL9uAJYy;)E{3ZRzRb%RczB}biNcyFa5)qB>_S2DiHX?5a6#J?vXlE@HdX;*RPBG zZM$*L^?xf|k1=c3*3COk9ol>N(D&Stg+G)1z3-JBcP{e$y%+1^1Kr>KbNlwq8?#At ze+)o}KAd_nFbDealhKIXfBSzVm|`5P+}6_r7a3hgjvVSZcji=2*CLO<0p|a}$!+a% z>BDDmKIIPZ$&ljz`0Js+aOd7@2kQJ0V*@H4%5fVB=se}z;!!T6xhz7IN5x_%t;OdnXHPct^J$nIusDzK?!80po zztm?)rvp$K3#Hp5ESpZyX2s_?eME*zx=IldbSzv|lyGEQ4J* zV{F6<)TNbO7cYJR0RGCx9|Q0VQ@y8Wo1op)E!Q8o{%?DGH@yF^qhsU7O$#PYob>L* zCS|xcBYwjlj2Zh8+fA0;UvTz?4VU-7^PN9?-Q~}rk1c@rfAGOH2tS0NG?B!iz#pFI z68?9nKRLbxUYd(Za_Ddz;Jt_6nD~44>H{YmlW@9HZY2)=@IS)=>>1bcE+ATXu%fWX6z~SMUW5$&4Haz;L zxIg3%!w*=Piec{_2u^CD{#E#En6m}K)-7B1Y+z62D)PW#hwA{p_7}ix{p-xXJw3aZ zv3Vot|CC=^D>chM!%oboR21p35M5e<{J~$Hp5HK^c|ZeyPrq5F>*~APZry?T30&Zn z?Y>>l-**7OVeo$rfav~gYhQe_#pSP1r0MHb`NPv{gy5f^5<7$r@X5o6Umf%upwC|q zq&1lNvs7zsGrkc|MmYiTFo)4%wn>BnimeE7&L0q{<8K*k8n7@Zl<}hz-e125{lk{c zoAJjYZZPooo$qgE{tp2(&%W#t7g&6h8h<7M8ya6Q2l()dAb&S*Ab*WE{>;_d7(-jU zlI+XG2P*h!sn+=##%tBw-<;CGL46D z5BkZ&&sF{qJCDDc+WFT!F3jhe+hhd|EI2lUF>i`Y+*?~U8*}h?(0yFvsR1=@vXMUk z5a(Y6@LbN>xAgeaOW)`?z(>FGH(=QO%e)@v@1})6Q?=$n<8a0(>GG#9$&K1k=Rn~(+ygVHn}-!ZL1zogB?AWz_}XygvAHk zy4zY<()6_he*tv``Lssn06%%=T>!@5FLIlHpM2q+m~YXXiilXC&G1I841UAmyX5eP zKlDZar{NDD7k?II_0iB^S%f%n~9S6brIQ@#V zuk8L_C4aJjinc)`IKW?j`Zec|xxX^F{=0co;V)QHs#+X0$$$s3jbcHF%N55r$~{u5 z4Q&j;-}8v>F9d%Mfn#@Z{4|RXcyeZ&eugY;D$c(w2qVLj{7=;gd(fYKHS8`xIDg>x zeEzn<{WmPHmh|f^ic#fH>QF?ZR8~3Skd&7DAjka6 z`T4VP8&APasw>xpho0GUFV5`ys`(St4Qi_zJDz^^`4{d2*uk%i&AJ|cw}8KTryfth z+|?>M5-1r&OgNRuM&a9}bQKT1?gpHrZ?y9_#Q0n8R}?B1l0W3|FaG(VehlbX2Qj7WN`9EJHoqWE!sB~i^IzS75w*>yYadAp2|8j+u zWalt{qYg2zL(z?@3`(za_m}ZEZ1R5&f4r8kpAq;AlEt1D{2u(F@ud|(V-ZqB{x zL&q~L&$Z9tmpIQmu}tlC({Jh}vp3jE>Yc?y3o@Rwj*me8M>;tvPheCEEfDUL2;^N4ewwQVi*x-4@3L=b zz~@6phB3y&a{TGmaInT4DcRx=)U};2Hy(Qa%`>(DcKEAh{M};wty!~m>zWzM->IvS zD&i%jM1D{xqZzT-6k(Tb)tZg`w 
zkKU)V7sGVvPQdud1nO>FR>NY3lJeJj~}_ z{2T`UTBCO^gZx?4ZGylwdhzrnJb_SFHfq!;yy|oLD=Vw1>pu0xNKe}|>G}_f@Pb-} z3uJ^=wavMSc^EAc8P0TkRYUiV9jzZOtEuMxFTmf3t+xhSCiydt4D!c}hn~J{2mbJd z&luRMqZjb2sqQ}Y)#VfWCko47rkqz&r(=PPg{0n0p#EgWRW|Q9(bIFGdj|Ve9?riq zfAAWpR-HCE(z3~)NnNHb8sB{e{LP&+dzQf8{?RpWmsfP3emropH9)XeQm1EujEq0i zspM@8oT7VX&dI2!Ikvia+u4gJ4|lh)AG109!J5STAu{l%mI-s5RsQtqx}bpZ_KTr; z*N3iNzIwE0+qNCMb}wAGXvezcHB}8Wr>$DEGN_ziGHAmoiv^9z#RSaJjl8V3Wd6oT zU23SGu=UiHk^Zh3Rn>sq&aSRb_<Rh6k?=-@reSbNlw)hj&ixTrhQN zGZuqjE?GRI*xx{h5;8_U)2S7W%}e*~9bj7qw{Bg(+J6y#Qr%qY6NKSQ{qPbY;O~}FPq@Pj@Fxo>YpbX$7)OSey+3c}>}7lV zhOP}>=|A7scW}cL!EP?eGD+%6#o!%-wS;%c{nK`3nI=&G$P*e~xU=wL5kWocUI+Y}i;<+gP4^B-` z0gV%G=>u`OFM|TtKaS=`?B)^S)@oiBd#CCxgd@&d4RwR-2De z-xyDQvtSP=7olR@e-Q%HS2&iUDKzT&%ZCM*KuH+kpgV7&opctU(5uR-&fE&gC~(wrktN}JKr-}o$8OX4?}tEKsjZ`4%gBxQ_0 z<9Vjh5P!Jimn^nUnd7gS-$BzUS$alYFTFIIFKQ}~vB{=i9!Bq^IR6i8UwkVgw*q=Y9xvf7`1d6CB)S8EQ&Ef0W-^= zqLGYPs*N&Y%Bd8|sI(ei-3;?5X{0J-H;SqRO1^5NikRX8mHvvoO|Yb-pTGX#jf<`F zH1AV18sh1nf7#jkXXl-~A4#h*Kv7aG_|mvBUm;DKVyl6Yp^ah_LOdh<`7&q}N~^J8 zENC?Hk_<0u9STF&b-zvgm$^~oUEtx0w#gdc! 
zqZAtTh!ugu~>FlLrNt-h`ZdI3w$$obQr$}2S{Ct$ky(~OKqkxrv@89l^wr}B!88s#0g z$|TmfKwo;vm~#GtGWh4I8MSm71;m2q`KpZxshf8G0y2n*rQ9eY7C4nxZwz!Pj`Zp0 zPmw_uq4bQ>>?2HBtlzRxMl9e0r89jMB^7b)CnNRNFPUoo{wsylEASl(qyQ;E3XlS% z04YEUkOHItDL@L40;B*bKnjooqyQ;E3XlS%04YEUkOHItDL@L40;B*bKnjooqyQ;E z3XlS%04YEUkOHItDL@L40;B*bKnjooqyQ;E3XlS%04YEUkOHItDL@L40;B*bKnjoo zqyQ;E3XlS%04YEUkOHItDL@L40;B*bKnjooqyQ;E3XlS%04YEUkOHItDL@L40;B*b zKnjooqyQ;E3XlS%04YEUkOHItDL@L40;B*bKnjooqyQ;E3XlS%04YEUkOHItDL@L4 z0;B*bKnjooqyQ;E3XlS%04YEUkOHItDL@L40;B*bKnjooqyQ;E3XlS%04YEUkOHIt zDL@L40;B*bKnjooqyQ;E3XlS%04YEUkOHItDL@L40;B*bKnjooqyQ;E3XlS%04YEU zkOHItDL@L40;B*bKnjooqyQ;E3XlS%04YEUkOHItDL@L40;B*bKnjooqyQ;E3XlS% z04YEUkOHItDL@L40;B*bKnjooqyQ;E3XlS%04YEUkOHItDL@L40;B*bKnjooqyQ;E z3XlS%04YEUkOHItDL@L40;B*bKnjooqyQ;E3XlS%04YEUkOHItDL@L40;B*bKnjoo zqyQ;E3XlS%04YEUkOHItDL@L40;B*bKnjooqyQ;E3XlS%04YEUkOHItDL@L40;B*b zKnjooqyQ;E3XlS%04YEUkOHItDL@L40;B*bKnjooqyQ;E3XlS%04YEUkOHItDL@L4 z0;B*bKnjooqyQ;E3XlS%04YEUkOHItDL@L40;B*bKnjooqyQ;E3XlS%04YEUkOHIt zDL@L40;B*bKnjooqyQ;E3XlS%04YEUkOHItDL@L40;B*bKnjooqyQ;E3XlS%04YEU zkOHItDL@L40;B*bKnjooqyQ;E3XlS%04YEUkOHItDL@L40;B*bKnjooqyQ;E3XlS% z04YEUkOHItDL@L40;B*bKnjooqyQ;E3XlS%04YEUkOHItDL@L40;B*bKnjooqyQ;E z3XlS%04YEUkOHItDL@L40;B*bKnjooqyQ;E3XlS%04YEUkOHItDL@L40;B*bKnjoo zqyQ;E3XlS%z@L}`e?}qo3Zxm?H^x#(xdLwxe<>fqba$WlOZWJtz4xi+@5gCZpZcF} z{>IS&rd)xP^EZwNOgDh3=Z^qPHGt{ok6d7y0n7q_L|}Re%m#ln38$67jPOS;Fr@%y zhCgzFsU$Ev{JAc09ECI~@Z&V!|NXJ5G`IjYiH=KJfo$;ylW<9^O`TH}$QXa>>CthC zE08(<#+SI@)ICXo?D03A2$V*+v=6e#9}!sE%!$3rCV%7t#W61Bct-gn7g);B34Kd7 ze?JjUgh$8oz!T`Rlod!ff0}ug;|HImtULA1Q6S~~1tAmo3mR$EOHF~)^QU?_i9gj? 
zYU)#e($AmbG<>M3^fpcnwb>*iZlslhd&oGJ%8Nzr-@dm zvkvfAl&ip>r1|;hpD3`Q&PtMhXf^uT{zPLz`dCWA)yNco=;bVmS&f!*g*M?x7rKpH z^&9vrc)v2n-*_8;c4Mv)Dwy3LdT8R+kmz?I7%SL$vx>9E-}rKyURI;6U_oulm>0Ay zedS6IltX=)WKy%l-;Wi^o&!9I^J>W$fu0jzUCON3kq{7m>K?j`~W}`WXPY<4Q>B5{%?YL|;-{yYzn}8+7M&l98@!y$U#Ypk z;DGZJE5c{6aa&5#9ZENUt|trN2M$n6pvPZH5vUvQq{rD@pRRRFnz^D_mX!1702t)Y z0HDBM2~EP<(T+8maSCZ$wxqWhd=EctNP+dG)}%cdKb3c2ZtF@(TYn&Cfso zwA^=p7}H$^@`W&4?E1(lG1NmzD(s;@?B(TtJQ(w>G5h0I38jOT=a&+*Lvpcn^C#nH zAkaHu8)C)fPa1eD7$P>_iG+=Hu0re<@vGqNQb!KXn}<2vPw-IFzGSqntILqx?r* znTHdoFm4t3=-`yiB!7|%6g5J`i~w4VVl8>a;|{e_j`u-HF7_esM(odQ@+Y}KK^umc zi9oASs3f0gvH2);d--U=%4U^6J%KJ_dIGJ+yoF{J6dH%(*PpL0+2v0+344g?CSj{F zU!AdhVss=|Q@&lwTb5Mw_aoi@i;yPX4`E9Ah-qA))tI-?3E~GkD~Z;Y4}|&Zl5YNf z`AdN|O;iLBZ^Vg+1ze!jn6J(y157@!1f#C^M_*3!BPwGt27C4FgG{Mpt;4hjBRD7VIV1U2C3@v1i91V z5GarZ{vx_S6M;eg5_N$I@`j_BV0H1U3g8quAshVVB2Zd~sF#6UGaY|Py1@9u94JZr zmBeGKKl-H~G^<}RNhVN1%^^@pdA#h?tZu{&ErM&A|~>N;Z4MK`umg z^9e<2&0I}bOV;=cI|*y_sjSo_@+7N~XA1opX^<1oZ0%;G)~q#!GG>myTrQB^57A0f z2|;VXM61zQ%4~=<%!y_;_sO+HXf0=5IWuICzuYcRuL>e&1kh^K*OwzT!Z0VAHAlC= z>^CRI{Zv1eyRKY0vdCX97ifTtA!Z`bYRo0F!S0V^F=K|#h{;ZOoRl106N|-(XOll0 zfk={8DHpLAMs0PW5_61hvgS}d3TCo(BA7d3N>=$ZPr?pn8ksnMT0E^rO=Y2zY@?f` zEp&%887=+LN^Z;GCTCLGjCg45N5pzG*;CHn+tCP=5L4Y;e4|F!2ua4# zl?>`qR9K}-`Sb@7x}(dguXL0yYs9qF^Jgbec7bdcPsB_TOEh3Syiv*-`M@x^O0rX} zk{J3ha;MY++LqKzF^*HsUm0Rz2T)jtfI6684uWK|0gu8Og?y2t+JV(1dKGJIxs$xo zu_L-RVprxDOh12N3B+>_A|@~v-{{L3J*FF1O`24(#-1ZdDjw@V(?G0xltNkHFPsa+ z6W1W-d%5^VMbk)!nsL=8DV1vTm=l%KF%C6#$EYPxBqRKVn1u1vwTN*7;~N92Mo7?% zt1d}bqb46_({oH>#u8~l0xPjeNBasYr#6&qod zRW`=r8$%9K0nNTi(xhOId2qj}0!w!@%wn{W?4F$`m z^al@>krpyuGP+T0-yYWk zXfIO?tEOLxHJfVx_cK`rnLrMpn8%|?OyqITsADX?kyoGdQ!~tvl(SgDGidsiT$}0U zPq`qI0d!rUs8v{sf&>aBBO1l_t#QpbmjZs3ntoZ&Ba$)Y{3$Fb05SqaIl@z(Vhn#+(18x$4JBt`dzE_rcXT8R9_ zpLs9wX95*m`>HAxK*;19g%Uh!HS(f7v;656l@s|9uQ#2aF8vz(68&0a#Q(vV`z$B` zA`iZi(;<9Xjc!>XyA)+*r4-@7Y83L7ViduHerfniw6{MqdzaF;i2e^$@rGLl(9z2? 
z>g2N;S-rYaEDejsGa5^kf^*@r_m0ftG}X_(q|Ul8T!7 zOSqdU=Wld?!1yfKOIBOV)v=8Uk7i=k5q}}3-|xIO8iOo)d=_$*Gy@1FV;beYm&{So z@T*F_|5E{s&qB_UM#!*aJY&faCv?ONA6SHZiA1x&pX34Kvk;=B0Wzm#9AhG5m~bUF z{t|AgDii)snu1*x^g`ur)HBBU(@w%xqrRK3HkJ4bbM1$DUT4>T@-?7!xDzny2=uCr zY<#t(>02lMLi0B!2w;2`!j=qrIiX}kqn?0Qn`+@N`HrRf`tS8IK>*{k5V~Z*%ZVkU z8IvE-{A(os!u!85K7p?A@mYvaQh`h^8O4}CEy>ql<}dlS(!Krw{}%vIVL_HGF{8|w zQf&mI+`ZB`BK{)zKc7H0eDPU`R#Jp4P%^Z!G>DUXLHtGKk59s=V|*5(mh_hznvx-n z^2nFQk&(YbbT94e4>kjPE>O>3NERYfjCywOx+G15)^6{PK-haKxu%;&5>Bp0Zge^EFGK!b$wjZVdeYV$D~t1P^M_zTOr z%%5`=XdEAetN=!Y%vfzcMoXoIHJSJuRfuLY8$a`w_@hW|cp|1qwRHXCYikoeT6zMm7o^FJ?46e+dAXYX08fw?4F= z-m}sE;KgSlWJ!&e^GilG`u#3yECPS|1DJ09-g!q$9#7;1dczl=h0rAfkclOu8NF^7 zHxh-vya7x*e|7>@03{aGQBBCGGA30U$*6X)NXL=*%NM}(^B2kmxW}liT^)#k@yrd;rZ-#it0! zHdmX=SlawK_zC}A&Y6I>w2F9&}ffO!p?xp1wGF{{yBXJIXz58Ef;cN%72 zasEAlQ*eMk3xIeQnd8rMfjii>pK$%x3j*O+RPleeIDl3b@-bS<)j3C3e~&v`T^0Z{>Hpq&N5oIK{$q1Q!2jL8%>cBrkhf9Kkz1|F1v=(@VTw`S%HNw~ zSDeNmUjH#=Pn>|AE(V~(-*K0}9y5SH2>Y&qQq1=TxH@eB{CxMP@^{h!(AodH1p)X& z0Ie+KZ`1lAD@8TYZSa;1XD1Ze|`UV#}1$sl<-ECK&hl^l=ur8jfm(BQQ6ehfoTqY0sdTR z@%j$~UIQxp830T@e*qV0#EK$ShCe-k8Q@QLfi8X;_b2iP9uWQC@!qbk4V%`i`DpRdMT=(7sjRB0n>cmm?3OCH zYby`86_iLu$&slgC8NrpVl*IPN=|}5!0+rC4C?yBU;Z-xcN_pz{a?>9-8Enq_!C_q z$Isz!^_rP8>oC;Af3+-D3y+;tRg{lAcwqgE6|IYwuidz1|MtyW*00~XYhKfo2~``m z?mlsDQ*(7WtcQgmvw{-QC{!FQDHwzNNk$WBrgY(KLPVun-m0vwS+H^2=J(&PY?wS@ z_J@m>wXWZ?Y1>D;cdT5qc=^nAeSHhzyNd$XGMQ2Cx*0QO%-rK&0QMZy0hkT`JOVxZ zdXDc|JEys+v9YOn!bAuYnwy&7sJ^bIvK;;l_5aV_dA~=IocrGY!2RJod+y`MKKIzi zXOqM6+RHk~Yp>Z|VNlLOWF#RZfe1<@2?am`79o^#07)YW<$xp*lE^GgsQO=B>~{Q7O%Xb^TW^_g?PnALt*vIyN~o zKXxH|XG&yA-SFgRwMy+5$v6@lDA6F$EAs%gLHFh7<75H9Md}d8! 
zTSr|*LfWp}lNVb1hzA7yy4yRudzxLYW06tOG4Y8>$tfT>Q#?62H6^=-F$gzbnmu4} z@%NfOL~hr@HVhhmT{YK{!r)-t)Wq26=;+nqVZ?9l zM&GFeTazQp>c%Genlqy}(e*&qP`03WZ)A`Ob26ilKh3Bk#G?&%G9G>NC5Fc&zU z@2jhbOWmGb+dex#Jr2UTTz!{&`vylVT&}99s7M&SBqrhS9pz3ue)K#nzd#Fgjkorp z8^oJ`&wv-uh2L>4`X3P1;xe;Zrx?7YmIZPzv;Ko~_R9JnS%|vUar)rqEn7CHB7d<_ zkr7c*(FXjuYC-ZV* zvx1KT$o^{2!==(vagl;lp1%zViHB>h+;r7d#Kdpj(>QYX-p%>R@ev@fzi(hgXBR7%LH|L08@wmzcK!U4E3G4bHVi3fvq6_K%t8J)|YfA-<6g_-fu z(V@YCp^>p-7y!nDB@z1H&1oqK3Vq7*GbM0lhG!4s8h2`JYHD)r0;t2U!+18h&GScp zfTRmX{(!)^B6BA-0p$D!&{-&lThDRr+CLuC4HB)z?B0eT#WMp=>VYj|505W|7 zzvbKrknsf>`|GcsetdsvVRB;R>d^3L6Y{q$ zDjx6yiDQddU|H&Dp1!kGXrW>no%&OaNFnCncS2htM^Un3Q{V`R0x5^W6vb@5{~2$;r)3%7zI@bX-DGQbJ4-EWlXtH#2sH zBap*Ri%zap01f!*_=BggAQTrD7I&XYJw-88HNdq{66WU0Bj87x5D!DZq}IVYrTN1k zGy;AzGmvmu%%8HW`0M@m?Erkw7C>0A@F@h8KMuf6z~2$%uYDjXDX9`DnVhVI7z-{} zIZQraZK06Ik6r#n2RJj+k~vy`wy?OEw$?dO|J%{gz7_#|>800SFUrUuV=ew8W!LVE zjEwzxdBw#=Mdjt?aAqImC7dGwYW&eVg0A7?Z)KY{MUy#5I!u3+hBxqsT7mxyH(+4{ z%KH$!A&@T_SIlgJdt)H|AN8-q14e>xv**v1kQfuO3Gvh1AL9=vAu}z}`!Ae7UsY9B zR(9k_VP0NdW@hH@tQ5XIF`f+}>GhXhdT9;JKhn~mn!uGl6k#Zau)VZVd8AkK>h-D7 z@yU_zZJ?(W8yy`T6;aOo-`rgL#!b-)@iEbtVEU`HJK+!AAMt-Zmi$?=fc!ZH5Z7rf z3yvoXx|Vp-oB`~A;mAt&`;Nw6Ke)VoCjVFG8Z3$3w5b5`qW~60MMg%)l80O4;x@%3 zrzUSLsAEon?$*=O*;sw7qKN!gfq_KJ(Um|b+Mm8Z{YNhl!?Z~{Ga%Cg?gK{)KwR^s zrq)ZLHQ06dsx(7^GB9uS6r@q;k^5cAjDi~cXn z?Efr5A^dqopoSFU2SH2F$2>6xZv2OJQ2=zuU#V-z6&XRipW^=t74ajG!>CPLvh%AN zE-!w4tF!rooL#$i?ATE-bga6ut*P$Rv>{+i6Tr#Gi<^#E;Wssf_?;@b)YNk6%)!FW z{Eg{AV^8npD_8pZ`v+Xr`;}8?Up#+vWgFvDl201=Yf}9mxsX{ICgxAfiNc>ZE>NY6 z0A>v!HLH8;BZ@J7*qh+_`k!(z3LwW{w#fgLx&~cG75r%b%lKoBkaZTE9^CwKrLMld zq48YS#?tA8ywi2u^o&Z4zKWc3G zn=6Y=No~;h!|pfCz+avfe?m{i`18gEGLjg7!h%WvOZH&9>%LiDU83JNJmW9hH8j}I zdR5ZP8u{CQQE4DLR>~@!o=(_zuA#l7qxnp;9zeCLqh9dE0Qe&~xLOfIWctF1+QzoF z*4D=J`M_UimyW-SV*X(ILBf1QavtC}sI-)q{0TtO`1685<>-td_!g0)n!PrPGB`^k z0J`&kNv^>`*A)doTmC41o#oJE2?suCYzF{aYcJ|uAlKZ8;0pmT&62*U>5C=x?TBAX z;|GTrf2#YNamQbIJi5O+)*3qh*FpMU8~-N&h3C)nPM98E1q>ssM%p)L))HCF*2s49 
z!#4uxo-8>>;}4^2jK7|Xwb1vrwYIc0H`Xx!r{U+8zj;`E zvEuKt2mBd9(edZG3nXN43}Mtri_UCkG}`>S?FoN%gTsR^&;1|eub{TMjRM$Id$C9B zgx3UrhDxZLH6@52IKP(Wrlt#pivRP5zY*k*Ex*L;{GZC-215(#NYHa);Ll?hh)2*D zYXVSj_Pk1lJ$>6G{x6oT|Md&!A4#RI;X%a#(nS`l@gMDf75*+Ywik+;ngPGYnj@4y4nGV3M+blKIX{E=smm``{25r# zlb|yi`SaKX3i#6%>;_MpD)|PSF*o`@0e@}m;s<^Mf6M`T!Czf-a|_Tiu{Qq%nSXf0--L(!=~yr}HfJ>R z=P7|^z~C@^>zj`1v60`=$GE`%L32z_w)6aDBY)HZdd^?|r6vx*wuaMm8p0ZzeFuoY zmtJ4DZsS%Ad$aar?%9hW|L~!M2lDa`9x5%13=c>4Fm$e00Bo*mYNta#;{L$@9VYzo z__^_a^Ya8hHvJ{;Pg{Rc=N}tL(`zkfB0&R1#Gi+qu&Mv`{HSi()#~nJOw8ZZl#ah7 z;7@UY_WaS?uxS6AUyJyG1Ehn%&gNRO62rJ--+|$8cg0CQz*UU={QCO(>gsdw|M1Rq ziXQn~qyY9*H35EP<_n`gLSPN`fA0A+cE5~2vizd*7iQp3gG|7mYSaVxrYGU2H2hjv ztmXnmW|byeqq>(LBD((fGLHWU8G8Qe2Co8tI0Y8-msd+0i68$Rs%>d$CG^207%yqH zx3|JHgmK5d1H#`rc>HW(VGP3f_$Qxy3gNTQKKmTP7Z9F28DIYk4ZuYLV9(jXf!?0( zF1V`Q2@nzjFM8_#H2%o!LtTE+@fW7>R}~gU{U0>3(te`GW=5XB?|FB<@;BmgJvpk_ zcd9x4z@1Wr-v1$gNdo>#UGN|SPJt*;DU#TdOF)1PnS=_`m-1QkIp_A ze-yv4CbDt>8`S^t{3(!Ga6+1!PETB2P5yWQ!T)jm@gDF70Wf1??&jk41*qfX*eE=CaHXfK zqv4c^zggk>GxLA*)cvvL7cqa;od3gyf#$YsA4DNRB@ih?!~n3@UW19%_rL#xAN=5l zKm5^;;L(~N|MR|M3%|;@!T*hd|4S*o(AeDGeR*JLY+`2q`puO)_wO%F9HaMOW#jS(xW~4( zz|4QQJbn!RkLM3NU<8nMzj~j!unsu|5@;t zc0l77nXF@1z%R!N)8} zGm$6ytr`rCgh1er07%Ao@MJ4?z#M?A2WDOHtLtq2AAN0u)j`QS9h@00a|sO|`oFs2 zQQiTX{2%ZK(~b`Eq(o8C3Fuf8a!bzEcJ%^(V^g#9i_5^@!{tfh|0sVvfc*m`xM+au zrx0q6m6V`^%;G$x-bJdo!5(p0S$j@kxCCL~>f^_E!T%Zg<41r9;ID8N4DW};9vA`C z^}pw<4`=VLs5q|NnI9jYeB^9(Q92$=4V&JLEH}G9W%-5Yub271qep=~w4|Ra-VX#4 z0P(>&%Ya78-@oZPU?YFf1*;Aa$AG|}U9rHQo!Pn3@A@P9fDBz7pSrfNxP0gCg9jr1s0TCxh|9P`G>{u08z8{* z&82%_B^kSH(r2GIQBqP;Utd>87JU_XT}U$WhaE5vAPxZe5#W~!fNctY0AQy!{yPsh zPjs}k;t%ROM0hw*^b4LpW%j{Mf9dr%nExVw@9ZgypzKlp%8QHg_oia2 zKx112NQ?;Jn=sL3vs@ej3IL>Y5cGgQrTkeJ>&a1t#JH=kRVH&fwqtDMj`#5T8y)}6 zsN=sWn22zpbdbDDV$el9V1+*%#bO~eAb(|>Hz$#;tSr3Zke!}!?C2?Y{Qu_iZQ$=V z@Be83+o$m74!~g;fMOV-VYCSX<&(e%A3T4<0{++xM4f^t00Mv10p6^9zq_-et>t8C z$%%94#%E?{rY0v~MB4#3Bs4eH?Z<~#AmnDJ!_$d~5CopTX;zauoFl5cgjE54b*0dO 
z7r;+7B7%57^nd4%6c-g1?Aw}1__HJs`AbYx_+ukLG6gXLsLnx17yRm*;Qt_)+g(W^ zRx7h}qvi38hPas6*tDIQPy?9JWRmrlnQCB;t_*YVK{VSnI*L5QI1!sG%3=Cf#9UE_MZR>1rDQ};<0sO)I_ofYh-U2uZ&&QL%THU~(c?78VKaM}* z0b6g}ICEwn`w=PM=!5_?IjyF64{EQv2zySh)@0y;xBrh8(D6FXO>TfSQ+SqtvTO5uP_bL3{ z8at`u4`#nr)klvW#rsZaP#!*_goXis_}J0x?1x`}{nb}r{ds{hxczuZZQ0Qy#Q@;p zLwh$<0?n;NyGd-LaNth}AU6l$mtkJ{DfNGlRaC52u+WT0Btevy*@QVo83bu@3A^(P zi}G_7{-DT&xEOpjVnTT1Ym6<&W*PK?}SZ`l}|v^!^i<+(=J z7k6B34d?e8oJ(9>?&+SHp&`-(lL;_HU==eKU3m0wc>tXPR2pRz-bk?6Zk%dzf zK5ROoX@_JE7;VHe*W+ zuy%prXITEz^M_^r=}&Wn!JWI#mBSzq06chLZ<^JBOl@qAKeh-u`#m`taA?q#m4)x&Ad3&w z_aTEjE?r-s{Eckeuwg^e@!AD=Pjq)KQ3ZKHS!!y*wk`O!ytFNZmE7F!wy_~L z{4?_BDlUe{=-_$Uq=bZMo~7z$?J#&CxK85h? z*|X=*bEcpF9>O0WXaL^2W#(_FB>7id|66`&H}P~Te(df8HbzAMH-Y{Syx&&@zd!x) zkAL{XT*G1Li__D~_MJok^Ky5mpc6E;l}bVwUTov1AZ!(yT?SFEgD3#Ltoc7tv7G-C znbo6uZ8V*LAYsSeL;3jyg~dk>0DlU5ZEb)1+u#3g&mWEeO98j-d$SVYMNg#bB`$FL z2e)+ml{G$r@WriLE16?B?!pz|Pd*vPI|4>W7v|y4gcj(nkDtDLxpS(vw$|+bih*Jb zQQI;ycf@Sm7_Ea4f!ws+?SJ{5)TL(+Dj!t#2+|7b{P(bfb>EL1@PscsQoV~!ceT9KkK84 z@~Tx58BNDAB*Z1|fLq|e`xOtFx+*T32Empu&dkR=!w0=MhX;hj+& zf7_~WFH-+EivDi{;qMETKZ@TcpTG?PxGI8!vqum<{P05{<{^MHH9Dqrz%Ezu(G$ma zVIef6=k3o+gJ|Z;@-nU*HsZh1s;a8eyLa#2!vOSg{Jj-kSX2vz<3aFX)^{PmA~XcW z0X_r&rvSLL^oQkTr3=1w>((Ice+hrkJ3N}$$@tr_Au_w91Xf}q;^OEc5JJfKV~`?$ zSE>Kg{x@lO zY#jI_4iMem9=LoKLtG#E*mm~TO(TDBsrA;%N;djG;{6DI$RCFv#-{wS9vJ4pY!Sxb0hNov-~7fqYE~tpN5g2<5$N2g zKZtHCK5k2TF2S#SH)XHoFMs(9;)nd91N__HP~sl=r?Ew2t}%R}EUrPjVsVy|b?ETc zc=A>@v~N3i?K(I9L;r_Y`d27_V)qB$PsI=UyMO=Qy_-Fi@NNqV9(1~q1j3!ijvd>% zArs+)!0=mK1pfy?!B63DG%>HNqM{kSt0xYS0}%M506wNez}!i6fh#NM|2A$&PAoik zo`mL;X(=0%Hz#CdZ{D1ooScOJl9Dy|$Ch8n@NfP0?Ijfr9UXw*>_guBVebq0A%Eb2 z!3{r|nml+A7z8c1>IhUx_$9sFir#IdT?baXVE$6@kAM8eH~(49sYQ4kv{W^wnbjmM z;-OL7%@B)=+p;$s@H?<`C;GkS=EeW|um58FsQ?oG+RXgbjZUHi#3h*HAQKDtrZ|{} zY&|>L5BF>Wf^}OqMPUDn_$mBx_}REW#1F#VyDnI;(I8CC$;v7{d9pM^;ScZw*LU+K zTr#8hkzDsEeh(gu9%(~)O>!|>2;BWU+@AV_sLFkoW1wdsQ^6$S(q5|@;gnU$TpZ9B5p)O6$j{^x)Gr;$JOfGU6RvT@!Ao*ce90t+C+ 
z!%ag~IXSSL2M<#mJb2*1$qYbo+m4*-^Ctll0A=rvUFkdWY7xKBKPUcA-}_?si|)zf z{uKOxsylZE24K-9ns~pWs;V<*;Q!Oa`7IOpD1hkv6#9t!ynpZh)!y2g^XID1m7JL+ z4v-B1h4-PtozUD6Fb5W*$(4{tbP}99gQx(1E_fEM2_7YA);`EAJ{;TTL@B{xx@gq|Y6+du)==~^upW|3%{Ne%f>t;76s094-m!waJ z$u|jjqs6aD_zecUk_zFsY7hd(4KCGkbrFRcU7EV#iBXRo-HMA#-I|`UWh=7R*tGEV z*I)l%oi30=xk-uZdCd(3%w{G9c0a!Zd8is3M@HXx7#&hQ{)Lgt+TUT3qslL7$ zu8x%9HLK&tjvhOD>kR50C<=Fsz)q zb#t+8VzjRfkR#(h0w3~+o|2JwV_|;oT2EI$TtC?k3xR_}HZu_70963lB8;*QP3FMR z1tWh1Kr?<*{qc%S2KdN}vIu{0zrb(9T3TEhf7c%3*pI^xmc7mRp`w>yMYQ(t;ltRj zHkkNkv@6&SZ1j&BaK@O?7*8&1#2l)>hEfv#R0pW^zzTra1!Et~^9OGnR|ypHr_eSb z;%^f9gCQIGKLtNeCLcf`=l%$PmA57)>iY+;TpnhBP8SDi5N<94b2HPk@UDKC8jX&? z;H%X&I5fNiCL@D`2L5y-Kr?{qG9chbmjQF8ffZDZS44>`{E6tJ{xAFJ(Jk?t&Yop| zp<)(ZRQ`~|59b}vp23~f(|D_6eLe)#nVISM?S#M2fIskm9DZ!=&*c6{U#hu3^nQx_Td4#$o7vwFe~}$u z!w2<)&)M0zx%nZu@&U_1bm1D_(FGR=`$xyeC&FPh+J*cHr@-VYh#5d|MZ^KBeQ?gS zaxIQmgy&C8AK@>(q2UzVfRz?s2~!uS{=~#&bMw&DW1Kf#Y(UuJ(`#GKUpx*0UNQ|q zSw-xc4QE+E%DEaSO(`tdv^VF)P(#QvZ!DNkd6U00M$X@GWUeqJ_Il8#EBed z{Cx)eEoF}~{2aPJ%3mG8HP8pB;e}!sc)20C|7CcX%!iFBRRh*!tU#RQCbHOzmd~Bu*7Z(A<%uI&j0hj;-c%Z!{e(sFc8Qodz4hl!L z$*watprEuvQF;@D3#6+sTpzqle%4g;W0?pbbP$gCgVFS7OH0`!j6Ylp5v;vfPd-TR zyRuR_J~5tI0)NhBl-^-S?vTM|12=eW9{$wn^#%AV_hfLm43F8}83@035BU4g2q4yj zJc{HQfXE*KP{dytz-|Z(Kb(J@xP9jWnFZ_k+pq`W3nTc&?=2}WIu6eZ!2J~TCJJ26 zx`+XW5~D;X+Vx@+ST?tFBZ6&NETP`o?DH%R82d+q4-h$@EWXwpK0w+gv@mcdXDZ%8I# z3P&4ixe3u-i9?87)IVd6@K?F z@}~n3t3Y~S0f6caeitulu8Fj#u0=H)ZEQzI%AY-AjtUmkx8NEfP@RLI z2V~2D%mcE8K+XS6PuCp5!m6vzz^5&L14{ew2%@C0WSlXAD=) zV-Wy(_8^YGP91+b_!Rz#?}Lvcxc;L2iKGYYnj5lbgI_HNAE2hRrG-34!vNR+8u6p+&lJEg@p z2Jcr}%RxY*s67ov3v4r!kWO2pmRC@$X7s*b%cB;~k@2*C8Bjk2gjq1pADREM@+Mn# z)M*Gtij`~-yDv`qN3T>r!Q$H-7d1$e&YJ9IDd2_Mk=LLf`g?Bn*z z%F=T9tvmNzR}}t;0~7%W@$n$3^R&O+6uRE%tx|~af-HgV>#KL)4(`Q{UTnOeI zAOX;lKkC_7W2i7rPENFP{V%Z-Bk~6X!uXGLz;pz-u#i6FUGIBqrE=x!V8#GK2RFEo z@Bl7w+<)+pK}R0n#W5lD$xAnvS1N7;e=h9daS6s5fKQ**u=}ytDRV0>1M%^>GxU`= zCIDx1+29Yye;R(*9@Qme)0hwZRPTk 
zE5n7i>EqR240N=&jF*EkD6e`Fx-WQ4*wOUnA%g<(x8-tD`0W}WKpffpHEZtw2AR8^Ic7{6<8 zc1~_~cYl9>9lWdj+T+Kceg4HKpMUw`y?naBj9xG-E$vA2wMpUx=kBY0FR-WGaCPaD zzRMzFvvz`ST_gCN*sTAuFr8rNz1nR-cHYlMvk? z@WLdy829Yi%GT)L{OY~9;z_E^!;$h7xHL;U*%-DSdH=~Lk00DwSzK6nP)|+d6K}(6 zzINF)0&gk0uMGWg+Kc$%%%iRjTg2R+TyqD=C^{-fN5GEC9V4-5;m>LxjFS)q(2BnT zTr2~{(5Jz>I`CHLqMZ_!59ze!+*~ zwYC~2A;dl79ON_6Eac(ip8o4AGj(N!+1rxgd6&5ODuurX6C8iBDt}*o_VI`JZ!h1N zzgK7C@7k3~cuNBC$EF?hZqrEup9Oy^fg%$8U=;DE7ArQYvk7697+AJOwv+l@bR7uf zT_79(8U0@Y{$4SB_SqvM`rKTOR?Tha7q8O}dNeO8WqZQOGQRUwydSLo&CjP-oA5K- z`bSp&)ZP~tS}M?XIbk*^7GR^+cDkc8Fm#F9Mj+~2d;kBt7@^u9RW zgK!m|zo@K0{2<`0gAxc9#rHwiivj1zd%{zXR-dgdOiT!0_dVkLkS9K@TZgaa!tfRZ z-onKT#_@gC)06Pf-_@Z@7vTme#^1`=8N3IRLUvm5f6(nJ4)FS|&kg*w0DBnju{9UK zufBk_DYht!o4x~Rv^=UUV)RF0swYcuTHoRustbHX7ZgnV)x&R=I>67V=EVQ8v)EQW zbCdEnmalk>y?akrR=b*Nny)$sgY81v1ugYb~TfS@W5{?chY`Y0kk?+!?L&B1AqE%qn31wk@{p zW4aWm@Hb=hf72iw1@JE8Z@*U4ooAK)Al&knx_-=jp59(bhYhfP1sfHZ&;x z@2#nd6d)`iG3g9Uwn)GkPtO=Szi-CR_pU--u4stbGcvk}Z4emVaQu^Pq%KCPA|6gNMc|}EDTH4kvX~~A> zK85(r?%n_ZV)rX@e>m5MKssNT_3rw{H~#4@=KmD_U<6oKmyQA7_GvhTKQ?*(@JuU_ zznPiY+Y{wm-~nB8c=|bUfXg2@sQe**Jb(A@&Chq4_)CHZ*sVi=j6P6rT~{A0N8qpi zw6zs4TJeYO55ni4o8F9oKi}C>$7RY_;S@>Hr@+HS&jU57H8mzXc2atc!&P z4S6g2U+Vd5Mf@0l3zqy@7mEN|G&eChGWtxBqoZ2a*A0IS$R8{oh~1w` zoIYa79{`B`F9ZZo9sSYX*W~`z9DiczlNASe_lY(E#`%ZJ-|xC{p~lMpSwW^Wnbk3^ zTa=6zh*{q%Y|AF60;dJ^YQd}|-8ZYRE|G0m8;kAz-!$RxiC6B=vj2qv*Gme20H9m^ zUJQS~Wtwr{Ogx6dvi5lG9F%>&kOBS`w$q*BsiibvF zUfZKu8#(^qo^kk7TI?+7x5D2OeX5%+YybC+Z~Qy;e;YPLBY&p3uQvIB{&%N%h7j1) z-wM>#@TeU#MJEMk}Ag9Mh^FdYTD5{Z-QP1D(tbspr$J&!8 z4W^T{OR(b)R`e7Ca~1xqdfykvpGjT5xrIiu2K~t_8vtVWtM$Jg_&-6Txjm-3gRv=8 z14fKS9x)$`=7MdHYHhUVuTbI7d=_>ow*29mG9@tA?EdJ=GnstAJy$Qv|CyRiC!bMz zG=c+s{NUqfX8sm%{;T%Ch6ZGFN`|=j5)nA8uL1=k{kZ$okL#DKCIVY zw2qS?xbYlzzt-;WMe?Ut*PyMyNbW*l{1Nwu{tt$Idj14C`LcPA_;eC?Y~(XSTwkzr zqyAhB#d>Vi_)CKEpRxaKK>ogbB3yem)STONr7g z#GR17$#BbPL)qQ*_HXZxrsS;;Y~OBfE%}7EPtH3W^IkeB5;6@o+=X% z5r5Yif5x1KvQ>Qz>G~FRZZsptRmf{2_d66+Nj(>nbA+80qbK|oY6W=|^N>IEgRgw= 
z-(Gz+YGY*7@gwj@Rq+`gd{xCPy1$v36yhmY`2*&^YG7xpCh!rxu@UY<_>%HR@e6C^ z{2#o*P~(qDFKQLFT>@nkA*NQyd!x_wMgC@It^FU|JMOLfvs?dj4-?)j2uManS zIRJa`{4sTT?6^23kR~*J`PoNLEcojM{E)x(YF<94n#dvnH3|`9=jp`A^0L|+zTKS+ z@qgFya5r>3hV>WdexH6>XeiR{=5GUl#i|vl=Z~sudB!N4Vr=~Uqt9o^1lZ{R5-trO ze_>(a@mfJ#PAyT1p^X*mZ=+SgiGi%$&r+gLVR?v(Yj;{OK9;20$Q9WNmh+dKmUkKfEE0Z zKX7}9Vk#MK!Vjx@a@c)$oL~KJO%g+{52wf zMS{cGCxse+_GiniFams954aKj;JX_~eaK&UTt?g6quIv%#5l47!!o>(Z)c!jRc&)R zIT!c?yNQbbOHTm2&HT**ezV>e9di7oq}Yg{8lUR<>rwfOFK)Z>aI`r;A(9&aLWycl zQRx9PiVo$MGzayKh>45KguiH7Ri*Jat8~5O3;8SZzIGwVpN+aH5Qs)iU!eaB3nQ=G zjf+$G!)Zrx+ww|ZLw-Vno@16#sBnOcK)vvAN}>-&8;RhTCMq~0Cb_e^`fO7{T&(K< z4E#O2W>;GaTS)RJ5|ErfXsf9E^eIe$!`PGSWY%##47Q^R+HNiMH=NLo{vfZGQy^r3 zjO3_-!zoGZRD^rDGYTOiC5cZaB6@TC2VK8@d*lbxMS5IlbA5+Dd~ACKBp*sw6&bHqfjmQ$c-fQ*5q-gJt}iP4iWp5><4 zEsX;sm-YOShkNN$qJ__%UNh$LWII&(6RWF$0>;)SPvaM6rzR$6x_VAmRpXy)Jm1=x z5F52+XZGGqvvOKavyD1O*h64a7;F2K%IOx#^t9o&6oT8C+6WDndF5@OI>FVW^B{4 z>($fJNo-Y&I>d~p;y0u5*Q7sF{fT1d5S_gB;He8UH*Vrz+E`wk9lJU)F1c(LOVpm(I zkmD~k)kea#XSAs-I3qi2fQ%`2`uSkAIF%6O&qlyhI{??5F~x`Gwh_Tyv4PR(ABaC| z0RgcfG6E>pwGE`v8eTj7u(sAY|DpJ^k^vR6)M(8a31U{K5>R6RH2TNl&q6?oSg;r= zVivUxu+ieE&IJrb{@xXw6jMkjV4#hH`jB3U9M@|jL(IYjDkmc<)(<&_`uTJ+!iom< z{Nr6g%V>xNy-}se-z=yaiI7l@qN6%DiVg*7LC&99BTk`UH*)-$Ph~ZX=3g+cu8AmH zqiJuN5cK>RHR44uwOVi)b%+_yb5)Ff6fk7@GianoEU1lo#0;mhN=Cy`ovRzd{OL3@ zDr9fuRWap!(MBU;1{Y|kFJD_f=G5t@%|F<|`J=Kl^_w?Z#Dl;8C1| z;oKNX{1MlniDHPMMusG9b4?m8>+8I^o#&+3Yl<0SP6Z4p{!k->F8pS&8Exw8w7Gqu zC?M*F7=Ju&K_9o_&j%I&U2A1*jc`!woDPj4$KPf=kP1izqyka_sen{K zDj*e*3P=T{0#X5~fK)&#AQg}bNCl(=kP1iz zqyka_sen{KDj*e*3P=T{0#X5~fK)&#AQg}bNCl(=kP1izqyka_sen{KDj*e*3P=T{0#X5~fK)&#AQg}bNCl(=kP1izqyka_sen{KDj*e*3P=T{0#X5~fK)&#AQg}bNCl(=kP1izqyka_sen{KDj*e*3P=T{0#X5~fK)&#AQg}b zNCl(=kP1izqyka_sen{KDj*e*3P=T{0#X5~ zfK)&#AQg}bNCl(=kP1izqyka_sen{KDj*e* z3P=T{0#X5~fK)&#AQg}bNCl(=kP1izqyka_ zsen{KDj*e*3P=T{0#X5~fK)&#AQg}bNCl(= 
zkP1izqyka_sen{KDj*e*3P=T{0#X5~fK)&#AQg}bNCl(=kP1izqyka_sen{KDj*e*3P=T{0#X5~fK)&#AQg}bNCl(=kP1izqyka_sen{KDj*e*3P=T{0#X5~fK)&#AQg}bNCl(< zQUR%eR6r^q6_5%@1*8H}0jYpgKq?>=kP1izqyka_sen{KDj*e*3P=T{0#X5~fK)&# zAQg}bNCl(=kP1izqyka_sen{KDj*e*3P=T{ z0#X5~fK)&#AQg}bNCl(=kP1izqyka_sen{K zDj*e*3P=T{0#X5~fK)&#AQg}bNCl(=kP1iz zqyka_sen}AA54LNk|F372xeq|wo!&4SKw!ozaVeH;I6*pFSy$;*lQou{QW%G)tB`T zZvHmO1`Kiqf}FoilEB~wFzES{00uRH!Ox#`fx!%5DDWo<3|<05gFo2`2P=Ug!k=`3 zK?-2V@F!hhP!bqA{3$MQlMKP6z|VvE`tQ$;N`nh9CdutJtw3n;2c7VmR$Eris6dGE zXFNT*y~Y&?IsPKnxZtvSIt4l6I^8N8gHKZ8-W_gYgR6!_D5IU|2M<62XH4c6x!469eKkl>H^ zawh(Gb!k-42fIsO1J^D3h@U0?&A;TYoOvIl?r$-I(kGYcpzg6ml zLx(>FGBJP5_>YN}M`vx~Z&gNtzX<00pMPP14RtmoX`8l2ZMT2Wn3+D35^yy_ia+#n zHj3FAZSn=$ge@KDW4J5dg1>;*E5!JVu;b6(=&pqVX4je?4)J10)T&^P1?;>@!J)=q zM5LWwwnn>*0ks+0JfO!C7jCG5VN;yPKdGU_-+CHsAd8H&G5SYP2c@iHbBk+dsDNox zoXan%A;RDK^$`kv@MX;y@MmrGi=IvjF}AmpGJMHnJicHX#sz^H3Wf}S8h!wv6=cYt zrO}rr^ewNk!F?;u+uX*ZTW)(>uqXS6B!89wQvR&y^hB?K>|<*2fE>lDP}9+^6Re7G zo*vZv{gRzG@BB#I!pkZAtu+^DZg6&DtMF|;xLu0BD-_)PDW1%PA2>iEfhvD%ia=3& z`@5a3%M%^lCe6AaN0uPxj{<1s&jLV>zcthe3tQV!YdXm!Ji1+?lY?0133mR_ky+tq zc7Z&9YsUqO8{4wU{A>^(-8R*aH3)wGA_e$Cl7gCEDm{N|#RZDnT3_?PZHbR=pX$Hb zgaCiz?()o*nRe*WIY;30twJMPLYnYW1lG>a|FN691=U+x@?hj+4Q6QTLlgX}zoFRsK7?J|J zsWp3Kq!u^FG^@kjdvP2myLww4(f&cr-%m~cPvio* z(~jcerl_saw6_llPTE?@UZ^J%lB3{^uvBA&}k9A=s@#^B!4;= z$ZLd%SpsNl?zE{5`B)tc$*aA|s^R!MH2KrHKu#Nim=%GxMy?QFqN~kEppWN^ z79?+|@+T%xK}<}bt1F3TcfW!V}8HU;lGl6t&-2O z1T}weiq>DaH2!)BQA$Hh-~w%pJ`3$f{9t9puci3{p|83GH-GQF$I&K;@&Mw6I36*R z3$!)*s`Huw#uwPkd3}~8*!gnAW;zMqG{M{f(BgFy2R4JDj@};3kln#%DDmgH z3ls@u?f+nJTcfBn>ojlMIB@$5>geUc1SNQx6k7bbBT(&wy;RA+yL;Klf!N*pguFVM zbupe=LXAI9J7Ix7(b0lLmSk&WJ9+%=RU`X-XWMFebu`xk>e+nfKLV_J_>LJ+MtE72maVHZ7^kA#ELFFkc*Be8h;|hh`i^KkW&>H6+tw4T&CadR-}F2H}y6J2}`6YW~6y6MF!;c?hV3`|ATIncjd~o*KFRyp9SR zR*vxQgb_wh>qd7FfC*7^XDmnc+O_TIL4eCH95VHiP}|=rYl%rcax;c?r1}B z07vB*GKB(vp1D9gaRFkQmve43XzJCbpk0MYhC+q=SSK2CJJ=MI?VuD>mJs33gH9Mv zU5FSX(7Dm1suu}@b`>RwN)+sey9ttgpD6^SP^fH3@COdiJ%HFW7KqVK*#AbW5_RdK 
zmNlp8vTz&xo-CP9Q{v3z^jlEmJ5I~eTT_}vUL_fJ*X>};t}{B>FcxS$pp!C5o>1aXOkgx!nRW+?wQ~fWoEtr9 z7(wHjk~}Gg+sBZfvp9r004<9c7#R8*fHkZ=wWjRnpjQpu!w-|rdEWD_~~Gw$DgqiCPFCy zx}(UNK*;IP=xIZ7ZEJ{nT0+3~Gt>t+{dKGm;g3y$m5!KBIp`;^lzWP-JS55K$jBex zHZEvol4P4{HH?Ca`R%YF!yn%T>s%l&l?N1AcsZKWiIG3PZCudIl0SzlbE}h0UH`*& z<>nw5Rb3$8<{nWbgpB3%-pC){J}w5(KF?}c1@-dVVT0QL{Yo!`o zQN);YBP-tRx1gCJ$!)fP?;xm`|Be~l{22yhdH@v{$ZO>(MP>rIoL(CFI{=bHg-XwR!**fX%T3VQzZ z%`E_sm(vy$z0tstb0dFzH*r%_yV-wL%%)2D)hP!*e|k`i0KQKbo|TsJiXvUX#+>#> zr(hV`+G#O<&1a|+Zu+tOP~cDJ0!;w&VwotiHS&y^a(Zm!58*bhZ)~@_ufh)c8j&r3 z9yKKR)3IOzki*kkinM|SIXyIb+ghxN;oIA+f!JU2XT6sAD~<}LeN2oIK-ft$ayeMk z*2uE5o%P>JR(&GdV&w+sw?e-FKfnGgLd5?;n`07M=%Bcp@+wlykwx!rY9l2@0) zZP*&Q{nla>!Gn4U`15ORe`W1m*1m=Jf1rxm+LC#;ei9qKp*yoJ3 zF~>SK`nfgzR-EL|gYNem>%uYUMR(4EyPVblLQaQ9ecjj0)~ewb6ZHPi2%vKo+~%}| z40Ad&t{LLEide%37H&VkL_>i;odk$=0&DPqKmQRJg8bRIK-=c_&1eZ3 z7VFFySj;R-_lQ4#5g3yESrX`;1*=xEgD933YvJXb8?EXR;B*i9^ACX`%AZ9i?3@Kp za*Dm2lGA}Pz?@kX?=gS=7zTzaf1C^Cbau{yXE{YKP|fMJk*oM>Mm^`x4*&)=e?McF zK7`-uz0q3ZIcLGcoB}VW<@DC5)q7QAUhwBTfWgh*?|vsFk0mk!)#h`~g2y>cknuUa zGpf~I-H12*`3zvN^Jh<>5kMUa##Z%{(a4ytSg(x63a-+&SN!=3VDR(jkqcCs&N&O- zzhblCu0O|LpT-{rP|qI&&=*kb5Q~eqv)UT% zt7KQKPd1USGp8x|@%*8@M*e1+%~@Fr)-u%i<6I!a&kcVnfIc;vb>?_G#%zt&Wd_#5 z_^|s1{Q98x<@?`W?1D}F*#L-V5pw*gF0h)6{kZX84+w-mQG@@x&H%J!!55=V-gpQ$ zFxeVy^7&}81CQDeKOKLlEd&97_5g+;f0V%LE+Kyapozch6=V`(%Ytu4OUAHROJiJ| ztnK9zZ*9QfVM37Y!q{(6l>)I zso7VU9OSp$_yVAS{%8U?lVTapcbh@UNg03hKH$KSO7g-jPv=Wme*&0{iswcnfx4VVqmDnb(Gn4n z!CN*(Wndb`&%~c1Esp;%;26-rp9O$H&!5Q!TH;66|3D}!{`3GE9F8q0-Wc`e8glB5 z2L6mjD?}{Py(7?~5CuPX{B;ZX69X6m{OMhwf}g_)7Dm2as2oPhw={| z%+869iAzY^k(Hep16OVJ&20;cS4N#9Gv?G8jrB^o|Tn=Arbz?lUO|5b`ld6xv8P<=*5t^ty0MAA=FI3#bUlzYlr1RU8yRH6oXjZXPc!NW@o0mcj7Q&m ziQzE``4=0;`|2vh+;r7d z#Kdpj(>QYX-p%>R@ev@fzi(hgXBR7%J{|MC~V`1#L&_OqY8 z_10VK*1h@WPk;K;pZw&FH-7wMgc4Bv!4JOw{nuZA?X_25edU#xUk_}5Hn`36M}L5%3r7Bcz_=vnfD;oFVj`gju0Z~#rY43*C#EJL zGBq_*ccCIaCSzn|cD7-+zAXSUeFDGb+z62I1sUV+3nqm8^Dk8X)=~ZtKf>RSfIr0V 
z`vgD2->-~EvEJtR+Z2DfJ<1b0P67*@yFw*t-lcWM+n>-1G6%mfkZ?^whj)) z=Rnwt!{yD5)6+9ESAn{YnHf;4(%2|={Kda#7v41nW6?I-wnrsKL-Q18{wOQqtLW2wek-Nx5g2Z{D~*-+f^JzTDiLoZQT$Y?y#V$0Z~sCBziL z0*n=ZGhX85v}(#ebyi+MSV+u|F@bxVWgOyu2LF?1Q|7a|A$*KYB;dHGKT7Y}2M_ zG6zYA>95l82L4bh@PFY3ENnn|AA&ao@+ISnnN4tSME|F`KmG8JcE7se-@kh?{)2j{ z9xxJon>~N7gv6MLO^Bc7{uqBa37Kh$-hbiz`Kqe2va%yb3iI;vGBY!GXQlA%iScX* zNw2^3(o1V#{*jgj)da5ep$J1Ugzcq`$|Jp+SFcZvj!%w!Zv#E8*y!l!sEBgr|K{e} zH*Shfh>wZB1k+!o-3fo_{)qqUvE{zGe%VX@Q~)*p`oZPxGx@(d*I-HPrcDKi9|f>5Dl#%UmOR`V7q=-UIW>7} zK^=1nbhnd%=i#=|>BW1)RRvjF5;wjZ1>nIng9eU2^nfq~j30#QhnTpAGKsCv5hM_;IseGX3zDKL(&X{z_d#uE+@D{S^OKsE8kd z97b*0lAT}GaC!0TTb<1xXPWf@s$CuRf-eTZAHl)ZiWnl(7f#eRwzajkHlEK1{yMvK{9P3D2h$G{<|C5x z0KY+{rM%=%0E)(+7X&ItXAHr&h#b}IwNaG8SsDS*o&QU64Gy}lC;-~>NAc?{hbBun z@Ihlc0N7f4QSSn|=0*fx2!Lsp^i54)EU9lt{8}16IK=o<-QSEm{>tOg{nfG7(D}a( z(*N4{KLIE_f1Y>3^zbTR7-2QizB#j&$YQoewv!*e5kU9+fdlk{zm5*VUrlox0@zuv zcYz##7+quh^<1olzPGKlrKP#ChWS4YKeznN!{Un-f0sSr&j^Z+KhIqtA%kNGqefbE zW;>(N=GSdc_^TTn9&~x`|0sV2wasl5z^2-ZJz6KcCipW{Lfxz>LHxk^wKO+1T_{xi zpEvxCAb)K6C0^(MRQ`;jPyeX%A0^=1Q9wnoFgzHAcz7t7ZF`i1k4 zq*B-LpyB}OB8%1dkM_R`f0vqD6ad@m&orq3s%=dptLy(@=@CO-8(jq|KY65{uTL*^ zeyuIdfL~+H5y~HjpN0RUgFpD3pF#Z8~Jzq;n;7DixGZ6yPcZ*PbPmOr?*@%r0u8<)XxJ0UXS-7vVU z`PS|xR-+G&bNDqjHrD2!AoCAz_?z&MKOGCk#^#J>{yZhn3>X}SZ-3KKJvQTOU9)`~K3V_X3P3?5(N8BIyzr%z-9zQq!Z+@QO$ELr;{b}nj>ilB^ zX?m^YOeAQai1_oc6E^j~o*&gMyIS3SjEVW1n$q!?1pFxu(4Ief8y4+<^J@`5aDa3W z*x6i5R$>@;>^m_0?XEb<2e^unpI={JUtN6;{vY0%PSGQuixj|~swTjX%zR<=M+mH; z{?9#s#_pH#N0why{=y9WX^;u{Q;m86-}EHFO?nu9Ei6`Zfg-a?ldVzR%MTG<|9ct7 ze}oJ@e|3Xbfj^uAi}}l|rH#ao{|?o*w6qfXU=oa%wA$NSVH(1?W8VScZyh{-wy-b; zVSN0PPdDKA;KT%dU;}RZ9vX1L!$Uot?Tx3)r(yls3;#!FAB;bWUsw}aIe-o7|9Jis$SgP^ z&CT)mt#5tn+u#27zx>Ppf$*>Y`kn85=eyti?!Q6!-uJ%u?-*Ww`Q=w$dF53SUVH6Z z|0~j}T*{G^m5|}cXqCw>UGaZ=$o*F~{80z!Ie$qW;IR1DKc!L0#F)?>@@%jSPadK=Fo;V0Q9@i zMZ?`HZ@u=KHGc#^`=eS}NtLXO4$}4foe=xKY!~$-zfOMl+p{0 z&F$Tn2ZqKbX6CQoT)A`q{?f!TdJk4ME`NY~Y-i^7hTojx{@C&h&tI?V|BUvxzON|5f~-1%FAdt5=BwRQ#WrKez=I0vZ2xbWr|k 
z8e2PhE)NWk!t;mMmu}sC_~Ft7zES#04Tj<)M>60c;ythwLT;dd-!oa7^KOV^u{ZGd zSm6(QU~>n|0eC|Ju+7L{dqZXB^h{?r*ZN)nI5O1Ok5qKr+UIXD+Y< z<^W_pFzbR}U1#h6=xZCS4ocqX;LK>5OK9-W|J4nT@($4C|A0T3c65*@C5nnpK*ySp zTXMFxs~7kio0^?pTn7FgE>9BwNBQFc>>nV(MFU(vg-~;>qy!yg7Uv=LE>gt}_K3^M z+H(TKB?tpoA3wee{?EuCKLSJme}%JPct0fezzCqO|2x!L_G%P%~Cz0Cg|JqqlhCH-9Sejtzlh!4(L1~gLs{!P~b8~KASSapCn z1_b`>iUt1c%+8ILCn7X>$Y1F&KFhB^QK0+h*E0dAs&a57xj0 zWa#Sn)U}1hr+$$d3TOQ~+#K_yYhtwejD1xOt+ZtrdSzXN!so)Aigz1tP-3 zfudjV{3){!Zu(2Fzrp+$`Fm$iSp;Q|@>gD5l)pC>TLl{18bD%10N;d(E}P}z2v7hZ zor9nU{3+$nx>!$+DkR2TeXTN?)3F_6BX_)q*Wc*)Z$=&eO~FKj3#EhPT@r&X+5s#4 z;V2dhp#k|T+q^l6Y-MHP6^HEfjAKVn!Q=lomu~}qw|W0Z``89c?WqOG{3iJ2yTvJ2N#o0VCQDxFMmr zv2H&;yaFLNI~|^^MT8*m{7ti()ZrXa-6gCF@T)6@4!i(gd>bb8A~? zdrNuy+zsFl=D#;>`12OPQFuO{1lH;X{>&pl#s6{q5f9jU0eBs?B2O-_KD%}x9<_h(psA^hR+_|ezbujl7827$j*M-jmM zyzR+$1X2VC2S``>_&G40g8+b9AN-2KpCy4x!A30TwsWJnJaa1Gw>zgWFOwb# zAj~MNZnfesX>fE5U#6otK#f1hjijcRv~{(T(0sA6qo=98xvi&fWOQb{wW0Z3UR>_J zef!}_#@y^6?#(W6K4zEc{MhmRcJxRw0C@P&-p!Oib1Ttq65A*o_!9!i%|ZBO zm{)#E{U2l%6{{62G~*FT5aneyVNOv7L0VkG?)<`{{9J`UC^8`~249VsP~M6jQ1Abe z>fv2Fngdk%V|(p5dwY($hq3)zHpV9H&e>miuF>_y9ame!`TYjx5*L?yx@Tr+i1ff@ z0t_Ey0sbFHrtQwr$w3A?bMS z0=y@>yRp75xwxo;yr3*KwP4#8d|O`H7Q#wyZg<<*5F7p(`EwN)!((*tJZ(}!LNw1- z78zMKHja#p)|Zs%$vXhy-H5l}daLx&qr09tz%QRdc=qhs^XEC!&wmf$4-hl}Z{0HU zH&l|04|}oxxBSp<5>@eIcOS4ZBKp4x^nc*}z9RVj>5qT>!yo1v4ntp@o?f=^Bm$V1 zyE_G)psB4?62kCe8$ShMtI+H+h;ki70q|wb|A~s_{GZ6I9@T53=>!A`JN6#R&o3w} zK5_u~Q`l>3``h3C{&#!+a0FNixNYB?l>je#B3&nQe_k3yGgp?EapkZP z|CLr%Rh8bod-om&ppWD4t?EH5iv@U2_7 z25J9G_=Dcz(Zo*1--Zp5*(D{g5)%;@M;Cz*LdGA16#2VK{huDc-@^f)KcAU7gbb$c z+_NudZ+a@uhIK8bHg*`f+rZ!eDS&hcs9p#`0N?oLHzU-nG@>ik$e;e8bU3Qz*Jstz zNlb6F--=7wv7h31BLBbvWUr<5zyJGx{)h4h1X}dJNyB5~z#nmd==S!&<+B*#`pCz& zv$t*<`GZTXw^mlN(f<+eNAN@bIQ$rY4}hShF<{W;x(KXo+!z@Vy>TN2a9d$PK@R0_ z8Nw0`H*ell@T2@O2bi|u*zx1_9`eTnm@@{QFcNr^UZ6Qm#()}s`(Y^t#TmJfj%024 
zT@1W&EpDqaHpo)lALEbV7Y6)YXeRto{7CN$-VgD6_8j{Dr%$J+4B<&X8iFb8IfFa{5(Tnzr^H{MaRDiJ*zMyrlM=SKZObX)OpThen0e&xFb0@thqn7{6dC*>$h(&sc7iv0Q_bj^4<@7U%(If0|yLl_{r4d!GpjcXt`BKpi06o z>FrkZZY%9Nu-XOxfA-$|y^7?#`<{Qnm-AllPfxDLN$g3UBu>J3k)4cZ#=?M*&@T2x z5<(K&ppAWz1d?D7LMxKc0%%A3E)bH~8JJ;USjJAqj-5P-ulL``d*8KmS9MqSIj7G# zLg%<@7@g|6yQ-_d^*MJfy&UmU@ZbOa5C7jkGjf^{5eGBXh?!<1$&C1EGRuc>MN4}bUr=g$C;@YiDJuXbn*9Uv~j90Qqnz&FLgG-ONFP&eGO2?*A1 zUb_zaU&N2`C*bGe{t!P14<7Wwf{h7bMqWw)ve$u#7fZ=TXQNd6Q~gW(%AxEJ0;2d|>b-M1HG@J_n? z{2hrghmV{(aSSG}1#nCM{{8KPFgK$?c=t<2?IgbPj|SimCe=8%!Lc93k9a?VAB>j2 ze)*Czm<+S7MBaJ^0IS8@{fvA4$aN5UC71!orXhdvFGfxw!b^ouA=QW)W+O$2_-8cs zk|T+!o3e9q^S5q8_8J>+|NHNM|9dNc=m8D>;AP{Y4?Hz+a}X9l1_m1YEAsMSIS(GD zIC${Dfm2z4;@0hXl^0F{CIHHw?K?BK@2^4pUcMy$PwsuO`$hL;bAJp!pz8kpo*r1V zi6`E#xT50RIrw*$IKM>#9|aJdAES@B&qog*-Mm&)ec^oN`4i`+hy&yUK;?a?a3{1d z1k8hlXmTawDV+r8Pa-P7UoSih*9eaiG?}4hYfNevT+7Bgrr`n!+_2vb&&>Mj1Q0m6({ZB{OUD7G$rXaqf4&``y2b z{9y-793TLQ{2e_yb~-t2%eE~7fG7KU2jJQlyiI$g;rxY*)t4^S)YjBouB&T;t0SlJ zn$@wRM~)sjeDp{$+*)<`a4*GgF?TpX_lNjlxHsNid#)H(C~GcVx_I%k3+LIukGenf zfC7CFAKrg>|K7d3ef3vbuU_jNn5ye`?tqQk5F`M{xvOHUF!_^Y9cF2Xj4MzUGjt`z zq~9DI8YZvTQh>cf4RhbZA{6Hj93b?+FaQ()G(>K0ZriqP zqd_2jEgK!~A*WCkhzJw`j?vQWy#|D4k%Se|>@x#U7 zyU?F!AL;77*3?*gxeu7*@Imi8KTmxh@(O#xp%&mi()NL|sm8}oKJOFwQ%-@69#|Lx zvH>6k5cs12!Z{nFut-AeS*y!)1bJ zgY4!knE-POSunROF`vd@@>-5%ZYqunKb8bSy{`wiRrce1Z93UM5ngohcr-KXR zov_Fsr4TzDc)j}|JTubGQ_RmEyx9(*$OiuKAq>mfi>>!1qdyZrKnkG3uvC8c&V0-0 zP*)2eN5*{wKI9KQB`5Fp-0aMBXL~nXKiLKgfqnfhGZ5kc4FLHf3|ogLb71I#kv{^U z9l!DJBqoyuKJub0!XMl(@Qau$S9(qUrk~*0PrwhBz3up+qL*PswC2#EL)fk^m;`5Z zE7%Qe^p7TR)|l0pL@sM2d|Ee+QWE|Q2grJ01|W99*awUJ!5ha70#*Dm+D29UjUj(9 zWJCYQ@bhQ#0R#%}kMLJ^cXYI_yYI&J0seEkI8cLdXC9cFoS1@l^~2O?Xb=Wp&Aom7 z1KVLT(${CR4BR!P=H|+oiEn_?V?fpi zi)v%%o1UGWiJxBhdjb4`{}b@zYkxNPNBUCJ{h{|`?r*6K+-!DtH~b$vSjqdD*=%~pBlVnlB4j>%^ngAMuz(wH+wY>;l&hg`U-uQa~{4L}T zar`{GKgwS%z}3?QsNsd;Uhs1LaR1A|0GSU#>yC}U1C-+v)8o@KbJu{sN4;HE{tO3b z0*G^ToCA{~An|~BDVQxo1Ai1ifBA#21qsmq9v>JuMQ#AZML<}8hNoU2vg^|I>qoNT 
zzEzx{WL~Uo$8*6o8+>f09Tyh?#O!R2;sKZd19+glBYwV&&KZ4K+ztvyb;&Ln>rqhJ zp|IY>;sWU^jL-)!lCL$@{Ae};2pxnc{$MoyVqqb7kn@LYA&RvZ=g9}@eV3NXMn*@n zPr%Q)4ADF6$Q?4+Y~Ti`XW^$#Z_UB4+>^oKB0OeyzbE$LL*VaoD}Y!J@+gvj03v?` zKox&60J|k{{BZtp{NDYGWEL#(w|Y0i7en6?zvsl6;$!f<0NhVOZ=%5EtVSMc$gx$ zxCm=>G~8W)neXR)v2abKw@>2Fm;(C;Abw?y`(f<-O>`$&;3E?c)BjaXO@aTz0Cy%7 zgZrzl{xCK+F)?->1mfRddT|JcezjOSjFdqwx=uiZ0{kw0%ILtxc0|Ry(GSEN0d%Vo zx@4)kfS}p|nq4qkh<=3&F^dyDhPF+YvY@Q9jfkQ9_zzX&;IU7lDD{7i_|cmW9`mbj zKp>xd;F<~>0v;O~9m?Jbmp$P5@L4f{9amo97AZ1Rya#JAFpPXu9{b<{@+Sd^RUkdE z0zhLKQ0jv5egyc|{`xm~Zejp=to#|i4iewQwpu8Fj#)S{-1Hnt}t<Yu*!;a@Yj{U9@ai0f+#608D|aR$_w#+(&R&&ffz2(w*E)`-^gHg_7be+ z++j=5xBvt5UvPSh3;ccra%K1q-0-ke8vEeBjX&Z5EdWBhn-8Sh#(;P~76DLX590Xk zl=ze2WBd`{2Y(LZ`V0Gsq{sZ48?xtuUyT4CpvKx$&4PEMQLsR>5cOWLI%m-|YDv}; zj1i#C0fPVgPm#Z~JFu{CVm~D}R6+;SHvd^Y9-D3k!>YCQFC)>G_V>`@mlp^?$|`*a<+KgV0Hc zNg$3{-sTMhe<+&7AMG1@g#OoxA6*5&vE?^=O2UpS*76l7Vp!&$Y1yay)Oi^6wUwKTUuIJjJu%^$uV7L%0I6wlRBY)Jh@y0M< z92*;L7W!XeCr0EC2!!z;>4517aBeQMKd|2S?o!#(&AzN2gbr?SA>lDx;CS@-35Sk6 zzKdf*=#v-jEH0Ja1O9rkhsPxtZveh|Tg~ss;-@UExb!3?i-C^z z7V&a01~x0fLEz-Xt-0AV=>K|QB_=2U34iDTCt>v%1AF`qpMh^qO3K3@HXp)!u5c-6 z2>RKXSy+dJFM$((k4*0eE8gsm7nu2e^;Io*eY{~s$atFZ85*O7KOcz6W}_-R;&+w! 
zjzzP?1=_mcKbZWrmY4J5$)`-;_yk-5o10&_d;d|5oj>%IIR29^{le%E_}c^i56OU~ z7ku!;SbMSE`$AEp{Pmn(f=R}09Qom352iI_{s9rT`iE4)eC7WAnVUnS6GJ1TWg?E)t zKYRA#<=4+&zW)4Sf%!9}F`G6WZkiq=PH^Ut(f0y-<_%YuFUwsP5u3LYd`gWFI-|W{ z;h1)#1fcgS3CAd&qb|_sf`3m2feUPqBf|$r#3qK;CqGWz+1hfoz2kcG)%I&QdJd+h zcH!f>{r%8D9UUFlu3hVFZyzQ6;Ux#0f3VeO>i-ZwgFm0{Z>j9|tvOiuo1VaFE@h7_ zzuac@-CZK^-M>$6M};~-Xm9^C7DG-><&`VWU9j;)9G!$ngTRYpyeLMJ%9H2{?h#1+~Yc-|Ha#|nx?P!4#HcC9RN>FIG&eQYAIQwyvE#%E6@K9VX7}_XeFlEMCLiGNy4yOhcXxI7PAuT< ze=x>F`tbinxbgx2nCD|RL)SFJOQ)eG<(mnBvERX01g=_@)7)&Cgb??PbC4IRS;&)T zo!z&VCTmX@i82r6{@#W`_?k(P)eOPPbZ~DdLYEoJB0THCH+ zuS;$~#hC`qJ&-?>KE~hfz3{?>+I$LMtn|k2RjXFPi{XenM!zgv32B2s-thM$2ng&C zc2(EZHMdunl<>O&5)U)}9*v#GmsU~84F2XGea#%;y>Tmlz}`6Fk4}2Q`@xdqcpWwr zZ3ktOh1qDfopf{#hLmV-grmMCw~;_L4f*{d+>?gE&YuC`g2^B6I<{8L&olmtpv5w> z3(qtTj7>jUnqHcn8Nb-lf*mI=KF{t!{&4U|Mt?qfUmWj2xCzf+l$9cW5OCH(2?PuC zebDt{z&Y~n*o-5URh304$+4@xN1Pw>B!*S1@YP%xK7b%vgm~cyzOQ;>3?BNs*?;*G z+#tpITN*xx_h3@U&NBZ8-7a&0x9+~Q@OK5+!|;%=xd483g}hC%MS0xz8AhYyQC$(E zUkY12d4kvamfSX6;8VJwVB@b2zO&Q;zNDHH|Hsc_Yvtq}%HMDS^B8;foGq*DZLDs( z@qON|_-6K)4tkN-oO~cP)(dZv`&;b0bZPJEQi>mi4*{Gv=)<9($zSaEzVqk*iyzYK z=MOT@ojY4zURDObLs-{yqocLV;BTw~_(R6dwln(R9-O(kFTZ~N6i0x!zH;ObZ}kEh zYlXEI(xANg?4NFMqVu%p;3>Y0&Uq1{APVLcuz$K1+wvt{3S|6ETK(Sy2uA^Y!1>!} z)^tbJ_-%u~;v?1Y{xx{-J=~H9_pIEI88|otZ-$wk&RO%BF!we2v-iGBi>0ZnR~!5h z^hgIxcO`c$5CGx&etp&_N}nfJ!LO|k1p)WuRMpqB{`c;9X*v*=oRWGDCR-%njHh!L z9pJ;e3yaSSOaO8IVfa5n-@}`bS11~yE{z>Nqi4jzR3(@{-24(c;m_HEs**nfAl1CR z9q&ru-@zSKy{!KoE~{&W_y09Co+&NeziHE!&70CJ&0UW8P3>9@0Aly6a(_72hCn)B znDy>_@4fGQ!2KWN4@Q8swV4?3ZJ(Aygkn?V56`p=`J0@ax;J`eGd!Sc4{u%)2e|lU zy}=*iC-V33;p}X?jlWcQfZaKS$ruFn&UFpKau|N~r=_K2-ibeSe-K{2w7nStzrNcl z=Gl$YBbVz+_ON<`?E7XMdB@q@pGstRsBfBP1=2k3%F%1pbOzxM#ZDe!+3K-v9? 
zy|3Z^IDg?HO6}DI>5(oto)(dgR~UnZ_a@~=VGBj!(b#|CS_ZaM3-aIuVJXGbQrr| z@~gkp|6P^qR>$@EnET>2j}^wB6Afw90p44BE%Vom_;LQ` z9QkuD76Ei{KMeyR~@)evarb`{guhn-xj)DL7Y1D4W&8nvKJj}u{QZJ!CVX=FlrVtt%}cll_wc1W z0WtVX>BMzHH~v%&?(3M$F4U4oU2}58l&9jSQJL5EsLn=#Ke%TceoBj<1^s0Fy^>RX zY&rYC_ul&p>i<@+jz|7%b6<1v0sZd|^$a1rslQODYs#lKk`dtWSKj$k7i%2Wrng~% zNMJ@2F-1v@MSM|1&bW3pK4_5RTf%%L-r{4G7@n=(4 zY;L8ItU*7Y;sZeJe$D>Z5C5lVG@r*bcCa>uVZeyhC?Xbw(O$6YQJszM{1q|&>}TPY z;>sVcDN_RT?e34RJd?=>+;jD={GYAaB>Ak;qY)h7v&Ub)wevTJ^IxO?wKTZTVuc-K zy#)hC129VbS#t(rwC43#B_I6JJBI?AeK@bbm>nlUaLsw_ex2RlyW~$+*P^Y$NbW-5 z{1Nwu{tt$IGJlGkV%Z``VmgU?Hi{V`E*I?GD4&a^Sig-Xf2lD3v-ZFB$lvQ%%C%=p z&4o=L{*PdY1ZE(B;?gs}@P>;oaOccB^ncJu!U@SuhFex6I>4uI*aVpIhqDfozfM?x zR^^n+wiX+Dqm@7DR01?gd8Mk74Zlu?@&_;4NLBGSN%)%-O7bV_4}SpRch;W1F*`Fe zI>Rr#Ns|w_aqHdkC#Y-J)@m#U2l(_2n}DeJyT$pl=CqV;=xa%rThzPJj+juPfQ`cU zP*El0Tx`w}c4kI@_$x9C@+;;if9MDQ>U;nD``=%`CT{()!|2#}s$vS=-{fRE z@s!K_2j;&<;Ad+jh!NS?0Cyq0ruR{>o3s#zIk0_DbnZWFT#Mu zvK1-wN7Z#aW0Z{<8(x0#a*|Adt^P0hau4zs6BC`xIs zI@sV`vV$eM_yYkQ&XXh?#y2m&AQNDQAMyuo5BbaA4L@Ded3Za#SXV5`$SEfBc({zt z{JDVwwE`q~-4vhRj47Ny^1@Sq&%$2=@>i@loO@EV@#lWF+zKPWH+6s;;SWALaMXwV z#U^I8%sicHC`d^pD=<96JNfhm3RcxMr~8$R4N=MBT%nA z97_zs=pqq((^LhoOGs<0tgLD*Ox$4jKMQ|vr`>AnV2eurR05Lo2W^?UZ{ENJIEFvD zPG%jqVqklHVawgQ?)u}>=nwLmITb>N$w-c>I2=pjr=r}$mr)5BDM@`Y>*6=IUhin{ zZ{1Cv%A3X@@dv+*QLLp5@vNn;Uutugm}^hBX>`>T{$zS-A0`w?4d9*hCCU0US+Yt z8w0HG=9;e2nJX=s2{02$t-?zo&FmL`F(Lcy+hY#(bWM(E{>+OM<~edul(hQt))ubJ_eotvAvui>qy?l`BMmJ%BOHgrt$QfH+6F}6YzUXbNHeL_)(lk z6U|jy;pftEAA>9-V&+^vqluW}m?1F+bf)R&uC`QF?T=4REh#@+hEGx!mY0`~3=F{Q zh|W&GeY?pJD}YF(^Ji-r0TjEsrg01M2d^)hoVkUstGNxY>FldNy=(hMwu@ClGbhBB z$tWOZKW2!G_JVz!>8AL!jDqUwF<7y}7o5$E4&H=E%1fr--rjDN#h>kn=g%sjB%h0M z@y%q@_3rM`S$Ib#d12B*d)2|Lt!8#vJu{uemc=L`W<8aV8Li(Y`Ap>##m*rS9UK}R2mWTH zv-Wo*8u*h0w6frzF>}|>9Xo7U1rcpFk%llCt%ymd5=x_ZF2-cuI4MB|G;Z3JnSZ+N z_QKM=2aldSet2iHxBuo)`#0aTi~IQ0S(#Ah$)7dX2e#o(Tj?b@&u??%TW)6=7iKj@rX&Nb4EF7@@!f@@Cv z28TsJ$3mlwKgaWoL_?TZa4mMZb&5LvGBR8wTzN*9%7Qa;vxdo-?xtT5Mu$_0LjGI? 
z%y0v6#TnCsXl@q~d=(oWjsArAa~2Q~iy$L_;#}Ks8lB;F(+_LwobykLKPMSbAxDi? zoRJ{rbShyrhC!o0E&dz?q=-d|ks{_$+b|m)j_O^&Xyor>#Yu66lmdp^sHhL=mB?`e zHgd!qTp&9cRk0z+snjp1lTlVQqURqUD_X`xEaHs@MWJTF&`5=ZVN@N}yHRy0NQ-d( z>>3FQMY>Vo&weV)VYGjPMRjdNxf*SI(}alU&#I9qg00me%P1jcJy z$=r%SqZ+ZWvB3P2DVY?bRw>Ec@NSGI{)lTZMX|(CBTJIGxgw2@_4VG|-g8pyO~ouR zuL4FDf2ff~7jZMvj4t){+T6iV6c%-(j6V^#h>yGQ7X%A{uCp?(MmVT*PLIZ@<8Pyz zM3H3-Lvs^xyi=4Basi`{zl|;$ZHy>mM(CTHemPwf@^wtNtZw3FI2famKWB}AzX&n{ z3(mEDS4NjAc~Lhi`E%3=6^a}qBIa1zcV~3064o|QW0dmeAOlq>Qj8QahuSiu(`eTX z1%kDsXZ4-asYXm)b(I{9QOloFUnYWDz!hiY{3%c6of<BXc>bdl}3?eG!Rpr z$~!hHl&5Z$NUU&yru33A!uhkyV4kO<)gsF%AZ9<$cWn%jx{=PGO$HIM2ses|*-qu% z8*NqcB7NlZXOTe`VdWX6-bd)NNWEpFjF`;@N@w~ma$3Z7KN+dEX3mJ_@Bh&u;uUyL z2d#isKr5gX&mKr5gX&mKr5gX&mKr5gX&mKr5gX&mKr5gX&m zKr5gX&mKr5gX&mKr5gX&mKr5gX&mKr5gX&mKr5gX&mKr5gX z&mKr5gX&mKr5gX&mKr5gX&Tex&(}@D_~h z>TCWYyZs`)_7TnBk0V`uUH{1DZ>?^?2v;D&`CF?AjBEfSo<9v>L<1Q4{Am{$$pA(J zf11F^B``Yp)17dn5*Q`?X%`rw07eae+66`=fziVsbAfAhh$IDm9Ld*ze{5A6T!1x6 zZ?9+tqKiN1gjclMx^h+pqKrT5>FMniu0YiBw{C?CuB#_05Pkfu(*#N_ys|f)ve{Am}slA#mo7Sa6uL^u&1UB?4Y(4Q+=fym}h(a&=H;BzJG zuIok>h;aVwkO}V%_*KL(kaKW_ZfM9Z(Uw(++tqrjg;^8L?0vA~8pTawI8 zSEISxpJ>cZA4v(j8d1d`dN~)xT#YXI!fnEq4)-y9mG8h`*y|N#{H=52&)w*&g~Dbx zn;s7FZb&q%V2_3Eyh*{)#^1U)H@#eqZW+UBv$T0wkEJf$QUlATI!|a)qlv$dXmEin zF3!am8bLjjvW(5GuAQX7@{BdwVG(m^{|w zE4E==5T2o6)bMBG2LL)jhWt4igK5Ix@>&}_xY7d6Z9Tf&xuZd z^a{&9wiXY|QJf039o;#>sR-}s5zXIE`FV@ZkHj6k9OG}Lxj=h^^AlT!Py4~$QbJv! 
z$mWlEG8=y20F?w9{H-VgRqY+>c6KgLb##|B=Yl+0BAh=8pq)Pl00sV5P$#TxZBMP~ zC6n^#Zi!wF;+QAW`9nwMgrD67iu|n{7pQJ*$0iH0L49=B)DYGn^7)HX;0H+zHCZZ| zzm?(w)om@;JbYW~qr0but~OD?-^U-T2sBU=wK5WUL^Qs;xVo{Iqdw{*`%Vp6-6EgA zk1_a3AjeMl44IPN3vz01?S0L=@21)8adBqc9y6^T>%)qK-1fqQOYvACIa_4&C*$WppmD-( zh%GOFQp3A~A!1{kh`$l(Fd^UYw-&DZMHzo87igZptBZ3rnq`C{761w&e;)>V>;R7A zW!FHfquM{B`TIwk|5Ld@;k4Haaa+{YXxlr81TSsP^7wyL$l&T&gQ$d&)&JT!?iqdj z*<7G-?(5wcb2SQu1RFJ)&s#)wcyA>IpG#Gjz|x|SKkGb15M(_BVN;H}8s%Ms*$TeZ zE&Xr^3iMh;Fgnn@QOTd=0!57wF-HJhjbbT5#l1GQB_FSYC3(3wc{MzLMkjxg3ly{= zh&d7HY7`0yCc4~wg!_2GXhHHuD}QPN8DeSzU5!BtO$rD%4$EJDu)0Jqf2vN{KupyM zyBdSl8S{sXj?k49Y?XqRC8GKJC)N6kkS1OaAxfEuDO{kdF=(Mfh##!1gtW9^APiQQ z$mZ{pPXyW&Q4v7A5GNvLbAhhLV0B(Gzyt%EJ#WymL^^*y0F*l6k3NzRv${Z6W3WQo zn=UF+f|VsM;`#IC0{th@!k-n-;G~WU+H5BgoFT7|1nK@fS)L z=)IY3IW>PP;P>1wp??UvvjIBU-J)&%d{W`zST!_4Y67DfC83!t-vZg4v1Qd$CT z;_zm$)G^S5S+aZBj3)m4cY!LQoc$l{?P^q&=A0I28;5UyMI8e?n4*LLlcI}1Uj!O` zaDXa>cJ}}qIUKt?pHM(Yb1ueTOSJLlZzrtKCq7=0$dg=+d?&v@18U@u@9bL5fR5%` zl3&KCgK!~6}kqHWu)ZquNzn=!dFZ15Ar}{@e+a zT_9P-6ETy-5)Byl-ze=F@Pbi#P*O2jyU-i_ZKX8D) z0mP+Uor8(qf$*{Y$gEhNk^s-?s(0V{GWs*G6#GjhL zc)Bv}3lwMP2s(K;`qMCq#5J^Pih zt!P|pQot1z3kW!~ErVDZi`(|`mJl8M(f(K634<=g1Nzh_A;D!A??#tGxk=HyauVM$ zpie9!pbXAC%e5g}&N~NWi4Oh@|ECxQq8tDqQN@k0_yjk`yc_*&4n-3iNq!a)w%v@1 z2$2vSEc*DfcEUs`2B0sBoC$=S9*zDsl+d=8sJ|tIZ9hwWa5GfLiW2_#6qt3ybjm^h zic0yW$jL*JoSuy0@m=GJRwhZVnU=$-s94Aj8#VliU9jW=MXCIt$id6eoL-FL@m=GJ zX4d?9T$x**Y{vQ@wyQ7)!KmQ^#WwegA|+%zXW&Nh`0jBvfbMyg!>XuP$POFP{_kh9 z3^IWnKye>~A~BK24Wo)N??zs{&rd}&Lz2&IVc$Vfuh1PcviY+N$YcPS3lz2Tmm)iX zLe2n<;_+SMigsQJ_{A&g<$4|g86%uO3kwzi34x*<{!?Tj8y9DUAeISn!2(f>(f`e` zHnjiMu&vgL*0W3u$eC=ImWozD4wZpo-DlGJsSs!>E$a)kw-! 
zC$%%@-wjHzsVb9Z5*=ZWIbxQBi09LR`%V z=P%Yqpm!GBb8c`kZ}4mkachRGIL)6Q-S2y=Vll{~duPE{PGQ$_ z_-;v4A6@h3H-D?`0D5P^-<)MK`ie#L`0s<&b^yJz;CD`&mlJaa zW(<9U24AA)&wu~7+9Z$(@12DJa#|pha|U7zo|e#;;LKm>j}_VT5Ac6B04*%Yl7-AD zGiE6^0Ha*Jm9eGy3&{VO1d`_S&O%^0MaXP9{Wh));^bOr{sQHXcfzQncNPN8X-+j1 zIsG)sEx$6h9Qg}J^&)-#L3&`r1*-Y;lZAjOGQ6BBr+>z9G-oR|I6zJGK2w5162$^HC!5Cc%4X??GzYoK4*eLRU=m+6?h_g!J0>zEL4c?8; zClX$ZBY)ulFv|Edu7@a_$U6(JIX5IKkH!+c8{J9`qs^H=Y6q5LsyUJFber| zae=PQ9h}h-GA!1cF}#>LmhKmSp&~FU`Ew-DHw#X!;s((MTC9VY^KNviOPJIBy}Xe?_zN1qNaxR;Kr4U}3)WT*kC=c(WcRVE&XDtfe|^M+blYxj>OT76&*C zVZ4K)%4rnr-DuQoc_T`kJS6&6>uBN6s|yqWRD!}q4BW^8ROa-{$dA3upB_+CX7Q`q zQNy3w1r}6cIDh^;{B3D%BcYv!j;kFV9cAkh3 zRu@ubQR9nk&CU3)?aCGSzwPRkD_0@-tY8%Jry#Hj5bNmZq+xo7hFKcsNI0_w0VJKU zqgMDu_GZL#9dr6Ly5)5)HE4?pHC5V2Yl?5&xN)6@YuAXn(=<%6(CVu;QN^FN6Q=la z{yM2bGZKHdOaPsw@=g(u?Od!c1O)E0t*KX(A5kUvUbWxJ9;0MN$Ytx_@xab+Pm zqa$NjtfMh8(bX6vWNz7g0Z8yu@psMQ|Hx9qImhCn(*OMm0jz8ffWO-WKvx!mG&(^> zighv){+x|&00nDvB!(C$eoFp2DS&MH+d-GQTmXzp|M#n3iTDZQKhj6o{NL@{1VC37 zf;Or-V#PYSKxz&aCJ*^t`TJ;1Nf!oj{700%b`4g#2!NEo4#r=*Gk`zz_gw=kalapd zt5XKR&vbuQ{;pF1>H6O-2*4i#=*mLyMkRo}Sf!EqKiJ0uLBVBl;}7ubcHs{JR8L}i7{t!P`{s2J2 zACA9i{|kSeJYZz=w`PsX{So|J_`6O$pv>RA2%sG@S5O3_odCgT$H>(v@@KD+{Yarq zaRiXy=gwaznFW*SpTVCKfRWCh0zitNz#s2`*%$>c7(o|I`d`!k&AS8W3X1)~A!%yM5*VB!` zI{z@Izr_D_0DxBi*WRWY14aXXq6_5sQT~n|%gIT`kOKcFkysMkc9O6@Zf$+-sof>T z2a8UeDZgA%R(|T#+4FnTw{1)~efGk&zS4{haWEein#>iH0F6Sy_MC#z&Yxs-0?jF1 zI2%8rQYjzDZ%EvK=3Lp%o$;w#H|8EcSXg|jy!6~>7b*`Q`?M(MWN+^Q`0k>>NknFp zJ9+o+-8mP{0bqNZ3c%>#&mhpiuf5~qiM)*Tw6ye$jhi5B%t%j%t(4@%_&E3(>h;u3x*})!ozGcXN1ba(4J)?vC`h6SV_lH|lrCufz2ivz-*6csH5_D~dIZD*l8j zNrd>dVb<7pOe*k`^-0;)4J}t|OOrS4%s+Lpxr=x}(66Jlt)sK4xA$n=`uK#Tl+?6z z5S%NXmX?v8Tg@4Sn=kDiFtYgjfgIwtnPDpi6TkLq^#8C9aJ4P}V8)iMTaZAC-@3T< z2?@!4eYNAG!$U(uHwOj~znR-zoyO-L@dR6o*HTbh`$Ew`q1YIb4-gzN3?x_+&zZ>Y4lw_^SJI2gU8 zq~iA-(;N2KhW2AVtjODcn}dB8XBIMp1WFBv?*o%nY!_j zj^?Zlj6dGouAq1^Qb*>HlY2R}nc>3#`EzWxzx%ttHNyYl;cr;@YZl&r{~uScRu`#A 
z75Q78oN}o8#+}~U(uAaqyBh`{JiIeIHZlkVc6ap*j+EB+Hm}ETby7-N#-=R*UkMrg z;qo78fvKsHF2bMKUc4dA&++l`O-5s{yofLS+U)$H2b{vyfUEhb8#kq;rlh1K#>GKj zQP9_yGd?ypa&vfebOgfa#7tfJ@wBA1Wp$H7SFU7o{|602np@F8B+0vx6zo*2Uq_{c z{Mk=`@{=F`_(wnb(FY%Vuxiym{nJ1G<3IkxKm72AfB*LgC7}2>fAiOW{r>wu_`&zT z|5tzYmwy@Kd_=270B}u8%E3#ckNav%<2IyZwJpAU@%i1k$&sO<{=T06!Qm1Z049MY z1^VBOo6?gReQf!e5;!?2vWIb#J0mteKDKfJl<>Q1JsaE>`J-Pz(gh=bKwx4jbigSo z$q8}L1D7IyBu^?;Qcfo{^ zKl_Qn-zv%<;z#)VTi_4z`)h(9;qPbGqd0F1{H;w$+1Icz-&TKi?fS&5u6w`x?VB$j zEzFIL4&Lk^7-~fRwysYC{6OLjC7oP*cKL$OoYBYm7Ap$O8r2;79r{Yh<&s={Y z?vD_-Cjn+KUnCvM?mYp@pW zvh8|QVnjrMzaOLjWA2amKMQ~8{sQ-ZNdkZ$C8efTwL)m`NlDGGTD)`n)@;XteS7os z^YZeuQ*&Vg5}%lynwp$Y3=1$${7nwu5C{~oGoxcG6+jDq5`XX%7KD|Yl9cqzU%CPKi7SAxU=dW{ z{5f>L%>VK3H?aOUNdRyy@OK#bYwbx*O)UdT#>UDZ#zSxK8JK*)+Cq_tAHV#I4sdev zO7>7)RZ&R^ZLRaF{`czD)|Ck0cfb4o`^8yVWUM9rN#D6ED=TZ?{{1B-#l>gNoPjg@ zY=8230-(tsy(4HEAAdW&c5OVFgJi<=mo>bFKhz5RU#tZS7f^l)ArJz^l5xe%CHOX? z|1;g6Jp7~GuQdGoivY%dP%py+#({5h=dU+8C1KrK#LskpoIjj|OkRoKck#l7ii*>x zPai&9w15Bp?Ck7aIq711Vmuc@>iggQ?sr$f{A1H5s3veFhhhvR5VoCcV2|{gUcD|O zJ}E8klhyRJHpIuruU~hD`@fl))-`M6lamtSFT?bgwL9St-5>FPosRrDvVi<~1rXP1 z9Scq(3%ZVY(wu}p;{Mp^kIg<{`oV{P?{ok6mwy=`e+Gaif8F5n_S*bkZExR+4QtmH zB7PLWqV;ic@f*m)t%-?i6Vfu$wiMQKr$Be>>}+eOJX%^z{wu{mqG!;RKq%gqxi9l? 
z-yw#Z#^}s|Ob>)VI9dSWnl3jsU#_gFEGQ`0wr$6umZ6FHhXWObIh#|~e7qjOgJ}j0 z0)OZMVF;Mi2h$HVf7h;||BJEvKSxjqe*qC_B8B)tFcajMKgK_^xW9kkvtPtdnEjIJ zN1*(10DbXyvbVoCZXNM{%>NZJ@z)`T>(_41EvTr!KL5MBZB3u$?cBM2`}V^Aqm>OU zjkVpjBoqx~=Ek2gY26*xV(`Qt`y zU0r>{`J6Q;CzAJ{t!-_tJ9kY1po!qS0?--1@$t5{ixmy6EzNaRhl<P`Nz`;D>ix8I3BrKjTj1>yoZNt{1r!KD8sd+^<*Po7sw4EctC{N?uc z_jU7LmGrV!{`OsB4MfLEIVUG3lJ}mkZ@qf8>0FZxpwZP)FZgZ%{98Dm_S0UK2r zyrmHUefhuC-oC!x8w@~K{wRKJXQ0WF4}8|p3II0OT#{X&(A&t-#}ll|Nf8iS6I{3LIG^7 zxzuTP!YhJ5OC^lW>Jx|`IKL}Rjg1$JnEwlezd_`WFTW&7{?FjgI!>2JkU6pN=eG+) zM&x2`0Lo?yRIWzLzQJq~|F?m!|8*C(0<3HN}GX5?%U10#W)SYWI z05sZ~MwZwA!O|m!{VjAA=*+3Zbz*(4QRmlur3vtBs6I^j6Yz8Je{}E%e`hBVKV$jD zi9ZVqa$`$ID}R2wKm~tN!9MVGsZubw+4G|RQ}EZqFMi-B@W&ls0Q}W9HC^EZHrAAJ z0LAu(czF4PYa8$X{O8tXaNJIgTlaAcT-N+xS0k^{XU7Em8X6jE3XYTcMF`1QQe$e|~ntw*J@uqxxjmtb33# zHGkvd5`U?{A9H~2{L$O6X#ZPKgZP01q=Ue=rW&#m!@1+1;o)yr=_xV5Rg8jyy1Kf` z%JcAVU`HlJkNlmd0CrY10)AxX3!^_mU^VrBzWK9uznnj^{9^DIW8u$)Ou?UFlmXoA zPrye4AY?0xHC>>}Y|!LtH1-NXMCyO96#1%waCYlo{> zaA~BgyRW-pWO?}8{OpT|UwrW;gy#?#e+~f7697j?=>r>Z+joCYFFZWd+1A={_RIvV zKL_Cd=YEuVAkl& zo%vgHP{*<1A$aoOMrZrg`f?k8Q_A&c?*C?~`{THecf3!_KrJy^N8`~mK1tWj+?H|?8 zE-cK?&A@kLc#!i~w#(xGrj-0~{AQT@rlg}4M@r}|qsxg!tKAZ&)5$}ej5OML?xscqF1^hi@{9zAl?|=mWZ!-W} zto*gsmt{{(wsi>o@6=SyDa%(6?td}(ql+(m`H$g8`(NbmcpqK>A^hdA2anW^Vo7BV zFtq%!C+01O;+t-8y65C2`1|RX{{#NuiA-36apJET@2s@&cM}K>>>@^a6LIEXZLXomm&1r zeD>@C_&+Ou;s_7{{29)I;r)=<10#S^|9heGQ0}hM(qrt-{G_C`!&Q~VnRqNCto=Cd zjNSdQ*}Z%Ffqh#y(#F}pbMHi7HB3PIZ;p&l&&@C1fAILRia+WBtpMUO zZa)pM^f)k358?93lP7n>N-}oYq|ZKn{KSb9b#=A1WYL$w+lwS4f7k(w0O9~p909&& z0Jbpx0KhhL{C5Fvp19i5j9;j#;^LxAnLDV!y4YBt=qDn7Z1y2cf9dr%nExVwAMQTA zjy*i!j_tT1Zu=*A{f&;WGg2JuQ=ppW*t3J4v+ueS-c1Q-4p#E z?SH!%f4%@5fB`6mK^le{Ay7UEeDFu)Z$QBxpMe-t5C$OdM;+juvR`(zU2VB?>g0*z z=g*H!PEC%FjlzibD%_CJ)KI$*A6|iwpPLC!)*?a>ME)jtO=@wDXzUVG0r=IPgbutA zzN+ii5$}io@514d;-bR6TT%#rjszlqDJfq0b1atf=V+!!wKGFfT#XKS?8%7<8#Zj( zkqzHGn9*dD^_R&?KL54wm)kougj3*NIs&Bpt%rJ~!c+eeg^4>0t2%G=4BQ+V9%*WB 
zX=}Z5rgi2v@CWnXJ1+bM3g8etA5Q{rbqjy?5g_w_0)NBiOe$oY7J)nv|9Q?sd=2-nxz|YzND;yvI__6KDOY*Xz z!lKf;_U_ixM;aQAZ%xE;;$Ftz-QiObe=z&4s62Ay2;O%}1AF+05*h>i;bTWrQ=k9t zcfbAZZ@-v1Y+a0vTdzcwLR^ zgV|COlXn#q6&K_){-DU@!~}dbV)B`0yaFQkzo~WbE*;YW8vOCSwpZPoq3&UL-{v(N zQg`L;D?8uN`}O_amii0(EY2k{G5>7mWPd;Dfyo3I{*Va>{E_%OS_2nt>*`?ehX~Rh z*yw_(|EoKGJO`&JVpw}v;?HRb<~4-ToQ=R6j4V%XxHfnX!u|XA{}sZ&LHHH|r1WoJ zEAsbc*KwJ@nyk$Uz}iKQpJn+^<`2vKw||=<4DQ%@{tOHP0l zk<}eB6=Y^sMP65$03I_IdK=Fgn2gzTTW1 zd=Cd%e4xG$_t<{<)*R(;aO>*Tt5c8F%)xu2I~wY0(@Kg<$qULdG77hDCKtvx6ISx` zJ6eYO`S8!mUvEhXJVpo4)21dT$BS&`kdb9W!{Fdh-H8)2c?TeTyzb{8d~ovV(+B=J zz}IgeynXxjn{VG zC;jjE_!lE5|Cz!Ee}BATZ^<6q`f!WEUoDw^m=ll+Sajz6ar}V4-~9U5-%L#-gBv#H z=K_Fx^D@(u06|N8g@!*{u`+))vvE`sajKHaXxZ1cnV7O2Zh-^uS6p(uBxmbZ^n1st`!r#{he-yvx&*6puTou8= z*;5FgfBrcT^8~;d9~x#IaBpwPk>kg9Vj(nS?%%h66GXF@78h~lumS%+Sy54O^1*`# z4>16J0)HRG78Tb(;dl@{nDbo-um}x-Il#Bz{}_M^3%^-hWL@yxyLbC&|4aCT-r?!! z4$j}|)p5BePQXgcy2M1f2!s%F{y3z_-%aZOWc+>&2mI!n$;nTV!HgZd_vY=%%)r^O z)Mf?=0s6n^FHRX*X+$d4%Ab5tIvh3g%UR8I5|fSYTZ!r0_fh|JU8&;R_t z|CjOy1UmG;sRP3!z#nmd==OHQ<+B9h`pBQHRd?@L`GZTXcbArO(f<+eNAN@b1pGLE zkAa|tVPLSg_Y$zSW=-6>_%&-NfLn_S3-c&{ix3uQxO3+Y!;kXE9pI+bM~@w=^OHXj zz`SATgpt5I^a9OUG6ppH+XqWAD9+%;OeAaduM*&mYl&Of*dWJnf1E##Ukvbfv5D|U z@gu!2ct6DN?KjZ(zj-q;@hL)>zIEsB%x$Ui#V@0=1LdzJ%gD+iG8$H^M4)%0d=P0X zDRFaVKEdzIF3R4O|M-vpK>UzDbb$Z;-zae>{BN>FW4*)pLRnmc?!}+ZPR{xC(3T|f zRyMS6JwJU*82_RF!z=wuls~om1Mg?xhx|Qy^zh-G&N6tn1qBZ}-8cf_j-yA9?pU3T z@Im1C&Ci4XgTU}({0*h-KV4edgx=L32Pgmt{80d((IH^|7`nivCG>x5R;Q&Doxear z)2U7AYtl9*XXS3(n3k57ivOpkneLA-zmVbIN87fYD6PMG74VySB6>gUeE~n@4;(PK z;aB5h2M+>+pyd{cK!bz;`FmDw<{H zQ`7t({_qFRp8+7@uf@(^?a&xHKwN@31~T!0Z;FFy$d;<1Zn$R?5Ukz2b{+P=h#%um zz|Y0~A$|}ZJm`f58xz8eyquhqr%s*BV*CMq;QH>|flFo-Ka%Sq#qaUsp<@;2&z?F8 z<3Csa#6>{X1LuvCX~;L^6``=sn zLl0>12QM2Jec-8qn}e_bGBD8CUy+vw%X#oH#leFI4xGvY6t`~AtGsXuFac2ZY~Pu= zeSZz&_wpt2e{%1O-7mT)oBLz<0af?!_w>M`O+4{_#T6Cj&cVO4#Q7}}_$Ywr{1|=2 zeLi~l=;pPW>I>&9&!0FqMI0a>04nc8g*%~zAz&UXM3XBaPw6B$e-cpv{(9kAxJG!C 
zpveq1TVqnY;954`F%1_;;D-HfcxDD(wu{p%VfnA3&BPDrdo^E{XCbDbYe23C&8tgu$gzt`uR^NNL7~KSmilx|Nuiu_ZHW z^A=>Up>ghazx&<4i~L~+OdKEpi2NNrI(9laZOgVT0)Qv`dI#Xz7rae-q~ZL9i`ADd z)zsG1T&}BYN=?l@jn}M>9X)dN$l;?$is9C(!-sn*ev7%o0lGiL55v9j?%H$3utHgL z>C(lEpItc527c82p$8P`d-(AF!~6H{-R-Nt(t7n;@4!@Dw{r(<+=d_lIL=)aTZPG= zBb=&~SbMn-nB(w4?>j$FeIN1)d%~d>;6Bp!fw8H^ z$4@@*6Zlh3fsGzm7y_~ZAO#TkqX5D=8=|mCSJJj(p2)FdpOce>KQj9`QP<5oU-*1N zI$zoQ3HX6r#!L;D37!qIo3msB%q?WW+_J=c8iUDeIhMJpI4X?K65!3b2?_h_XULj9 z<~P@q?t}2mNHTfC|G>`Q1D7Eu%wSEr1*u_YwGzKlGHGyxViLGt-^z-EjS68!QC&^}EbKhyyeL zwy`7*ac%BEb<3$ z95)D5@yBQzRq;25{K1e7{U5{6pUDRhD7ZhuU)kN!(Yo%w8`lT;&*|bo4Z@vyU~Y0^ z3f|QZQ=_3l7<@JN_Vo{Jhsj7^pM^hZ1ZW4)SOx_A=rUm51h9gtiHfi|<4;8&^?$iX zj%-d^TUEt>p<)VNRQ^Q359b|k-@={M6L_m*T>%DbTheWp#!lgP&Une`Mo~}&3zb2) z2kcEjheD;a04R+CSsyH_jh%0Lc6KIydg1Q{@CW`+z>lx}+1wxLOHKEO-jBJzr801{ z+1=gni|qIsKByo3otm1NneB%wAFv!m7p~zQU2t)*duU{2G!|B)dyzlo6qsBEu>%OM zh&Vu_56+ul*WyG)ME=zD5&kmk>&xK=tW8N}Fm-|IkB*KtHT91_!+F!CdW0=0v*yZ$ zOUEFGA2A zxog1Rquwqne})4z0mL~v&Vk7gka$466wH>Pfj&mq^&{DE-zqYed9k(~&jr_P@UfY8TwDYYv$Hvh2VepW;DPpz`1vwAXY^%p zJ1890CA(y-M?q>n3#6+sLLapJ@R;5Gp4f*EfxpkK z0AfAJqe%V%i2M-%Rs6*O?3Tds!}-VYd-pF=M<(*OdN;xsL*Ej==fs)fWAMBH+)qJo zqQK>>OBi4%F+_BtT`x9)V{>~qBG`__66&4J{vWk)ihZFC4+w)mScm`i`EP&w7FS_x zgFwn3Y74JIZa@Glu3XvetN#_=>CC4f6hF!zUV~-$+1CCH{>o<0yT2m?F2h2y1jS++Bc~@8^B7a80DQPvXy*0{aIber1jOVeI@(bSGKhBNGqP z|5Z&*f&aq*cP12r`>U@0Fg7+ZF?JmU;@@C;aR`TgwOBfgltC@JPC$eL{4RaU=)lHy zM8&(&55yb+bgL1%WU0DdE9SuubeS6<*2DKb>N2Wv1ejC@oc z```icCjp36AU&`GKw}wD>Volp1o+ne`ZsuPVgPxp{29IvBRKy(c=TveQe52T!V_hu zPd1g2@EP33I1Te*GVO)ICSP{Og(?evmoAyEiL|HGqNa^DwkIRy&mA#O1uN=XI86vN z<{;<+`7$8)fP5j)^nVi*)rYaL%8GOF*Ok8>);=PFC@Cx%XAR=Y3-Nx^Ug<e$$UAeRR>; z=zWFG_sfyXist6}wJdbp@9UyIGAXDMhoerq8=zxDYHj zKmwp6f7G+_#xP(U8yjsF`d?xvM&u6&gz+EgfawTuZZ5Mwu-^CXQrXhYzN{XE4sLKE z;W1p`c=Y%QhmJhHi(^9QlNatRE|uN`{(7;8$0ZnV0KR!!&F{zJr!1_v^du$aPtsT3 z*Z`c$=Yu~S|C#trKjwWevM1m2^40Up1!kkljb^5kSg4GFAZF#rdzDD-i)N9)Z@;B; z5LkoZi!kT{UHCH^!h~#Ma(3o?YN|Q@qx&F$WI^Gpug)|6aQWqnIR$_2lMm=<%N7@U 
zvwHD1eZ1O>fsXbT@p3Q*HY>qF;N--ux!E)5|9W91CMW<2f9L@xVf7e;ag)>FU~^JZ z9{#ZT5Z-fzOF=`>&(6%kIwX7vocMcWdOuk4W_P^6%=fFWYPsv<4J$&%(~Qs17%lwy zKuk6pRpAl8tHgIKnk6pK)&>8;{KDP)k814vp|8a8pLFRL zMt{KH9`Jui1}wecgCEA)i|yVQiW=pw=kyXxGH&C@4+ncNts(Odh_KZ^q!Q*U_wUc# z92%V%8W|nmU3;aAt-*NnfER~w6bOCr(wRNmcJ0V1@92aV8C-AeXuDidk(H9Pb5Cww zer`v1cXutkt9<&|vllPFe*W_H=MM|apCOIewCQlu^cZo1GmnhE7uYjzxVn5PP=y;n&%M)4eVfkqendol=IV1pbPJ~$#aF|!HWQ;2FWbjNnT$-hwYyw}8eDv!1v&Z+B=I7=f*9rYET=22UVEn;s^p}E6kR2@~IBWy<%j*(J4_pQ+f<0J$DbmFfN7t25~ z^l9*}4!p~@cn5@?JJV8g4;?yDg+JTU+|*cqATx8vjuR(T_<{eM-P4cs8Tk2{e1OC2 zZtJ|>-PPSYv4FS#!59ze!~YlI$_M;oo{!xOUDFINorao}Zzcf7eg|JDxN22SbF*a< zLfkXXL0+h4Ay1xlcHdf>tUXnoz1MY;b*z^kF5L| zy)Q1bl$~p9ZM%lOF1Y~}XBs&7K>ken7=OF>!V42>^C^6>-$VL%ShZ>uycmwSWAw|y zm5??FuZgq!gE zMOi802LWdtlt8dB-v?bU2Am`Bj?FkySyfq-k{rA0d&K#1{P^dpRrqQy3?D$?sg}>e z5qw|u#27sEceDTUCAdL~^S3m74)4LFkey}z54v6E0B_xWY2oh*u!rFxUvmNc>I!+A zVvF*)?K6x<$D_I;M!yucdh!IX^)0z=xWK1$LBYmf9eii01AIv}C;pG0#n#HnJCwiS z0_HLH>^WOj+1ps%bmRNHZNCp8rh{JOH76fPjrGEtE?wHYx|HHa;X?rD4f=5C zXYv>Oz3=?_|Kf-A`uT&5bLY;MmzR~n?-17Y+~{a6Gx!^;0RE7%v+ayNxCdu$?#r*A zKgAK?t*;#U!&|*T##&*mg)}H{KKrK|oaj95Ie3aMqjO$_D2Rf21?-=$#kPD&mjW4o zlUDyX0m4xLA8`KmnKj)}HGbRRulPtcynhYedk?qd!96QCWCjk-z?)&Fr*qbPCd_?J z{_MT)(qd`q>eU8+1U=FL(_P6O3j{#8zF(j9iPGoERq$);LqWhjIaT%btpB|`UYZVs zC8wmGgUJ>NIOFLYMhE!t?!w~p0uw--e;EFc(D(2r!Mr zPWW@SpsM7L07x}&Z^ye5_;+weRWIv*hs)|(;r)LNjb}!~c3x;;osLH_0(_;W568Z-<>@?}!CB}sHS zCjAK8-oR2d~TtFwQ>={(jYg3pGyu&j~WtWKPF)Zc#E?AZEE$_?Amf z2B!n`M!~!!>66!2O5_{P#u9h`H$nJ&6_ERL?0;dv^<6#O~MZfBo=(ibnH!Ok)RYQy2z}SdAiLK^W}?yB^iq z=+0jeD?f`z%)2LDpL^U^D=u#Gf^1AVzCmk5%%)AH8!ZpxKA>`it3d5(L+r$L`nJ{k=>6 zWOXgtDvabV1kN9Ef9U^U*eCO+$SIaBawMjcxM!o75#n;e-i`9PSc>)AX!4f|<3DTv zTaWy`ex+P{w$xnM^x^*qhDcxr0w^v$^9ygd_yTv%yhHy7jU=3q++?_AHKGH2`i4z_ z8GktIF!}3*^=DO1scdVpp*LFjlTIZ-qm);wD%tSsWGH{|qK#A)f0Km2NueZvqW?aYioaW&KWk1)*@nKBbh$;n z8|{b*6$;oWd=C{>GS0>39ARf>^oPG9vmn1>e)5NY@UOo2zrX+e^=snRA3F>`s*1mJ ziLa`dLiaa0nNB?AGXH`3uMzm!8VO=VHa5Ur2(KxB6u+2e!T-S<3{C#H^r}`d+9g~@ 
z6=Ft(0yhR-U*vCU%Gv+Hz2kwpKezQi-!S3Lf`H`u^#$vF?;M6bME>j*06*mKBO|Yv(@5lzuo{(!@$>Xzt*x%rDQV6kjP%KTAv9nTnLW5$M;U%Z?o6JV?VOTOHL{Kdq?CYc2ZIn6{CLmMwP z)JCU*$uT_{8GyEm`f)OF(B0(v2jLHIkD8id6(Ju*_`;GmP9;00M-_kAIXRxGU~GK- z)mQWm1j=7h2ll_WfXKwl@V?2O&Y0N5WT7NKi&Y#tsGQCxLt0#MIFjUQBm_F;bTdM! zGJk*|ITHI*gpUq3IG5~Ti7x&?K!@`r$%gUG%P+_TnBj-~f!jm=@^`~e*K{7<4lmXf zOEPkbi98-IqceYQpg^qv30^nFr#E8?=a0Pb6yUS)*MR&LD-P$L6m9&upDnk-2=GlE z;70g^&kh{*A%C%nSuHb9ry2@U63Ge-&+tw@y@7&Nb=?qHa0_qEX}5=>KA3$SZdf6B&Ov?I>wkTC1m84O!Rr#z+A1rn8VeIQ82-<~-`i=o+B(>xl0TJz$|z8YjoyHOJ)MhL{h8pQb;rVg#6%qi){WB0-EwE+>vQK{pL;G+{^_0 z9@8AYr~!Ty=g~xS)mHeqblk@v%ZQjcm(OS-rZ{Fui~*f#`njtuRaN`rlT%B|&z9kn zl!fKxr6U6a@H(Qi({JByGQQC?5 zzLD)>mC(!yv1Kv}h}n-BBBQ-vA7{ELJ}sl5x_S&&tndYAGoyny;gRx^>9@DHTV?TQ zJL37X3Mk3vVqAPP*>t_Tdvq4wkx5>dw9sC4Fl(!sT~^ObC$VKQN{CrcC1ghHw@E%z z`9!gEh)>&cu>9iW?K}8O8;kQ(!#77qCrbW*_TKxyiX+YU_doFcVPCWNwXb{kd-vYi zov`7~?2J8u*}Gn2gRv5dNSJ7HG$0VkU~)791Og-^k&y@jp-d9N2B(?v*ke4ivpXB_ z?)^W$-{-07P*q)3r@K!dak~A~jCHE(=?eX+_jxLGpXRHt#*|aT9cF+(SwLAnrPl=G ziJ_sIxpU{vTYLEQm1{j0&bFOsZC)tY%{kdglDbkhI7rB-ASM@^oH1g1>+5DO-*|H5 z{@A0(@T;JQqkY$V`}%>u5$USK9%O+(NkEl_aK?FW&7CtRkW~;dU=wL5kWocUx|Af1 z;1C&`1pgTAAR`b{$Tf&YrS2+`AwI2Ug&r>_!9*5Wb-4w z7=D@;D`I01G2uEBHHu_=%J>mQqv>z$>KcWAOJH>D-h<(D8&|&F*x2-&ktYAF2(gUt z=L_hoCd6Dpgd{&&v_=hL{&gm7^b7Wd5n_r)4UKQFUER2L-O|N)j%$qbnHm0E83fV9 zDAz_z5#mK^)FS5hwr~Mn)I-2a5h7Njt6y;ZWhFRyh0=tm)hL{&iM}#!4=Wd-qF3Y# zVlv1{{!t2zdj1LsN`W-;J0Td$5`U~`f|ux~fVyrp_*slbb9)N~3YHtx#z~29G!{8+ zTDJIu!O3#1X){{d8=nPhPW%QJL_lMq8RO4*ooO@_hy`o0NX_^YqCNLYMEOJl(q z*{lUJPP6HkhtY5;8RX9*V7(2%qBBm*qq{948hwrUGYbfarN{`Nm|I&&qZwYC ze%M-b&aV}JCK*s6qeew%WQdtArJ%+FX!P~s&mbU2EG9>(A|SX4Jal+_qGn_QjK1TIZ}cmWVwvI3)kvq12VlC4TK?SY99Lt(1gxJ}fX9-@ zt;mToPwa~&C;3MyH0lv67z>VH@+FgE+$tsc2l0(r;*YroUlb*V8YxNsVNn{5?Tz2v z@pE$QeZ`bmTmdu1A8MrNA|9sAXlZZU?#_pzf~cD@{zTkTAGh$A2Md6%SsAMlDmCYf zY0Mmd4K|6=Wh_8pA{Mxm z=Wh%&DUS5%=TDJA7NPiz(&!@$S*+c%QARA_0;MZ`9yt|p?H41p*3X%0{{9Pv)GP1; z1yX<%AO%PPQh*d71xNu>fD|AFNC8rS6d(mi0aAbzAO%PPQh*d71xNu>fD|AFNC8rS 
z6d(mi0aAbzAO%PPQh*d71xNu>fD|AFNC8rS6d(mi0aAbzAO%PPQh*d71xNu>fD|AF zNC8rS6d(mi0aAbzAO%PPQh*d71xNu>fD|AFNC8rS6d(mi0aAbzAO%PPQh*d71xNu> zfD|AFNC8rS6d(mi0aAbzAO%PPQh*d71xNu>fD|AFNC8rS6d(mi0aAbzAO%PPQh*d7 z1xNu>fD|AFNC8rS6d(mi0aAbzAO%PPQh*d71xNu>fD|AFNC8rS6d(mi0aAbzAO%PP zQh*d71xNu>fD|AFNC8rS6d(mi0aAbzAO%PPQh*d71xNu>fD|AFNC8rS6d(mi0aAbz zAO%PPQh*d71xNu>fD|AFNC8rS6d(mi0aAbzAO%PPQh*d71xNu>fD|AFNC8rS6d(mi z0aAbzAO%PPQh*d71xNu>fD|AFNC8rS6d(mi0aAbzAO%PPQh*d71xNu>fD|AFNC8rS z6d(mi0aAbzAO%PPQh*d71xNu>fD|AFNC8rS6d(mi0aAbzAO%PPQh*d71xNu>fD|AF zNC8rS6d(mi0aAbzAO%PPQh*d71xNu>fD|AFNC8rS6d(mi0aAbzAO%PPQh*d71xNu> zfD|AFNC8rS6d(mi0aAbzAO%PPQh*d71xNu>fD|AFNC8rS6d(mi0aAbzAO%PPQh*d7 z1xNu>fD|AFNC8rS6d(mi0aAbzAO%PPQh*d71xNu>fD|AFNC8rS6d(mi0aAbzAO%PP zQh*d71xNu>fD|AFNC8rS6d(mi0aAbzAO%PPQh*d71xNu>fD|AFNC8rS6d(mi0aAbz zAO%PPQh*d71xNu>fD|AFNC8rS6d(mi0aAbzAO%PPQh*d71xNu>fD|AFNC8rS6d(mi z0aAbzAO%PPQh*d71xNu>fD|AFNC8rS6d(mi0aAbzAO%PPQh*d71xNu>fD|AFNC8rS z6d(mi0aAbzAO%PPQh*d71xNu>fD|AFNC8rS6d(mi0aAbzAO%PPQh*d71xNu>fD|AF zNC8rS6d(mi0aAbzAO%PPQs8T*z#mXZy#i@Q_D54Fq+Ed?5q~M~!E`sD_)B;HrM>m3 z=I_U8SD)ISZvNh&4otZMDd+DEA~4+mrk+0nFx3F2pFeVeX$CM0{1JiaB`_QO(IA{w z0yDxNxxkbHm>K@a1*Vd~?C|Hfz&9wQNr4}y`Tp;ZRi(iNs7Z8O)Cy#aKNy6IT5W2a zszAp0Q!kH>i(G-s@mEshf>ZM(1+vFq2@xpuaB&}Gkv}4^xS11Mmref21&V!K%<+u! 
zM=r3Kp%dDcYW{vAT!|y7|+Lvo3z{SS+LM0%6fY<9rx>Zfi%o$n@F#gWl|RW?Z0Z+bdtSyce(lNxf1;NQ z@FyC_PuFzT#NZoO0yDy&51EEPa)F_~4L*G1NML68b0O35=hGRgBfg#|Dfo@kAe4UyWjOt#q%N2Z$dCuuC29%*I6Yj8ll(-*L*wCy!dc<{0J$g_#Owhd-A-_%uR>{27h;bYXsZ z)eg_Ew5Yk&+KtCi3l4LBCiyc0$oVTe7Z~XAFg=&pIf2^E35nI|6_jfJe(GL^;QM&g z;N=|tip>QEJKVjnarg{YZc9nJLFwkt^<)A5zysh3PvxN(uR#tFPUfnoo5~-t_ZVs^w1)pv+&fxIlehD;-s^Lw)V`)Z`kI0sbaV zv^YT7%PIWH#^gkeT9UrAqm~lqe0|NiQ{&X7&69rqCSnLlV1U1(Akfs^+73%B(Nt@w z^u$_`dj1%GdH``FngvfS4S&yZ5XPRyrWt$OYj@%iW0heINwp~L{5kj;T%byzj=$&S z0-c_=@mGjjUSR>tbzVXMW`{rR7_0!O=kK|=K(~Lb{2_q+#C*CNN^r8fl_v3-PmBD# zKTY%V&p$0K_5Gj81-i`PHM{Y^r_~rZJAnkBfYMSw9*p_cn3Xz3LhfMU`K82cken^u z{7LlTL5_8ZeC^_q4NmdAWa3iCTfq>qiCni-P2vZUo1@E+G5&G@s8hgAGdoB3jKn@8 zpdj-1a>NEis3z8_5u2kuJ=OgEAmIPBF3`X5GCvxOS&hNd~F{@E1B;RQ0?$w{Dt6qO*;Zt>OiI$c@{sOZ=Uy(8h{*e;18kKYN zI81;Fx%LDKj9Y}H@9C ze1%T7+l!{Od}S$3J%71eV0Z!*{!~2kle%b#NwgvPX@a>kz+W^MsQ5rZ!2o}O5n4Do zax~F?FP6e0s}RVL1^yzsKofyM{t|V82{MDdm|$`7iwfWrIUyVTF;{O0d z5jRa-BG84){X`+%c;M4&44iFCh!32avrJ8hf9y9nav6TsxUmfJmoI>332ktib7?J+ zyST6!lr~1KOvxVOFiZSJaDf`7%>EBfwi-2sS(2jmapBI_G%{jkni3)=WsARD1bSm| zM3oXgJz_5x;B?~!MXSqLOlU*a_zOD-YXmAU*Ce`0R-=1T=+9`qob=h&W=5*aT2d%u z=J?Cy0{Q(Aoi^nVbm=V3YBc7u7$Ws^k|$f*#7z+@%UM>=6EetO2p6c81raj>XfH6nLexz5NUe>q*CUKECyi9oASUtZ2sU5^$#EoZCX1l|aK zW{w7hDb99IHuX!3FnVtN;7HQEHW+j8`7vSxczj8 zxJ;CW-Puj zqUOr!hR&6fojh`4_#>st=!e>>q8Ag$k#hck89RXDI;5;jMNArJ;~N92Mo7?htR~Sb zQe(@VBo`c;!A6|Ax6NRqP0I?z-Q zs~ja$7WfP20`bB%i1{uszEROM(xIkbwMj~$+H=f_O70kknzCb*63CJX{s6$->kzzj zEn+T#@r?mhBP3}0RhOhIQF9)irb&KTmpxKiK?;QyWQM<-0HWLm#P}ebywOyml&k2}SWkj+l*b=6|>b!Jyd!^phsq_+u_m(nUJ3 zkeef?RE*(3*SCQ*LtZBtPM1zlvrEP}2Wo+8g>lMcd9uNu1E7|`a)&{5au%5iWVF=i znKYdnNb*h&7g6$u%VarADGv`VXGbt)i$5KJe}0&@b{jF0~jr3yij!OQWSag1X9*0`pdiNBaTbK{fE+y7(z>i9m@1&Vzh ziXts!ZqDdNv3`4851>8Icvv;@DS&brNft;a@Vih^#a%bl}eH>1ZTf~l%tThw-Nh|~a6!45nkzcST zXGmk@-oieHO#E-;+(~C(#L0h>tRf@)i7co90y?f@(J6AXYIBA%x^<`Q(-~!D^7v7b zSCSe21Qrwk5gkXe7!)~q^*Qq~no2B^bVLoBqTw&m2L5b(?oN|Fvu`T29TfR 
z8Ra}7j0|2~E|!Mn#xojo6@x*;os*Po>$1e34+;S2Q|Jhm4=5JI&;?q$#A>wUd)_AE zFNDif{2%n0wY%dRpEnKTUcVVX9F!9bsrG*_P6{Yt^Fs3?Sr;X_VVuG)Lox zUq$Nup9)}n7INk^LWVix8HYMp|Q;EMYb3Z)iB{u()=YZDkPQa)m&?`2w z@%fUbubub{&EMo8fbm%fn=|O;gq#tLdIFwrs)fJg80P)I|!qW@mYwP(_d<6a)va@ zJzpG0M*a%XytJ=B*a+;oKs|pUS%^%L=jC)c!x;< z{`f4=*gpta0gMQlvDkc!mO=|_GV%9vA*#)g|3g0r_e0Do6$%s^e^cWd%@AE{e~BHRseyZEa3se50+@0@}>{aZ93AXN^C1D+X^22Zf5yf<33rS{d6I zZ2!2Va^$p_D{Dy$&{f5{P;LH;Z*(AwSk8I6!( zvGI(B#mrcGDE^WnFq8Zl3CzucsaI?eP36TJyj*;vsVxOg56NFL1ZI>!!yp`=g)lkw zUe3uG!&qR>OvQ)hFNtYjR{0ZLprCVn7Q*G!xj-*xWTVjV@r;J&F985k&EJpQTOZm_ z@7ZX-^Ww7*GN;DN`8lH+{dSLQECPS|1DJ09UVmLno}1_r=yhLw7DDF?KqlslX7rjp zzL6;W+CpnsslpK24;vq4T1Xsu`_4R@-RHY!zd58S=c@W0c3-)Q7il+$1`HN#+*5gw!G$2 z^S0-JoX*vnO!21DM0P_LGhFE2l&1yB;n`A3CFPqNS z1=Ae-ME+1-mA}E0!K`iywlZt{2`sjsWiKiooAp2B4LNJdGyESg|G}AUEd=Q;hsp{$81~ z{vrl({>PL(cMf*C7=WC=GY)@UW&pn*_FV(Tc;1h|)oBCZ=es|Zzw;bGzW;Xz0`P|b zT3N{7s0Gk1R%>+pADk0|p!~Af_yhbdS@=T$UHn}B%m8MRKg2I5f1?N>;>Y*{0H=B| zX!_2x=~xJ9)NthI)EIsKv_?%U6HBu2hxl3f0{|I+IRED3FZ?xmz;yFBWs1)IG5jq2 zo#!4<=I@>eU=T7ZD1tFafM5(_WHpNX1zS{UVI*h@w#%LpEWt@(_}Te8%T~c``RDOx z0x<3TX#nK-3H-SOuro)&8%8h$v+>vWfA{PFT0seKR0))Fsz!;wpwWnk&S0l4Xw{X0 zX&k=*e~z>`|HFWDK!ra8fT`y%-~x^KvHd>~%EX@xpyF_>phPjsD#;F%;t9my5qM8vgVEW`IB01v>a?+@HuFctG%f_-MlJJ=?Z#TeD_$rTx~8 z8@;{Pu3o*`+cz+L=hmU7*$ovdkM#^(?r5qgMgDvite`|Ux&>=YijZPj&j#* z-gpqUUvMYpX4^${gZS|89`FKEcwOkx{{^8Kx0$^*Rp2AFZeY(c%YOuC@9h7vji}*{ z-K!eje6yh*`I}l+T2fYCKJ66p*VlIq5$wHw71a$3TYH~885_NI z^EwcC>0$*%+saDID!>8O)lZ)R@U3UFKivLfJpc?|Wc-Q!<#we1+27wk-Rtb) zi{!$uGsqu$z#-fX=vY?UFuks}rlzK{v=qjQ+8*TZ)-7;-JoFD;*te^$rfk)*>q93` zuAf>e-w)Ar5KCe##4<9nO*tLxO=eyzCuG^mFqW}U;UaIcIb1qpE-epgCctv_qp?8{r$I! 
z7eEQW4)tpASmcj?0of3Y`~iWLwJ-qJ)KpiLmXww)@9Ejn-rqmabG;9~br=TQ4>ea; z%x}34W46*6(A`Rka2VeRkO}1kqPYuZgp3D&uT|8nIPu_K=keWBN-O7IeE*++|J5&^ zjNQHkgYcEBy^Oz^WmSM5NIZ4@Syy}K_6GxS5JHdCX&!Y#r2kgY0x09B<4?rT-+y85 zj}f@I0#;?X0x2meUDMOEi1+w}hK7dqsLtThs;a&0){)8|42JjTfd(7)&br4+tG2l7y)H@2#jlzyS35<4*()p$ai2Jn0f~(kut`Se3vw-6-)a(wyK7W_MNzn1QvEmPlkV=dyx0bB=z zO!-vy9=mN zm4W&PXA8jH$)hLQj_fe+It7?JvpAuokTEe`aOytgZ=aHA^iTIQyB+P;BT?c|842+>E7tz z$H!lnKerPW?FXL+4}V-}Id<%L>w!g6whmOU+}(1z?bx1k8UTF+pBI2;{QCPlI}hz` zZEtTowtr1iXJ_MDya2X@#Xrqf{zkSGn)uUtDwn?~F3=^(I~~MlOpCS-3{)>aaIC$f@{B?Fo{2kWw2g?r@Moa2e0)8?1(}3dh7lA;B z6PF?Q)RE(vBO7%Y;-xVFbNRoT?w+3R3l4y6mZ$nZmp_hQ=XU6_>bKu(J$<^PqwUCH z*#!#SjR-y$0H+)2>mNA0<=AP&?^Nr1t6ly)_cxe}zwK4q^WiT5ipXF1L72nf0fvzv zAA&ieF?f3H6a+9gf8YQk@Ym77_-j7djsSKZlU<;|A4ZFezq5yrw4QEn1N=_5Ha9x_ z`S|7JZ?qKgbGKh2j=ziuxhp09!n;6LhyWNy<%0e3S5vEOR5K=e9EQJ^o~u3G=eYw6 z;r}>)jYm$la{x~qIegY1go}bdr4im?^AtgWNhWNF(T@-Ep$ANM4 zvQLG-&@K=&^Wyx!Qo*V@icvW?pF`&V;N~vh|GT7Jf7ER0zS`qBK)%UhAAdQ2Ymc5h zb&3Pner(SP4?wT4d1QS3A8b8hSlP~ZfwsT9VXfGnbLjlg`vHEft<4)ae}eln_&;a% zhp#`l9)>@~8Tp6=2vqqC?E(P`wb<~r2J_~_m^Cl@KMjBF?#&PU1pdGQGXEEWzm}5- zpz8v6x&Vs(4e`SA2lqC<`!|21ZiC}-bxBFdgqI*}+T^zAz0Djy7<&=F<3}1dv-L+5 z{ss|0Z~MhG{vsJduUJ3{@)wrCAVYGcK|F;P*^T0fdB!#VkL}M%{4GKLxC0E&U*pje z0)Xwuck?mW?QHk6K>U5Nw6mkhZ7!hrL8F36upr;fjngK&@` ztS3~hIob6~$up+o57ChLs{#HT2WaPyKZfPa|G@u&1LPjC^CVw}xZJs)h2d}Fjvep% z0d8VIICktfdzkX-wcV>YdhBoeeFos!eJ21v3))wmns55&sS&iOV2}Gpv_dL%37#T8uEmy7qf4BtJ^SAN{ zcM^R6h4Z)ih)W+Vg7KEt>C?(A?Q|?o#)a8#nu|!?VDH@aV@t-?ee$ zZw&cE00sVh2PgoD{N0~B=jQPJyLU!M1_%0XT!&vCTsYg+aeUWymA@hF{huCafxoNA9ipo}5L@cQm||NPJY{4f6EFaPo{|LU*cr#0XE z-uJ)%{lEU}zlNa~hUgbxeDS64e8jVzwPdgU*}W9 z-?XiVj-Nbz=KSR=H~I!g@7#az{?n&peVh0*SW9sG1KcA#q+XEm7yX~=0Hp!=OCP}Y zdknx0BV%KCZx0Xk_w`=C;_$aq^?yTJ{#^V<_~whZ{TGfuV8POD0)IdB`2+t33ww`0 z0PuSbfH?Kz0Dk8s3xDW%?X{Y%)|}>SdW$>$I{xn+z5lD}z6JvmI6xzR@CYgdHvjAB z=!Nlj+ScaQwvMysFJJA2pMTvMd-TCaACL9n8>KHaV_3go!+iJ=@j`e@B?gcap9@Ur z$-r-G_>*UVK7ha<1MnS{zthKe?i?8GJj189?C;Rfk$07^9z6fz@#nZdXZyv$kNLlI 
z-uUbIzl9~U*)EX2SL9U30V02{17rXy4v>3555RBR`E!0TFW2N9+2FM2^oj@>k^P^U zKlnu^Y{8iLYlG1h5R~{kg#2CXz6Sh_-W_}V#N`hjWx~MTvH;ICK{#^c(9xrtH*Q?8 zfC0&fj0_~gi*Wa*+m|-LPd`{Mv0wC?`U{PWz#rl? z&l!Mmg2x>ojKF`Tj=;_5_V4*0v#KQwyp0r2CHI!L}b?e{9@19oWNAad7+k71QBIYS1k( z=x95KC8E*i4*=wA49=fB0~AJJXA1a3;LlcUh+2(pvbNWlOKS+p-2H){X>ed}cGum#}6fTEvH68K}c-mv93N3UY{K9IEagZOfy=`T;31i3hTjK3Wl z);F!5KMlGC`qK;`Ga`WC|AG!s0FciC`5FW~Amh(mY?xZL#N=wPDN`_={mW3*rLW@s zH$MLxG>yModf2-pdb;@l?C^)PSnLZYESx=eTSG%FJ6f~|Z#gVkvUJnNT?50n?=$`$ zoB7KH;8mD_Vz|yjE8L(sfqY_jW03hf_h~2x$N+4=>o~yk=U>|n&x>u_y7irR-rLy= z^NIfcKA2I!@21*MooLy<2!Fi-VfT_cA1;wU-V*pL^S1(q-#!=|E}v|KFKbE3%Z$N_ zg9mp4fa}&SpMiuy2zC%l;YQ>SHd>S!AX|d)HL&6VfAGU^{}BA2xmeC$j#{lvG8jWA zS5{1&I%~lKyzZ*11^aLVW^f-gnC}d;wtIh;bi<1z{1u49ADZD;|1yPh*X}=i;qujM zy*F;2Y-{g4eQL+)k-NYjtbgx|{PFqUMaTcav+XQ+LojwGcYs&;1K^xL7eHtZ{65~} z&&k&9_&S}as5Jdmyu0GtSU-%glCaeJW*9X(%2mka>zrS+=8Jr3X0)K}$G5{Nw&#GZA zP*0%K2a|w57eHkM7B*qN2a6E!fH($!%i%8vV){yXdrVoKCWR(Vdm`ZX){1p27x2P> z!TIy|wVC*<>FK=z{J|2KIY5Cw{9Sr&?Yu1=U2x|L!pXy}9cNFRKG}Zu;`QFao99oT zJh-xQ+4AKp;FpZcmTcO%anq*eE6vUB`a|W#=KHVFBTU7t& z^7nfP|M-uiqm04XE8cw%hv0Q<8&|Jd)Lm_-lFRokh4rk;3$F{DA{}cCcaQ%&lAB-MV$lzQ>OrzYpPezx&-kK=?-p zzyCcZ&8|iM${qghf3jz`iy!*Gks}KhR)DXo!!HaA?vIb0vsS?D?;83)j^982!$17) zcOxTf)_@l@(BSs31AtSno0rv1hh zW%%{*>t4Seen$63S#@oBb-BRSA_y?CY;C=Mz4zFbEi!p;Ls(w&H!r=^@ad=b!_5GJ zz^}gg>NgOY5C8VJzx{g%z@HBwF!@;Gum9eIy|dk+75O{9dLBDG#G7Eh{N>FX5Mbgqa_UsuDHy9a?>>LN zv;WAEBd`bP%-me}w;m~mP&RY^f>{+)rj&~iG9DK%K5+2hK|V?R5W`0pKHfX&rB~-R zf&UA`A38u6z~*M)PXZ8Kpvqr=|Jc}GT>0|w(>LaCSpNpZU!LLNr}DSb=g-BD@%P)` z{PwrM8Cty>{9r@KKbNh2*?bu)8Bi;0kFG! 
z{l<4T&*k*t-+3!nteg&?3m!grfIEk+@Za98d-v|$`hTtzBFlF)8*ceyY$=&Esid+JH-VhM zr+?lUjr?6^ey z-wAf-fPeHyC0kq$h~L_EI~(7gjqIIj`}cqUKmUjG z2Lu|%-k{4LqM`Q6a`F%QT91pa=* z2>kmoAbj7|tHTK3BLI+(ztz3)EDVIPYm0%umnToDn^svZQrq*@fT_Y_5fu3 zaRAvAaOeo|_Yc6|quI4nD(k1sKX~ZSdk|U=&aaqSH=}y~GMLix0CQS=>46iR$mLa0N;nq-|Qv%k&s6JG$mF!_07dgmaSO1XwDpD@8rpU`PYB_SH|D} zfdCGW0m%4+=TiK7d90=P7R~^13uZH_;BgYE+>;GHs#_nT6`bkU-%@4mZrzRw@}KEUtZ2Ydj20t|ZNZ|~;4 z2li}V@X06l!OJ4-2)*(cp!XQoui++$z#m(N{PwpW@HNQL5nP2l1pZd8Sg~UH^5t*8 zjS<{OTCrqN<%ES>H|*H9WyAV)>sGH?wQ6(kby%E1Xm_{&kiRLb@v$=}Jm5)%|sXvzGUvyi~uxan!J$vB)yV3uRJz(&003H0814IO$e)#nIwfCCe zJFtKAyE{%eGeE306oAbe?(#b!$RBt>;Ew}%4Iaja1rr4PhVd|S!UT9nJNyp={;L%r z8}t7#N#4fBUp7T-+$->h<2fv?ap}tkUmSg1?+5M=@x!s=mp5*_{WgBR*}Y-5FUb4h zr+nJ^GdI9Z$p4MuZ&|=QsSy6o3qnqzAN^5QO!VUpHGYB^3uBb9a}Fw}*3X%@;LSIY zz19=I|EGWYC&bU=59WW|0Rn)?-?2@7ZPm4J&U#ZIa4Vn(_r7|2Z}gow_}-zzM~)nA zf&aq`UutTWYmUlM8Fu8HVrcE1I=s`grK76p`hT#5$(HHr{#RslF*yxMHFSXE%et^Wvn zs+>O#AdJ6!0_XtvAHK+KowH^&c=hwwvhg`yABLMZVLSTrr3>d4)zq{}TgjI$!D*NA z`~3NX2TOWmxIe@X!=sD0S{7`)aQWQH6D>!3fIF8yyk!ggA$(7sypQAo>SOosUFzr= z9=y~2(Z?U}6Zz9RK>kw@aR|osC@z5+fP;sfjlfa1?DciQ*u17DCqboekn_h})xu-P zeEx=q74Ij^{Y3A_@xw*hYAZ#|k$K&tMlh?I<{eup;wxeATs4xjU?oFkRZT@LviIKc z5f}g%f1(5AGeG76*&I;Rxkhk-B7dAh_=^}`hiCQ?I3-->_&rzv{3)ZaviBmmKlFaw z{c-$Y^7ZK9u1ELpwfFU2>;U9&+UEuEmIx{*?;dP;j|`nZ1Gi5Ocf&s5KI0PDdk%~J z7|G=ix1ret&{=}~{or9|C;F-8Wij)PbDb3W*n6^={{z7%P1w=i?(qkHRUUi={BWxB zIq28NZXY(TS+mCLL<>a-ig#WxlEY{-+t@|ygO#}{s<8^mtEf13Y?$rqy8yBQ7#*NI z1S5bFe>HU;fm;3?=4||-{}cE_uatB4=ea+|-_A#Uea9~KTsVK#`PX#upLE7Bb{~GQ zF*Gom*xpTj-Ao+ufaBH!p1z=l)J_Ul?iPEP&)j1^(qoKO|3l>^mjqJ4?8^W)& z&z~~_!xT`O13HsHQEV)#4ql{-pKNS`PA7#&twQSgM?JCY%E=($!1e*qP4z+Y0 z-rTf#^E->+HPin4r`hDu+kOGz@t+kjhWJM3ksDLV@e7u6HV(2;g->bNeA>CMF3tgQ z2=*p{;xBYr;}*U1?oxylmQ&g0`DupIlvP;I}&a}pvQ?nff*oIdYA;W0< zHgJPO@GqU-fuEY=&Iv3}`X4@g{K1n+m!3TR2>f5b0kYp>h6ND#;{a;-8<=-o^?ojW zjK9@SKlC3$UCI7YVB~M%#!7bUnDMt|N7E+w!2mu`!H9!R#8rnezzy>2I7#_@qT6`RMTnk3ah4lOFJYLH?Knv;pWni{Pw6kUs`skiUU}d95;kK7D-s3#$!2kiwn1 
zHQ@c4o8kHU%1U_WQ3>2DhJe#P_~k%L3w+htP{DTzxK21nK781r13TM@N_=Ao#Ebyi zTEuUN31kC;?u|`(4k!)5-Wcrh2LhiwJP!)m-P$@o7k}t59XQ+%!0)rqIDba`ob5k{ zL2>cxZomLon{W2@E?9tf-0qBe0p4^r3S%@s?R^-lAH#zJj~>4N^vTnUlP)^`PjY}! z0UU6jhk1e*Cj=x<=1-)L^EYqJnx${nmR6N+*tv7psZ%>KyoWzx!Sx6{aU*Oyd;88f zPi9}!Pe1kG1Nk{(1{UHQoq`2Wq-gwtH96M~4YQ3czMf+a&>w;QMF^V%iu^UN$2RTV zvm5?e{9j+qWv6>w_V5$=1OGSwx_9G4T73A=z6kp-Z2MVVf$Y3_^ZJ4Xu*(9YIREZ`ZKiZ4qJg&CU@3OmaK7i<&e}$`Hyh-ElL(3j; zfIkoO_kXYO#~h!?pG+U;k2^n@0wQnxUkDiJwZ ze;?-p5IWv}B@tVMZ~&btGw{c^0ww+!eFFpY{qa}D&$<8M48J~p!(;fsi;Exd7r>7V zzDGE9{$CFi>l?KI#;bMR2%FEiYY=`P#NUN+MqrV@<-Q6vHKK2m<9z~u zK7sg^1}5fT_wmy|dgadmweIiH&WF8M=g&vr#@LD&wxl1T>wAEME*>z?cMm<{9Uu_+ zyYT)Kfj^@Mboo;~AO{fnV*rZ$Irt6CKkdd;{BEoKIo@v=`E&3C&RzEhZD8JyjlI7( z(io({?`#bV@r|JoOS0B=BWzDq8G)UBXbzx}KXrcRGbr=to(K4S1~@Fa#=h1t1j5iV z_W1FBar>D)w!--HZvLvHFRVWvUcWMb0S>V5^E<@sT=>HWyOK*0qfC;iAD66o{iG6()|>C(YLJXG<6V}AeF^?x3Iz@LlXmmYpF`g-^s zZR9kGUwVj(hy_@PZXeOv?}SvVn$>EosaZFQd#dc`HS9*P-~btbX8t%>nwegk zqK^e}8`a_TnqX(bso>Z1m+DfR7&Te0Z&A{$+#?78e)*SOzX5 z4*z&}<9^YI&)UsW|Ui2ri}46|NL z;?Ku#_yOPt>kecO0vmmO?|1a5-*0k{`spl|lraioD)#Z4#9F^@)C16;0>ZO^5dQ7o zEc^*;KnJq^f#KoN12x|I!#x85WOM4zK0Dy>hpParu%ZjXZhL;e&1aUS45ip2nAcj64I5a5pkIEJq; zfB@Sl5a3A*c--pks;c%LcpT_D{45VP#xGs$g^kF&_s3une)k|OJG}LW&)*2}=g+qb7-e%lwes%|Am}`cAl(WE06qR-$3Xh7xokO~1B0U@ zci^=e?>~hZv%mgu&jS9Kn-MpkfBq@*2OGnd#RuH_b+%qy_lNx5-^S;AIN5{eTyU|0 z8_w|g0C)dz`;TqLy#M~lmFqYAZ}r~nYg>1!rw5lHz6Xp1;4|cp1GsJV?76cSZtgg9 z2HvE3zGt9$!@5;9Rq%W11q&C!3pN>+{a<|ktIvM^=_kK<`uKtApc{I6rqC?mrMxuB1+ z`^WCxy$An1RYyC#UGl=|&a)RTt*WWHc=hU)D`(HL*8_H)J9oCLtLrA?&&AK#eFpyK z?ep-{Jo@LaeLegh?u6;x?a|SZf&1X!*fJLfTo`saeb_Fb53oi5`Q$1z<1D;KcQ3wX zYSN@dix#%Ex!W*4fNUcgdjD5i56ITRw~#*s@X3sZzx~@+D`#Kj%8ZU3m=AA!a6*T( zBl+OVU;X0ar;i`p9eLR2+Tar(!`d)>{`_^|Z;X3Cmp(T2>%&`18vX=-6C@!Kqpntu z4V^KeT7^1lnr4j?VM=V`~U+_NPq#eD?9xN zE`JYNH2mHE@XKHQ{NpE&#_rtyrNZAvV2|;~R~>*K+jTy+7PRC2pzX2(8@;ZRD|f+= zQ@x`?w0Heo#|5$n0w3^wg#mT^J;uJW48Spmzhwe{d+*)j{M}dreO13;?e-HLw}zjN zTpbu4=|9xoj*~XLsm}c$K6h^#eVr$taa4uzo`q}hFNy5j;qZrdeK~w 
zaL=Sk8{wZ>*t@p6`o(X`__=S7fAJ-JH`n5qFt};zXUUDh;o$+e;e6x9wX2s8ADZv; z_q2JT=l^y)!!JxMzyW@O4)FfZ8+`zA{6+pSj5(_g#P8TKw@a}{-FV=$fJS4jSf|m* zzZ8LXx(RV}Hr@4>A;2Gmfq`AD8UCMseya!o1Sf`p`#(p!S^MGP6Bcf)2L3QCUc7tv zzOEC^Cl7tw?Oc2_`-C$O*(A?C`bzHaLC@jC%O~%^u@?iv$AY)$V7`+(z$d#WP5R~^ zyzt-MD&v0s&$?ZE_UziVbLaN$t;bs~p6hJg%KhKq?Wac;;v2)Ns&;pAB}QTGd;9i> zzy9p!AAtkB|H*L^e{9w9>n~bb-2TE(TW1&0Xhw|nx!6KEUWoQWneD&ePs4Ej1_yQg z@gbPIKj3eL-_p4U2JSlit*LC<*bMJq!{_q)`{5;e7i0!jjf~)WaM6_agd4xM(f8ql z9kuW?beBH|KG^khmb}i&v18@*=`-G(UZ)_i3-KFzYx0!$ zyvd)x^DK_O&hY!-!Q2;K_=A@m{v7;p0@%_r4+Fk&Td5+6Ly14Q($&BpJa9NPGIkjJ zp9~2*YIPjo(?Jt|UwrxmJSJF`+;z&abXXu`9@IX0CC!8bAPbu0{mfsf8hecYzWQG zham9OLj6_wtA&>s!cWgvtXMTqRe&GYJwpfh_`#DewfuEmJhq38*Bbt8jMWv!sK17A zsfjlU_*)PE>Qs9>`m^QC11jV1T8{95MkZCHDZj_!sboAGyB$DgfO zYn`!;crkuY1!6gj7PGBIC}5x}NC?3nn>z!31G@aR4R38fxC_Hh0l!)Ue=mG@=Mefo z1W?A0kG>LrfS>FC^qdqb)x@7xKoq5}?it&Ie*bfS0nFFG==NOxZX5Ws6l>@-UT0h* z`!Z?!k|kP-0`OCx`{Ph;|bmSQ<&I+3{r{zf)B{ONPL zMqR0p=GLk;$1!U7>wq*NaQ=QB$^BXQd*K^6|HDw)qu>`9eV^0#`*#|3U2V0!21e!p z-~aiLHvv@qUoQT180D(du}5uf5)_$ah}fnviOlW9^;{?5m zw({o_xGd=YoEyKm_=RV#o|pd%bhyU>kH}Y#X(!MDe*DZ`0DJu1zRlJjE`Lgg=PXt@ zAy{WTql8#K6sdWI=3q2bdvo*09~qA}{94CfK6o-GIOTwK^nI@U$pR|cYK*P$6v9Ve zx_97Q{(w7~KTS@tY_ZeCbQX_o6f;6xE;znXz80m}aK@bc9Y_AY{Iz!PS!ua&n8W`u z1d+gc1dtEE;?^_we(>bXbM$}ENx}umU4}=h5gp*juO0pveiJq^ujlc12L@+dPN{6Q z*yN2Wf6}EyG)j4;rm}*v4x_;cqDu#UewE`LI&iN(2brC2waezm*F_P0x?l$CAX2>+@ozUde4 z(ZM_8VfHsTxMh6F>1@^=S<;HyV z8vGyd_w37cUST04e^)p<3SY9!A6M6S-Kef(Jn`$#e#sZW4u8AZ{BIAqz0L5xzdLs( zOqf(zofO2{mj$owO1*Z6&uUw0H~Db9opIp12ErzUWr37JRPOX|JFVO z{)W7w!bUR19}dFi3m1&=7}hhk0O9;qG5+qj{9V300p2TF?Uy9v^b_4!;f&^sfjVOv z8G&xogP-PNfw~3k{;$IyJ|T*p&<$(xr>m{O5_6#D0O#ge?7Vxk)_l+lb>Y>KeSwqngjc+V^6ZvppzDu2h3KUb0PpIPHC z>*E-w@z86us!o@i%RnMFh{-3QINM&gE|p;)nk4@2VEG-}-c*b9+rinN*IKQ%ZFX z3S`t(l}K9j_EwivR94Q1f6;XBUJHLs(OZ{6{w(TFgTOWV^%t-Jo-pC%N_gdNW#w+p zA097gANZvA)>Azw}d>5t_r%+^%ql8QP~4Z4?VhmYL_~v^K)thEszoTiw=s@ zyj(fW1?w-Sq@v;U`7>QtPA_2pD#|1u`=JOqY_kF>r~_h4G6~ukww`87D6H 
zo@;9t`Q!g45BnE()_(PDSj`jWIIH~W)pbAtQw9os!JaI0@%7~b7>;ZpaN>(@rk zFRH9)m{C_ZKd79aGiXCM#e&AqYwh!JB~rt7V%qRu z0EK`FYrpzxlcAl~6;D?t_Ev~Jq&XqHM zol$eTh(!a1*WzBOrcS?DZNY9%#n@24>%y%&*O5Qi9NmWi?&bI`Sh8g2^470n4Zn5i z=1>7D;Rs8J=;AB z01opPqJsdTUOug%vAOvcf0_5)k-qEK z;3wrxLtoudB@1<&dj3=aCHXAI6VIM)YKC{l_1|IdEQEK4TsyRO!Aw89ET5mwV&h_z z5K}KDF{AnomakO4P;idb<Uw!pJIW^p22KbW&l;u-; zO)$Q7;>5aXb+cy8TY9+dO#9i+w$}ZJ7YTNAPIi){u9OW95;7`?$;BpTjM(1#y4lM& zo*KayX~VCA9**{1zrpxBEnRikgDmhT38=D=Vxx+fbSX(1C9(qoiYTFR?!0B&&)gY% z_&)p-Rv$gNKiGZcYVV=n{AQbYUg&r>_!9&S$S3w%x{N`@gzHSy7)_(;Z|%O+5C3M_ zy|KHG29Ir6@%EZE>qbVJLNh0xm=XSb0X2xF)u=(tzm$ZHe!*b`Y-n85xN6OkB}>_i zMdT@JJTv^c0&3+;r%{WT?*esK>UDo)qbrfP{~{FP6fC3&QL9lnPZNEm@J}fhu7+3S z3t}?JN&Zm^je7nH2ugu8@;f0I%MyRAXM&gLrhvL`H27JJMss@$1qzlM)W%7PZ!{J; zZCbYYgTcvit!Xn_+8dt*Yfk(I7eqi~p&8@Pc%5l96o>_DvE!{%=J>0xw@6rgMoVMC z8QH7_GETGUmxs}CDH-I?B4E7@z@jrw%cHw3BIGKzFdBW0_%jO#h^5E~pqN`*NTV5E zn||0@bIz|7e=WSFGQ(U0ZUa_?a z=9KjF*B@@>Vy$kP_bD0;@$`?s>}vh1^Df?xq}3RpC@B_vX`Gm^kfu(t)l{~O!33aNT{Db($Eb+%&gD;8_LyeRq|F9^H#`ebV z?)W)5_P%0DEUti=;tw@abP*5JX0)_7Zg=NHQ9;zr7=I#esgGOu%Yy|#*Q|`y2$h<1 z#x!P*zXqE`=`t3eyO}ugDXLFg!0hqYV9}@{RmS?{yPJPGEehqT)0WjHZoR>nMgGhh z0e>kn0t@EWJ};xCNpaN8B!5PYP@(h~5iw(HpP$j#By4R|V@CNi$iNj!i;*K{Xsu&3 z&30`lFszN9HFr)^iyU?JO)?lW%b!+XM+CiqMQ3#R(_YGRH6|xuy7|-ej7O19qjVWP z#B`VPoQ)dgxm#rtYh0i&y<|)|e?b}i>(umGx{Ly1!Rvh9#)Q;OJAVNgM8r~V6cGzt z%JVk{niNO+^z*04Ad66ZMrrgBhAh@@*(f6xaDmd5K98J=xb};YTI=UbHGltwLh2QG zfdVN&3XlS%04YEUkOHItDL@L40;B*bKnjooqyQ;E3XlS%04YEUkOHItDL@L40;B*b zKnjooqyQ;E3XlS%04YEUkOHItDL@L40;B*bKnjooqyQ;E3XlS%04YEUkOHItDL@L4 z0;B*bKnjooqyQ;E3XlS%04YEUkOHItDL@L40;B*bKnjooqyQ;E3XlS%04YEUkOHIt zDL@L40;B*bKnjooqyQ;E3XlS%04YEUkOHItDL@L40;B*bKnjooqyQ;E3XlS%04YEU zkOHItDL@L40;B*bKnjooqyQ;E3XlS%04YEUkOHItDL@L40;B*bKnjooqyQ;E3XlS% z04YEUkOHItDL@L40;B*bKnjooqyQ;E3XlS%04YEUkOHItDL@L40;B*bKnjooqyQ;E z3XlS%04YEUkOHItDL@L40;B*bKnjooqyQ;E3XlS%04YEUkOHItDL@L40;B*bKnjoo zqyQ;E3XlS%04YEUkOHItDL@L40;B*bKnjooqyQ;E3XlS%04YEUkOHItDL@L40;B*b 
zKnjooqyQ;E3XlS%04YEUkOHItDL@L40;B*bKnjooqyQ;E3XlS%04YEUkOHItDL@L4 z0;B*bKnjooqyQ;E3XlS%04YEUkOHItDL@L40;B*bKnjooqyQ;E3XlS%04YEUkOHIt zDL@L40;B*bKnjooqyQ;E3XlS%04YEUkOHItDL@L40;B*bKnjooqyQ;E3XlS%04YEU zkOHItDL@L40;B*bKnjooqyQ;E3XlS%04YEUkOHItDL@L40;B*bKnjooqyQ;E3XlS% z04YEUkOHItDL@L40;B*bKnjooqyQ;E3XlS%04YEUkOHItDL@L40;B*bKnjooqyQ;E z3XlS%04YEUkOHItDL@L40;B*bKnjooqyQ;E3XlS%04YEUkOHItDL@L40;B*bKnjoo zqyQ;E3XlS%04YEUkOHItDL@L40;B*bKnjooqyQ;E3XlS%04YEUkOHItDL@L40;B*b zKnjooqyQ;E3XlS%04YEUkOHItDL@L40;B*bKnjooqyQ;E3XlS%04YEUkOHItDL@L4 z0;B*bKnjooqyQ;E3XlS%04eY_Q{WFMq+WqEBm1K%6jH9hkBGmN_h7o4PyD63|I*(2 zRP*=aw5w0;Pd9&WPzR=5ft2(21`(KU08`H&0hnq4)6XBdz%&Dx1^$S@^b(j2{%81#2k^|F##t9X_$+4Kscl?=l=ByaOyDnQq+TyJ z1yawS>g6QZNi=|bRD_MH}F^Rc4dse5*vSZW3C=5nBDJssN(sM=r0!0p$D@oeK=~c$l6`?3_UD=7hxR^a@Hfe?N6EL-2h( zYVdLnf5qkkgB|W(*f@L!E4QU2-Jo>y=X$aLf8YUika_&+j7duyx;$Nfht8b$iMrY? zX?fQt7m#xPIDkR?7=JFE{uK-S1&#h`;~dlW_PDh{Te~eW(Uzs1KXhao{5XFBFDLRB zFea*Nl*06#9i@bP&ehj$P0gn|C2#uqE7kI+2Tzb*hH>dswVM+$j#AZ$QXY)0MseqrkS0idq!fP z5l|5MdpTkQB2*LW)QHW|o}Oy{eh~0~S{LYFc$ps!#;nHR>G4xybvDlwzS2A@37s{3 zYm8@RkH3Hm6t2C@#+cP86q0W=boc5{)K#xPv+$|9wnR(IAb){bpsz?71pi2hS&hoM zc^oD{g>C=`-kG;UWb`P@b*$>V+KHY4_DHu;lW zpr8#y%tWBoC=`-UbiDZ}bba|~!SZI6KRtmCVtN9t#=M1Q1r!>G@h?ALU9!ucGzdE~ zd5Gx-VXHA;oiTr6bR<_&zD>$omQ?fiL*4$1kS6X%Go^gQG%nC;%v08ko)UwK7BOm%@) zW4=Ns+wDbDTE4QBrk=lCE-*ZS3V$k|`AJsxxvDF{G>Icp0S4@%#R8ZptDv9o)i)a3y0gU1&B<}y>5uLv#S@C}W zqKKO&E)nR$<$j`&Zana5H3rVMCBz3#%~_@<#6R{M9JvfXYus3d_{$eSvxGJ{&AGIe z$X#663`!fLR;FZ+ahN6kBDg?}QfB`LCtHo0!YoNq`?zrDYZ@7`GEE5)ld{EME&{zV zIHF1kpB}N73vjyef}+)BEGD!eYy5>Bgf#+{munK;B&*RqDfDNwUQYUKYcnHNW-Td{ zF?0Ooa)JDQh)$bw2)cBZW;GgfSqzc-ImwePZQ`Z~mE|le=Ls3)FN6!!%7Ta)0kj&m z#pOzj(9MaSnyX!9!qQrd>l%@}tXyYgk-wZSP%jEY%tWBos4p*Ps;);1o|dy!Z~|`x zKQl*z!W3saC!73P2{d^*2Qj@1v>I&!+if{|H(9g2DMqWU5ynhm4O!*SJP31=H2S!R zXw1sYv=0r<7G-pRj$}E2-7wCZ$kjb;?0JItdMMX)lbZ&B{C=Cjp zYiURDlUzC5(z(f*lsY3G8ru=E9L?z|=kK+M z1WFhwGdFP0aUD`trXnVdv+<1qRU;&5J64nE 
z6{)f1PV#ccj_B%$U6`LS)%;E1$b=*iE;@*q-~!_teL17Ybp5JHlM2?@b0kUGV;yKJ zh*ge~DGU6CbAfo_8pM1T7~iO98tG8eui7M~Q0+P9L?w5OLrvK+N(p4i1b+Zv?sW)W zx)w2)!1%_1su2=2{i;jSm8dxnPtzp7tjiuLtssR$3o^rBP5@DE17ds-PTpuLQA*eK zY%opAA{>Zky2)ai6L(E1u|M{ z^h}!04J3Idhl?or!)3CZrId$*rk7cgHPd() zH4RIw+RX6hE`d3LY{tj`h*E{1$l&F8&NxP~ersIQ&BR~Kow@PJ=I#Hnesz4G>H@_+ z4@Hp{GB;;*qgcN^t_RSbXFRN$b|qGAs{P;3WEu1TisyI~iHUC9GwK+NZ*+^#`Kjq< zNXnV5;3sI>m0X?a=1-ZB$pAXDKvAo(6a@(saz-?Y^;_ebel7<5$~En>UPmNj%K1}R zPyl2EigJXfNFf_HXM`Y@330&!QN6;p`IEa_1E46U6%^U1a1`Gt z)}JFD=;|EjkBd3bs6?G|`uUSVQ2~67?K`_26^SCLU^S=R=++nYQ|ard*$Z;M(k47i zu%0aNC%M1?fTCCdimXPFu|UqyMzM;Vak;Z|o<0sI$Sq<=N!FSP{v;Lx019|UrN}Q> zlQX0-a&KWDLni(=a_*!vFyiDtNmh{&{zMj300A9WvFH@JS+zMs8Qr>5_UVkWGI{(c z$t%eWe*y~%fQXJGSqzGty!xE^7)>P>NjjniP0{d|Xaj#XK6fe#Q_Ig}DHvp!Bm>A# z@{Dqx5Jm>CE*DF~a^o3|xr)J{;m%1)wsl$J&j$qn^eJ=%%Lfz-V(0=bU1Bv_@;z@8 z@fX5nD*g}p%-Y@YjnA8gaj)Nu9}db1hE)5%7bgXju=%2j_{NH=Os{!axkJ_|W>8X?1+@r*@7oX`+6d|(mIOC*{F{v;0=pM?-P4UqAiag2%dVZw#j z_)EB|icI)FX$W>$&gv}ZBazf6CMm+(~H`T&l@(oM%{O_g7K>*{k5ISeT%ZWLo z8I$kO{7WSM!u!9;K7o$#@mYwFQ-Ms*8O4}CEyeeqd{mQ#c*kTbNgIEa&5LHtGK&mDwO$M`Hn&FL>SG&w^W z<(@B&BO`x>XkOaaA8Z8nT%ex6kSs)|$n$c#oZ*ax=+b=4B>tlNKbJtQ{{uXnK8X*C zKBrS~@_8l?nxljlZe!jphp}ti{M* zApp!6f8PBNZ5PF7!J2bwrM5OFF}~4OY5{F#{O@pJ`3S;>Rh0gGqO==_;^Oc^OpdCspjuT?yV2) zr}u2M-+A#_2$@sk<@}scjefhwH5P%t`~gfif3Lr;CC^QC3G})zJ`1691|So2Ml*WN z9^Xh5{_+Me?flsZQ~{J&Pf4# zPstg@2PK$OC^)`xoOn~KiOioigIcQLI2-(hbAcjxP8{IygozJ|E~i&;e52R0@r`J4 zijf$q)mh;$t_u_Z)PmxQ7~SXssLdJ5=+-^XpD|F=W(n2o%eS$Tw~6hMq6HU zsd-y;sOi#PnbLIO!iDorICqYzJIuq76He!9O{Vx$2Vst%%ime9(1^s}9UnlmRPiYS zvdzWjG8Q*~96y0SCa=QZU@ic&#h>f~9sGR$IDj&LE`a%fVneL5%4W41?M<>3o0m=J z>w;+xejW5xz+^RA^5to= zfyZr#pTr+(3qix59l#9o#|hlmrR5I*4Dfen2U~<#S;)_5WDJWn8Y?TU#ylajWzPj5 z!%xTGImQ36t%g0u;xg&~UPl1;bw%LsE(6fYLY_twWUN?|k@06X+5pPeW+a9gIDT6G z&T;^qo&~G-^2Vb83t}e_Eram5C);_(S}x`~iTBKb(K_@fZG@JYc%{n=(b`{uq81 z{?2m`DD!tu1TYAh6%@f3BtS3*F|ry({(>#4v@jAh1>0p$36|ibF#PQNon@ zGXa=({xkq``~?2o0oa+N;0+@fg4y`%`@eg30Ii^eH>w0mIaQ;?U(jeoL}#$m7PRWh 
zz%-6ufImlCod03KIiSLy0l?Jr7jS__{Mh~<2xa0=22gQ0R#2iC<#LsrvQgnrHJT7H zqz4(Zr5Flv@XN*D84Z7W05ia!>;fJ9H11F24?H0FKYTP{_nvLrx2;*Tx^daE<#3<3 zX3qR2E0!;YR|?79Z3QKgQF3HzPRXe9rx*>0n38R@1#S9L0KdzZFsSPffBDP&-x&Z< z^?zNRx;bDL_!C{Aiy!B2)8<8ssxj2Se^o421&^Inl$E}*v!`eEh7FC4n|AF#zJKq& z1N&Q!Z=W}PdexFW$Io}QEvqaole*gqN<^bja4@G}4Du%#^@x~LgOJ%$q;e*fS5~du zv3J+Jx#hL9rZ3&Nx@p7q-TU`7Hy_xvW$pSUTe`d7F0ZIWfvcFzD0lV3g$oxQ@+W{@ zojL%s!JkKCPphk&R^KokLPPzuX>e3iU0GfV{|t3mX;)9rp^N9wox5=9 z^5vduH~WWg-#)QsPJP9Sqn8GH+SXQ-lmZ)m2WdcwZ}bb+6zdyx{0U8x2nluIw>Wo9 zD)80P%C(12oIlgDqq<@4@@^Q=Ajhu+0i52})6#zH z#*NLLHjN8m!n=0^;S~sv~ndLu%vv>CY*hbWF z$L>`PZ@$@3kNiz7D=jH2FQ0Y_`RnVuh6wgvzj14DBSdc{NWFO@Pi-x^G6^XD#b5mSpMUqe-}&~pzx}Om{n^h2I%k;UbTgZYVh;rj9F`CMg)-a_ak(Fz@Om$9RKI@7t#M! z2@dd;%9@(}ry+D*uBlnN|H0V3JELb-E?>5E>5`>O7t}0)1xR^ib!}~RMH6hmnD`sK zaX}zZz|N1tkW`!=P{B{)4}OIOVg34b>;HfD-uu0ZBhCBvKk)ppm-oH)+TCY&pV=9Y zcb|D@XU3jcYdqs|5Ch6N5QI=dfCNGj2?+&25&}smXM_M9Wt1gAiD(;p907Yg+TButb5 z7=BshFC-+y9e{fZedX>KdtZ(F!|vC)|BGb++ynfTB7gnB zUwkc4GB;NXTXJx@YGCpKYYW9Je)8pCbbyPCJz3Kojm4#kan(=4K0`li6fVfU;nsF>y&^1ya#n6=C|H$NnGy5RZ4{i7B zkUzx(s{Gvmmv_|Q|Jqz*mC<|l6d`^Tz+&iRBBRN}tuZl?kqOC(`-<8Wr$CPz9_(x= zuPq_})nXu8wv{x4;(mnvTu4} z?ctUB!kpCj-EYAPOC(G)Xkhq54+ulRSm=Ju_`7-){9lOC|Cxe9_;W;{iWK4pL5ls_3jcl=el#$DlI#QRD9uUHa43^@!B-($LUC;$=ZUQ*@cz&ZnGyLY~5x=vQmpgkdpDQdLEZm&|G!9=K zxpwXPjT@t`hCJ!im9kTJQhhQs$))K7V{pB>tSZK!q{_m^pyd ztQ@V~kc>Z3ystFUeDlqpVZgrgrx<<$;dKZ<#$bQ_Pb!Rd!3=+gdH%1`HRd`c;Ya&l znLoJ^avH(s!Ob7n+S=PYIxpnxu3Cu8KiAgZ+kXBk2S63U=LMiSe)IE#gO}<%`}%s@ z8&8%D4i*;S36Lo)el%P1w^ALQl-vRQNz>n04uAD#{Bb>%%%2k%D3c`f$IY1ZzvKw{ zaLu=zRuk#-9ee!cy2i(DNS!L5zq}Tyf#`@Sr)ps#?%0L)zJY78 z3B zy!_&@|0P6dn^MD{eHTbFVF1HTZN_T<%PCcB)F<@mz8(JB#wNyGBh&%f@PCxQ!q)CS z3Sd`j%dpxB?+E^Ml~8t@D@otm+tc0M)zw<;0e`)SUz^-S&c=Tf7&$HZ*70ZC1!7{F zo%*TCSZ{V>)E(>1F7bbGahI+EbUqx>=M&&2;pqd$E8!IT~Tbk0ac z#6X~)Kie)4pwMS)zf@ydZy2*oi~f(pU!Q#O1HXYkaDc@BIpD9Y8v&GEV4Vyg+upG4 zU;g0Q#>=n2u3rYn{kX8Oun@Sa`D%H&T%(H>6hG*D5xP%{AD6)s4KV1T_xxTAk8u~%!i~jHA 
zY2=T^&yD|++@EIpt6qQGJBCj(;Q9*zPHj0Oe+r#E{%m%_MuxB)Tcx^XSIx!CEgpY} zhK9d*;7@XZ*8I`iu$1v1_&;!f)B_H7(`kszo&4<|{@$vsJ);J=hykI!y`urc#N@dX z6g~3Uw@v^YZs-F1kUyAvfCB^sn+t97r*^;8{qgw=q5L(-{HZK(_*0Bp0DoX7z)eyZ ze!|UG1(0V}XtFdaNBLkw)Bld(_>YjG<*#jg68OU@FrUBtR_Y|^`U~Z+pjD<1Cc$_~ ztG~Y&T%gRI{OuS1UWS(s+`2t8a}&bHAAkG_gij%S2H|rEPqKfk0@$~X01h{fjl#*{ z-35a%1SANy*!6!Zf3u|fm8ZWr`;hNHI2vN)PiBGF*fM}Df8TI+z4E90eC?FV_nk^g zA8;pSq4j@e{8hT(K?WQFQv9yy=N}sWfHquzDQH612jmUG)hoC(a{b2Gjn13f!{4_* z{P4qHK==p(@FxSP@W%q!w}t@D&d%SQnVy=OoSYaRbzQ%9^~zv>=egh< z`kmXWD~k)WGgI*7!L{L`fsV5^dj6KU>(4TOt5WydMt|JY{e_fj_#0vRU(zmctZDmT zK5+_xKp8=d;Pvv$@Z80J{wLf5|DEr^0~+7`F5H&)z3+Vw`d#Rv;a2%qzx{1<{s@59 zrJ7kumCTGD#M|O8m+%J;P~xvfzVIu3bNEZDy42C#e`REJd}elW_4fMa{YQ^BX3Ob4 zSh=|T0qzlQQuj;$i~djV05u)(6BWR|H3DGS%Erc>TgyxHv(r=K5`T4i|F^{DPsVSB zF1{$sfA;tT7A%^L;qM13f8gI>Vz2NA0De~j5C?t~z;C~5!5=za>r&0FmMP7X@hx)y zEBU|EeE%2knuLxC9H1$Ga0@B~GX5JFn1=p0sj9iNcVKvAbYdDF|GK?#_rZJbZ_MHw zrLQ$(C@m|?golW;;VqRIKu)|b&_{O$zQo~AI|5Vz1pWwsr}g~xchuD_EDm0wLtFB> zwA6Y=_tS&>Uljf%_a`mCNca)|cU9?sCI6QlmPS^A_^qOil^h`QCp$m_pw0nO52yh6 zk~M$Q6Z6`Vlszjrty3yl*xbfxKk_?rDP1+Is2o<%(0Mva$$}ghC>kxDlT8XJUYVTPwxPq zQUH-ZbbuS_)tdhILh;GmL$$S)(w+ISafzodmy~2=5E!M8J#R(S7~P+dKUnso^N&-f zQh_}TQB5_ad8zP_8Z--ZI^4z~k!V!;0|4n9gYqYj0GS?G8Up?R__NB^MyaNCvaGLZ zDz3pMf0YyXEWhFaDSr`oBe44CQ?j$uj_2)9p-q#Ye(d^qGfY6}>nDJ}2k*YS$>EQB zKs|ssj~u4~mL4Z2Iv`Y3RaIrfQZg1mfXu0^tUP@hP=$jfbScNk--dPsXjp=I^5n_W zCiH+Ze*j>WI{s_!g1I}a!@w`9v9Y08iyhVgoET8_S}4OGx%7rL$1!>=x%Ppil`i7T ziBjJP4TV$`KEhvZS!qc@W)d_Dw5K_M#E1Zb|1&y320%Inq;nAPfP_EuZ0(fFCB|KS z%@P^o$zO(w3x6B0ztQpEqFMi2IY!LtKpV-_OwESKDz?K6f0QTLH9N@^vJ2h~> zSans^>C+eMreQoWKR*j23V3d+ucxc6CI_Ehfp9K2QH6`;kJbb}wfyBl_d5%n!)SLQ z{8+=n-XIJ{H8s@%fW<|}_90;qj15F$s1f;tg%;fikW4}799ZW8zyE`megOW@JX^}2 zn^G;SWMZ@(9TOEDeIP3f&pS3Ys{t2a78{_#bY+;-UHQm$!HXo+8HmImn&GK`Ersbt zjl4Fc=`kwx+iiT%b}0-DpSt6bGpChwwp2YrMZg-NQ`Y z-rdm&hmPdcHFml_yYK4jXv)(zFzg*YH@rAL&dh*i_Xqxq^?+@y?Id(`pbw-yu-pX` z{|8f*)2DNAilPar_V@z___Qfy|Ng3~GgVcU4fpQdyAR>3ufF;fgkMAW`fH3zOF;f2 
zCH~eQpHGwVL;ttZnw1>|zAh1eFeJziA4x~$!RT)i{U62e*T4GJS6{8HoIDABFr&1l zrkVhlpOYMm2pStoX$as??*Pdh*f0d73(?>DKJ9-=dSkY7|7&7aO0}#K&&Vsv7_E@U+L^JU=0-rs5_63CImM_2j@|O^R zKNUb=@}7pj`L)gZG`VX<{yGXW$nGT>KkeNI(El?00n>o?S6@LfUoJ0G22+kzfd|ab z%RP_;1R9!3X^0T{Qk#68jd6g?D$IAKDs#;m>GRN`(#cD)EelqYV+UaR-kT zAbzDK$ALe1Z5Xgu)Ytcyzx?&DX8cu7!sSuo04t%;1BY`G;6+agehCLI-qZ5eiP!l* zyL)#lYX+yDcoFQQk8aLDfPvdePfu?T^wkyTMn(qbTU%RU4Nw}n$?mTdDTWZSKQrq< z)b8DpEQEx|!-p?4H8s&e;=35$!|;B6=&Nt1mw^AX!yh_88NlXd;7(r1P7?hL9hp=0Dkj(jQ`_-qGU`q%u01^)MaGYjY&9|f1;oeI^WXD zlAQgC$X;*npa1;lzgY4&0q~F|7#G$kqwwUxhzQyHB_!-`xVJ|A-!%1q$lqrQe-yuu zK7uQS6@|M zUtjf*ghv=u0P9~3EiS2e>;yn?fZzd}n}G*H;KnaEH^184B7N}PyR!d_!2TC{hmFm~ z-4cDfeja(CvN9|*G%O|t7lEWehkr^Rjr@(0{#T3Nm*{oBc=qh+VjD7;oRpQBn{y}y z<|3LlBLLtB@}~eOPeJ4!7y+bLg5@#bKm0?Ol9X;}veo)OI6!N)OloP==A++>*>^Ax z@hd8>D?FZt?Dh2i@sEG{6Xg#GH0giiCuU}42Z-a8tg^D~D5>*>ul|rW@ONR6yLZDDg#E=uMMrk+O5dbmg9KPGURUwc z@RwUramF@(!~+rlfxj;Zfxp}U!W$+gmJz_a03hvu1i4J6X;bDsp*u)4k35D zsEdGjr;BtQSmOaD0RQ2IH}1MJ<dcv{OqDUF6DMVc@>)VfYb%zYjWXQFuK5h|EZFa*wY7*`O zoah>FIC>0T9aUIZSWp1cXGO>E+n;u%p$YGIzgS(Bo0guLmfwo_eMQGz()a;@FNZo5q$LSqp8V@%@;2;R-CEr zl16}7sx1JU%kI!CA;=$iK;Vx8I0-l7!-NR}{=#V39})uZXovqW;9so(*@*v#L2@8|WkK@mqTpK3qg0vl8qr*<}sulcz zi;(#Gm7kCovHTRTq(UICXCWU+9eA<~N9+!XJ$Pr~?E5k-zrx+1|K>y$AL(1XclhaP4b)dSEfl9*4E2y@ISop zB|bj4`b<^j=?ds3PnDIImys}rf^KbX=FTwgkLi7pKb(Bv?1S{a*!`mWL;Nt@o4?U^ zt^`&nTbo;;=WDt^2Y%4=N@0y8km!5(;NgS&_wL;t>ggM}=9*l(HR5gtLhi(8JmCDi z3mp}_PK{22kv{-%Yl}?mQ5Gh^lFJla;F+C=8HxO0pY*+0r|=hj@uKYfFlU?i;V<>W z52t}a;sV+BXSYeyvJvdId!u3gAt$HE$a}hPkBqU=iD^m6$lj&SU&ERz<&Odg{VyE= zN&x;@6}fltz=0H{eEM28I>wvDaPuZCM~~jPb~PtHzE`u9eB%ZjHj2OJPd7Jnr#*0g zh#!W#*XP=@PF)+l+TGQ5c?`Id>BCF5z#qc*@Zo(V4^ZD&U%N3dw!C=T_1^pMH?aJ1 z9Uy%agzbWHK8jOd0^nkcv=F#TroE~zxRp^-B1P!wTcrFESC!q~uJX6Mtn+@%*pKyo z6hEA#6i#HxF!;K=b@uN1 zTHoyS^#MQ*hkZ%_Z;7B%^44I%du3_l3S2&EaltxZgXt7lxetpxj3o1i%g|&1C{00r zx!5ADL_gxZEGDCa>ZGHOyeEtJKM*`Lq_(e5;Sc<(w)181!=cKjpx-RHd{}t$qjqXX1-!3dy+zxYIjKrVk0 
zb5{JJ|6}+=ujF?0r?@}DU)|l=+4dV_*G49!zov_S(iy|XIy|tkw79Y~dvg}%M$@pk zJ>nW0pGY(Er*VKPfVfC*1|aZ9uZ28fC6TrXe|q?&x3nRDuq?PQHnjY5xt2e9?Dq_! zA6$n^i*0QH+e!2(5adSGeDh4y|(rx{7I|)Nj)$O0X1VlX%NVY zjaenai*)hZ6aGGd83Xt~20wZ2k6C(F)?Vn~&*1*H>cGop!C3#=HEdvflxcBs3GVt_ z9fuoa;LfWta@PyIqYLgw7@xj*bN1lqyXgP84p4p+jy{ZO?1H5^Fz7}2Ll+7XY;poeE!RTBLSfP1;H9>qmWq;!iqkppnrZd@(`4|d zEWd#8_-92-gXc!+mK(F2Jae!I?(E(~bpaS4Ri*zy0E`Q_+5%>WAmpe7pN$wHEMHsmL z0Z+Z;!(-|j%8C!PCoi9c(}3D%LDW7;lXhT<&vo8|St zbGVqWxv`NuCG#guejWC{z#sIkS(R`v=rp~^+8xJ472ma6C0LinMb^!$bD1aRP7BV{Y-cP2F@K^BYUG*l^eDX(uk-zLyG33%Q z;jgl`q#Pa?z#A$Eaj=MZq6Gt7AfLiP%KHVRmGL5zX>)sSM0QQHC19JI?U!1#v_yy1 z3V^t#1^hkz`s-)73L`B8se>r;@&P^&_?yY9hUvx9qBP#bZVU!{1i8(vO;}|lzVhz92lw84{CEufpOHV}0IdL2 z?nRJhA;=#A(8%AyLPn>SKb1Z@|ApBG?MUHD-AV9%&CPKCeM}6z^C%3i6+^&bA3QnG z)&@Uy(pAt^0;&^^ky~0MI8ZnT5g%f~`3T%`!z?>1>&`e%CO^{;K2YET`6*&1EO>5|GG;*0Y+7o!wH12(!K3%; zv7g7;Hu_z16;1^ZUGpb63&x8y>N>R611`|}VSf2b9e>2}vHWT2qx@0l2SY&QjsAs@ z81s(H_`7E46spI0;rApt=n6 zN0z^agCN1aL+Q8*ZN=YFnLpMEQuntG{jb6O0U3q~FuD9ex?sYey8Z|K@62RYCQ!G& zMlQh-1j#}mjQAh`d+;A}Ny6G%==J-+-}~+WgoamNNkk?g6hLXn4E)ihKn;I{zJ-NM zwg1)QCtd%Lx?dH)yMaB>KGvG%$-&RVU`l%qsbAg^475;eHszxq=o|Rg>LdG-l z9E4s6QCDH49+>6tn5sg2JnP%E+p`RRDuMWu1|~A6RQ&k2Ua2EMuKT-Nw>3SHnTfz{ zkQp&7NpGR++r&;6cZla(LyveH2n7DF-G9jNXX*iE{`4M@0*L$(09pPd{1!6%N0+Ew>MO{a5yX3Vy(!jNcOlKj?iG{4N(#n%EybY_o_NSn%Aa%~)5a)@b-mXiC*5wKT@Z z7q8-)DtWwyTnJ_yAOX;vKMEFSq}QwQe{cXn6Gt>aAdLS=2TbO_AKtokc-*x2Rox%y zedz+=-L1N<$+65)gbpSb7ywuTE+Gy-lq+&@eRyr1@b_;0`-DF-1k?eDzYOgv3~vxP z-z-gp75-HG7BXXFmlp9qDL^-?Y-#vY@mt;m{9xXJ>_H&Cuj>6SUsl^q+oEbb+47aq z31WKeJ+Bg}eetRSpgIJEdjTQ*;SU!4F>^o(viXJO<<$%E%KSq<0s$mr>W@FZAn}K@ z0Km`4A3=(^G~NOXbgy-r8?H>ar1tK6INQY@7l(lmaJUCOJe`7|e}pikA;jG71E^(*ka{IvyL_HwL!@O~GS%urPk(`ZO#= z-dW#(N%);6n06@h50$?a;7_6tJgtf!{1#aF43Oy>Tg!syMje0JYz;=O*_%@7?*|Z+ z?nTgC3I+fb{$RyG^IMbIlD-!fS66PsYc=jaf)TSi|B#OY{)n4l7oR`;0QrN3VT;KJ zT>6!kUS#)&{H<5h@g5HL;64|eY~X@3d~f3FA1?oq#hCl|SH`Dq&d*KXob4^{85_eX zi0T0y0r(X8qX1SHq@|~2R}5Ua0&mhB8Cz&BD?Sk)3(reuW#_;PHVKyVpMCnt#~*(1 
z_!p1vwaW0pcSvekclR7ZxU#Y-J3nAg!VmjiSlu_YC)?xs$!J7O)7pM3RVk(97G8>D zZpQi@&VqlxA@wp^_#hRuF*g6k+MPA{^Hc+U@OH^-{e#2TZk&jZzdkWBK0Z85UJp2Q z_3H4@(9li7pNyZh`V9PKG${CSZv9i|z6yR@buhfUwYs{punzu>Omnfrg>IM9hxG#b z09*8*4=11+!|)#6dVI}PXlPDOc5kn|45I=_7NVi`KjC^nG7p|Z{t&>2`%-@T)3;;N zCa5y28y7O+jSo^7kX9r&pM3I*_aEKcytA@3E8E~3Z^J5E9vPVe{x+!hlj$QvzgfJr z#Nm(eH%1bCV&s*ou)#C>s8ptm_>x-)AoRiXR)pUnfM)y^;%pH|%dkw^N1g@pw^KxQ61{NCdSHi&Shy7L45gCp_+9|DN(?+#vhlYAd~ zJ`9f@olnKvnyM&#q1rcgzw$~>UmqO;s#oH`0}c)j4w1L}{SX3u$M;XqwYGI%s;?=f z@7sbaW@$2in{6EaZoT{DlMmm2cz5IWt&ep4odWgooT_9D=Zwgb1x{A^U3 zPFuJPhBj8&^GAJ4??^6?+z_})*A*7@{ck(@$r1oJB>s*v{ME0mQT}Ffp{+uFN%wM+q`>p=Y4+vO(37L~UR-+nZjmWpMX z8tIS1P)|9+)08E5lxYa?2Vr61EUAY2F;6cQ0f6AdFi`&|X?LIzZayJlrU3ZEaQN`K za}7gX&E1z?k{cJ_%pP)OiwyFtdtZ(F+Z=0YIku}7`(6wP9|_8&gZNJB03V(U4gJR7 zz3?w`k)D44pTx80&!0V8S65Th+0l0W>R@LT^?!@E9<^rU8^dB_&ka!}R$=aY>(;xU zfBfNl-~iVjcbM@xs_*?p0@@N3okZf({k zz+>n#e-eDK>L*QkrJ2Wr2Y7uy^XVyB7}uVDRMg`72Vu zDbMimlTV2Qyz^eW34i2r@56_%IKyTQ1ZsD+d;T(-^XKL#;g9+PNR9pdlfQs@$os_q zneaydq@oWEokyXhUqgE1n9N^oXFt6Eud}PBwl+UCb>H69L>&TW5x=EFyLMkx27l_x zGu!)0-S6gR`U@}o-K!FR5`H)UY-`KFfN$K^72%6r4S#T^1;8KNaJaOx(E|QY3klil zlpNrrMKk_B``{sXOt9)wC+hY!yM8iyLA|DWKG`As!3?plbj^%E=zcML`suvBMEoTk zXjt2zcdgE}c9tHN4tayi!HadsU(T*w;Y|o24%@8UA1t~6e;D8|TtG;J(A<0p0*&?8 zr=GtAc!?oAdY+eeB15kL-A{aq4)EUQ!zWz+hOW1tC;c^tKP$!({bS^xgS}XvR|)tl zg}*x0*N6V>81aC*{!2eMI$*e))54uzc zWbPqPK2-kvMH)X}1AlN{@Jzlr4T+Ki-27b2-*K3GtgfDeE}QUoi^rc;ww7gjY{ZjM zTgtHI#%N)-su2okC<-EM@JGhZfZqZyeL~8inx?ZD>KObIO!#}@<+>&Ge+Zx!Kid0h z_yhc8|HtRVU$JKVaRo$C5)+@2HR$`FsuN&3|3$Ya^LNXHKZ|Tl8jY6esgeAcxNS)y zEwZIQL!Z{yJ%8=MA1)m5-Ji*MnDK`I68?6DkGbi6$+cdo|MeGX{CrLMG*;2}QB2x=|IyOrD@l(o1Uak-) zm&!GJV&w2Q0C7y9{C)1o{aNt$!izZm!w^2E!_UzBKBxZoXB>59ZMm%mM&bbPf4HO! 
z0CoP)9e+HG+?6TWoz~VD6q(83xJm#T7byXHjjOYL#K7MA&pJDw^@h6zQfg`tN$_F@0sq)7u)3(uAUG`)Q zH8L~m`GaMyLB{#PCOoB$9dL&+ON$uO@gI`&cdtqW9+mi`b6<}8dv5$0)Rmi>eCtmf zBRfBbC71_KW&Y6pg`85ye{TIBWWmdpl!qU_GqQ*ovbAsYO_kj8S0wSLKY#0eOa4>> zj~d;dbm12#zi`jh^YVX&23I(s8``tSxGr>n??06%zzTo2Zjt$i%%85o-Db-iU@X%! zqXsc=DAK28s)L)h>g%3Adds-8?$@&arJW~ng1awR_P)=RKdpc|Z8^qHxC`OEC-N0I znLpr8%O58tn>X8NY&_Za*vKY?ZEeP$8@1=6%hsOJEq@)z-;>X|YtOoxGrMm5A3+ca zOhy1{_scFlQ|||N&OAr|2aUv>khaNiPj5sAxbnHgAHgrAoOnHjzuV9`^HOT^)@SQ` zqn!h(rT(ocHEzghQD|i{prVli-f;LCMP?acKE}Q;GUXma67}wihSimGx>lU zx6V^%xxJq-|B!=nu5twXtuuCh`qBHJN>_k+{N0xMV;YUkPTtmKD@W6B9~g~@F%@!b zWPXR*nvy-Gx(4S6W;81NWy6*^f5mD>wzh2Yhg^N@8{c^8rSRPm5f!K4ud3pke&HG& zyfYp~e~XKi+e;oTjp0vGm!qxPYSai0@Pm(~35b!u+YluGCrOV&+Q?RSh%CP@n(o>% za!Zv{IydTN&{a&%SQjnVSK@DJ$&9}z_{geL_h)1LX8{(z-2El^E8`J`5f(@J&&2X4 ztIL&7XMFnUFCJ6=D1J=;TZQ-me<4afv|W^4?S@~CT*Ty4@x&;n)t-a0XL&NR|69&C z@qfVI(r@>+_F4q7z_=zjI{#cOKkc1-$R?_U(|6 z(3p57Cnlv5CvT88lXvvrUK*A9>hdModSsLU)a9q_TGbaiV7mUCk6kg`9isXx9NlbyG{5z zdh}>z!;RC$ad9zhPD)BPQr`BL(Ht?4DZ(mSWovGP_qYNOMu^!^Q?o8zoaA!5fo!5Ox1upnZNUhANs$a#b))*eXuZC6CV|!$wx`4iIsNz zWaJg)lQ`$~jSGv4iOGb&Xj)%y!C#5<+65th7V0KJpc;Mt8BBmfLf(jhSMJ8doTL2V zepcVYm}by`v)<9s^ROS0*0*$bs^_eJOsA&g2Q8AEzZXjFLmg_}2rFBk|`Q*spblacUeb}C*Q4W}rb7(E&NS?-D1*EKqQwYQJu zkN!;_@)vfBo;_XGr?Imito-rSl|TVw`je;a=T;WxW*3&>-EG%yxW=a@S4MJTqEhxH zCT1FyQ&Spk=%z^Em?TWV?A$1)<)-xB=&VXf`&zHhu1xl2;7lZ*ti<%INE>!)>Hhla&zmBbw2Z``%MgN{&X6l*s6RS zkHZ+(o-UqSUR;=8TBR?FyZhkQNK0-i&A`}dOv-IDGACm!(7I&4~$k7tC4s8#bVw^APC#86aaoF{;ZL$DTetw+a9*(-)$H3!Lvc zbL3E}bckL_CZ%PzzBDq38MF0~(U`HDGfj<5N-1n^o}(}GzOynrH3^TDmn=QIt(VN! 
ze!%mm7f>Ugg|X}D)AD9`XWaa4^3FndXUOEGqOARDa;jS1hq}6olM)Xc z$T-r{d!=u9u(z|ZC5N$_rz8hy)YT+|9UmEWh-tI+ozY=^lM~a9o$6V^7iq&&L0hY{ zQ!|9We$83i-3bQ%Gy>{b2wylMR2iAMAK`U@1y*QkvY;tx6}i*pS$ zqeXo^XTdTh{(}7?plPN-#-HhV2BN`FELdi{-8uywf62)f67D>sMP6J*ij*T*6CN5Aq8D6$NNXgaDtCQhYG@$1nZ*f{iMJ(Wr3PrwVLD7hZgkt2C z>ba3u6vPEMe@2ZMg#z8k@Mk=g?JydD!K}IlqAZPuqiICI^QYH{6~WMIfo0Snra#YZ zG5S!zpyf}ekruIlHfj;moys;Db)|Z)ZV>aQ(MYe5wUJfDkn(vO^@!+j zAmLAyfum5M8vVusM7g;`_)D>pD6ouvXl^2o=M*LTT)^PtFU3Nmlz=iO`@Xs9kJCaScV${7wGubk z#2AeHnQH|61&|R~Ft6?NGFnv06Lo`m zVx)+f)K)T@jdrb2AXuAvR`)5*Y9y)4uab!|X!+ynD~Z4taK{;C{|T#ddHFtGXK zw2VR#PouyxDv0q;&9^gc|NMe40JY7sNIK+TyxkCZxbtxrZ% zTQy}s^Y{OV5bz4TAc9apC?FIN3J3*+0zv_yfKWgvAQTV^2nB=!LII(GP(Uak6c7ps z1%v`Z0il3UKqw#-5DEwdgaSeVp@2|8C?FIN3J3*+0zv_yfKWgvAQTV^2nB=!LII(G zP(Uak6c7ps1%v`Z0il3UKqw#-5DEwdgaSeVp@2|8C?FIN3J3*+0zv_yfKWgvAQTV^ z2nB=!LII(GP(Uak6c7ps1%v`Z0il3UKqw#-5DEwdgaSeVp@2|8C?FIN3J3*+0zv_y zfKWgvAQTV^2nB=!LII(GP(Uak6c7ps1%v`Z0il3UKqw#-5DEwdgaSeVp@2|8C?FIN z3J3*+0zv_yfKWgvAQTV^2nB=!LII(GP(Uak6c7ps1%v`Z0il3UKqw#-5DEwdgaSeV zp@2|8C?FIN3J3*+0zv_yfKWgvAQTV^2nB=!LII(GP(Uak6c7ps1%v`Z0il3UKqw#- z5DEwdgaSeVp@2|8C?FIN3J3*+0zv_yfKWgvAQTV^2nB=!LII(GP(Uak6c7ps1%v`Z z0il3UKqw#-5DEwdgaSeVp@2|8C?FIN3J3*+0zv_yfKWgvAQTV^2nB=!LII(GP(Uak z6c7ps1%v`Z0il3UKqw#-5DEwdgaSeVp@2|8C?FIN3J3*+0zv_yfKWgvAQTV^2nB=! 
zLII(GP(Uak6c7ps1%v`Z0il3UKqw#-5DEwdgaSeVp@2|8C?FIN3J3*+0zv_yfKWgv zAQTV^2nB=!LII(GP(Uak6c7ps1%v`Z0il3UKqw#-5DEwdgaSeVp@2|8C?FIN3J3*+ z0zv_yfKWgvAQTV^2nB=!LII(GP(Uak6c7ps1%v`Z0il3UKqw#-5DEwdgaSeVp@2|8 zC?FIN3J3*+0zv_yfKWgvAQTV^2nB=!LII(GP(Uak6c7ps1%v`Z0il3UKqw#-5DEwd zgaSeVp@2|8C?FIN3J3*+0zv_yfKWgvAQTV^2nB=!LII(GP(Uak6c7ps1%v`Z0il3U zKqw#-5DEwdgaSeVp@2|8C?FIN3J3*+0zv_yfKWgvAQTV^2nB=!LII(GP(Uak6c7ps z1%v`Z0il3UKqw#-5DEwdgaSeVp@2|8C?FIN3J3*+0zv_yfKWgvAQTV^2nB=!LII(G zP(Uak6c7ps1%v`Z0il3UKqw#-5DEwdgaSeVp@2|8C?FIN3J3-M#uWHF5dvO;Kt}e5 zyG00a1%4>_3-A^U?CJ~t0=xYJz4igk-;V-aeNq3w=5LQ^zyMbu!1>!F2n=ih1D-zt zU_b*H`1}bM7{~wy1Al_Rz$Gv^_!FIQpb{7){0SErpa2F9f5HU@B!R)hpX37fh!98$ z{3wvG|Ncm?G`IkLl-S?V3IrE_&N`z=fag!|e%sUu2M>P|WPJW)l(^$yZq9vV|`bdAmHz*30Sm z(;9szs?(hKjqNlSPmbr8>^|0$vbbpipTD;-*d)-v-;N;Athu=j=Ceq%QfTnD*wR=v(-v3i?eh)OBJCP{Rii-XPr}c{1?map@%Ox3pw!Y< z{Q2XOmtDZz9p@tegNHwEAFKnA&);)#fpYs=@`nJ@1HEZ($U#aj7wU^|Z)&8beJzmh ze|{}IT=jouE>LC;&sp9!d|Mg~NBbZ`B_KRp-8OEiRu2o6vV_#e%)N{8utM5ofz6+W zKHPCL57C}GyJrO_dz^2!H4SeG28)g3vNgr{e8+KhcnN}xKQ{pK6p-W0ouhn&&%Pa? 
z!1DKo!wNVk#-l?WR)>3dK=b!~ga6~YK=s5U)J@}-rO|k}=P@2NHce!|P%jFxoz#A9 zOpgpc{tPaVIrj)F#w?9YCf;t^ZeIR@yyE#s`ahOe7iV!n$e&>ps45ZxLEY1BSsHc6 zda>&RDzv5hAVW{H&{o8gy`bbz>jD{B5HV8#Esabj-feo?)Vg$XC3Ml-y(?G4!IOuX6LZa(~7o;O;Mw86?BpFjyQK7p1-uZ1QV_#21q&)-{Jf|ox{ zCoIXNAja#2EsfsljOl$whwpOown|>h643npfVcj_#IdW`)!6c&L#^W-VDPcW^jR)MsIcAF~InUJ1HBlWeIft+yJQQgx`Eq zgP7h0S{l6-+PBTVV{-FWmhgb*&z%djPoR!JJ)Yi4y<>>+X+^x#1XBkAf6iQ>&Id9I z8u&Bx(9FS(H+|afJ0&y6Di{(31Ah)(pc#Qi{(R~JeaH;9q7Sq4JSziEjw6DDKX(Lb z<{|oFpsX2>KVP~)&zsqhQt-DUZd?7q+iKv1H?e;c=<4XSFkNyFc7usO2QHAKl)3+dgDs7m%q*guwz2R zVVoQsMgJGX(|hYqjvV?!4`F*CI^MN&CBfp zLCBvC7s!J`CEt=Vl}CEVxb?ucOI&#epOi$V}HBhb>w z&(CcvuSNYH=eAaG0IdW)(oKczWM_3uaPntKpqZDG5aYW*OQV&*);2e->szyRRZOi` zl`u_Y*AT4ynRmjJB#u5ZVq6z!Y2;*PJIbnceQC2@AJClgc&KBn)+*=kLF(xB{mPTii>yEZ) zT;G{=Wjfq7t%t*WIEM!~fA2UXP=k?fq^GqxF>-Wu5M|oB8iPD5kRzE{svKqU?Vvim zLrrq#9qKSH;Q6yAQ0D@5h-n$~+~`nq-QgyUtBsb@Xd~_K>56o|%Pq=zHii@d&L1#i z4In!YiHOi6rs-!rHyTuR5W#I&PNb40$L2mtN$v3luMCefQxgU>e<2hZn*_p18xdn% zpyx(aPUl;^cICurGUnLbL}`+Hw8JUDqkMFUf`LDKE)Y+ggP7_9JvZt!b+pTAS8kLp zQ|>Y5k-F3#>~iwi) z34ihwm`;Q>K9E()Jw;|7lBD#=$d+%p&1q#4WtnI@jGT)3tk|I8Po4r(0?CMvey~#6 zpvc6_(UhJT+43#7In6Bi^KfNudy*;Zf23U{-=}whY@6GnhzpsV(z%f>-+G%5pmmz< zuyX3Qr=dKJjOgX%Gta9<}@=zxlQKx132~aU73N+pKd^= z1yC9VvRc`t$VecQ(xH(p-*TJN&N~6WNKU;h&%=>1!1>d$paYN)$jV`#A|2VdIKu=s zPuONGu&FcJ-}P%l+pmW1NdML<;Q7-wHwQpgPD@asDFG~#y{xmEY0A%oVN|Bl|C#6lJ#Qj~9N7fk%Apa6g>g_2<2fMS6dxAz)+wVc%+VejecYNp%TDlTqx*f~l~4>?(LHCuT}pERA*F{#ZQXaw-nQWv74ZH~ z51{8PxJ_va8K(5ixMPT8Dq;>FSeWB{vKb8gX*{6kEZ9hC0vS!|iP0x*=;KVR`15g5 zQ9)7~w zN#mC;__NL5E+c@RvtT!+(aSL@9UA!rJl|Lg{(N7tfFA$7y2}Wl=PcMxY4CDvN@qsj zH>meH3jXZ-zg;SUlJK6h;2@eFpGqKUKF?WjmXd|ckkYnsXAq~Yh2YOAe{v^`I(p86)0FB|gOk#x zQQPu6W6zX7e^f8f=O3g8R$L&TKbtH#rbzK}yp;Bg{%BHf^A!9!_kS{hT>l4nNNwUd zDEyRC#=bY2<+vU3X8if3?APtK^-esF&ObCRkk|jBa1=lj2|YK`j7_rj#%P+Q{|$ma zyR2*ZqoY96_Cd(}U_{7Fv-QSkk*R-8X8gV3k79$!|Dhj*>mlYU`3n>){-QlMnxBY& zEvEeW1Hd5TPq`k#Z6eQEuuK^p!!1pU^xSBbs~>IV{85Wfiw-va)5W~RB@;?GwE1|@%{1iELztW~Tail*6`csb9FW_9s%yiNXmLtv2d 
zXVM9K&Vrqkd@m=Z^uXw6&djp6&7UuZfx*fj;{q9-J!ipQN}dZ;QaUy=72nQH`~3L; zz<}oOhw`Nl?zeJpwAy%{vtTnN$IGcHof_48Z|jx={=5e;u=)G>&$;Bukurfw^LfsK z?UV+{*p$wUO0~Cl!wG+00~qN1Sre!SP{V@0RefaCGp5Pbkx^g4ZQ6IlpSJ)8K7Y1c zpwx7pv*0A97BXE*2gYsUO<#;N{=A$92LXT9E>NBc6O}z@!C6Xns$ocJ+o%U)oA#aZ z=amZ#3jS=lK)EkwzctRtb5M*anT$O*ZWC|%VjT0wO`y-!WIs6gv*!X?@}zBmLlMSv zP0dq+NyQ1@aL%uWB}xXB8%wUCqnxMZUSK zh$@R3UmEP~#s3F;df?ySKu^yA1h*9oBK|l8HUeT-t_;(#yh6h&4Yx?B*^K~_PS{i{ z{2_a0#C%Otx;0v*HP6*+3lBA3+?%^gu3ft}B8970iMq=)EJ>l?U2TGjKYb@m@ss%* zrV6cS_`9tFXfBoK6am@h*}60CZ2l;I41Yvk9e<1N01Phvv@TG>PvwsSsO3)v&>K*! z5Q~YmvRWFgt7Mg}S2munGsa2yvHYRDdj1x>jY;JgtYxtA$GAWlKR5g-0D9GE=80pi z7_&5*=jmUI%!k!C;CCH*U$+0fie0dQKMMfyEP{?d#RWEyu^%)38wP>!C#v&*w`Bk= zS@6bakv0~B1x%Jki*#O^tiYo-#81N?Y72qGpEZC%$R8!JVTj8g0BGRvb}gBNShC=q z(UdXF*3=jiV`=mfGOOh703`VF_`9m}e`KlQylHkp>HmI?05%Lc;O`Cr(2@l&jb@OM zY|V^>KXaoMK;GI+i6I7xAD6#j3ZOLoy+W6|EC38j|M&Bsv-mONKhj4S{NJ5B1VBp` zyf*SVlCm{(fz<3ROdh1SbKL9XVfq~O@ijBvDO(TaRH6_QW^2arDYU#5m3;qy4Oa1^r z!XJ*mY5xnKW*#uG`P;pl=l%$O7W|D+52)pDjRnvMnI$NU(MSMeG-70FWcf4JD8_;j zqsdq=>lkAWQVhY*n!jN(3ntS)g+DU@1D!t(fD}K5Ke+>z#wd8f2)ba>|Em6P%^E;U zQ0yD^1Zq<1jT-)pMpHz125Z`kR$d+$NAWZ8CrOLrKMXhq)bVEmV8HWdaDk@yk@Y_i z%8WlPfI5d`35pY=HeX#ztx?CH-e`u1NxYFUs~D3^B>dd*cZI_rAHX2sPwN6D{5bB9 zZg;oSM^n(C7$3kr`OJqFi#;}2%$<{di>uN2ZYwLStJ;O-5>JZ0`Avk)htBG2s!K=C%aIp^?eh zn36FX`O_Hrh?vJ3A+w6nH5gktJiLfj*d;1_qCw zNZz-99}-CM3qt@?d&k=P=4NK5rza;SCZ=Z>mT%8pDoIO;%5NWA7#%2y3P=7_7A!$= zZj>|Trj(6b{#2ue5L-L$sbh{{a}14&D{Se!dA+SRCi%eOwt?ka3$q}c%LN2pAD^yu zx#}Y#!lPp1;uG-qj&jFWoN9vQ7hH+C*?S$`Am03Y9=w1i{G4gg{};keTxM446ot3c z%7NU=to>k|y|n&E7NV91&YeivyEi2n`HPMS4~vM5OzJ`YW@jf6!Re`)xy6;qx{B0< zsN?MuOGAAn)c+wKv`H*M@x(~;HAyLZIlftfL;~brvDqMeM+yH|4*w~I|B%AV-`};1 zpCu=j#nxisJN8u&gq8_H&*9vP62^8uDhmg*0#BNBf=x1zyT&Er|tvz zO3COCm;Xo$0E5>Fe{6fn4XJ+5&(EhSjlJ_W+~GH9aca2ZYl)wNm7A*15 z|E8oS#Yyx@%g>a+#YL7q+*Y|$w&v&Ob}oP#{08)AgZnIh^an_~VB`-7j7fkFI6giu zDm*Ma;@H?&W#9b#!r0U-{OT|)_FZa@i^^=9f<9Z<7|`6h7-rXVBS6OFW7u?`F(Ksd 
zRs0Bl5`GGQ5`MrR;O8NK??lDtb#1NX?oBxg86j7C#<;EPm?x3vqvhz{626E5jK`SXlVU zv9TQ5;vp$1DW#~+;*r?cdUEMV&mVM$20GM;x2?ik`kKZpJ=mT`b@#>B@r_CpvNjgLRlxVf=*d-Y2Gv7<+h+Y&%nu<$A%e@gEwcfZ*CYTO@ozs~(%ECb*k;I9<< z>j(bgYk`uvxmwtggUeL|lMh&1C}#1KFaM$gTwLtQn(k;UE-j_4b%EFa4h;0~L;zoW z@#U9GGBe4jhy6%OJD8c7nU|kmT2fL{Q&R(vSzgSGyFdU``J;CPE#u>F)qD0t(mBZ3 z*qGGtfS<}=-U@03{_kfxEG$5I2Z9p<*_?63%p$lqO8lwr4~BnS{^;=Un;{mZ8BK}@ z42RCunm<=ud{o#T#7}j9GJiM;S?r0-yVTTFUte8aT~=0{pP!$Rm31g5iEU5Z&V`Wh z@{2FNxC7=Nsi{y+;7S{SwUUxj2nVV<`xt+YBig~X@V9r-(~6FajEo4Yk^SGw zO8@RXk#Vt6k(n_4kmT0#H+I#GKT{TvKTiR~bz0MmW66T9kqRk>rUd^-CLf&H2bq3o zyI+U=DIQSe?*_QMqXz%i<{GPv-m|9&@uL71Lnjj%O&)HIiHVF%NKV{W)TTHEdff0} zXG3{y3Hh%U1KF%WR|26VFC#DGJI@ir)VVn_GthtFXaTtEzTDN@+R)liSXg-Az`>J! z(+g`4uhbXjq{i=l3szVnVVXe$!ykG;7y`yZ_iM)A)vMtDLX7^;6coarBLY>V5I+cN zgm%l05&6@&zwc|iUpo7hx?iXK$pE_JuhKQ{3J)XRPx61olK5fBVR-o7+`{^fk+t96 z9qPV#Bt1PXEv;z0yrHYNtL^N9E?{#Lz{$t+8x5M_H$RW~ovpmw*>m|^Vew$$?hK%D z`0B{DYu9hw7cFNN^=<8vbM=fnjnlo7zp z0igc584&YCk{>F0nt2g71>#1b^oVY-lB$+>M#-#rx zN63e3zU8!~#O%pb*Xum+ke?)b&d{{Df1-qsea3uKxb z5qvHHrkc_>ztB?I-jDe8bY3iw`BU8AqC5U-Vr#tN&j1R`pZ!jl!k_?#8%91DvqPit zaF1gUK==HC19ZUOzyRT|xw{Vm9BkLRK!!iu++q9;x3qTl_w@pP-JQ*a5`QXwZuwgc zNBrdF7l-{XAwt`f8vg9NK#~aq7;b7aR{LL0sam5xp-=bi@YgmrG3FYf4$y}Gqx==N zcK1;LyINa@)lPUv@TaSUvfErq`rh82?(VLx)?yF%>qY$96#dm93Wj}vF?8aolDmp*(et=(RXLA|lk8yt{{!beH;p-2k?C_^^ zMk*o(0`>gac7XteK3n^x8q<2im}Oe@e;oe$v9}M4S%`FA9aBC`76BK#Q@mXagO%E za%0Qie&X*%xL&z?ABLRltn9-i6rMbBJU{=$$um`FQbR+LJq-Qn5`fM1UH#I~4?17; ze4Qly zUefCC?*$hqb0>fMg};~K3-$uFU~&X`wxzW82OV~;5D`kAj{uLs=m$7z6^f8c1q>@ zP9>!exRbKb`ad)NDqZj(1C9WtQ`gTwH2eW=xc*YmgsuaB1ZFjjBqIA4wD@h3rM!U<_^hQI&MQTS-~j9I>vBQq-@!y}_vBCB}G|7DZ=uaLiZj{mE4 z!R`3y08RKi!}>qUUqV^);Lz0@uJM_hvr}*{@FLv$u`oNiZTxK@e+VGMpXvY^0Fl4- z^n*8-*YDh3U0Gb1otc6s53UUl4RoBX(et;&U4NGOTa~)sHu~eH?k}WN!`}$g|B`lr zV@=x!^NCXk{2xc*uYWb?j{s<0s+pBk$;{|MyePN%P}2cFQ332*BLJ4IY;4@Q zwY)SxJ3Tcn@mHt!e@k5cWc*g>;)}BUXOBN%!J^q1{;oN~?|=UKKNkF<6;qkBA8+RYP z_x{E#zES#GGltT#vP^i0I2+zli2>G0@V-DF-5K~2hd=EIPz4b9BLJS(^Vi=|SGTY@ 
zc!dsa$>-8i>lxio5AJ_a_>-uUv^j;Sq0*^iZ)hqfWGBVdSYH% zlCoz7r*%pt3!58S|7Xr0Jdp`YFlPMqLhlL)YWTZ^{9SiV0)MM_Hts!?`GZ@TFp#$_ zz&%Y6T3at&zFcwYR8|%Nk`U<_NQ4&?!Fu2886R)})H%Q>Idl>t@wc8{t@VF{{q40` z3yT-dR~{kjhet?APmjj?P9q~A5E2>wWcMdce--`~Vfd%=H-dki>y418nj|uy%swvG&g@@FjS)kM5HV%nIqlf%iWox5U(>ht! z*EAK^V3WVf34E4cae$P+2)q$k{qrf=*=fh~_NUOM$xlCaeY_baAoTSUz~6&+-`(W! zM?Ih(K%7U8(*R446B8W}DypigvSBG13m`z|R904=J`JeC!4kTZW8`l`I|4K;!902L z9oAvs7uDF!DKUY(MXK=yKS@RCDr?ve6SXBU=ltrPz4 zne*okzzG!x8m zF+V>GBMNwKs;{T3ttJPbUV(5fH&KO)<&V|`KDGSiLH9cgox^B%A^cdw!rmYZMm06n z0f5Ct$Mzv%5R45(VyMwm{!Fu_{F$0*sg_kTG1`ufiHeRskd=k!9UGg~fD15-4Nxv! z8KxDGKXYC1A_;W{BJqb-c zb;T>cI2y3V%{Em*oE#{>+zPzyTtFz~A}v$K>CT^j**c zE9SHJ)`!lPWv3-&rKe<LXQXu$piN{3&Z2h z-~8t5+g-?DG%yJKwVWaV79KkgPh23MK&c5rJ@bE<+Jp!xW4_gMBR@Z5EZ}!2uQ)%8 zmIMrDW;XPi@fSZfJp=r~6qqhJCwzCJa*cypw` zyD2~B=&@sY@Fe5W-11YW%FCO_o15kNhn_!m4Co#}6+hYi!PpN%MMcvGHXY#8rnj<= zYS|<28rpxny1QrSMs;;3g#C$lwIoC0Z|&~Iv^cqQ1^!_6+t6?-Ej3l?e%ti^Pv-Ay z2*3XI>MCI{E$_@l?1GDn3JXr;q?mTX5()949`(N@wXyLrM$VYar{_j~VTRb4gp9)C zlER}Be^4HD$5HSH?P6RsIk@2x*P?SEzfK$=ka-I^ocfyt>9t z*Jt-#eH~4C+6IQbqvwVf$H$o&urS?+@Lf)D zZEDPwlamPV;UH@dV4?UWq+MRUMfscBzY9Wj>#fz5X?TKY2wKeS5|7tI}YJk*z2#p zn)1O1>-I)~K;W}y&%S`r-16m@U;YvT@TUR@Oy1M*H@~)7pC)&$$X`c62HCwNrPhp9AG6Bdf;$Q0=(!+!7t&! 
z#d})*I`KOHXLs*zWzFE!6EA{&^wG^32rzJ4>FMe1fxf!p+{nn_d~0hftN}_RH`)D_ zBE=9Q_Ge}th}yk7l7*1)c=+&zrlux3NPHK=dl=rY4}JCR^b+uYcKAaFCH*%Z+?DbqaJ6zG`zMav#fLvY`?Kj!B5X$zRI7BAK~xIFTVWpi=~1B@PjGIxhIb2 z9nC$I2rrt@wU_t8giMHUx^k&efY zAHNR)nE`g@r)MMpU9Qqorz_GaefXJ?pO>Es-&tFmo49h=3IFP=>g(&P9+L0~g9>2% ztD(gu6_1?&2o4ZDU~@C@KnUFU<>uyBn_Hw0zI#{pe-YUKLhrD#*|=MxZ`aQw4^&o$ zg@%U3#NZ;36zK3z>7$XqQPTfv@%s|J?ibIVJzZ==29uMrGIMhdrNCT7(`KZN@xvj1 zAANF0NlG^~*=qeC9H6yYCbcwb^U?3c>^qo;_!Sk`6&_DR_Ii5%_{Tr}iSh>on)JW% z6Em~21H>UoR#{ngl+^jc*Z#)4>l*%A5x>vwZc+b7ydS|2`IGQt`Fj@_np;K&T`j;G z_`9&k-Me86!v5l-q9eO@rEk)(K>{ooudDcJ_{*)RIAfbX;sFVOz~2{yz+Y|v;SCcL z%Lw3I0Fd^-ano=w41|rz!@%DgyLKlg#l*+#lg9=!eDJN~udt!9jEw&j{J{MI=m;O+ z_Y6GnlPB|+k-@~o^t6oq3G}Ln)O1Q?JLRu0Q%NdqXkpM>H3WKY)E1)Ii;3BrbBy3u za0uCp>goBzAO7%1i9d9JfB6gYHw^#u6&OSQr~|}5ogJG~aB?3k-b*fU|Am#?n(<#J z;`iCs)>ba`zmogI8&df0?_K2Y5%9D=To;)WPQe2R!wG~3%gf6T?#g2E+gM*;TYCWE zAtsdkAL)S$_Ep!^o+JF(>VXvi34atoG6Y;|1^#{o{M}7U*d3Fcl-YFY(nScJO_@>A ziTmO*kHV0a1{l-gOAn+VO@FEXE3K>@0N;mUQyTlx(I3GN!6*1V!Jq5g+zDVXI(}bL zJahe3X$YtMomEn*8!QfLtg@w=l_+(?rcpgiw-pn;_i*mfy!@Pl2a&z*?%)0X_rE9n z{Rsj%Kms7)5AI7*%cWaw({nfi#3h&tR7nc>rZ^Y|?rWT0gL^i6=H`Y|_k_XxqZ9Fy z_+xrs3-?F(d-(9d1J{K-c&Ciy{dOlG$;qiYbEYa&f)giBR7_97&1HA!l@R0)JRtB#0i1-J z@nOOQ0e@jM><> z5Anmk;-i_Fo4)r`V;x0Yx44v6l;NPYGhI}?Wp#$Y^1s^QKLq%TXu{Z z?4+HT=;VVLS$p>)d!1ch|K>NpLHrc{VEjiNAOMK`wU^KK#wF}Mu$LjQ3eF0yeN9i# z%yumXzzSt+a|`r*O&93E4|-lHtdRr~eGeZzd~pBXy}Ls_eFN8A zlS{WoB>qetAh{Et@qqL5E_77zIyE{8M*aZ6tt~RKM_HHvOD;8sRSi4>uyZ;|px z++TKkyUO44vd;T4V?WmWQT(u%D6pi6I5MSq^c$E|AE)eFQbcFM%CY(lw;Aga#Kgu& zB_MkjJ650rApEfokd6R}2P9)aR_A!e1+x563gHtmoPvAy5jb5KrTA@T0e?9D!fudp zf6Uqo#d3V+~N zwVf}69}ZPM1^s5p<-@|0Cr>JkXh9Kzq8w-3aAUMGTd|AS8w-ea;a& z$5k_^yX)x+{i*kPDs-Pdy|;T|Zqx&d+b!)Kon7nf{7DYb1P~X=od5*>=)I6fq9kme z@MndOy`>HLgJr?hDT@wWIwbSQ$9``h`avt)TCAu5*!H4NfxsJ4+NxvODPb5LiVHtu zq4ufBUPa{)exyzQSPu+CKxquf27#j3m{mHwNEf$3`1=ZG4B-C+{P^0R&HWjJKb!lT zDFH8=3uFE3*Rg@|t4xD~L-5q+XeT@%15aLckf&bY9bND|LTAtI+r8`C9;5$LIzawa 
zIQnHwdl$^+z@Qi54_%-We`@@iN&kDLrWT&jN=*3$jD;`Z?4Y~iz=4}xIBL1RXz3zY zG+2M3qVeM4!o!D;b0Zia3VgM)t^!Q_jINmQv;ql3s-lX~=k* z-B%aKfY=2agFx{cx}M?TvMJAV1=dkM2Sn6Pf?;rMx3dod;-IQS#pZ-(+$0e7C; zT48+}-a!fhNrUIFx^O;pySEod0(~$M815e)9h+EmB zoLBzHH$>nM_`faP-BsinL0p7^`ycSFm!0sn^TRDI2M@t>tH{{;b5&J%J-BAm)dfr2 za5M27aG0CRSG0G*)A=AgtnNEO64L0LFqFluP^gkia>;m^4Qtq#nye_mGQ4FH!j=Jv z2b{)_HFJU`?6%U18l z=_CB@dGy$P2z4jzMF&w7BpPd|?0|s363_BI53g7~lqZHx5#s?jfy= zH<=upJGc?qb$OcVmAhZJ%Zfy%rvYr5?^`u z-h+Ejo;~XT|7YiqI6yZ5jb{4(2f-D)a?cDS5^kk z-zO)-JCByayZ};}(=HeZ<@lhkdo6bg|kEW;HhraqAJScE?=KiCHk6IVCGXEzzK)(PE@aJJ3 z;>8I8$&>jL>7)E*@7=p~)ry3agaaidCu(X+Fr3FPvEY0Jp12W~osD&894C`Cg3muU z-~;(7Vh$_>H?oWcP^8`987op23=NZoEjphg4$$m@%}EFu1B(2W?Z-NuI#~*TUHI!w zYh%sB*~2aJ2mUXo+gN&$CLe71ncO%dYsii>kdoWCyK{44l?8fp4CLP11k8Wo>4q5^ z1s1$HGC4U4Q8SBLA3S<;3j2ASZDY4j7NJc5(KUaCvtYbQW3EHX9&ms@5A&;ES@Ro-?E-@v zy@4IEQuhtpC&8uzG?!uiQ{?aTdXQlC#tpa%?Z)3W&Y$Q6sr#FR{@3RIfDGFNnB4v# zT`-veo9lnj|K7Tpn*-EMPLNx01VOS82qQiSz#jaET#_&`v8eSv@b@$nfY9*fD~ZS? zgaXKh%)lRA3Y7RG^bHK;nEkI6KX(6vb-yNl!&CUc3&#)mv*AZN-*QTw`PV>9 zE6d$C>jKQ(l9`^aoE!vhip+>%NqPod-!yi*ctAYg1bW1AAQ1Swe*dAspQ8ul{8>F9 z1rYfo0E+xE{04Ha@;xhlV^;o{_Zvq37=FMxcYja^;{8b9`@B5gPJ`Ll3KoJJJtG#$ zQumFJHC0Ow%+{ePfR6lG$9E=!GJpI$!0!d%u;nKCSVI>GUCY$HduPPuXVUxfXRqkZ zUmJeFUw3CtE_Sf@v5$UWJoZ96i=|}#M zK$Abt9QePjTL%a6(25@%)BC^N{~7oJe;mKp27b`{8u(qxr!&f$Tvby|3y0 zE?qL)O>R*$p6o@+=m#+?_Q9(}YQJyP0MHx)!n1%7{^LJf_!H)U46^=#;o;GkA;jG75y&R($AcS4v7<2Rshc4Uam( z!uXBW9$1LHGdTs5@H^*V+F{H;O#ViIKSm#TS`$C`EQs*gAhR^K%tCOZg+Do4n^880 zQ!4%Z0D|mU1nE{V0BGB0Gvea&7oQ`4 zurTa0`G8x$Z0Uu&Kjd%nI34fdU=N;i!N~?LIKy@tSO0MNk1WRAzdzF1eY?M}=XP)H zzM75>oPwAh&=-KS$R7pp_?~qe)@?dm-_!(e(roD%C_AukcUlU3UpjYF9=u?aVA=om z%dcL1@%gjokM5Oo_+Yzm<$)_#`VhjAk!kMyfIWsE_PwyWZ);Dn$BUBDj+oTiQ7hFb zrEm;y#R(5%{SIfr|31Zf85urE1#OJYKQ(b@0)C#Vz7F0ldHrfb^Yt6M)6!bIx;i_X zo5|||8?Rk!ZftD4P59&Zu}5EkzwFZneu_u`?7c7eKN#MPjgF2COoD$S(_HLuq1&bO zVZDGpz!qoE54)fm&F~)GQ}~*xMT_$CHr3YhWf&7cvJefe|CQ1Ml6i0+@`nIET%Gmv 
zpMR3Pu8S%&I(0S&-uS>mJzJ5Se*M++r;qMU-x-YMfe>8=)_+>&K7aB49lc->Up$`cp;r;jX>gwna(7Y1|9q@k$Wca^Q*8Rf30vzBobbymz%44XHX zmY!~`F1vE!J>IzQL0H%{Lk4;7y{|F(K>nsXE?(TexET9h3C-TjzrjHjf3ypx z?hp9eVb*lR*?~KZzrD$Y2g~67YxrDVe?PoL@4C#u?vW9k59Td7@BHAsgx}0`@e24F zI_HnU2djQ;%FAXR4<6wC{hUv}BNrF==mQaw&3Pxz)A`5X=-u-h0WkRH^T{R_aLUu% z{Q4zvfOno$I`Bts_da|Gi!)-@K%jgmA4SU;7_n&77eG35_3F*%Fb{c3{GS7V1VAc! zW8+B_ihWczoax~F6<1w__y1K@7Zn%pT)A@fs+H*$1Wq7+LmL+_IUi!_k9EJ((;MD- z=Q|4+e+)ky09I6FWB7XrmLei?DDekZx(E1!2M&iurY?g2lOZ8TRm=fC8g%0C>(3v8 z#{{b-b-LwPI*gJr4C2&yBJ4=or=4;4L=dRRaF@!>>-&)uBJzPCTHc z|1F^S!QTQ^9oL+@GYZQS&;{QvHtlNu-T?rI!2eMI$*jTP54uzcWbVNyA0~g%B2AsI zjX$_9_)fkv4e87QPQQ}*+XZuv(a}=qvI&1HzP*Feku-Khor*69VP$ zl`r?_!rwdJ#ql48gboXSw%+$G^}in~)aBYLTMdlF0p9;&$QS@x{9h>kR2YRSliB^& zHWCy$$>6(60GiA zLZ0mo9Kf&y69EB{CpWrX0$)BQ3&qjN7#f!1k$jNBs50<$a1m_34@GWiZ zfG^v#bct~s{~NAfKO-n1lZtjY>do5IDeJ~51FlSg1yXOMhUTSD6*y%s)M_p>KmFr`pCGy z?$@>drJW~nf=kX~_epDdt7TZOR-ol`p^0ip9^=1so-(}?Q^(*Dtv!&+3VF>?65JUnq5kQeY>i%%)55m;j z3=vj6Z_k!b8OC8T^ey=d4OelJh^-Y@%#1wkdE^hd`rfp z>&y5X8gk*s(GMBTWXA=42>MG^47-wHTe@6ME_zC^* zF~kq}TWI7%+r>DPccN-kBE~Nzh>@q2ufaHSy%_HQhIcynKj3fn^*$prk2@bmkB;c) zEb~Xzb-Zp=ml>;Hz4($&fEje$-{1K7!i9^H(~O*iltvuiA#KJFtlKb+ zMtv>$l57JR8Gx4jj6=8jLI+IOpLb$c3{OW{#(xE~z~7LOmDioP;txAv=L;8%@EF!C znSfCKQV4(JoWHiVh45a=i$74;?RpI(tU(L>!DIU1|HcV_+qP{xdiutZeW|I*VopX%F_P~` z%jk?)V2ZHHZrM5;;XSXEzg+77to&U@{A7vBRqDLd3--e~v8!+A`^DzJ>`fOfzV} zS#Mk0NjP4%u5RdVcg+dwn9fY85Hw0g;LkrOta_nRI%llDn59WsS6iAIJFn)FUnLpF zFvRB`xCQl#GNwr%MUjXuCvqVf13GSNUo3>gu+hYqfPEfAlwb$S>>^yqO)grtxwdul%Xi zWuSm@!|U0~(vg9_-hp9wciZ(F?Va5>M_Tfdld@K)r{~y}GgI1a=%z?em>4Et_HX2A zl_|qF`m0jL>hjj!k(+hdI1@=DD>1eB3!o4%qTtP&Lk{(HjgD#lK2ls7LqXl(rZJ1~ z2QTj!9D%PXPtL&WfM?ojHm^uY4V5x@nSN7p#QcFmOW{|lDAUYVnK4gkF=l0+xZXG3 zjr_sl=xGG-6vZ!h%a)SuRc``yzv;2fpG6}S+muh?aRlSU>|p8e;6VS-D1A}f-3MbW z7q_gW8Q5D*NI7Rl;bQD%M#QMl&so5X(z@EyXLcl}t=NB}w1j+>^2CX{+g)AoE|?SW z8#bVw{}Ai^*&q`@v8yW>J7#A~M*+ZL`a*PYf&Dd~ZQZz%onlo|Na>nwB#i=M_G}|$ 
zv}YXRN>?UkWaXEY_0gAk-x=xcz6oC`FC2O^Zk5c_am@2)6;P7T#aKN%d#DWF8P`8f z-dPCm47qusAa{+KT$ay_Cwp^Zln}FCO2mxTPq2KY@`ZwPOij#Ky}P7h@D6>O^2E@s zo4vgQg>T+WTTb<_UJ^l;k8y zT`3tHM964COwKlPMxXV~OkcPCV9f}=NE^NtG&9=UeT(pSRk~`g2l2q4B%qar7&clF zlP)EaMu}{jfFeq0+>pJksA+s^=05y{)su&lgYBJNJr}1pV(@# zWwawETxUd${xn*-vGhhi{ASt2)SbJ7l?Qh0+Pin($Vj1Q=EM`@gg;Y21!A#kR3K(v zN`#GO#$E)>%HNy6d+(MlTgiw;KmK|*Od4Pj*5VenZ_A^j@KEB zhA6S%n(bWc6nFe(X1Yi?|BNn`1!v@Djgm3LO}{XV4wn*#{J992=?37uGiHR*+%6)7 z$~HP0{T}h>EFd5jLq-6_xwg?XI>YOxAJ*16s(#k5gI%yKDnWVDnT zT-`Y4PtwS$kh@V-#g_7I8?A_0T%e`CVr?UsQqs?yfAGS^QhA)Q6^#!2vEToat2M9A zxOj6XR*g1_BE^C!4cjvnQj{r{YD?+aC>9~?#|eL?3<`x})fhDv6dIXGD2%4#4Q@0G zkoMw+KdzBVAp^kJGAjAw*Ey%gs0o<)arB-}j(*M`%RG?}mKFQ##d%sjIG% zgE4OTQ|ik^PzyNkjGRB^rMy*RmKr5gX&mKr5gX&mKr5gX&mKr5gX&mKr5gX&m zKr5gX&mKr5gX&mKr5gX&mKr5gX&mKr5gX&mKr5gX&mKr5gX z&mKr5gX&mKr5gX&mKr5gX&ifHBXX1~8@pjD7yJ3yfs|yC4^Jj-l;LmQ< ztv=rrh-xk#e-e*er>_H>U)~34bPJ3jVYU^z>;k;hRGO+D zKXP&L#^2HeH*H*vZV97mv$T0s%Ti}a|Dp`H}71at;M~xT*S_?mF^r7 zu`-Q}Vw%678kZqBKiUPWy&U6DZ8S2Q^BsGG&tVnpg}cQ>x?-`-ANOQd{ICmFL1yr$ zGDccj&-tm^+jHWuPgIre5*K!Pat1NZ9|KU$AE(p2Vu3%q(L8OA_mr(YXLV4P?iLx* zn#DSQ#F5$YBmU3s0!9Ajj|)^cc0`-mB{OyDuCXq;`8CBpe+kCAi-aLVP1Zx^Z@#!d zbz93dkKU2GbobcE)g})3`}kuCJa%B9CTe9w@*UCm_V(3{Js0&+mmE4aVs(ps{yxUw zkwA`}u#K3K!83Ad?%+9kN$FIor+6;~cseIe_)`MN2Z3fkD{yD*dXgzBCt*ut#*Qr> zl~PHjjzxMkM5<~r&mVDrPA-tH!vJbbE(1DgCsJDa%ZIfzByw(Ze>r+O+qp1*7EgtR zJ}K7uGw@UQ!4?1|?97AEGDZFj-rA5e-&Jf4Yd95vd|7fu34x=+sgarXr?LF}^G_2J z1pXWd&GW$})yMY14ndka0+e{jbJ>S()C5UbQtoVfjP^AEmb9RpWjEHT0&>-^n zfzJx~C??RUKC7cVJ>K|J5olicGIQ6y=W4W{9y}&cW79<53k{4@nEo|{~91)%vkUBp3lU$&n z5kbs}Kv$!XNjST6j7QXq3r7o*HeUHt6UY!#6XafLe)*F+VJ4G- zn5q+YHHND*rjHmLk;^IEDupdeO!N0+<^0!-6j!5(QYK;w7wBpXTj&Vl_W>>hhD%O_ z(ub={Z1eZYCnjvFJrO{B8b(CS<^o-f;p#kZfQb-yESs=piFN)$04R0BAAKYtW_5wC z#&Cs>Y_rds+`^S5w&%Y;{se={@dJ^R0p{K|Jv1(*-&aXy-4YE--@3U@Jy2yWm;baPl1yAN++P zQ0{{r095f8Nf#KrnQbXGfAiw8RXYA7gTM%k0+swl(goTu3LX)0{}+sC-|o(qAYS~R z-(jEyHq^xuWgfa3&C@*W1qaTWa+x(E__0xM|5+GG1MkEUf8n}7XARxpbWWwT_;2H= 
zrm)n}Z&{Y)0S@Ddzfhw_SJXS~Ti$p{vEA zqi2SwkeBS-&WTU{+zE7Gj3K6Wfv!e3f!%E(S~s$0_o_Hr-74Xj$g3e<`E%)n2}%lm zIATgXU5$#&JV&{;ZX|7L8(?d*Eps*6Pxlk!(z+2d z`KgcnT$gItPxcoR+x-24-urdGROB9zOF? zoF3!+{lX`K5=Is`C)@lO6}tL}a%^46z-I+KGQ`Z3pT@R6s-rs9k{dr%o$50#=J|6c z(C~Y7$AXy5Sa73H%`K<%)^y#nG_L2k7%fHmKUC)JKO0+$nCCCi9YARwB97u1ZGs?H z{-jRW)hJ}*dso@8ibx}4g)MZHkvi~>s*J#ynF(W>zlD?vuLLer@F%!HJ7Qjqrkwuw zRPCyWlQLG=Lqti*105&|2$YW{Q9SVH%?09xD-biiTyUdBQ(uRQc2!1MGF6`A9BD}% z;82lwfLv@z;(|W_F!VeGFI|ZkCos6trmBw!MZ2n^R5>cn!_ySeAE=W1ii=97P=mPP zFC>5{w*z9dpN+iHDMu+@)w1?DDT#0(p2?%dIJVDCLAQ!5KM0zaly2d?_##KZO0e-? zBKp6OF3^oX;sPaIqyranL!^|laX3)5tu4+WEsKWJr4y{lC1a2SYlhYWgXBrl#0P&2 zK&1~%WUJHYf!kr>JM z4Wo*&;6|Q($gQH8Au43DsGp#ySLDi!ZT>6+G8sTV3KYchN|BvFA*D~FP`vxDqMhdh zeu;{DxnGDMYmD<}VZj0*Ay8DqJ4F_6)9{7`7pbbD#EE`3xMv*aFO3y~Ih>-hoV~0F_4o;A3M2;d^ zYFzLqv0wvGz|${9X2yz?9*w?Ri#Zs!@tr)v2{eDsYl;6PP{CzSO0olJ5Gb5)?wTph zICGC#%?NSNi;-s^V_VkCD)5VB_nR0e{tuuqS+D?zz?eo6AGYsmWN8)2NEAsM$mo=4 zp2Q<~P!$D#5v|}qIeVA+ZxQ_;sA9070MIn5kLjqlz#MS7M?robie;(Yg#s5K@xi)uje4lTp7~pB2QWAbUQ^n= zoRHF|QBA+=uN0~;<- z&7VgWd{bn2IaNw;#%MHYxOr;+{QEynpwj;V9;{7*gQ8B!GLF2_DaW~pcj7N9W&g}; zThGVy==?)+fvWx&g`)sENEqBmGj_-}9HV2F(Kl%Rys|FyM@NB9 zSy>v33~qGGHHtQ8{-`CQMaLU|d?^NR4BLeY&VqYNH*0cWqrLuf5)+C)ut%;Q@yDOx z0u=xTXCXvN7jrTM$M$kHI;M)Q!2^GhBQOs6b8&&L%^jZ65i-m+m@>Ip*$(zeo`n zm;5;r7@7sAR&j%9InCC=%LO+&)g{XD9{Gz5fpN;8Lnj=Z1urSpUXG;re_;a{>-@PB zXa!JW!P=@3GFlmvvh`)OR&b7veeoAAfU(b?Cl|(^UYxOR=GJ50k63s*zcCa(CicES`t&R;WCXhh;~+yu~Bs^Am>+0NO9GR|-QD1HKe zL|zMjgP{P7FaBf~$nZ1yqX5eMaR9>s#SO9K6gR7@(Y;D;*@k6P`8s~F;&j{k_V z*RH`z7Xgs+*Tnd1bO!K8Uf(q^AJ6*121VN$HWdpV zjS7y;lnSHCpVFwPWyGRf_(S|$`2zq6e>nc8{V)7=@_@0;-;yON_ebz^;je{yK$*V@ z5kNa+uAm4;I{|{xj*+WThh=|JIPMh7T$^+vlem4G? zv^f65fMY-le+~e~JbyM9=!hR#{{x|%_>%#&I2>0{{21kYEh%NAg+Hs&2@!{QJ7aD! 
z4w*3gLh;w6;7<);9PlT*K!%^f{fYd62OJw~YP!~3da|UrXw&9H2MY@J<))+$ z>A;??OX1CFQuDZi;>##GGHXi7XywmhbU?%@TDqK&2kKk^zqT6~tn&|Z`b+#@698!S ze~k^QF+53~BH$h3uukV<$<=5Y?_RkoZS$$J>opDe$;*~W&Fu<`Pot2rJ*8l@^CubAh&acFWKKFk%ID+6aB^?>t~E6@Heai5Z$FfOru1Ir-RVc31^5o zE=o$xFRSWpttd{;T)VlVeq?;Gx2L<>*|YUOvJf>~U%ESM z)vByaJ_V{}2z_B(9(Y zG17b;QgSb+HZw>VAUjTb50{P|Q)j7&75V#RYTDki)~U9N;>6UfO;xuZJ(?WryWM@W ztNli6d-v_)iuSr?_^D1wOV3=n8sOVcMt`{cM_K?FY$g1O?Zq3?-1hhPuQVEa{_TXq zuffhAdcYxE4XEF?B5P&(inO$}z*mk~jLt4_>itEGm z)g{XljOA$3+=>PwQNfLbY^Q7)_D*J6DkbE^|K}%kIb~^&7XVz6mbSaR?@4DxaY9mR zPQ%QrubjeJ#2L`%s-iA*dhJn_yvJFXj z#VD7hF`&6EF~VVRBS0qPW7`d#F(G6w`1{4DX*;T?CmSx8E=fqvX}$m3-^_mbXlkUd zx94VOR}bND>#`KU4+C#ATYMpeEL3N(1R}q-ZI8NK;c_2$+!H*}0iE zy%+vmAW2F2#V_0d{KOSNSg;5yM3p}Qz)yj{{RV$HaOvwSCg{;W?d?S{`GB>BeIkDR z_AdfBI9QX@bNS4^{rhQaomKU}_4QZhBY@xi?hk%Yn3F?BJ>s8?b?b6+a(3+8SqP!1 zs0hAgd45OgSpuNRAAKTd7{C5@{L@bp=^UiOCx0%W`~rd>0!83(#mpsyHb&F`rI-LF zK(81Hx9gAHv$ts_IOCPJDPnMP_13YQiUr>18cXOiWz1w21pZ z0C35tiK!_`i8-AP{N28G4a8cQvd}_>3n<86PyiuYHv&BvquBp~1iTA2`MUuQZ=22k zRkU{;UH<8(1&AL7a36FsiSXMzq#+U$S7feQT~J{-1$tg{L)Ga+#f9X*Vhm)r2we$; z!X4Q=vj6riVz{!ek4_CNe~~oc?#iX=n)1`-r}Ojk*REZ^x2|Vk;$hRN{JfQEOFm8j z?qHfhgTNnpKo|n1fcxuYgvtFc@CO8@EL8C41PbBL7l9_*gg>W@Q`j9F^K@yCq)kbQ z-uMqoV*rNY?`V5xd%{xU{h0sT$HZTX93~{J+LC|ja?8YT?>1gJzjecgb?ep@bRIfg zU0YpoV!#rxwF%(l&)XXfI^oyfkNBN9da0`BQfdCahWsVjKx6Z@mh0DHQNFGH^bU6E zd?WHVQuJvG+y?>tDp_MH_`3#iQd2V$)ZHQD&yNcM&{;#{3$&Z=g*G|*)Uw z7h8*({P`e|bTguk*YbqHWXrR)L){&07m z@z;EjIKSGOD_5$@@)>_7ej)iAO+fb-oIfil5`W%ZAmKp(Ohuq%wgMy?t+t3Bd*SbB zN0;FMJ@`M$Uw-+OItpNQ`Nd|l6P_3RSt?;1mK~*iFW^^QRlY9(f3=7opZ==+UyIZH zgEzUzk#@AbtAjZ}y2#?*|5E-6 zE?udq0Rq7T)>WRYHUKo*n(oZ4|AVDR3_I)SDp1j92Mfgd)Rb3U#i1X;ud3_-hcl}pxEB9A6@?N=El!{M%Rnwzo|(dFJ1W41q&8r9^y4Re;Dz@ zq2HD2>Z+>C<@txn{KFT21iwn&D2n+9`adLsp8{JG5WZ&^?fiKqP?fD%sJ&F9P*s&g zV+8jV{*SEBN&Ia={-^`=&R_neY5~By%cZmr=8es_QR45rfBydaOIBmZ+mxHP8AJZw zJ-c`9+`Tvd*wG^^7Xf)R9Q=#{Say;)KZ+j$Shk1oC*l{v|Ba3gO8g;y=JN*&$^4h| zC$PY&6SlmGRs4DEgzbP~{ygIlcnVeFyJpG=?y2}gG$j7gfIsE{-T9-BVHx8;@PE() 
z(?MXv6*>*!-0^L6`1`oH_%k!WO^p2f%FCC@vy?YGZb7hGUj{t@(lq4~3QzvA)>VI2s{71-;`K#!-$z~vG{&tqrMxyI4l)pXYoIdD)@s`%rtF_<) zId^;;9sX{O!|MlbV|el6D+n(kd=23hgx9ZMpZh0~KLoJ(Oh;RDQzJa(*H8~18A9O2 zy}tRQ?vGD@iThia)!aNdI8*`$R{pp%5&#f(Y2%9gz3cCO<7-jBzGDd(q zmWAy9kiRqqe@ENldki=Nr1&*i=N}S(KpU>V>^X<756FY>khH+9k=7d>H>z&W5q~q! zA$$qp1q1;=#^396KQ{n8I5|0a{7`Ri-|e36Zg}duv$L(e_4>7@`m0r?#|M1+KRWxc z@HdDzhfb;ZV=mBaeR&Vt$mH*5fB1)g^hba6$AA3C{||&e`IA5W(?9*QKl`&khw$#Z z@BRgbzx>O;{NDGz_Z|t~|NbBTUkP$yeEEgCKlFcaZ__rLGV zA91|yr8-$jm7I(L;yv-Vr5*VL2gvv<;x~TTR>5D!u?v^4Ty1J;>%7%FI5IvtegEO3 zsoq2M8LTa^`~lvvt}a^h-)|E9Iyyh$9OBY*G+Dg-kAtFP~Y{x{=TSygR)b4y!SPv5}E_|)A8Po7To;v1!} zmtoj{;6M(1iFgydr4qvrAmGZJzY&h(+0!ci#1Y`E0pP(2AVMgWve~jI%ivy${ z5a%Am1HR|ZAN$6ePH9X@>UU~aA*Nbf`fy+|>v_xmt9{6vi9+-E*@4fHp|2&lHR3rELI;K(@Jo>+i zt{%|=+WOzxAsF@5*W>pj_7@f&DXyza-Ffs(c_Z+5^H%@R=)^Se_hh<{yiuR>#|R_< z-e@DCoraDI42KRKJ(@>&q%IQCLJw&IY4V1Epff!T;nJl`LK=?1Ml3V)uUs53VQdq2*lB z{=)4mQxHJtbd-%lBGG8_2M&o)lt28Yb)xz6DVsK}+qGj&7HyxMYqqy`mB9q01HOYeJO=zdc>I|1C*jvh zJ)i}^4h*a_!v`QQV>o>581T4xGZuh8?q_*k;d>vaPZ!ACK@Aoy`T!~VNZ^kxzVN3H(2OB#5W3$B2Nx|`n!J4J z2P73npF4M8f8oBJ>sAmFoB@Q!rU3pJhI!^B7rxprPC@7#1PG*a5a@ylf6m!@DOE`f zM3O2w8NJ7oABKYOGc1GqZ*=@O=+yrn?I5p`=xD!20A&6TN3j$)@^{m^4LMnizdUl! zVat}BLkCOY^%IkXzk9_0DftTlATS6cPzbO@f}yG!LMOo#{xJO7UHP**K(p z3_}_W|8C;=q<1_&A3m;2e?b^Lcdi5hEZDnc6}G3NucAiqfRsOB1ZeiavDx-v}5;D5KY!)qkW z83^-#XoYY6%M@-XIMaN+t?Opbt=m^>>l&`slw2LT1N_~G_bQ0|(eWSmfADO()CCi? 
z1P{ph;{ZZ+dR+LM1pg;GKw$}xFTo&xv*&-l;K0Ph$y3{fj}h*!uZ9)#n-fnO&+p%~ zVP)EethB?0ZTPriU*EvXE89|2DO!^MGu$74`h}g72JVmW_xd-#`OVlEXYky)i=_m> z{JmROx%d>;1dD(_;sB){7#3m35Rfmz{E&J;90Puj@fQLy2M@_gRm0?JR41{=0)88J z@7tMcWR{b2y4H!mw2q!z_%5A}cESHGqe)jl$UfTASX%@4u4=E8SJgLHU%gV-+}hnU zc)R6l^|@Wi+qQ4t0pDcYw&l>lg9i_lb(WR!`G=LiHa-UQ0HB>e82e#3e0ajbpST7s z0{G_q&vRw=)~qEj8#-QnrKa)5@#|F(Hm@g()r`NP=QUeQ|My`L%zn?D$y>j2rQ!Y< ze;iSRzgNHc&As#~7Zm(j45VF3c92l9`jguP}cb=MUaplDs?#e$XzZmN-D8 z0(7Sm_@isk2%zcz5IzX&&fFWN?%~$XRZEtv*tm5^$(gG57x&xiE??MTaW2Wp+e(|k zTL?2?n1%4pQv|T0ypn{=mvIzGdtlQ8QvQw{fl;BQgpUpWT-Kn81AKFS$(myjKKtzS zzI*o}JouM?`Ilcq08;)H{1wLSUrYFFr58B#MKyf6oWlkDmFMLx$3y0=jpY}967tuE z{V(AUvW60V^{bJ+d!aAR&Mw+<90A<5b5lkNAYp4K6BFRi>HyV4KspE%_)}#o@TW2h z&F@krRZ9Ems+jeg@(cDA?%&P$OHNLLu;=RC|K;~(g$m&Av75Kh0m2tS^3u{*q^FWq zFo>u4tyo(=I!2qPd(C19$IHh@M}~(QFIQwhA5**r-Vc?TnYDKHDtukus#S=~wrx$d zw>sJM!-ik`#r^O#y2NFvD-z+wp=QW~0MD{iRdsjwR04XY1@?AfSh{q=fD(WGLtj^K=JYLG_+ioIJ$shmF@Eih2Dbd7;E%XJ z%HOvThARn!S%q5>z-^mWr74y+jD}dqZ?*CJY}^6TE|{;v{E$t9|I+k-kcEN)BV(00 zxKUA(B`P^>!_GbV`2~gh3z0wcdr3(Jbu<6-KmYr`PW&Ccc^mk{5#Uj%+S=^A)$pPx z1HTn(&);SI^-%t*@J{R3U*DbCatp^^a2pKo#@>Pe1Gmwd8q!%GE^TRP=r1oXx4Xan zNHK(EYjSeeCM{W#C_+e}+r0T41lr<{A3uJA;px+-r{Kw|)ZHe3p&TFr2>gA6{C!IZ zlmNU30E+y<$MZ$aR+GPva}FH*6b?UFRLSU*zkCEH^nduh^P#T={C@TAx4`^2-@F+d z+yj0vD|2(+&h1+_Z%8K#f?|UiBrJrRz4TVExd_c2AiWRAdSD9RU;drZ|B@`kJjuS1 zvC16WXe-AOk({|<*KWWMykBwQx;5#@UhVXM|M!3Yj~jnmx^5E(2$M~$T6X^4^~;uV z@3&&bn$veDsQ(lA1N<0&co!D=gPUULmp+H^?AbG*`~HJRE^VOn z;ot0?J9eyuZSKr8nK@U%-&4m(+9kd7Ww17FW?98L3r~9WPD0p1;E(>`50)B9>5c^iS8H&ir4-9y^6HH{ zb^?9{`%1ERtwZ)|rvCH4{_DRee?Xu^|C`oT18-!b4iJYVxd#qxN@ATaY}TB)J89() zZnxf@naM@}XX1zaG5kdS9s@^|!S;*58u+`ViA$Csev22c*;i1ob@Ad2(=<$xfJ?>> z{AC?Je8eMvbO=a2;J{XJNW=q9UB7-B2AjhKK!d+eY8(3c8zJ021`h9o#Y@sN;iWjM z`2fIlf1E!C=?A~~tfK5nJ@J1se&BeaXyEVl>sPP(`*!aJ2Gduq-LN{1yzQ8^oI%1u z^0W)_fV>AL&mqtU5lj!r9!B_scRnzZvK^TPt5xcwgB#^Sq@(2I)myd`{Pt`__VR0L z{^LLX<3AaH=m7uce~`as_-Cy^`z_#)tUzNtS|1$T7j|8aQ-(%$O5%4tGT#}fVK*2L0Tz}}$q4kS%Mf|2FCnqKzFaU}B;|}mp 
z*72g^6TqKm2grtiz#jvUcEH2ia1wI&F8IH7pQf(NOg~HRTVJ?vc}Mz+tjyG+V;Qxf)*Twx2{vJMj@SweYM{*+feoHd9=H-qcyZ6rS+6AvF+Ouc(Zk*rj-o1O< zw!$?FlS+z?oj7*n&_TH4xOeZ~9(ZLjW02ncCDV_+Eu6lgA>Bt6CL zUAsVBxHrHMG#Y~OC(MEISwFlD!fu4nTfsO8#6I{her}``b_6DbSYy{P8!^I;H=}Wu zw3D2iwPsV^mYlU2$lk^3-~RS@zZ3bx4*0(jK!d+@c%!ipj&tJHZtJOY@Vxu+^T&^ES+_A~-L7)P?oX;i*$6Ps0DD=>Mjs5k3+qf9N+MFcv;)K^p^YB+ z%|He+jagFwhmgNt|N8FTNxo8UZfg%MTzFPkFo#{xjdTFLy|w1k2YXMEg=fOxF!1M& zAMi({dj9-Y88S!-G~3{(v`7CZ7VsA2KV$eg33w;T~DIE?z9ZRC%eQ;&Sz6cta)JvpRa@Fm#g#4<0&nfP@af5BQtja!YW3LhpFu`UYNZ+GJ4<0>yaR1)D z<|}pe*V}InjTbrdXFP``0r*WBV1h4G8)1tu+sHc13?0~_EJ8TEaN#Am!PDChPu9v| zaPTVOkFaw7e3#+>fIkU882Zt%AM1Qya{OSHhJ)np?p?$KN&pB4qUcQ{3-qI7m{ULrB?zXm8V8y$s zsRQodoq~lxH~xqNl!kz83`j?SY#kc;L&?6fxV5hC z7UK^c>q}wmXW)m+q?i{z#&ZcrG;*7|V1iw#e6n;D!supp0~}A&`0iTbC)={J@}Y9=?H5+A z%&y08A6#D#%hk{;ovW%Z!{=p>z+0vv@O4BwdEsl&AUyu8h+_zDbZ0kGrRGD&LOT^Y zs3r~&0Tjo8*aaJdK=BJ|NH3!XA;#Jh>W zVQwxbaTm-#KzLYRb;K`}(K%r#i(8>ks!MXoco_#aPnZK#xj?!KBMbuZQ_X$g3G#3Z zJkSKi<@Sw8!#m-iSKa@@YS#4B)Rt}we<9ai7=HH*{-AfwJqm>NbYo+Y=VNfE1HRG; zxRIZ^zXO9pn3_xv&fI(OaMQ_0kDd_zBnN2pz$SqF>%e3cMjiux`>U~V-4~AuE6ToREEQ(!Ugpp8FJ=)!)PKa)Pn-)HcfDHOnk zAFQlwX(=l!_yF$`FIozL;09wqGUdQ!Lt*8GF3%n?Ah8lg*; zs*4EnJ4^U+Ho9O~g|SV8|MR~%f2i-3vTP8cvK}+r2%xxp>EI9DdoJ1{nQ~j@~B^>;bIz@8fMUSgyHyAM$ww z{5=H!haIrt07(z*03a?yzqY$T{ydD8KU7BMPo$5}e|PR&v}jdYQb|ckQB4i}g@9jT zfvL#gtg-G#?z+9C_dRf!OjrIsf7X4`bWNl^r4}`9w6OyjDSz&W1Bgt7r^?neDzOh%>b{W?;sV_rpvd2LQ3}zw$@?OIrVB(-fr*@MYAx}=;83*tn;F9#z48T}? z;{L~HM}X$!gHC`={z&gj=d^ccN~Uji>#@G<^i!}uesd;#F!Q){$!s^dMa_7!7b&A3#H`o{uM(*} zB8WHQj5!D_0ul#^0J`wU+ktM8vi`y0k;j!^C!XGTeFaSx>C>3zF+nV@Yx`=}`Zs!kuAjMByeE#Bd#-G~#sn-7(f0M__c#k}4 zfIaNw6wZN>ylGtj!v#QGg@G{CakHndxBFJ_fxQ>&J3AdG!R8pyyc1&afLqxYlfJFq zoR+Y3>ALDBpb+k5+^RWVuy1Wj%9^!n*R3mruT#}E!F%auU%&eL<@4vCfBE2RdVhmZ zxNzlxD_8ok4~83tz#nmaB7X1*y=3WPv2_LIZH*lnqa87!OuI2cRT9c59HTDKSOug2 zPO(9b3?HN;p6Bk&)V-O>s#